/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
11 package org.apache.hadoop.hbase.replication.master;
12
13 import com.google.common.base.Predicate;
14 import com.google.common.collect.ImmutableSet;
15 import com.google.common.collect.Iterables;
16 import com.google.common.collect.Sets;
17
18 import java.io.IOException;
19 import java.util.Collections;
20 import java.util.List;
21 import java.util.Set;
22
23 import org.apache.commons.logging.Log;
24 import org.apache.commons.logging.LogFactory;
25 import org.apache.hadoop.conf.Configuration;
26 import org.apache.hadoop.fs.FileStatus;
27 import org.apache.hadoop.hbase.Abortable;
28 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
29 import org.apache.hadoop.hbase.HConstants;
30 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
31 import org.apache.hadoop.hbase.classification.InterfaceAudience;
32 import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
33 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
34 import org.apache.hadoop.hbase.replication.ReplicationFactory;
35 import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
36 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
37 import org.apache.zookeeper.KeeperException;
38
39
40
41
42
43 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
44 public class ReplicationHFileCleaner extends BaseHFileCleanerDelegate {
45 private static final Log LOG = LogFactory.getLog(ReplicationHFileCleaner.class);
46 private ZooKeeperWatcher zkw;
47 private ReplicationQueuesClient rqc;
48 private boolean stopped = false;
49
50 @Override
51 public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
52
53
54 if (this.getConf() == null) {
55 return files;
56 }
57
58 final Set<String> hfileRefs;
59 try {
60
61
62 hfileRefs = loadHFileRefsFromPeers();
63 } catch (KeeperException e) {
64 LOG.warn("Failed to read hfile references from zookeeper, skipping checking deletable files");
65 return Collections.emptyList();
66 }
67 return Iterables.filter(files, new Predicate<FileStatus>() {
68 @Override
69 public boolean apply(FileStatus file) {
70 String hfile = file.getPath().getName();
71 boolean foundHFileRefInQueue = hfileRefs.contains(hfile);
72 if (LOG.isDebugEnabled()) {
73 if (foundHFileRefInQueue) {
74 LOG.debug("Found hfile reference in ZK, keeping: " + hfile);
75 } else {
76 LOG.debug("Did not find hfile reference in ZK, deleting: " + hfile);
77 }
78 }
79 return !foundHFileRefInQueue;
80 }
81 });
82 }
83
84
85
86
87
88
89 private Set<String> loadHFileRefsFromPeers() throws KeeperException {
90 Set<String> hfileRefs = Sets.newHashSet();
91 List<String> listOfPeers;
92 for (int retry = 0;; retry++) {
93 int v0 = rqc.getHFileRefsNodeChangeVersion();
94 hfileRefs.clear();
95 listOfPeers = rqc.getAllPeersFromHFileRefsQueue();
96 if (listOfPeers == null) {
97 LOG.debug("Didn't find any peers with hfile references, won't prevent any deletions.");
98 return ImmutableSet.of();
99 }
100 for (String id : listOfPeers) {
101 List<String> peerHFileRefs = rqc.getReplicableHFiles(id);
102 if (peerHFileRefs != null) {
103 hfileRefs.addAll(peerHFileRefs);
104 }
105 }
106 int v1 = rqc.getHFileRefsNodeChangeVersion();
107 if (v0 == v1) {
108 return hfileRefs;
109 }
110 LOG.debug(String.format("Replication hfile references node cversion changed from "
111 + "%d to %d, retry = %d", v0, v1, retry));
112 }
113 }
114
115 @Override
116 public void setConf(Configuration config) {
117
118 if (!(config.getBoolean(HConstants.REPLICATION_ENABLE_KEY,
119 HConstants.REPLICATION_ENABLE_DEFAULT) && config.getBoolean(
120 HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
121 HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT))) {
122 LOG.warn(HConstants.REPLICATION_ENABLE_KEY
123 + " is not enabled so allowing all hfile references to be deleted. Better to remove "
124 + ReplicationHFileCleaner.class + " from " + HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS
125 + " configuration.");
126 return;
127 }
128
129
130 Configuration conf = new Configuration(config);
131 try {
132 setConf(conf, new ZooKeeperWatcher(conf, "replicationHFileCleaner", null));
133 } catch (IOException e) {
134 LOG.error("Error while configuring " + this.getClass().getName(), e);
135 }
136 }
137
138 @InterfaceAudience.Private
139 public void setConf(Configuration conf, ZooKeeperWatcher zk) {
140 super.setConf(conf);
141 try {
142 initReplicationQueuesClient(conf, zk);
143 } catch (IOException e) {
144 LOG.error("Error while configuring " + this.getClass().getName(), e);
145 }
146 }
147
148 private void initReplicationQueuesClient(Configuration conf, ZooKeeperWatcher zk)
149 throws ZooKeeperConnectionException, IOException {
150 this.zkw = zk;
151 this.rqc = ReplicationFactory.getReplicationQueuesClient(zkw, conf, new WarnOnlyAbortable());
152 }
153
154 @Override
155 public void stop(String why) {
156 if (this.stopped) {
157 return;
158 }
159 this.stopped = true;
160 if (this.zkw != null) {
161 LOG.info("Stopping " + this.zkw);
162 this.zkw.close();
163 }
164 }
165
166 @Override
167 public boolean isStopped() {
168 return this.stopped;
169 }
170
171 @Override
172 public boolean isFileDeletable(FileStatus fStat) {
173 Set<String> hfileRefsFromQueue;
174
175
176 if (getConf() == null) {
177 return true;
178 }
179
180 try {
181 hfileRefsFromQueue = loadHFileRefsFromPeers();
182 } catch (KeeperException e) {
183 LOG.warn("Failed to read hfile references from zookeeper, skipping checking deletable "
184 + "file for " + fStat.getPath());
185 return false;
186 }
187 return !hfileRefsFromQueue.contains(fStat.getPath().getName());
188 }
189
190 private static class WarnOnlyAbortable implements Abortable {
191 @Override
192 public void abort(String why, Throwable e) {
193 LOG.warn("ReplicationHFileCleaner received abort, ignoring. Reason: " + why);
194 if (LOG.isDebugEnabled()) {
195 LOG.debug(e);
196 }
197 }
198
199 @Override
200 public boolean isAborted() {
201 return false;
202 }
203 }
204 }