/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
11 package org.apache.hadoop.hbase.master.cleaner;
12
13 import static org.junit.Assert.assertEquals;
14 import static org.junit.Assert.assertFalse;
15 import static org.junit.Assert.assertTrue;
16 import static org.junit.Assert.fail;
17 import static org.mockito.Mockito.doThrow;
18 import static org.mockito.Mockito.spy;
19
20 import com.google.common.collect.Lists;
21
22 import java.io.IOException;
23 import java.lang.reflect.Field;
24 import java.util.ArrayList;
25 import java.util.Iterator;
26 import java.util.List;
27
28 import org.apache.commons.logging.Log;
29 import org.apache.commons.logging.LogFactory;
30 import org.apache.hadoop.conf.Configuration;
31 import org.apache.hadoop.fs.FileStatus;
32 import org.apache.hadoop.fs.FileSystem;
33 import org.apache.hadoop.fs.Path;
34 import org.apache.hadoop.hbase.Abortable;
35 import org.apache.hadoop.hbase.ChoreService;
36 import org.apache.hadoop.hbase.CoordinatedStateManager;
37 import org.apache.hadoop.hbase.HBaseTestingUtility;
38 import org.apache.hadoop.hbase.HConstants;
39 import org.apache.hadoop.hbase.Server;
40 import org.apache.hadoop.hbase.ServerName;
41 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
42 import org.apache.hadoop.hbase.client.ClusterConnection;
43 import org.apache.hadoop.hbase.replication.ReplicationException;
44 import org.apache.hadoop.hbase.replication.ReplicationFactory;
45 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
46 import org.apache.hadoop.hbase.replication.ReplicationPeers;
47 import org.apache.hadoop.hbase.replication.ReplicationQueues;
48 import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
49 import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
50 import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
51 import org.apache.hadoop.hbase.replication.regionserver.Replication;
52 import org.apache.hadoop.hbase.testclassification.MasterTests;
53 import org.apache.hadoop.hbase.testclassification.SmallTests;
54 import org.apache.hadoop.hbase.util.Pair;
55 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
56 import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
57 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
58 import org.apache.zookeeper.KeeperException;
59 import org.apache.zookeeper.data.Stat;
60 import org.junit.After;
61 import org.junit.AfterClass;
62 import org.junit.Before;
63 import org.junit.BeforeClass;
64 import org.junit.Test;
65 import org.junit.experimental.categories.Category;
66 import org.mockito.Mockito;
67
68 @Category({ MasterTests.class, SmallTests.class })
69 public class TestReplicationHFileCleaner {
70 private static final Log LOG = LogFactory.getLog(ReplicationQueuesZKImpl.class);
71 private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
72 private static Server server;
73 private static ReplicationQueues rq;
74 private static ReplicationPeers rp;
75 private static final String peerId = "TestReplicationHFileCleaner";
76 private static Configuration conf = TEST_UTIL.getConfiguration();
77 static FileSystem fs = null;
78 Path root;
79
80
81
82
83 @BeforeClass
84 public static void setUpBeforeClass() throws Exception {
85 TEST_UTIL.startMiniZKCluster();
86 server = new DummyServer();
87 conf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);
88 Replication.decorateMasterConfiguration(conf);
89 rp = ReplicationFactory.getReplicationPeers(server.getZooKeeper(), conf, server);
90 rp.init();
91
92 rq = ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
93 rq.init(server.getServerName().toString());
94 try {
95 fs = FileSystem.get(conf);
96 } finally {
97 if (fs != null) {
98 fs.close();
99 }
100 }
101 }
102
103
104
105
106 @AfterClass
107 public static void tearDownAfterClass() throws Exception {
108 TEST_UTIL.shutdownMiniZKCluster();
109 }
110
111 @Before
112 public void setup() throws ReplicationException, IOException {
113 root = TEST_UTIL.getDataTestDirOnTestFS();
114 rp.addPeer(peerId, new ReplicationPeerConfig().setClusterKey(TEST_UTIL.getClusterKey()));
115 rq.addPeerToHFileRefs(peerId);
116 }
117
118 @After
119 public void cleanup() throws ReplicationException {
120 try {
121 fs.delete(root, true);
122 } catch (IOException e) {
123 LOG.warn("Failed to delete files recursively from path " + root);
124 }
125 rp.removePeer(peerId);
126 }
127
128 @Test
129 public void testIsFileDeletable() throws IOException, ReplicationException {
130
131 Path file = new Path(root, "testIsFileDeletableWithNoHFileRefs");
132 fs.createNewFile(file);
133
134 assertTrue("Test file not created!", fs.exists(file));
135 ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
136 cleaner.setConf(conf);
137
138 assertTrue("Cleaner should allow to delete this file as there is no hfile reference node "
139 + "for it in the queue.",
140 cleaner.isFileDeletable(fs.getFileStatus(file)));
141
142 List<Pair<Path, Path>> files = new ArrayList<>(1);
143 files.add(new Pair<Path, Path>(null, file));
144
145 rq.addHFileRefs(peerId, files);
146
147 assertFalse("Cleaner should not allow to delete this file as there is a hfile reference node "
148 + "for it in the queue.",
149 cleaner.isFileDeletable(fs.getFileStatus(file)));
150 }
151
152 @Test
153 public void testGetDeletableFiles() throws Exception {
154
155 Path notDeletablefile = new Path(root, "testGetDeletableFiles_1");
156 fs.createNewFile(notDeletablefile);
157 assertTrue("Test file not created!", fs.exists(notDeletablefile));
158 Path deletablefile = new Path(root, "testGetDeletableFiles_2");
159 fs.createNewFile(deletablefile);
160 assertTrue("Test file not created!", fs.exists(deletablefile));
161
162 List<FileStatus> files = new ArrayList<FileStatus>(2);
163 FileStatus f = new FileStatus();
164 f.setPath(deletablefile);
165 files.add(f);
166 f = new FileStatus();
167 f.setPath(notDeletablefile);
168 files.add(f);
169
170 List<Pair<Path, Path>> hfiles = new ArrayList<>(1);
171 hfiles.add(new Pair<Path, Path>(null, notDeletablefile));
172
173 rq.addHFileRefs(peerId, hfiles);
174
175 ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
176 cleaner.setConf(conf);
177 Iterator<FileStatus> deletableFilesIterator = cleaner.getDeletableFiles(files).iterator();
178 int i = 0;
179 while (deletableFilesIterator.hasNext() && i < 2) {
180 i++;
181 }
182
183 if (i > 2) {
184 fail("File " + notDeletablefile
185 + " should not be deletable as its hfile reference node is not added.");
186 }
187 assertTrue(deletableFilesIterator.next().getPath().equals(deletablefile));
188 }
189
190
191
192
193
194 @Test(timeout = 15000)
195 public void testForDifferntHFileRefsZnodeVersion() throws Exception {
196
197 Path file = new Path(root, "testForDifferntHFileRefsZnodeVersion");
198 fs.createNewFile(file);
199
200 assertTrue("Test file not created!", fs.exists(file));
201 ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
202 cleaner.setConf(conf);
203
204 ReplicationQueuesClient replicationQueuesClient = Mockito.mock(ReplicationQueuesClient.class);
205
206 Mockito.when(replicationQueuesClient.getHFileRefsNodeChangeVersion()).thenReturn(1, 2);
207
208 Class<? extends ReplicationHFileCleaner> cleanerClass = cleaner.getClass();
209 Field rqc = cleanerClass.getDeclaredField("rqc");
210 rqc.setAccessible(true);
211 rqc.set(cleaner, replicationQueuesClient);
212
213 cleaner.isFileDeletable(fs.getFileStatus(file));
214 }
215
216
217
218
219 @Test
220 public void testZooKeeperAbort() throws Exception {
221 ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
222
223 List<FileStatus> dummyFiles =
224 Lists.newArrayList(new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path(
225 "hfile1")), new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path(
226 "hfile2")));
227
228 FaultyZooKeeperWatcher faultyZK =
229 new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null);
230 try {
231 faultyZK.init();
232 cleaner.setConf(conf, faultyZK);
233
234 Iterable<FileStatus> toDelete = cleaner.getDeletableFiles(dummyFiles);
235 assertFalse(toDelete.iterator().hasNext());
236 assertFalse(cleaner.isStopped());
237 } finally {
238 faultyZK.close();
239 }
240
241
242 cleaner = new ReplicationHFileCleaner();
243 ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testZooKeeperAbort-normal", null);
244 try {
245 cleaner.setConf(conf, zkw);
246 Iterable<FileStatus> filesToDelete = cleaner.getDeletableFiles(dummyFiles);
247 Iterator<FileStatus> iter = filesToDelete.iterator();
248 assertTrue(iter.hasNext());
249 assertEquals(new Path("hfile1"), iter.next().getPath());
250 assertTrue(iter.hasNext());
251 assertEquals(new Path("hfile2"), iter.next().getPath());
252 assertFalse(iter.hasNext());
253 } finally {
254 zkw.close();
255 }
256 }
257
258 static class DummyServer implements Server {
259
260 @Override
261 public Configuration getConfiguration() {
262 return TEST_UTIL.getConfiguration();
263 }
264
265 @Override
266 public ZooKeeperWatcher getZooKeeper() {
267 try {
268 return new ZooKeeperWatcher(getConfiguration(), "dummy server", this);
269 } catch (IOException e) {
270 e.printStackTrace();
271 }
272 return null;
273 }
274
275 @Override
276 public CoordinatedStateManager getCoordinatedStateManager() {
277 return null;
278 }
279
280 @Override
281 public ClusterConnection getConnection() {
282 return null;
283 }
284
285 @Override
286 public MetaTableLocator getMetaTableLocator() {
287 return null;
288 }
289
290 @Override
291 public ServerName getServerName() {
292 return ServerName.valueOf("regionserver,60020,000000");
293 }
294
295 @Override
296 public void abort(String why, Throwable e) {
297 }
298
299 @Override
300 public boolean isAborted() {
301 return false;
302 }
303
304 @Override
305 public void stop(String why) {
306 }
307
308 @Override
309 public boolean isStopped() {
310 return false;
311 }
312
313 @Override
314 public ChoreService getChoreService() {
315 return null;
316 }
317 }
318
319 static class FaultyZooKeeperWatcher extends ZooKeeperWatcher {
320 private RecoverableZooKeeper zk;
321 public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable abortable)
322 throws ZooKeeperConnectionException, IOException {
323 super(conf, identifier, abortable);
324 }
325
326 public void init() throws Exception {
327 this.zk = spy(super.getRecoverableZooKeeper());
328 doThrow(new KeeperException.ConnectionLossException())
329 .when(zk).getData("/hbase/replication/hfile-refs", null, new Stat());
330 }
331
332 public RecoverableZooKeeper getRecoverableZooKeeper() {
333 return zk;
334 }
335 }
336 }