View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.master;
20  
21  import static org.apache.hadoop.hbase.util.HFileArchiveTestingUtil.assertArchiveEqualToOriginal;
22  import static org.junit.Assert.assertEquals;
23  import static org.junit.Assert.assertFalse;
24  import static org.junit.Assert.assertTrue;
25  import static org.mockito.Mockito.doReturn;
26  import static org.mockito.Mockito.spy;
27  
28  import java.io.IOException;
29  import java.util.ArrayList;
30  import java.util.List;
31  import java.util.Map;
32  import java.util.SortedMap;
33  import java.util.TreeMap;
34  
35  import org.apache.commons.logging.Log;
36  import org.apache.commons.logging.LogFactory;
37  import org.apache.hadoop.conf.Configuration;
38  import org.apache.hadoop.fs.FSDataOutputStream;
39  import org.apache.hadoop.fs.FileStatus;
40  import org.apache.hadoop.fs.FileSystem;
41  import org.apache.hadoop.fs.Path;
42  import org.apache.hadoop.hbase.ChoreService;
43  import org.apache.hadoop.hbase.CoordinatedStateManager;
44  import org.apache.hadoop.hbase.HBaseTestingUtility;
45  import org.apache.hadoop.hbase.HColumnDescriptor;
46  import org.apache.hadoop.hbase.HConstants;
47  import org.apache.hadoop.hbase.HRegionInfo;
48  import org.apache.hadoop.hbase.HTableDescriptor;
49  import org.apache.hadoop.hbase.MetaMockingUtil;
50  import org.apache.hadoop.hbase.NamespaceDescriptor;
51  import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
52  import org.apache.hadoop.hbase.ProcedureInfo;
53  import org.apache.hadoop.hbase.Server;
54  import org.apache.hadoop.hbase.ServerName;
55  import org.apache.hadoop.hbase.testclassification.SmallTests;
56  import org.apache.hadoop.hbase.TableDescriptors;
57  import org.apache.hadoop.hbase.TableName;
58  import org.apache.hadoop.hbase.client.ClusterConnection;
59  import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
60  import org.apache.hadoop.hbase.client.Result;
61  import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
62  import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination;
63  import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails;
64  import org.apache.hadoop.hbase.executor.ExecutorService;
65  import org.apache.hadoop.hbase.io.Reference;
66  import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
67  import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
68  import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
69  import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
70  import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
71  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
72  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
73  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
74  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
75  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse;
76  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
77  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
78  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction;
79  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
80  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
81  import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
82  import org.apache.hadoop.hbase.regionserver.HStore;
83  import org.apache.hadoop.hbase.security.User;
84  import org.apache.hadoop.hbase.util.Bytes;
85  import org.apache.hadoop.hbase.util.FSUtils;
86  import org.apache.hadoop.hbase.util.HFileArchiveUtil;
87  import org.apache.hadoop.hbase.util.Triple;
88  import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
89  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
90  import org.junit.Test;
91  import org.junit.experimental.categories.Category;
92  import org.mockito.Mockito;
93  import org.mockito.invocation.InvocationOnMock;
94  import org.mockito.stubbing.Answer;
95  
96  import com.google.protobuf.RpcController;
97  import com.google.protobuf.Service;
98  import com.google.protobuf.ServiceException;
99  
100 @Category(SmallTests.class)
101 public class TestCatalogJanitor {
102   private static final Log LOG = LogFactory.getLog(TestCatalogJanitor.class);
103 
104   /**
105    * Pseudo server for below tests.
106    * Be sure to call stop on the way out else could leave some mess around.
107    */
108   class MockServer implements Server {
109     private final ClusterConnection connection;
110     private final Configuration c;
111 
112     MockServer(final HBaseTestingUtility htu)
113     throws NotAllMetaRegionsOnlineException, IOException, InterruptedException {
114       this.c = htu.getConfiguration();
115       ClientProtos.ClientService.BlockingInterface ri =
116         Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
117       MutateResponse.Builder builder = MutateResponse.newBuilder();
118       builder.setProcessed(true);
119       try {
120         Mockito.when(ri.mutate(
121           (RpcController)Mockito.any(), (MutateRequest)Mockito.any())).
122             thenReturn(builder.build());
123       } catch (ServiceException se) {
124         throw ProtobufUtil.getRemoteException(se);
125       }
126       try {
127         Mockito.when(ri.multi(
128           (RpcController)Mockito.any(), (MultiRequest)Mockito.any())).
129             thenAnswer(new Answer<MultiResponse>() {
130               @Override
131               public MultiResponse answer(InvocationOnMock invocation) throws Throwable {
132                 return buildMultiResponse( (MultiRequest)invocation.getArguments()[1]);
133               }
134             });
135       } catch (ServiceException se) {
136         throw ProtobufUtil.getRemoteException(se);
137       }
138       // Mock an HConnection and a AdminProtocol implementation.  Have the
139       // HConnection return the HRI.  Have the HRI return a few mocked up responses
140       // to make our test work.
141       this.connection =
142         HConnectionTestingUtility.getMockedConnectionAndDecorate(this.c,
143           Mockito.mock(AdminProtos.AdminService.BlockingInterface.class), ri,
144             ServerName.valueOf("example.org,12345,6789"),
145           HRegionInfo.FIRST_META_REGIONINFO);
146       // Set hbase.rootdir into test dir.
147       FileSystem fs = FileSystem.get(this.c);
148       Path rootdir = FSUtils.getRootDir(this.c);
149       FSUtils.setRootDir(this.c, rootdir);
150       AdminProtos.AdminService.BlockingInterface hri =
151         Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
152     }
153 
154     @Override
155     public ClusterConnection getConnection() {
156       return this.connection;
157     }
158 
159     @Override
160     public MetaTableLocator getMetaTableLocator() {
161       return null;
162     }
163 
164     @Override
165     public Configuration getConfiguration() {
166       return this.c;
167     }
168 
169     @Override
170     public ServerName getServerName() {
171       return ServerName.valueOf("mockserver.example.org", 1234, -1L);
172     }
173 
174     @Override
175     public ZooKeeperWatcher getZooKeeper() {
176       return null;
177     }
178 
179     @Override
180     public CoordinatedStateManager getCoordinatedStateManager() {
181       BaseCoordinatedStateManager m = Mockito.mock(BaseCoordinatedStateManager.class);
182       SplitLogManagerCoordination c = Mockito.mock(SplitLogManagerCoordination.class);
183       Mockito.when(m.getSplitLogManagerCoordination()).thenReturn(c);
184       SplitLogManagerDetails d = Mockito.mock(SplitLogManagerDetails.class);
185       Mockito.when(c.getDetails()).thenReturn(d);
186       return m;
187     }
188 
189     @Override
190     public void abort(String why, Throwable e) {
191       //no-op
192     }
193 
194     @Override
195     public boolean isAborted() {
196       return false;
197     }
198 
199     @Override
200     public boolean isStopped() {
201       return false;
202     }
203 
204     @Override
205     public void stop(String why) {
206     }
207 
208     @Override
209     public ChoreService getChoreService() {
210       return null;
211     }
212   }
213 
214   /**
215    * Mock MasterServices for tests below.
216    */
217   class MockMasterServices implements MasterServices {
218     private final MasterFileSystem mfs;
219     private final AssignmentManager asm;
220     private final ServerManager sm;
221 
222     MockMasterServices(final Server server) throws IOException {
223       this.mfs = new MasterFileSystem(server, this);
224       this.asm = Mockito.mock(AssignmentManager.class);
225       this.sm = Mockito.mock(ServerManager.class);
226     }
227 
228     @Override
229     public void checkTableModifiable(TableName tableName) throws IOException {
230       //no-op
231     }
232 
233     @Override
234     public long createTable(
235         final HTableDescriptor desc,
236         final byte[][] splitKeys,
237         final long nonceGroup,
238         final long nonce) throws IOException {
239       // no-op
240       return -1;
241     }
242 
243     @Override
244     public long createSystemTable(final HTableDescriptor hTableDescriptor) throws IOException {
245       return -1;
246     }
247 
248     @Override
249     public SnapshotManager getSnapshotManager() {
250       return null;
251     }
252 
253     @Override
254     public MasterProcedureManagerHost getMasterProcedureManagerHost() {
255       return null;
256     }
257 
258     @Override
259     public AssignmentManager getAssignmentManager() {
260       return this.asm;
261     }
262 
263     @Override
264     public ExecutorService getExecutorService() {
265       return null;
266     }
267 
268     @Override
269     public ChoreService getChoreService() {
270       return null;
271     }
272 
273     @Override
274     public MasterFileSystem getMasterFileSystem() {
275       return this.mfs;
276     }
277 
278     @Override
279     public MasterCoprocessorHost getMasterCoprocessorHost() {
280       return null;
281     }
282 
283     @Override
284     public MasterQuotaManager getMasterQuotaManager() {
285       return null;
286     }
287 
288     @Override
289     public ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
290       return null;
291     }
292 
293     @Override
294     public ServerManager getServerManager() {
295       return sm;
296     }
297 
298     @Override
299     public ZooKeeperWatcher getZooKeeper() {
300       return null;
301     }
302 
303     @Override
304     public CoordinatedStateManager getCoordinatedStateManager() {
305       return null;
306     }
307 
308     @Override
309     public MetaTableLocator getMetaTableLocator() {
310       return null;
311     }
312 
313     @Override
314     public ClusterConnection getConnection() {
315       return null;
316     }
317 
318     @Override
319     public Configuration getConfiguration() {
320       return mfs.conf;
321     }
322 
323     @Override
324     public ServerName getServerName() {
325       return null;
326     }
327 
328     @Override
329     public void abort(String why, Throwable e) {
330       //no-op
331     }
332 
333     @Override
334     public boolean isAborted() {
335       return false;
336     }
337 
338     private boolean stopped = false;
339 
340     @Override
341     public void stop(String why) {
342       stopped = true;
343     }
344 
345     @Override
346     public boolean isStopped() {
347       return stopped;
348     }
349 
350     @Override
351     public TableDescriptors getTableDescriptors() {
352       return new TableDescriptors() {
353         @Override
354         public HTableDescriptor remove(TableName tablename) throws IOException {
355           // TODO Auto-generated method stub
356           return null;
357         }
358 
359         @Override
360         public Map<String, HTableDescriptor> getAll() throws IOException {
361           // TODO Auto-generated method stub
362           return null;
363         }
364 
365         @Override
366         public HTableDescriptor get(TableName tablename)
367         throws IOException {
368           return createHTableDescriptor();
369         }
370 
371         @Override
372         public Map<String, HTableDescriptor> getByNamespace(String name) throws IOException {
373           return null;
374         }
375 
376         @Override
377         public void add(HTableDescriptor htd) throws IOException {
378           // TODO Auto-generated method stub
379 
380         }
381         @Override
382         public void setCacheOn() throws IOException {
383         }
384 
385         @Override
386         public void setCacheOff() throws IOException {
387         }
388       };
389     }
390 
391     @Override
392     public boolean isServerCrashProcessingEnabled() {
393       return true;
394     }
395 
396     @Override
397     public boolean registerService(Service instance) {
398       return false;
399     }
400 
401     @Override
402     public void createNamespace(
403         final NamespaceDescriptor descriptor,
404         final long nonceGroup,
405         final long nonce) throws IOException {
406       //To change body of implemented methods use File | Settings | File Templates.
407     }
408 
409     @Override
410     public void createNamespaceSync(
411         final NamespaceDescriptor descriptor,
412         final long nonceGroup,
413         final long nonce,
414         final boolean executeCoprocessor) throws IOException {
415       //To change body of implemented methods use File | Settings | File Templates.
416     }
417 
418     @Override
419     public void modifyNamespace(
420         final NamespaceDescriptor descriptor,
421         final long nonceGroup,
422         final long nonce) throws IOException {
423       //To change body of implemented methods use File | Settings | File Templates.
424     }
425 
426     @Override
427     public void deleteNamespace(
428         final String name,
429         final long nonceGroup,
430         final long nonce) throws IOException {
431       //To change body of implemented methods use File | Settings | File Templates.
432     }
433 
434     @Override
435     public NamespaceDescriptor getNamespaceDescriptor(String name) throws IOException {
436       return null;  //To change body of implemented methods use File | Settings | File Templates.
437     }
438 
439     @Override
440     public List<String> listNamespaces() throws IOException {
441       return null;
442     }
443 
444     @Override
445     public List<NamespaceDescriptor> listNamespaceDescriptors() throws IOException {
446       return null;  //To change body of implemented methods use File | Settings | File Templates.
447     }
448 
449     @Override
450     public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning)
451         throws IOException {
452       return false;  //To change body of implemented methods use File | Settings | File Templates.
453     }
454 
455     @Override
456     public List<ProcedureInfo> listProcedures() throws IOException {
457       return null;  //To change body of implemented methods use File | Settings | File Templates.
458     }
459 
460     @Override
461     public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
462       return null;  //To change body of implemented methods use File | Settings | File Templates.
463     }
464 
465     @Override
466     public List<TableName> listTableNamesByNamespace(String name) throws IOException {
467       return null;
468     }
469 
470     @Override
471     public long deleteTable(
472         final TableName tableName,
473         final long nonceGroup,
474         final long nonce) throws IOException {
475       return -1;
476     }
477 
478     public void truncateTable(
479         final TableName tableName,
480         final boolean preserveSplits,
481         final long nonceGroup,
482         final long nonce) throws IOException {
483     }
484 
485     @Override
486     public void modifyTable(
487         final TableName tableName,
488         final HTableDescriptor descriptor,
489         final long nonceGroup,
490         final long nonce) throws IOException {
491     }
492 
493     @Override
494     public long enableTable(
495         final TableName tableName,
496         final long nonceGroup,
497         final long nonce) throws IOException {
498       return -1;
499     }
500     public LoadBalancer getLoadBalancer() {
501       return null;
502     }
503 
504     @Override
505     public long disableTable(
506         TableName tableName,
507         final long nonceGroup,
508         final long nonce) throws IOException {
509       return -1;
510     }
511 
512     @Override
513     public void addColumn(
514         final TableName tableName,
515         final HColumnDescriptor columnDescriptor,
516         final long nonceGroup,
517         final long nonce) throws IOException { }
518 
519     @Override
520     public void modifyColumn(
521         final TableName tableName,
522         final HColumnDescriptor descriptor,
523         final long nonceGroup,
524         final long nonce) throws IOException { }
525 
526     @Override
527     public void deleteColumn(
528         final TableName tableName,
529         final byte[] columnName,
530         final long nonceGroup,
531         final long nonce) throws IOException { }
532 
533     @Override
534     public TableLockManager getTableLockManager() {
535       return null;
536     }
537 
538     @Override
539     public TableNamespaceManager getTableNamespaceManager() {
540       return null;
541     }
542 
543     @Override
544     public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b,
545         boolean forcible, User user) throws IOException {
546     }
547 
548     @Override
549     public boolean isInitialized() {
550       // Auto-generated method stub
551       return false;
552     }
553 
554     @Override
555     public boolean isInMaintenanceMode() {
556       return false;
557     }
558 
559     @Override
560     public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
561       // Auto-generated method stub
562       return 0;
563     }
564 
565     @Override
566     public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
567       // Auto-generated method stub
568       return 0;
569     }
570 
571     @Override
572     public String getRegionServerVersion(ServerName sn) {
573       return null;
574     }
575 
576     @Override
577     public void checkIfShouldMoveSystemRegionAsync() {
578     }
579   }
580 
581   @Test
582   public void testCleanParent() throws IOException, InterruptedException {
583     HBaseTestingUtility htu = new HBaseTestingUtility();
584     setRootDirAndCleanIt(htu, "testCleanParent");
585     Server server = new MockServer(htu);
586     try {
587       MasterServices services = new MockMasterServices(server);
588       CatalogJanitor janitor = new CatalogJanitor(server, services);
589       // Create regions.
590       HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
591       htd.addFamily(new HColumnDescriptor("f"));
592       HRegionInfo parent =
593         new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
594             Bytes.toBytes("eee"));
595       HRegionInfo splita =
596         new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
597             Bytes.toBytes("ccc"));
598       HRegionInfo splitb =
599         new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
600             Bytes.toBytes("eee"));
601       // Test that when both daughter regions are in place, that we do not
602       // remove the parent.
603       Result r = createResult(parent, splita, splitb);
604       // Add a reference under splitA directory so we don't clear out the parent.
605       Path rootdir = services.getMasterFileSystem().getRootDir();
606       Path tabledir =
607         FSUtils.getTableDir(rootdir, htd.getTableName());
608       Path storedir = HStore.getStoreHomedir(tabledir, splita,
609           htd.getColumnFamilies()[0].getName());
610       Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
611       long now = System.currentTimeMillis();
612       // Reference name has this format: StoreFile#REF_NAME_PARSER
613       Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
614       FileSystem fs = services.getMasterFileSystem().getFileSystem();
615       Path path = ref.write(fs, p);
616       assertTrue(fs.exists(path));
617       assertFalse(janitor.cleanParent(parent, r));
618       // Remove the reference file and try again.
619       assertTrue(fs.delete(p, true));
620       assertTrue(janitor.cleanParent(parent, r));
621     } finally {
622       server.stop("shutdown");
623     }
624   }
625 
626   /**
627    * Make sure parent gets cleaned up even if daughter is cleaned up before it.
628    * @throws IOException
629    * @throws InterruptedException
630    */
631   @Test
632   public void testParentCleanedEvenIfDaughterGoneFirst()
633   throws IOException, InterruptedException {
634     parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
635       "testParentCleanedEvenIfDaughterGoneFirst", Bytes.toBytes("eee"));
636   }
637 
638   /**
639    * Make sure last parent with empty end key gets cleaned up even if daughter is cleaned up before it.
640    * @throws IOException
641    * @throws InterruptedException
642    */
643   @Test
644   public void testLastParentCleanedEvenIfDaughterGoneFirst()
645   throws IOException, InterruptedException {
646     parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
647       "testLastParentCleanedEvenIfDaughterGoneFirst", new byte[0]);
648   }
649 
  /**
   * Make sure parent with specified end key gets cleaned up even if daughter is cleaned up
   * before it.
   *
   * Builds a three-generation split tree (parent -> splita/splitb -> their
   * daughters), checks the SplitParentFirstComparator ordering, then drives
   * cleanParent() through the tree in daughter-before-parent order.
   *
   * @param rootDir the test case name, used as the HBase testing utility root
   * @param lastEndKey the end key of the split parent
   * @throws IOException
   * @throws InterruptedException
   */
  private void parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
  final String rootDir, final byte[] lastEndKey)
  throws IOException, InterruptedException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, rootDir);
    Server server = new MockServer(htu);
    MasterServices services = new MockMasterServices(server);
    CatalogJanitor janitor = new CatalogJanitor(server, services);
    final HTableDescriptor htd = createHTableDescriptor();

    // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.

    // Parent
    HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
      lastEndKey);
    // Sleep a second else the encoded name on these regions comes out
    // same for all with same start key and made in same second.
    Thread.sleep(1001);

    // Daughter a
    HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
      Bytes.toBytes("ccc"));
    Thread.sleep(1001);
    // Make daughters of daughter a; splitaa and splitab.
    HRegionInfo splitaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
      Bytes.toBytes("bbb"));
    HRegionInfo splitab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"),
      Bytes.toBytes("ccc"));

    // Daughter b
    HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
      lastEndKey);
    Thread.sleep(1001);
    // Make Daughters of daughterb; splitba and splitbb.
    HRegionInfo splitba = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
      Bytes.toBytes("ddd"));
    HRegionInfo splitbb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ddd"),
    lastEndKey);

    // First test that our Comparator works right up in CatalogJanitor.
    // Just for kicks.
    SortedMap<HRegionInfo, Result> regions =
      new TreeMap<HRegionInfo, Result>(new CatalogJanitor.SplitParentFirstComparator());
    // Now make sure that this regions map sorts as we expect it to:
    // parent first, then splita, then splitb (insertion order is shuffled
    // on purpose so the comparator does the work).
    regions.put(parent, createResult(parent, splita, splitb));
    regions.put(splitb, createResult(splitb, splitba, splitbb));
    regions.put(splita, createResult(splita, splitaa, splitab));
    // Assert its properly sorted.
    int index = 0;
    for (Map.Entry<HRegionInfo, Result> e: regions.entrySet()) {
      if (index == 0) {
        assertTrue(e.getKey().getEncodedName().equals(parent.getEncodedName()));
      } else if (index == 1) {
        assertTrue(e.getKey().getEncodedName().equals(splita.getEncodedName()));
      } else if (index == 2) {
        assertTrue(e.getKey().getEncodedName().equals(splitb.getEncodedName()));
      }
      index++;
    }

    // Now play around with the cleanParent function.  Create a ref from splita
    // up to the parent.
    Path splitaRef =
      createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);
    // Make sure actual super parent sticks around because splita has a ref.
    assertFalse(janitor.cleanParent(parent, regions.get(parent)));

    //splitba, and split bb, do not have dirs in fs.  That means that if
    // we test splitb, it should get cleaned up.
    assertTrue(janitor.cleanParent(splitb, regions.get(splitb)));

    // Now remove ref from splita to parent... so parent can be let go and so
    // the daughter splita can be split (can't split if still references).
    // BUT make the timing such that the daughter gets cleaned up before we
    // can get a chance to let go of the parent.
    FileSystem fs = FileSystem.get(htu.getConfiguration());
    assertTrue(fs.delete(splitaRef, true));
    // Create the refs from daughters of splita.
    Path splitaaRef =
      createReferences(services, htd, splita, splitaa, Bytes.toBytes("bbb"), false);
    Path splitabRef =
      createReferences(services, htd, splita, splitab, Bytes.toBytes("bbb"), true);

    // Test splita.  It should stick around because references from splitab, etc.
    assertFalse(janitor.cleanParent(splita, regions.get(splita)));

    // Now clean up parent daughter first.  Remove references from its daughters.
    assertTrue(fs.delete(splitaaRef, true));
    assertTrue(fs.delete(splitabRef, true));
    assertTrue(janitor.cleanParent(splita, regions.get(splita)));

    // Super parent should get cleaned up now both splita and splitb are gone.
    assertTrue(janitor.cleanParent(parent, regions.get(parent)));

    services.stop("test finished");
    janitor.cancel(true);
  }
755 
  /**
   * CatalogJanitor.scan() should not clean parent regions if their own
   * parents are still referencing them. This ensures that grandfather regions
   * do not point to deleted parent regions.
   */
  @Test
  public void testScanDoesNotCleanRegionsWithExistingParents() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, "testScanDoesNotCleanRegionsWithExistingParents");
    Server server = new MockServer(htu);
    MasterServices services = new MockMasterServices(server);

    final HTableDescriptor htd = createHTableDescriptor();

    // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.

    // Parent
    HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
      new byte[0], true);
    // Sleep a second else the encoded name on these regions comes out
    // same for all with same start key and made in same second.
    Thread.sleep(1001);

    // Daughter a
    HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
      Bytes.toBytes("ccc"), true);
    Thread.sleep(1001);
    // Make daughters of daughter a; splitaa and splitab.
    HRegionInfo splitaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
      Bytes.toBytes("bbb"), false);
    HRegionInfo splitab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"),
      Bytes.toBytes("ccc"), false);

    // Daughter b
    HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
        new byte[0]);
    Thread.sleep(1001);

    final Map<HRegionInfo, Result> splitParents =
        new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
    splitParents.put(parent, createResult(parent, splita, splitb));
    splita.setOffline(true); //simulate that splita goes offline when it is split
    splitParents.put(splita, createResult(splita, splitaa,splitab));

    final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
    // Spy the janitor and stub getMergedRegionsAndSplitParents() so scan()
    // operates on exactly the fixture maps built above.
    CatalogJanitor janitor = spy(new CatalogJanitor(server, services));
    doReturn(new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
            10, mergedRegions, splitParents)).when(janitor)
        .getMergedRegionsAndSplitParents();

    //create ref from splita to parent
    Path splitaRef =
        createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);

    //parent and A should not be removed: splita still references parent, and
    //parent still references splita.
    assertEquals(0, janitor.scan());

    //now delete the ref
    FileSystem fs = FileSystem.get(htu.getConfiguration());
    assertTrue(fs.delete(splitaRef, true));

    //now, both parent, and splita can be deleted
    assertEquals(2, janitor.scan());

    services.stop("test finished");
    janitor.cancel(true);
  }
823 
824   /**
825    * Test that we correctly archive all the storefiles when a region is deleted
826    * @throws Exception
827    */
828   @Test
829   public void testSplitParentFirstComparator() {
830     SplitParentFirstComparator comp = new SplitParentFirstComparator();
831     final HTableDescriptor htd = createHTableDescriptor();
832 
833     /*  Region splits:
834      *
835      *  rootRegion --- firstRegion --- firstRegiona
836      *              |               |- firstRegionb
837      *              |
838      *              |- lastRegion --- lastRegiona  --- lastRegionaa
839      *                             |                |- lastRegionab
840      *                             |- lastRegionb
841      *
842      *  rootRegion   :   []  - []
843      *  firstRegion  :   []  - bbb
844      *  lastRegion   :   bbb - []
845      *  firstRegiona :   []  - aaa
846      *  firstRegionb :   aaa - bbb
847      *  lastRegiona  :   bbb - ddd
848      *  lastRegionb  :   ddd - []
849      */
850 
851     // root region
852     HRegionInfo rootRegion = new HRegionInfo(htd.getTableName(),
853       HConstants.EMPTY_START_ROW,
854       HConstants.EMPTY_END_ROW, true);
855     HRegionInfo firstRegion = new HRegionInfo(htd.getTableName(),
856       HConstants.EMPTY_START_ROW,
857       Bytes.toBytes("bbb"), true);
858     HRegionInfo lastRegion = new HRegionInfo(htd.getTableName(),
859       Bytes.toBytes("bbb"),
860       HConstants.EMPTY_END_ROW, true);
861 
862     assertTrue(comp.compare(rootRegion, rootRegion) == 0);
863     assertTrue(comp.compare(firstRegion, firstRegion) == 0);
864     assertTrue(comp.compare(lastRegion, lastRegion) == 0);
865     assertTrue(comp.compare(rootRegion, firstRegion) < 0);
866     assertTrue(comp.compare(rootRegion, lastRegion) < 0);
867     assertTrue(comp.compare(firstRegion, lastRegion) < 0);
868 
869     //first region split into a, b
870     HRegionInfo firstRegiona = new HRegionInfo(htd.getTableName(),
871       HConstants.EMPTY_START_ROW,
872       Bytes.toBytes("aaa"), true);
873     HRegionInfo firstRegionb = new HRegionInfo(htd.getTableName(),
874         Bytes.toBytes("aaa"),
875       Bytes.toBytes("bbb"), true);
876     //last region split into a, b
877     HRegionInfo lastRegiona = new HRegionInfo(htd.getTableName(),
878       Bytes.toBytes("bbb"),
879       Bytes.toBytes("ddd"), true);
880     HRegionInfo lastRegionb = new HRegionInfo(htd.getTableName(),
881       Bytes.toBytes("ddd"),
882       HConstants.EMPTY_END_ROW, true);
883 
884     assertTrue(comp.compare(firstRegiona, firstRegiona) == 0);
885     assertTrue(comp.compare(firstRegionb, firstRegionb) == 0);
886     assertTrue(comp.compare(rootRegion, firstRegiona) < 0);
887     assertTrue(comp.compare(rootRegion, firstRegionb) < 0);
888     assertTrue(comp.compare(firstRegion, firstRegiona) < 0);
889     assertTrue(comp.compare(firstRegion, firstRegionb) < 0);
890     assertTrue(comp.compare(firstRegiona, firstRegionb) < 0);
891 
892     assertTrue(comp.compare(lastRegiona, lastRegiona) == 0);
893     assertTrue(comp.compare(lastRegionb, lastRegionb) == 0);
894     assertTrue(comp.compare(rootRegion, lastRegiona) < 0);
895     assertTrue(comp.compare(rootRegion, lastRegionb) < 0);
896     assertTrue(comp.compare(lastRegion, lastRegiona) < 0);
897     assertTrue(comp.compare(lastRegion, lastRegionb) < 0);
898     assertTrue(comp.compare(lastRegiona, lastRegionb) < 0);
899 
900     assertTrue(comp.compare(firstRegiona, lastRegiona) < 0);
901     assertTrue(comp.compare(firstRegiona, lastRegionb) < 0);
902     assertTrue(comp.compare(firstRegionb, lastRegiona) < 0);
903     assertTrue(comp.compare(firstRegionb, lastRegionb) < 0);
904 
905     HRegionInfo lastRegionaa = new HRegionInfo(htd.getTableName(),
906       Bytes.toBytes("bbb"),
907       Bytes.toBytes("ccc"), false);
908     HRegionInfo lastRegionab = new HRegionInfo(htd.getTableName(),
909       Bytes.toBytes("ccc"),
910       Bytes.toBytes("ddd"), false);
911 
912     assertTrue(comp.compare(lastRegiona, lastRegionaa) < 0);
913     assertTrue(comp.compare(lastRegiona, lastRegionab) < 0);
914     assertTrue(comp.compare(lastRegionaa, lastRegionab) < 0);
915 
916   }
917 
918   @Test
919   public void testArchiveOldRegion() throws Exception {
920     String table = "table";
921     HBaseTestingUtility htu = new HBaseTestingUtility();
922     setRootDirAndCleanIt(htu, "testCleanParent");
923     Server server = new MockServer(htu);
924     MasterServices services = new MockMasterServices(server);
925 
926     // create the janitor
927     CatalogJanitor janitor = new CatalogJanitor(server, services);
928 
929     // Create regions.
930     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
931     htd.addFamily(new HColumnDescriptor("f"));
932     HRegionInfo parent = new HRegionInfo(htd.getTableName(),
933         Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
934     HRegionInfo splita = new HRegionInfo(htd.getTableName(),
935         Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
936     HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
937         Bytes.toBytes("ccc"),
938         Bytes.toBytes("eee"));
939 
940     // Test that when both daughter regions are in place, that we do not
941     // remove the parent.
942     Result parentMetaRow = createResult(parent, splita, splitb);
943     FileSystem fs = FileSystem.get(htu.getConfiguration());
944     Path rootdir = services.getMasterFileSystem().getRootDir();
945     // have to set the root directory since we use it in HFileDisposer to figure out to get to the
946     // archive directory. Otherwise, it just seems to pick the first root directory it can find (so
947     // the single test passes, but when the full suite is run, things get borked).
948     FSUtils.setRootDir(fs.getConf(), rootdir);
949     Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
950     Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
951     Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
952       tabledir, htd.getColumnFamilies()[0].getName());
953     LOG.debug("Table dir:" + tabledir);
954     LOG.debug("Store dir:" + storedir);
955     LOG.debug("Store archive dir:" + storeArchive);
956 
957     // add a couple of store files that we can check for
958     FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir);
959     // get the current store files for comparison
960     FileStatus[] storeFiles = fs.listStatus(storedir);
961     int index = 0;
962     for (FileStatus file : storeFiles) {
963       LOG.debug("Have store file:" + file.getPath());
964       assertEquals("Got unexpected store file", mockFiles[index].getPath(),
965         storeFiles[index].getPath());
966       index++;
967     }
968 
969     // do the cleaning of the parent
970     assertTrue(janitor.cleanParent(parent, parentMetaRow));
971     LOG.debug("Finished cleanup of parent region");
972 
973     // and now check to make sure that the files have actually been archived
974     FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
975     logFiles("archived files", storeFiles);
976     logFiles("archived files", archivedStoreFiles);
977 
978     assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
979 
980     // cleanup
981     FSUtils.delete(fs, rootdir, true);
982     services.stop("Test finished");
983     server.stop("Test finished");
984     janitor.cancel(true);
985   }
986 
987   /**
988    * @param description description of the files for logging
989    * @param storeFiles the status of the files to log
990    */
991   private void logFiles(String description, FileStatus[] storeFiles) {
992     LOG.debug("Current " + description + ": ");
993     for (FileStatus file : storeFiles) {
994       LOG.debug(file.getPath());
995     }
996   }
997 
998   /**
999    * Test that if a store file with the same name is present as those already backed up cause the
1000    * already archived files to be timestamped backup
1001    */
1002   @Test
1003   public void testDuplicateHFileResolution() throws Exception {
1004     String table = "table";
1005     HBaseTestingUtility htu = new HBaseTestingUtility();
1006     setRootDirAndCleanIt(htu, "testCleanParent");
1007     Server server = new MockServer(htu);
1008     MasterServices services = new MockMasterServices(server);
1009 
1010     // create the janitor
1011 
1012     CatalogJanitor janitor = new CatalogJanitor(server, services);
1013 
1014     // Create regions.
1015     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
1016     htd.addFamily(new HColumnDescriptor("f"));
1017     HRegionInfo parent = new HRegionInfo(htd.getTableName(),
1018         Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
1019     HRegionInfo splita = new HRegionInfo(htd.getTableName(),
1020         Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
1021     HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
1022         Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
1023     // Test that when both daughter regions are in place, that we do not
1024     // remove the parent.
1025     Result r = createResult(parent, splita, splitb);
1026 
1027     FileSystem fs = FileSystem.get(htu.getConfiguration());
1028 
1029     Path rootdir = services.getMasterFileSystem().getRootDir();
1030     // have to set the root directory since we use it in HFileDisposer to figure out to get to the
1031     // archive directory. Otherwise, it just seems to pick the first root directory it can find (so
1032     // the single test passes, but when the full suite is run, things get borked).
1033     FSUtils.setRootDir(fs.getConf(), rootdir);
1034     Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
1035     Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
1036     System.out.println("Old root:" + rootdir);
1037     System.out.println("Old table:" + tabledir);
1038     System.out.println("Old store:" + storedir);
1039 
1040     Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
1041       tabledir, htd.getColumnFamilies()[0].getName());
1042     System.out.println("Old archive:" + storeArchive);
1043 
1044     // enable archiving, make sure that files get archived
1045     addMockStoreFiles(2, services, storedir);
1046     // get the current store files for comparison
1047     FileStatus[] storeFiles = fs.listStatus(storedir);
1048     // do the cleaning of the parent
1049     assertTrue(janitor.cleanParent(parent, r));
1050 
1051     // and now check to make sure that the files have actually been archived
1052     FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
1053     assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
1054 
1055     // now add store files with the same names as before to check backup
1056     // enable archiving, make sure that files get archived
1057     addMockStoreFiles(2, services, storedir);
1058 
1059     // do the cleaning of the parent
1060     assertTrue(janitor.cleanParent(parent, r));
1061 
1062     // and now check to make sure that the files have actually been archived
1063     archivedStoreFiles = fs.listStatus(storeArchive);
1064     assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);
1065 
1066     // cleanup
1067     services.stop("Test finished");
1068     server.stop("shutdown");
1069     janitor.cancel(true);
1070   }
1071 
1072   @Test
1073   public void testAlreadyRunningStatus() throws Exception {
1074     int numberOfThreads = 2;
1075     final List<Integer> gcValues = new ArrayList<>();
1076     Thread[] threads = new Thread[numberOfThreads];
1077     HBaseTestingUtility hBaseTestingUtility = new HBaseTestingUtility();
1078     hBaseTestingUtility.getConfiguration().setInt("hbase.client.retries.number", 5);
1079     Server server = new MockServer(hBaseTestingUtility);
1080     MasterServices services = new MockMasterServices(server);
1081     final CatalogJanitor catalogJanitor = new CatalogJanitor(server, services);
1082     for (int i = 0; i < numberOfThreads; i++) {
1083       threads[i] = new Thread(new Runnable() {
1084         @Override
1085         public void run() {
1086           try {
1087             gcValues.add(catalogJanitor.scan());
1088           } catch (IOException e) {
1089             throw new RuntimeException(e);
1090           }
1091         }
1092       });
1093     }
1094     for (int i = 0; i < numberOfThreads; i++) {
1095       threads[i].start();
1096     }
1097     for (int i = 0; i < numberOfThreads; i++) {
1098       threads[i].join();
1099     }
1100     assertTrue("One janitor.scan() call should have returned -1", gcValues.contains(-1));
1101   }
1102 
1103   private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir)
1104       throws IOException {
1105     // get the existing store files
1106     FileSystem fs = services.getMasterFileSystem().getFileSystem();
1107     fs.mkdirs(storedir);
1108     // create the store files in the parent
1109     for (int i = 0; i < count; i++) {
1110       Path storeFile = new Path(storedir, "_store" + i);
1111       FSDataOutputStream dos = fs.create(storeFile, true);
1112       dos.writeBytes("Some data: " + i);
1113       dos.close();
1114     }
1115     LOG.debug("Adding " + count + " store files to the storedir:" + storedir);
1116     // make sure the mock store files are there
1117     FileStatus[] storeFiles = fs.listStatus(storedir);
1118     assertEquals("Didn't have expected store files", count, storeFiles.length);
1119     return storeFiles;
1120   }
1121 
1122   private String setRootDirAndCleanIt(final HBaseTestingUtility htu,
1123       final String subdir)
1124   throws IOException {
1125     Path testdir = htu.getDataTestDir(subdir);
1126     FileSystem fs = FileSystem.get(htu.getConfiguration());
1127     if (fs.exists(testdir)) assertTrue(fs.delete(testdir, true));
1128     FSUtils.setRootDir(htu.getConfiguration(), testdir);
1129     return FSUtils.getRootDir(htu.getConfiguration()).toString();
1130   }
1131 
  /**
   * Write a single reference file from a daughter region back to its parent,
   * simulating an unfinished split (the CatalogJanitor must not clean the
   * parent while such a reference exists).
   * @param services Master services instance.
   * @param htd table descriptor; its first column family is used for the store dir
   * @param parent region the reference points back to
   * @param daughter region whose store directory receives the reference file
   * @param midkey split key written into the reference
   * @param top True if we are to write a 'top' reference.
   * @return Path to reference we created.
   * @throws IOException
   */
  private Path createReferences(final MasterServices services,
      final HTableDescriptor htd, final HRegionInfo parent,
      final HRegionInfo daughter, final byte [] midkey, final boolean top)
  throws IOException {
    Path rootdir = services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
    Path storedir = HStore.getStoreHomedir(tabledir, daughter,
      htd.getColumnFamilies()[0].getName());
    Reference ref =
      top? Reference.createTopReference(midkey): Reference.createBottomReference(midkey);
    long now = System.currentTimeMillis();
    // Reference name has this format: StoreFile#REF_NAME_PARSER
    Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
    FileSystem fs = services.getMasterFileSystem().getFileSystem();
    ref.write(fs, p);
    return p;
  }
1159 
1160   private Result createResult(final HRegionInfo parent, final HRegionInfo a,
1161       final HRegionInfo b)
1162   throws IOException {
1163     return MetaMockingUtil.getMetaTableRowResult(parent, null, a, b);
1164   }
1165 
1166   private HTableDescriptor createHTableDescriptor() {
1167     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t"));
1168     htd.addFamily(new HColumnDescriptor("f"));
1169     return htd;
1170   }
1171 
1172   private MultiResponse buildMultiResponse(MultiRequest req) {
1173     MultiResponse.Builder builder = MultiResponse.newBuilder();
1174     RegionActionResult.Builder regionActionResultBuilder =
1175         RegionActionResult.newBuilder();
1176     ResultOrException.Builder roeBuilder = ResultOrException.newBuilder();
1177     for (RegionAction regionAction: req.getRegionActionList()) {
1178       regionActionResultBuilder.clear();
1179       for (ClientProtos.Action action: regionAction.getActionList()) {
1180         roeBuilder.clear();
1181         roeBuilder.setResult(ClientProtos.Result.getDefaultInstance());
1182         roeBuilder.setIndex(action.getIndex());
1183         regionActionResultBuilder.addResultOrException(roeBuilder.build());
1184       }
1185       builder.addRegionActionResult(regionActionResultBuilder.build());
1186     }
1187     return builder.build();
1188   }
1189 
1190 }
1191