View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.regionserver.wal;
20  
21  import static org.junit.Assert.assertEquals;
22  import static org.junit.Assert.assertNotNull;
23  import static org.junit.Assert.assertTrue;
24  import static org.junit.Assert.fail;
25  import static org.mockito.Matchers.any;
26  import static org.mockito.Matchers.eq;
27  import static org.mockito.Mockito.doAnswer;
28  import static org.mockito.Mockito.spy;
29  import static org.mockito.Mockito.when;
30  
31  import java.io.FilterInputStream;
32  import java.io.IOException;
33  import java.lang.reflect.Field;
34  import java.security.PrivilegedExceptionAction;
35  import java.util.ArrayList;
36  import java.util.Arrays;
37  import java.util.Collection;
38  import java.util.HashSet;
39  import java.util.List;
40  import java.util.Set;
41  import java.util.concurrent.atomic.AtomicBoolean;
42  import java.util.concurrent.atomic.AtomicInteger;
43  
44  import org.apache.commons.logging.Log;
45  import org.apache.commons.logging.LogFactory;
46  import org.apache.hadoop.conf.Configuration;
47  import org.apache.hadoop.fs.FSDataInputStream;
48  import org.apache.hadoop.fs.FileStatus;
49  import org.apache.hadoop.fs.FileSystem;
50  import org.apache.hadoop.fs.Path;
51  import org.apache.hadoop.fs.PathFilter;
52  import org.apache.hadoop.hbase.Cell;
53  import org.apache.hadoop.hbase.HBaseConfiguration;
54  import org.apache.hadoop.hbase.HBaseTestingUtility;
55  import org.apache.hadoop.hbase.HColumnDescriptor;
56  import org.apache.hadoop.hbase.HConstants;
57  import org.apache.hadoop.hbase.HRegionInfo;
58  import org.apache.hadoop.hbase.HTableDescriptor;
59  import org.apache.hadoop.hbase.KeyValue;
60  import org.apache.hadoop.hbase.MiniHBaseCluster;
61  import org.apache.hadoop.hbase.ServerName;
62  import org.apache.hadoop.hbase.TableName;
63  import org.apache.hadoop.hbase.client.Delete;
64  import org.apache.hadoop.hbase.client.Get;
65  import org.apache.hadoop.hbase.client.HTable;
66  import org.apache.hadoop.hbase.client.Put;
67  import org.apache.hadoop.hbase.client.Result;
68  import org.apache.hadoop.hbase.client.ResultScanner;
69  import org.apache.hadoop.hbase.client.Scan;
70  import org.apache.hadoop.hbase.client.Table;
71  import org.apache.hadoop.hbase.monitoring.MonitoredTask;
72  import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
73  import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
74  import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher;
75  import org.apache.hadoop.hbase.regionserver.FlushRequestListener;
76  import org.apache.hadoop.hbase.regionserver.FlushRequester;
77  import org.apache.hadoop.hbase.regionserver.HRegion;
78  import org.apache.hadoop.hbase.regionserver.HRegionServer;
79  import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
80  import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
81  import org.apache.hadoop.hbase.regionserver.Region;
82  import org.apache.hadoop.hbase.regionserver.RegionScanner;
83  import org.apache.hadoop.hbase.regionserver.RegionServerServices;
84  import org.apache.hadoop.hbase.regionserver.Store;
85  import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
86  import org.apache.hadoop.hbase.security.User;
87  import org.apache.hadoop.hbase.testclassification.MediumTests;
88  import org.apache.hadoop.hbase.util.Bytes;
89  import org.apache.hadoop.hbase.util.CommonFSUtils;
90  import org.apache.hadoop.hbase.util.EnvironmentEdge;
91  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
92  import org.apache.hadoop.hbase.util.FSUtils;
93  import org.apache.hadoop.hbase.util.HFileTestUtil;
94  import org.apache.hadoop.hbase.util.Pair;
95  import org.apache.hadoop.hbase.wal.DefaultWALProvider;
96  import org.apache.hadoop.hbase.wal.WAL;
97  import org.apache.hadoop.hbase.wal.WALFactory;
98  import org.apache.hadoop.hbase.wal.WALKey;
99  import org.apache.hadoop.hbase.wal.WALSplitter;
100 import org.apache.hadoop.hdfs.DFSInputStream;
101 import org.junit.After;
102 import org.junit.AfterClass;
103 import org.junit.Before;
104 import org.junit.BeforeClass;
105 import org.junit.Rule;
106 import org.junit.Test;
107 import org.junit.experimental.categories.Category;
108 import org.junit.rules.TestName;
109 import org.mockito.Mockito;
110 import org.mockito.invocation.InvocationOnMock;
111 import org.mockito.stubbing.Answer;
112 
113 /**
114  * Test replay of edits out of a WAL split.
115  */
116 @Category(MediumTests.class)
117 public class TestWALReplay {
118   private static final Log LOG = LogFactory.getLog(TestWALReplay.class);
119   static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
120   private final EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
121   private Path hbaseRootDir = null;
122   private Path hbaseWALRootDir = null;
123   private String logName;
124   private Path oldLogDir;
125   private Path logDir;
126   private FileSystem fs;
127   private Configuration conf;
128   private RecoveryMode mode;
129   private WALFactory wals;
130 
131   @Rule
132   public final TestName currentTest = new TestName();
133 
134 
135   @BeforeClass
136   public static void setUpBeforeClass() throws Exception {
137     Configuration conf = TEST_UTIL.getConfiguration();
138     conf.setBoolean("dfs.support.append", true);
139     // The below config supported by 0.20-append and CDH3b2
140     conf.setInt("dfs.client.block.recovery.retries", 2);
141     TEST_UTIL.startMiniCluster(3);
142     Path hbaseRootDir =
143       TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
144     Path hbaseWALRootDir =
145         TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbaselog"));
146     LOG.info(HConstants.HBASE_DIR + "=" + hbaseRootDir);
147     LOG.info(CommonFSUtils.HBASE_WAL_DIR + "=" + hbaseWALRootDir);
148     FSUtils.setRootDir(conf, hbaseRootDir);
149     FSUtils.setWALRootDir(conf, hbaseWALRootDir);
150   }
151 
  /**
   * Shuts down the mini cluster started in setUpBeforeClass.
   */
  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }
156 
  /**
   * Per-test setup: fresh configuration copy, empty root/WAL-root directories, and a
   * test-local WALFactory named after the running test method.
   */
  @Before
  public void setUp() throws Exception {
    // Copy the cluster conf so per-test mutations do not leak between tests.
    this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
    this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
    this.hbaseRootDir = FSUtils.getRootDir(this.conf);
    this.hbaseWALRootDir = FSUtils.getWALRootDir(this.conf);
    this.oldLogDir = new Path(this.hbaseWALRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
    // "-manual" suffix keeps this test's WAL dir distinct from any cluster-managed one.
    this.logName = DefaultWALProvider.getWALDirectoryName(currentTest.getMethodName() + "-manual");
    this.logDir = new Path(this.hbaseWALRootDir, logName);
    // Start every test from empty root and WAL-root directories.
    if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
      TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
    }
    if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseWALRootDir)) {
      TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseWALRootDir, true);
    }
    this.mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false) ?
        RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING);
    this.wals = new WALFactory(conf, null, currentTest.getMethodName());
  }
176 
  /**
   * Per-test teardown: close the WALFactory first, then remove the directories backing it.
   */
  @After
  public void tearDown() throws Exception {
    this.wals.close();
    TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
    TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseWALRootDir, true);
  }
183 
184   /*
185    * @param p Directory to cleanup
186    */
187   private void deleteDir(final Path p) throws IOException {
188     if (this.fs.exists(p)) {
189       if (!this.fs.delete(p, true)) {
190         throw new IOException("Failed remove of " + p);
191       }
192     }
193   }
194 
195   /**
196    *
197    * @throws Exception
198    */
199   @Test
200   public void testReplayEditsAfterRegionMovedWithMultiCF() throws Exception {
201     final TableName tableName =
202         TableName.valueOf("testReplayEditsAfterRegionMovedWithMultiCF");
203     byte[] family1 = Bytes.toBytes("cf1");
204     byte[] family2 = Bytes.toBytes("cf2");
205     byte[] qualifier = Bytes.toBytes("q");
206     byte[] value = Bytes.toBytes("testV");
207     byte[][] familys = { family1, family2 };
208     TEST_UTIL.createTable(tableName, familys);
209     Table htable = new HTable(TEST_UTIL.getConfiguration(), tableName);
210     Put put = new Put(Bytes.toBytes("r1"));
211     put.add(family1, qualifier, value);
212     htable.put(put);
213     ResultScanner resultScanner = htable.getScanner(new Scan());
214     int count = 0;
215     while (resultScanner.next() != null) {
216       count++;
217     }
218     resultScanner.close();
219     assertEquals(1, count);
220 
221     MiniHBaseCluster hbaseCluster = TEST_UTIL.getMiniHBaseCluster();
222     List<HRegion> regions = hbaseCluster.getRegions(tableName);
223     assertEquals(1, regions.size());
224 
225     // move region to another regionserver
226     Region destRegion = regions.get(0);
227     int originServerNum = hbaseCluster
228         .getServerWith(destRegion.getRegionInfo().getRegionName());
229     assertTrue("Please start more than 1 regionserver", hbaseCluster
230         .getRegionServerThreads().size() > 1);
231     int destServerNum = 0;
232     while (destServerNum == originServerNum) {
233       destServerNum++;
234     }
235     HRegionServer originServer = hbaseCluster.getRegionServer(originServerNum);
236     HRegionServer destServer = hbaseCluster.getRegionServer(destServerNum);
237     // move region to destination regionserver
238     TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), destServer.getServerName());
239 
240     // delete the row
241     Delete del = new Delete(Bytes.toBytes("r1"));
242     htable.delete(del);
243     resultScanner = htable.getScanner(new Scan());
244     count = 0;
245     while (resultScanner.next() != null) {
246       count++;
247     }
248     resultScanner.close();
249     assertEquals(0, count);
250 
251     // flush region and make major compaction
252     Region region =  destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName());
253     region.flush(true);
254     // wait to complete major compaction
255     for (Store store : region.getStores()) {
256       store.triggerMajorCompaction();
257     }
258     region.compact(true);
259 
260     // move region to origin regionserver
261     TEST_UTIL.moveRegionAndWait(destRegion.getRegionInfo(), originServer.getServerName());
262     // abort the origin regionserver
263     originServer.abort("testing");
264 
265     // see what we get
266     Result result = htable.get(new Get(Bytes.toBytes("r1")));
267     if (result != null) {
268       assertTrue("Row is deleted, but we get" + result.toString(),
269           (result == null) || result.isEmpty());
270     }
271     resultScanner.close();
272   }
273 
  /**
   * Tests for hbase-2727: two batches of edits, each followed by a WAL split, must both land in
   * the recovered.edits directory and both be replayed on region open.
   * @throws Exception
   * @see https://issues.apache.org/jira/browse/HBASE-2727
   */
  @Test
  public void test2727() throws Exception {
    // Test being able to have > 1 set of edits in the recovered.edits directory.
    // Ensure edits are replayed properly.
    final TableName tableName =
        TableName.valueOf("test2727");

    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    Path basedir = FSUtils.getWALTableDir(conf, tableName);
    deleteDir(basedir);

    HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
    HRegion.closeHRegion(region2);
    final byte [] rowName = tableName.getName();

    WAL wal1 = createWAL(this.conf);
    // Add 1k to each family.
    final int countPerFamily = 1000;

    for (HColumnDescriptor hcd: htd.getFamilies()) {
      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee,
          wal1, htd, mvcc);
    }
    // shutdown (not close) leaves the WAL files behind for the splitter to find.
    wal1.shutdown();
    runWALSplit(this.conf);

    WAL wal2 = createWAL(this.conf);
    // Add 1k to each family.
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily,
          ee, wal2, htd, mvcc);
    }
    // Second shutdown+split produces a second set of recovered.edits files.
    wal2.shutdown();
    runWALSplit(this.conf);

    WAL wal3 = createWAL(this.conf);
    try {
      HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal3);
      long seqid = region.getOpenSeqNum();
      // The regions opens with sequenceId as 1. With 6k edits, its sequence number reaches 6k + 1.
      // When opened, this region would apply 6k edits, and increment the sequenceId by 1
      assertTrue(seqid > mvcc.getWritePoint());
      assertEquals(seqid - 1, mvcc.getWritePoint());
      LOG.debug("region.getOpenSeqNum(): " + region.getOpenSeqNum() + ", wal3.id: "
          + mvcc.getReadPoint());

      // TODO: Scan all.
      region.close();
    } finally {
      wal3.close();
    }
  }
333 
334   /**
335    * Test case of HRegion that is only made out of bulk loaded files.  Assert
336    * that we don't 'crash'.
337    * @throws IOException
338    * @throws IllegalAccessException
339    * @throws NoSuchFieldException
340    * @throws IllegalArgumentException
341    * @throws SecurityException
342    */
343   @Test
344   public void testRegionMadeOfBulkLoadedFilesOnly()
345   throws IOException, SecurityException, IllegalArgumentException,
346       NoSuchFieldException, IllegalAccessException, InterruptedException {
347     final TableName tableName =
348         TableName.valueOf("testRegionMadeOfBulkLoadedFilesOnly");
349     final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
350     final Path basedir = new Path(this.hbaseRootDir, tableName.getNameAsString());
351     deleteDir(basedir);
352     final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
353     HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
354     HRegion.closeHRegion(region2);
355     WAL wal = createWAL(this.conf);
356     HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
357 
358     byte [] family = htd.getFamilies().iterator().next().getName();
359     Path f =  new Path(basedir, "hfile");
360     HFileTestUtil.createHFile(this.conf, fs, f, family, family, Bytes.toBytes(""),
361         Bytes.toBytes("z"), 10);
362     List <Pair<byte[],String>>  hfs= new ArrayList<Pair<byte[],String>>(1);
363     hfs.add(Pair.newPair(family, f.toString()));
364     region.bulkLoadHFiles(hfs, true, null, null);
365 
366     // Add an edit so something in the WAL
367     byte [] row = tableName.getName();
368     region.put((new Put(row)).add(family, family, family));
369     wal.sync();
370     final int rowsInsertedCount = 11;
371 
372     assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan())));
373 
374     // Now 'crash' the region by stealing its wal
375     final Configuration newConf = HBaseConfiguration.create(this.conf);
376     User user = HBaseTestingUtility.getDifferentUser(newConf,
377         tableName.getNameAsString());
378     user.runAs(new PrivilegedExceptionAction() {
379       @Override
380       public Object run() throws Exception {
381         runWALSplit(newConf);
382         WAL wal2 = createWAL(newConf);
383 
384         HRegion region2 = HRegion.openHRegion(newConf, FileSystem.get(newConf),
385           hbaseRootDir, hri, htd, wal2);
386         long seqid2 = region2.getOpenSeqNum();
387         assertTrue(seqid2 > -1);
388         assertEquals(rowsInsertedCount, getScannedCount(region2.getScanner(new Scan())));
389 
390         // I can't close wal1.  Its been appropriated when we split.
391         region2.close();
392         wal2.close();
393         return null;
394       }
395     });
396   }
397 
398   /**
399    * HRegion test case that is made of a major compacted HFile (created with three bulk loaded
400    * files) and an edit in the memstore.
401    * This is for HBASE-10958 "[dataloss] Bulk loading with seqids can prevent some log entries
402    * from being replayed"
403    * @throws IOException
404    * @throws IllegalAccessException
405    * @throws NoSuchFieldException
406    * @throws IllegalArgumentException
407    * @throws SecurityException
408    */
409   @Test
410   public void testCompactedBulkLoadedFiles()
411       throws IOException, SecurityException, IllegalArgumentException,
412       NoSuchFieldException, IllegalAccessException, InterruptedException {
413     final TableName tableName =
414         TableName.valueOf("testCompactedBulkLoadedFiles");
415     final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
416     final Path basedir = new Path(this.hbaseRootDir, tableName.getNameAsString());
417     deleteDir(basedir);
418     final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
419     HRegion region2 = HRegion.createHRegion(hri,
420         hbaseRootDir, this.conf, htd);
421     HRegion.closeHRegion(region2);
422     WAL wal = createWAL(this.conf);
423     HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
424 
425     // Add an edit so something in the WAL
426     byte [] row = tableName.getName();
427     byte [] family = htd.getFamilies().iterator().next().getName();
428     region.put((new Put(row)).add(family, family, family));
429     wal.sync();
430 
431     List <Pair<byte[],String>>  hfs= new ArrayList<Pair<byte[],String>>(1);
432     for (int i = 0; i < 3; i++) {
433       Path f = new Path(basedir, "hfile"+i);
434       HFileTestUtil.createHFile(this.conf, fs, f, family, family, Bytes.toBytes(i + "00"),
435           Bytes.toBytes(i + "50"), 10);
436       hfs.add(Pair.newPair(family, f.toString()));
437     }
438     region.bulkLoadHFiles(hfs, true, null, null);
439     final int rowsInsertedCount = 31;
440     assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan())));
441 
442     // major compact to turn all the bulk loaded files into one normal file
443     region.compact(true);
444     assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan())));
445 
446     // Now 'crash' the region by stealing its wal
447     final Configuration newConf = HBaseConfiguration.create(this.conf);
448     User user = HBaseTestingUtility.getDifferentUser(newConf,
449         tableName.getNameAsString());
450     user.runAs(new PrivilegedExceptionAction() {
451       @Override
452       public Object run() throws Exception {
453         runWALSplit(newConf);
454         WAL wal2 = createWAL(newConf);
455 
456         HRegion region2 = HRegion.openHRegion(newConf, FileSystem.get(newConf),
457             hbaseRootDir, hri, htd, wal2);
458         long seqid2 = region2.getOpenSeqNum();
459         assertTrue(seqid2 > -1);
460         assertEquals(rowsInsertedCount, getScannedCount(region2.getScanner(new Scan())));
461 
462         // I can't close wal1.  Its been appropriated when we split.
463         region2.close();
464         wal2.close();
465         return null;
466       }
467     });
468   }
469 
470 
471   /**
472    * Test writing edits into an HRegion, closing it, splitting logs, opening
473    * Region again.  Verify seqids.
474    * @throws IOException
475    * @throws IllegalAccessException
476    * @throws NoSuchFieldException
477    * @throws IllegalArgumentException
478    * @throws SecurityException
479    */
480   @Test
481   public void testReplayEditsWrittenViaHRegion()
482   throws IOException, SecurityException, IllegalArgumentException,
483       NoSuchFieldException, IllegalAccessException, InterruptedException {
484     final TableName tableName =
485         TableName.valueOf("testReplayEditsWrittenViaHRegion");
486     final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
487     final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
488     deleteDir(basedir);
489     final byte[] rowName = tableName.getName();
490     final int countPerFamily = 10;
491     final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
492     HRegion region3 = HRegion.createHRegion(hri,
493             hbaseRootDir, this.conf, htd);
494     HRegion.closeHRegion(region3);
495     // Write countPerFamily edits into the three families.  Do a flush on one
496     // of the families during the load of edits so its seqid is not same as
497     // others to test we do right thing when different seqids.
498     WAL wal = createWAL(this.conf);
499     HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
500     long seqid = region.getOpenSeqNum();
501     boolean first = true;
502     for (HColumnDescriptor hcd: htd.getFamilies()) {
503       addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
504       if (first) {
505         // If first, so we have at least one family w/ different seqid to rest.
506         region.flush(true);
507         first = false;
508       }
509     }
510     // Now assert edits made it in.
511     final Get g = new Get(rowName);
512     Result result = region.get(g);
513     assertEquals(countPerFamily * htd.getFamilies().size(),
514       result.size());
515     // Now close the region (without flush), split the log, reopen the region and assert that
516     // replay of log has the correct effect, that our seqids are calculated correctly so
517     // all edits in logs are seen as 'stale'/old.
518     region.close(true);
519     wal.shutdown();
520     runWALSplit(this.conf);
521     WAL wal2 = createWAL(this.conf);
522     HRegion region2 = HRegion.openHRegion(conf, this.fs, hbaseRootDir, hri, htd, wal2);
523     long seqid2 = region2.getOpenSeqNum();
524     assertTrue(seqid + result.size() < seqid2);
525     final Result result1b = region2.get(g);
526     assertEquals(result.size(), result1b.size());
527 
528     // Next test.  Add more edits, then 'crash' this region by stealing its wal
529     // out from under it and assert that replay of the log adds the edits back
530     // correctly when region is opened again.
531     for (HColumnDescriptor hcd: htd.getFamilies()) {
532       addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region2, "y");
533     }
534     // Get count of edits.
535     final Result result2 = region2.get(g);
536     assertEquals(2 * result.size(), result2.size());
537     wal2.sync();
538     final Configuration newConf = HBaseConfiguration.create(this.conf);
539     User user = HBaseTestingUtility.getDifferentUser(newConf,
540       tableName.getNameAsString());
541     user.runAs(new PrivilegedExceptionAction() {
542       @Override
543       public Object run() throws Exception {
544         runWALSplit(newConf);
545         FileSystem newFS = FileSystem.get(newConf);
546         // Make a new wal for new region open.
547         WAL wal3 = createWAL(newConf);
548         final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
549         HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
550           @Override
551           protected boolean restoreEdit(Store s, Cell cell) {
552             boolean b = super.restoreEdit(s, cell);
553             countOfRestoredEdits.incrementAndGet();
554             return b;
555           }
556         };
557         long seqid3 = region3.initialize();
558         Result result3 = region3.get(g);
559         // Assert that count of cells is same as before crash.
560         assertEquals(result2.size(), result3.size());
561         assertEquals(htd.getFamilies().size() * countPerFamily,
562           countOfRestoredEdits.get());
563 
564         // I can't close wal1.  Its been appropriated when we split.
565         region3.close();
566         wal3.close();
567         return null;
568       }
569     });
570   }
571 
572   /**
573    * Test that we recover correctly when there is a failure in between the
574    * flushes. i.e. Some stores got flushed but others did not.
575    *
576    * Unfortunately, there is no easy hook to flush at a store level. The way
577    * we get around this is by flushing at the region level, and then deleting
578    * the recently flushed store file for one of the Stores. This would put us
579    * back in the situation where all but that store got flushed and the region
580    * died.
581    *
582    * We restart Region again, and verify that the edits were replayed.
583    *
584    * @throws IOException
585    * @throws IllegalAccessException
586    * @throws NoSuchFieldException
587    * @throws IllegalArgumentException
588    * @throws SecurityException
589    */
590   @Test
591   public void testReplayEditsAfterPartialFlush()
592   throws IOException, SecurityException, IllegalArgumentException,
593       NoSuchFieldException, IllegalAccessException, InterruptedException {
594     final TableName tableName =
595         TableName.valueOf("testReplayEditsWrittenViaHRegion");
596     final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
597     final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
598     deleteDir(basedir);
599     final byte[] rowName = tableName.getName();
600     final int countPerFamily = 10;
601     final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
602     HRegion region3 = HRegion.createHRegion(hri,
603             hbaseRootDir, this.conf, htd);
604     HRegion.closeHRegion(region3);
605     // Write countPerFamily edits into the three families.  Do a flush on one
606     // of the families during the load of edits so its seqid is not same as
607     // others to test we do right thing when different seqids.
608     WAL wal = createWAL(this.conf);
609     HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
610     long seqid = region.getOpenSeqNum();
611     for (HColumnDescriptor hcd: htd.getFamilies()) {
612       addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
613     }
614 
615     // Now assert edits made it in.
616     final Get g = new Get(rowName);
617     Result result = region.get(g);
618     assertEquals(countPerFamily * htd.getFamilies().size(),
619       result.size());
620 
621     // Let us flush the region
622     region.flush(true);
623     region.close(true);
624     wal.shutdown();
625 
626     // delete the store files in the second column family to simulate a failure
627     // in between the flushcache();
628     // we have 3 families. killing the middle one ensures that taking the maximum
629     // will make us fail.
630     int cf_count = 0;
631     for (HColumnDescriptor hcd: htd.getFamilies()) {
632       cf_count++;
633       if (cf_count == 2) {
634         region.getRegionFileSystem().deleteFamily(hcd.getNameAsString());
635       }
636     }
637 
638 
639     // Let us try to split and recover
640     runWALSplit(this.conf);
641     WAL wal2 = createWAL(this.conf);
642     HRegion region2 = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal2);
643     long seqid2 = region2.getOpenSeqNum();
644     assertTrue(seqid + result.size() < seqid2);
645 
646     final Result result1b = region2.get(g);
647     assertEquals(result.size(), result1b.size());
648   }
649 
650 
651   // StoreFlusher implementation used in testReplayEditsAfterAbortingFlush.
652   // Only throws exception if throwExceptionWhenFlushing is set true.
653   public static class CustomStoreFlusher extends DefaultStoreFlusher {
654     // Switch between throw and not throw exception in flush
655     static final AtomicBoolean throwExceptionWhenFlushing = new AtomicBoolean(false);
656 
657     public CustomStoreFlusher(Configuration conf, Store store) {
658       super(conf, store);
659     }
660     @Override
661     public List<Path> flushSnapshot(MemStoreSnapshot snapshot, long cacheFlushId,
662         MonitoredTask status, ThroughputController throughputController) throws IOException {
663       if (throwExceptionWhenFlushing.get()) {
664         throw new IOException("Simulated exception by tests");
665       }
666       return super.flushSnapshot(snapshot, cacheFlushId, status, throughputController);
667     }
668 
669   };
670 
671   /**
672    * Test that we could recover the data correctly after aborting flush. In the
673    * test, first we abort flush after writing some data, then writing more data
674    * and flush again, at last verify the data.
675    * @throws IOException
676    */
677   @Test
678   public void testReplayEditsAfterAbortingFlush() throws IOException {
679     final TableName tableName =
680         TableName.valueOf("testReplayEditsAfterAbortingFlush");
681     final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
682     final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
683     deleteDir(basedir);
684     final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
685     HRegion region3 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd);
686     region3.close();
687     region3.getWAL().close();
688     // Write countPerFamily edits into the three families. Do a flush on one
689     // of the families during the load of edits so its seqid is not same as
690     // others to test we do right thing when different seqids.
691     WAL wal = createWAL(this.conf);
692     RegionServerServices rsServices = Mockito.mock(RegionServerServices.class);
693     Mockito.doReturn(false).when(rsServices).isAborted();
694     when(rsServices.getServerName()).thenReturn(ServerName.valueOf("foo", 10, 10));
695     Configuration customConf = new Configuration(this.conf);
696     customConf.set(DefaultStoreEngine.DEFAULT_STORE_FLUSHER_CLASS_KEY,
697         CustomStoreFlusher.class.getName());
698     HRegion region =
699       HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal, customConf, rsServices, null);
700     int writtenRowCount = 10;
701     List<HColumnDescriptor> families = new ArrayList<HColumnDescriptor>(
702         htd.getFamilies());
703     for (int i = 0; i < writtenRowCount; i++) {
704       Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i)));
705       put.add(families.get(i % families.size()).getName(), Bytes.toBytes("q"),
706           Bytes.toBytes("val"));
707       region.put(put);
708     }
709 
710     // Now assert edits made it in.
711     RegionScanner scanner = region.getScanner(new Scan());
712     assertEquals(writtenRowCount, getScannedCount(scanner));
713 
714     // Let us flush the region
715     CustomStoreFlusher.throwExceptionWhenFlushing.set(true);
716     try {
717       region.flush(true);
718       fail("Injected exception hasn't been thrown");
719     } catch (Throwable t) {
720       LOG.info("Expected simulated exception when flushing region,"
721           + t.getMessage());
722       // simulated to abort server
723       Mockito.doReturn(true).when(rsServices).isAborted();
724       region.setClosing(false); // region normally does not accept writes after
725       // DroppedSnapshotException. We mock around it for this test.
726     }
727     // writing more data
728     int moreRow = 10;
729     for (int i = writtenRowCount; i < writtenRowCount + moreRow; i++) {
730       Put put = new Put(Bytes.toBytes(tableName + Integer.toString(i)));
731       put.add(families.get(i % families.size()).getName(), Bytes.toBytes("q"),
732           Bytes.toBytes("val"));
733       region.put(put);
734     }
735     writtenRowCount += moreRow;
736     // call flush again
737     CustomStoreFlusher.throwExceptionWhenFlushing.set(false);
738     try {
739       region.flush(true);
740     } catch (IOException t) {
741       LOG.info("Expected exception when flushing region because server is stopped,"
742           + t.getMessage());
743     }
744 
745     region.close(true);
746     wal.shutdown();
747 
748     // Let us try to split and recover
749     runWALSplit(this.conf);
750     WAL wal2 = createWAL(this.conf);
751     Mockito.doReturn(false).when(rsServices).isAborted();
752     HRegion region2 =
753       HRegion.openHRegion(this.hbaseRootDir, hri, htd, wal2, this.conf, rsServices, null);
754     scanner = region2.getScanner(new Scan());
755     assertEquals(writtenRowCount, getScannedCount(scanner));
756   }
757 
758   private int getScannedCount(RegionScanner scanner) throws IOException {
759     int scannedCount = 0;
760     List<Cell> results = new ArrayList<Cell>();
761     while (true) {
762       boolean existMore = scanner.next(results);
763       if (!results.isEmpty())
764         scannedCount++;
765       if (!existMore)
766         break;
767       results.clear();
768     }
769     return scannedCount;
770   }
771 
772   /**
773    * Create an HRegion with the result of a WAL split and test we only see the
774    * good edits
775    * @throws Exception
776    */
777   @Test
778   public void testReplayEditsWrittenIntoWAL() throws Exception {
779     final TableName tableName =
780         TableName.valueOf("testReplayEditsWrittenIntoWAL");
781     final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
782     final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
783     final Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
784     deleteDir(basedir);
785 
786     final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
787     HRegion region2 = HRegion.createHRegion(hri,
788             hbaseRootDir, this.conf, htd);
789     HRegion.closeHRegion(region2);
790     final WAL wal = createWAL(this.conf);
791     final byte[] rowName = tableName.getName();
792     final byte[] regionName = hri.getEncodedNameAsBytes();
793 
794     // Add 1k to each family.
795     final int countPerFamily = 1000;
796     Set<byte[]> familyNames = new HashSet<byte[]>();
797     for (HColumnDescriptor hcd: htd.getFamilies()) {
798       addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily,
799           ee, wal, htd, mvcc);
800       familyNames.add(hcd.getName());
801     }
802 
803     // Add a cache flush, shouldn't have any effect
804     wal.startCacheFlush(regionName, familyNames);
805     wal.completeCacheFlush(regionName);
806 
807     // Add an edit to another family, should be skipped.
808     WALEdit edit = new WALEdit();
809     long now = ee.currentTime();
810     edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName,
811       now, rowName));
812     wal.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc), edit, true);
813 
814     // Delete the c family to verify deletes make it over.
815     edit = new WALEdit();
816     now = ee.currentTime();
817     edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
818     wal.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc), edit, true);
819 
820     // Sync.
821     wal.sync();
822     // Make a new conf and a new fs for the splitter to run on so we can take
823     // over old wal.
824     final Configuration newConf = HBaseConfiguration.create(this.conf);
825     User user = HBaseTestingUtility.getDifferentUser(newConf,
826       ".replay.wal.secondtime");
827     user.runAs(new PrivilegedExceptionAction<Void>() {
828       @Override
829       public Void run() throws Exception {
830         runWALSplit(newConf);
831         FileSystem newFS = FileSystem.get(newConf);
832         // 100k seems to make for about 4 flushes during HRegion#initialize.
833         newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
834         // Make a new wal for new region.
835         WAL newWal = createWAL(newConf);
836         final AtomicInteger flushcount = new AtomicInteger(0);
837         try {
838           final HRegion region =
839               new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {
840             @Override
841             protected FlushResult internalFlushcache(final WAL wal, final long myseqid,
842                 final Collection<Store> storesToFlush, MonitoredTask status,
843                 boolean writeFlushWalMarker)
844                     throws IOException {
845               LOG.info("InternalFlushCache Invoked");
846               FlushResult fs = super.internalFlushcache(wal, myseqid, storesToFlush,
847                   Mockito.mock(MonitoredTask.class), writeFlushWalMarker);
848               flushcount.incrementAndGet();
849               return fs;
850             }
851           };
852           // The seq id this region has opened up with
853           long seqid = region.initialize();
854 
855           // The mvcc readpoint of from inserting data.
856           long writePoint = mvcc.getWritePoint();
857 
858           // We flushed during init.
859           assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
860           assertTrue((seqid - 1) == writePoint);
861 
862           Get get = new Get(rowName);
863           Result result = region.get(get);
864           // Make sure we only see the good edits
865           assertEquals(countPerFamily * (htd.getFamilies().size() - 1),
866             result.size());
867           region.close();
868         } finally {
869           newWal.close();
870         }
871         return null;
872       }
873     });
874   }
875 
876   @Test
877   // the following test is for HBASE-6065
878   public void testSequentialEditLogSeqNum() throws IOException {
879     final TableName tableName = TableName.valueOf(currentTest.getMethodName());
880     final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
881     final Path basedir =
882         FSUtils.getTableDir(this.hbaseRootDir, tableName);
883     deleteDir(basedir);
884     final byte[] rowName = tableName.getName();
885     final int countPerFamily = 10;
886     final HTableDescriptor htd = createBasic1FamilyHTD(tableName);
887 
888     // Mock the WAL
889     MockWAL wal = createMockWAL();
890 
891     HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
892     for (HColumnDescriptor hcd : htd.getFamilies()) {
893       addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
894     }
895 
896     // Let us flush the region
897     // But this time completeflushcache is not yet done
898     region.flush(true);
899     for (HColumnDescriptor hcd : htd.getFamilies()) {
900       addRegionEdits(rowName, hcd.getName(), 5, this.ee, region, "x");
901     }
902     long lastestSeqNumber = region.getSequenceId();
903     // get the current seq no
904     wal.doCompleteCacheFlush = true;
905     // allow complete cache flush with the previous seq number got after first
906     // set of edits.
907     wal.completeCacheFlush(hri.getEncodedNameAsBytes());
908     wal.shutdown();
909     FileStatus[] listStatus = wal.getFiles();
910     assertNotNull(listStatus);
911     assertTrue(listStatus.length > 0);
912     WALSplitter.splitLogFile(hbaseWALRootDir, listStatus[0],
913         this.fs, this.conf, null, null, null, mode, wals);
914     FileStatus[] listStatus1 = this.fs.listStatus(
915       new Path(FSUtils.getWALTableDir(this.conf, tableName), new Path(hri.getEncodedName(),
916           "recovered.edits")), new PathFilter() {
917         @Override
918         public boolean accept(Path p) {
919           if (WALSplitter.isSequenceIdFile(p)) {
920             return false;
921           }
922           return true;
923         }
924       });
925     int editCount = 0;
926     for (FileStatus fileStatus : listStatus1) {
927       editCount = Integer.parseInt(fileStatus.getPath().getName());
928     }
929     // The sequence number should be same
930     assertEquals(
931         "The sequence number of the recoverd.edits and the current edit seq should be same",
932         lastestSeqNumber, editCount);
933   }
934 
935   /**
936    * testcase for https://issues.apache.org/jira/browse/HBASE-15252
937    */
938   @Test
939   public void testDatalossWhenInputError() throws IOException, InstantiationException,
940       IllegalAccessException {
941     final TableName tableName = TableName.valueOf("testDatalossWhenInputError");
942     final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
943     final Path basedir = FSUtils.getTableDir(this.hbaseRootDir, tableName);
944     deleteDir(basedir);
945     final byte[] rowName = tableName.getName();
946     final int countPerFamily = 10;
947     final HTableDescriptor htd = createBasic1FamilyHTD(tableName);
948     HRegion region1 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.hbaseWALRootDir, this.conf, htd);
949     Path regionDir = region1.getWALRegionDir();
950     HBaseTestingUtility.closeRegionAndWAL(region1);
951 
952     WAL wal = createWAL(this.conf);
953     HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal);
954     for (HColumnDescriptor hcd : htd.getFamilies()) {
955       addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x");
956     }
957     // Now assert edits made it in.
958     final Get g = new Get(rowName);
959     Result result = region.get(g);
960     assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
961     // Now close the region (without flush), split the log, reopen the region and assert that
962     // replay of log has the correct effect.
963     region.close(true);
964     wal.shutdown();
965 
966     runWALSplit(this.conf);
967 
968     // here we let the DFSInputStream throw an IOException just after the WALHeader.
969     Path editFile = WALSplitter.getSplitEditFilesSorted(this.fs, regionDir).first();
970     FSDataInputStream stream = fs.open(editFile);
971     stream.seek(ProtobufLogReader.PB_WAL_MAGIC.length);
972     Class<? extends DefaultWALProvider.Reader> logReaderClass =
973         conf.getClass("hbase.regionserver.hlog.reader.impl", ProtobufLogReader.class,
974           DefaultWALProvider.Reader.class);
975     DefaultWALProvider.Reader reader = logReaderClass.newInstance();
976     reader.init(this.fs, editFile, conf, stream);
977     final long headerLength = stream.getPos();
978     reader.close();
979     FileSystem spyFs = spy(this.fs);
980     doAnswer(new Answer<FSDataInputStream>() {
981 
982       @Override
983       public FSDataInputStream answer(InvocationOnMock invocation) throws Throwable {
984         FSDataInputStream stream = (FSDataInputStream) invocation.callRealMethod();
985         Field field = FilterInputStream.class.getDeclaredField("in");
986         field.setAccessible(true);
987         final DFSInputStream in = (DFSInputStream) field.get(stream);
988         DFSInputStream spyIn = spy(in);
989         doAnswer(new Answer<Integer>() {
990 
991           private long pos;
992 
993           @Override
994           public Integer answer(InvocationOnMock invocation) throws Throwable {
995             if (pos >= headerLength) {
996               throw new IOException("read over limit");
997             }
998             int b = (Integer) invocation.callRealMethod();
999             if (b > 0) {
1000               pos += b;
1001             }
1002             return b;
1003           }
1004         }).when(spyIn).read(any(byte[].class), any(int.class), any(int.class));
1005         doAnswer(new Answer<Void>() {
1006 
1007           @Override
1008           public Void answer(InvocationOnMock invocation) throws Throwable {
1009             invocation.callRealMethod();
1010             in.close();
1011             return null;
1012           }
1013         }).when(spyIn).close();
1014         field.set(stream, spyIn);
1015         return stream;
1016       }
1017     }).when(spyFs).open(eq(editFile));
1018 
1019     WAL wal2 = createWAL(this.conf);
1020     HRegion region2;
1021     try {
1022       // log replay should fail due to the IOException, otherwise we may lose data.
1023       region2 = HRegion.openHRegion(conf, spyFs, hbaseRootDir, hri, htd, wal2);
1024       assertEquals(result.size(), region2.get(g).size());
1025     } catch (IOException e) {
1026       assertEquals("read over limit", e.getMessage());
1027     }
1028     region2 = HRegion.openHRegion(conf, fs, hbaseRootDir, hri, htd, wal2);
1029     assertEquals(result.size(), region2.get(g).size());
1030   }
1031 
1032   /**
1033    * testcase for https://issues.apache.org/jira/browse/HBASE-14949.
1034    */
1035   private void testNameConflictWhenSplit(boolean largeFirst) throws IOException {
1036     final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
1037     final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
1038     final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
1039     final Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
1040     deleteDir(basedir);
1041 
1042     final HTableDescriptor htd = createBasic1FamilyHTD(tableName);
1043     HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.hbaseWALRootDir, this.conf, htd);
1044     HBaseTestingUtility.closeRegionAndWAL(region);
1045     final byte[] family = htd.getColumnFamilies()[0].getName();
1046     final byte[] rowName = tableName.getName();
1047     FSWALEntry entry1 = createFSWALEntry(htd, hri, 1L, rowName, family, ee, mvcc, 1);
1048     FSWALEntry entry2 = createFSWALEntry(htd, hri, 2L, rowName, family, ee, mvcc, 2);
1049 
1050     Path largeFile = new Path(logDir, "wal-1");
1051     Path smallFile = new Path(logDir, "wal-2");
1052     writerWALFile(largeFile, Arrays.asList(entry1, entry2));
1053     writerWALFile(smallFile, Arrays.asList(entry2));
1054     FileStatus first, second;
1055     if (largeFirst) {
1056       first = fs.getFileStatus(largeFile);
1057       second = fs.getFileStatus(smallFile);
1058     } else {
1059       first = fs.getFileStatus(smallFile);
1060       second = fs.getFileStatus(largeFile);
1061     }
1062     WALSplitter.splitLogFile(hbaseWALRootDir, first, fs, conf, null, null, null,
1063       RecoveryMode.LOG_SPLITTING, wals);
1064     WALSplitter.splitLogFile(hbaseWALRootDir, second, fs, conf, null, null, null,
1065       RecoveryMode.LOG_SPLITTING, wals);
1066     WAL wal = createWAL(this.conf);
1067     region = HRegion.openHRegion(conf, this.fs, hbaseRootDir, hri, htd, wal);
1068     assertTrue(region.getOpenSeqNum() > mvcc.getWritePoint());
1069     assertEquals(2, region.get(new Get(rowName)).size());
1070   }
1071 
  @Test
  public void testNameConflictWhenSplit0() throws IOException {
    // Split the file containing both entries before the single-entry file.
    testNameConflictWhenSplit(true);
  }
1076 
  @Test
  public void testNameConflictWhenSplit1() throws IOException {
    // Split the single-entry file before the file containing both entries.
    testNameConflictWhenSplit(false);
  }
1081 
1082   static class MockWAL extends FSHLog {
1083     boolean doCompleteCacheFlush = false;
1084 
1085     public MockWAL(FileSystem fs, Path walRootDir, String logName, Configuration conf)
1086         throws IOException {
1087       super(fs, walRootDir, logName, HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null);
1088     }
1089 
1090     @Override
1091     public void completeCacheFlush(byte[] encodedRegionName) {
1092       if (!doCompleteCacheFlush) {
1093         return;
1094       }
1095       super.completeCacheFlush(encodedRegionName);
1096     }
1097   }
1098 
1099   private HTableDescriptor createBasic1FamilyHTD(final TableName tableName) {
1100     HTableDescriptor htd = new HTableDescriptor(tableName);
1101     HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
1102     htd.addFamily(a);
1103     return htd;
1104   }
1105 
1106   private MockWAL createMockWAL() throws IOException {
1107     MockWAL wal = new MockWAL(fs, hbaseWALRootDir, logName, conf);
1108     // Set down maximum recovery so we dfsclient doesn't linger retrying something
1109     // long gone.
1110     HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
1111     return wal;
1112   }
1113 
  // Flusher used in this test. Runs the flush synchronously inside
  // requestFlush on the caller's thread; delayed-flush and listener
  // operations are no-ops.
  class TestFlusher implements FlushRequester {

    @Override
    public boolean requestFlush(Region region, boolean force) {
      try {
        // Flush inline; surface any failure as an unchecked exception so the
        // test fails loudly.
        region.flush(force);
        return true;
      } catch (IOException e) {
        throw new RuntimeException("Exception flushing", e);
      }
    }

    @Override
    public boolean requestDelayedFlush(Region region, long when, boolean forceFlushAllStores) {
      // Report the request as accepted but do not actually schedule a flush.
      return true;
    }

    @Override
    public void registerFlushRequestListener(FlushRequestListener listener) {
      // No-op: listeners are not needed by these tests.
    }

    @Override
    public boolean unregisterFlushRequestListener(FlushRequestListener listener) {
      // No-op: nothing is ever registered.
      return false;
    }

    @Override
    public void setGlobalMemstoreLimit(long globalMemStoreSize) {
      // No-op: the global memstore limit is ignored here.
    }
  }
1148 
  /**
   * Creates a WALKey for the given region/table backed by {@code mvcc}.
   * Every key uses the hard-coded write time 999.
   */
  private WALKey createWALKey(final TableName tableName, final HRegionInfo hri,
      final MultiVersionConcurrencyControl mvcc) {
    return new WALKey(hri.getEncodedNameAsBytes(), tableName, 999, mvcc);
  }
1153 
1154   private WALEdit createWALEdit(final byte[] rowName, final byte[] family, EnvironmentEdge ee,
1155       int index) {
1156     byte[] qualifierBytes = Bytes.toBytes(Integer.toString(index));
1157     byte[] columnBytes = Bytes.toBytes(Bytes.toString(family) + ":" + Integer.toString(index));
1158     WALEdit edit = new WALEdit();
1159     edit.add(new KeyValue(rowName, family, qualifierBytes, ee.currentTime(), columnBytes));
1160     return edit;
1161   }
1162 
  /**
   * Builds an FSWALEntry carrying a single-cell edit (see
   * {@link #createWALEdit}) with the given WAL sequence number, then stamps
   * it with a region sequence id from a fresh mvcc write entry.
   */
  private FSWALEntry createFSWALEntry(HTableDescriptor htd, HRegionInfo hri, long sequence,
      byte[] rowName, byte[] family, EnvironmentEdge ee, MultiVersionConcurrencyControl mvcc,
      int index) throws IOException {
    FSWALEntry entry =
        new FSWALEntry(sequence, createWALKey(htd.getTableName(), hri, mvcc), createWALEdit(
          rowName, family, ee, index), htd, hri, true);
    entry.stampRegionSequenceId(mvcc.begin());
    return entry;
  }
1172 
1173   private void addWALEdits(final TableName tableName, final HRegionInfo hri, final byte[] rowName,
1174       final byte[] family, final int count, EnvironmentEdge ee, final WAL wal,
1175       final HTableDescriptor htd, final MultiVersionConcurrencyControl mvcc) throws IOException {
1176     for (int j = 0; j < count; j++) {
1177       wal.append(htd, hri, createWALKey(tableName, hri, mvcc),
1178         createWALEdit(rowName, family, ee, j), true);
1179     }
1180     wal.sync();
1181   }
1182 
1183   static List<Put> addRegionEdits(final byte[] rowName, final byte[] family, final int count,
1184       EnvironmentEdge ee, final Region r, final String qualifierPrefix) throws IOException {
1185     List<Put> puts = new ArrayList<Put>();
1186     for (int j = 0; j < count; j++) {
1187       byte[] qualifier = Bytes.toBytes(qualifierPrefix + Integer.toString(j));
1188       Put p = new Put(rowName);
1189       p.add(family, qualifier, ee.currentTime(), rowName);
1190       r.put(p);
1191       puts.add(p);
1192     }
1193     return puts;
1194   }
1195 
1196   /*
1197    * Creates an HRI around an HTD that has <code>tableName</code> and three
1198    * column families named 'a','b', and 'c'.
1199    * @param tableName Name of table to use when we create HTableDescriptor.
1200    */
1201    private HRegionInfo createBasic3FamilyHRegionInfo(final TableName tableName) {
1202     return new HRegionInfo(tableName, null, null, false);
1203    }
1204 
1205   /*
1206    * Run the split.  Verify only single split file made.
1207    * @param c
1208    * @return The single split file made
1209    * @throws IOException
1210    */
1211   private Path runWALSplit(final Configuration c) throws IOException {
1212     List<Path> splits = WALSplitter.split(
1213       hbaseWALRootDir, logDir, oldLogDir, FSUtils.getWALFileSystem(c), c, wals);
1214     // Split should generate only 1 file since there's only 1 region
1215     assertEquals("splits=" + splits, 1, splits.size());
1216     // Make sure the file exists
1217     assertTrue(fs.exists(splits.get(0)));
1218     LOG.info("Split file=" + splits.get(0));
1219     return splits.get(0);
1220   }
1221 
1222   /*
1223    * @param c
1224    * @return WAL with retries set down from 5 to 1 only.
1225    * @throws IOException
1226    */
1227   private WAL createWAL(final Configuration c) throws IOException {
1228     FSHLog wal = new FSHLog(FSUtils.getWALFileSystem(c), hbaseWALRootDir, logName, c);
1229     // Set down maximum recovery so we dfsclient doesn't linger retrying something
1230     // long gone.
1231     HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
1232     return wal;
1233   }
1234 
1235   private HTableDescriptor createBasic3FamilyHTD(final TableName tableName) {
1236     HTableDescriptor htd = new HTableDescriptor(tableName);
1237     HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
1238     htd.addFamily(a);
1239     HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b"));
1240     htd.addFamily(b);
1241     HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c"));
1242     htd.addFamily(c);
1243     return htd;
1244   }
1245 
1246   private void writerWALFile(Path file, List<FSWALEntry> entries) throws IOException {
1247     fs.mkdirs(file.getParent());
1248     ProtobufLogWriter writer = new ProtobufLogWriter();
1249     writer.init(fs, file, conf, true);
1250     for (FSWALEntry entry : entries) {
1251       writer.append(entry);
1252     }
1253     writer.sync(false);
1254     writer.close();
1255   }
1256 }