View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.wal;
20  
21  import static org.junit.Assert.assertArrayEquals;
22  import static org.junit.Assert.assertEquals;
23  import static org.junit.Assert.assertFalse;
24  import static org.junit.Assert.assertNotNull;
25  import static org.junit.Assert.assertNull;
26  import static org.junit.Assert.assertTrue;
27  import static org.junit.Assert.fail;
28  
29  import java.io.IOException;
30  import java.io.InputStream;
31  import java.lang.reflect.Method;
32  import java.net.BindException;
33  import java.util.ArrayList;
34  import java.util.List;
35  import java.util.NavigableMap;
36  import java.util.TreeMap;
37  import java.util.concurrent.atomic.AtomicBoolean;
38  
39  import org.apache.commons.logging.Log;
40  import org.apache.commons.logging.LogFactory;
41  import org.apache.hadoop.conf.Configuration;
42  import org.apache.hadoop.fs.FSDataInputStream;
43  import org.apache.hadoop.fs.FSDataOutputStream;
44  import org.apache.hadoop.fs.FileStatus;
45  import org.apache.hadoop.fs.FileSystem;
46  import org.apache.hadoop.fs.Path;
47  import org.apache.hadoop.hbase.Cell;
48  import org.apache.hadoop.hbase.Coprocessor;
49  import org.apache.hadoop.hbase.HBaseTestingUtility;
50  import org.apache.hadoop.hbase.HColumnDescriptor;
51  import org.apache.hadoop.hbase.HConstants;
52  import org.apache.hadoop.hbase.HRegionInfo;
53  import org.apache.hadoop.hbase.HTableDescriptor;
54  import org.apache.hadoop.hbase.KeyValue;
55  import org.apache.hadoop.hbase.TableName;
56  import org.apache.hadoop.hbase.codec.Codec;
57  import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
58  import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
59  import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
60  import org.apache.hadoop.hbase.regionserver.wal.CompressionContext;
61  import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
62  import org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogReader;
63  import org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogWriter;
64  import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
65  import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
66  import org.apache.hadoop.hbase.regionserver.wal.WALCoprocessorHost;
67  import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
68  import org.apache.hadoop.hbase.testclassification.MediumTests;
69  import org.apache.hadoop.hbase.util.Bytes;
70  import org.apache.hadoop.hbase.util.CommonFSUtils;
71  import org.apache.hadoop.hbase.util.FSUtils;
72  import org.apache.hadoop.hbase.util.Threads;
73  import org.apache.hadoop.hdfs.DistributedFileSystem;
74  import org.apache.hadoop.hdfs.MiniDFSCluster;
75  import org.apache.hadoop.hdfs.protocol.HdfsConstants;
76  import org.junit.After;
77  import org.junit.AfterClass;
78  import org.junit.Before;
79  import org.junit.BeforeClass;
80  import org.junit.Rule;
81  import org.junit.Test;
82  import org.junit.experimental.categories.Category;
83  import org.junit.rules.TestName;
84  
85  /**
86   * WAL tests that can be reused across providers.
87   */
88  @Category(MediumTests.class)
89  public class TestWALFactory {
  // Commons-logging logger for this test class.
  private static final Log LOG = LogFactory.getLog(TestWALFactory.class);

  // Shared cluster configuration; initialized once in setUpBeforeClass().
  protected static Configuration conf;
  // Mini HDFS cluster backing the WALs; restarted by testAppendClose().
  private static MiniDFSCluster cluster;
  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  // HBase root directory inside the mini cluster.
  protected static Path hbaseDir;
  // Separate WAL root directory (pointed at file:///tmp/wal in setUpBeforeClass()).
  protected static Path hbaseWALDir;

  // Per-test state, refreshed in setUp().
  protected FileSystem fs;
  protected Path dir;
  protected WALFactory wals;

  // Supplies the current test method name, used to namespace dirs and WAL names.
  @Rule
  public final TestName currentTest = new TestName();
104 
105   @Before
106   public void setUp() throws Exception {
107     fs = cluster.getFileSystem();
108     dir = new Path(hbaseDir, currentTest.getMethodName());
109     wals = new WALFactory(conf, null, currentTest.getMethodName());
110   }
111 
112   @After
113   public void tearDown() throws Exception {
114     // testAppendClose closes the FileSystem, which will prevent us from closing cleanly here.
115     try {
116       wals.close();
117     } catch (IOException exception) {
118       LOG.warn("Encountered exception while closing wal factory. If you have other errors, this" +
119           " may be the cause. Message: " + exception);
120       LOG.debug("Exception details for failure to close wal factory.", exception);
121     }
122     FileStatus[] entries = fs.listStatus(new Path("/"));
123     for (FileStatus dir : entries) {
124       fs.delete(dir.getPath(), true);
125     }
126   }
127 
128   @BeforeClass
129   public static void setUpBeforeClass() throws Exception {
130     FSUtils.setWALRootDir(TEST_UTIL.getConfiguration(), new Path("file:///tmp/wal"));
131     // Make block sizes small.
132     TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
133     // needed for testAppendClose()
134     TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
135     TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
136     // quicker heartbeat interval for faster DN death notification
137     TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
138     TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
139     TEST_UTIL.getConfiguration().setInt("dfs.client.socket-timeout", 5000);
140 
141     // faster failover with cluster.shutdown();fs.close() idiom
142     TEST_UTIL.getConfiguration()
143         .setInt("hbase.ipc.client.connect.max.retries", 1);
144     TEST_UTIL.getConfiguration().setInt(
145         "dfs.client.block.recovery.retries", 1);
146     TEST_UTIL.getConfiguration().setInt(
147       "hbase.ipc.client.connection.maxidletime", 500);
148     TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
149         SampleRegionWALObserver.class.getName());
150     TEST_UTIL.startMiniDFSCluster(3);
151 
152     conf = TEST_UTIL.getConfiguration();
153     cluster = TEST_UTIL.getDFSCluster();
154 
155     hbaseDir = TEST_UTIL.createRootDir();
156     hbaseWALDir = TEST_UTIL.createWALRootDir();
157   }
158 
  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Shuts down the mini DFS cluster started in setUpBeforeClass().
    TEST_UTIL.shutdownMiniCluster();
  }
163 
164   @Test
165   public void canCloseSingleton() throws IOException {
166     WALFactory.getInstance(conf).close();
167   }
168 
  /**
   * Just write multiple logs then split.  Before fix for HADOOP-2283, this
   * would fail.
   *
   * Writes 3 edits to each of 3 regions, rolling the writer after every
   * region batch, then runs WALSplitter and verifies each resulting split
   * file via {@link #verifySplits(List, int)}.
   * @throws IOException
   */
  @Test
  public void testSplit() throws IOException {
    final TableName tableName = TableName.valueOf(currentTest.getMethodName());
    final byte [] rowName = tableName.getName();
    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
    final Path logdir = new Path(hbaseWALDir,
        DefaultWALProvider.getWALDirectoryName(currentTest.getMethodName()));
    Path oldLogDir = new Path(hbaseWALDir, HConstants.HREGION_OLDLOGDIR_NAME);
    final int howmany = 3;
    HRegionInfo[] infos = new HRegionInfo[3];
    // Region directories must exist under both the data root and the WAL root
    // for the splitter to place its output.
    Path tableDataDir = CommonFSUtils.getTableDir(hbaseDir, tableName);
    fs.mkdirs(tableDataDir);
    Path tabledir = FSUtils.getWALTableDir(conf, tableName);
    fs.mkdirs(tabledir);
    for(int i = 0; i < howmany; i++) {
      infos[i] = new HRegionInfo(tableName,
                Bytes.toBytes("" + i), Bytes.toBytes("" + (i+1)), false);
      fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
      fs.mkdirs(new Path(tableDataDir, infos[i].getEncodedName()));
      LOG.info("allo " + new Path(tabledir, infos[i].getEncodedName()).toString());
    }
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("column"));

    // Add edits for three regions.
    for (int ii = 0; ii < howmany; ii++) {
      for (int i = 0; i < howmany; i++) {
        final WAL log =
            wals.getWAL(infos[i].getEncodedNameAsBytes(), infos[i].getTable().getNamespace());
        for (int j = 0; j < howmany; j++) {
          WALEdit edit = new WALEdit();
          byte [] family = Bytes.toBytes("column");
          byte [] qualifier = Bytes.toBytes(Integer.toString(j));
          byte [] column = Bytes.toBytes("column:" + Integer.toString(j));
          edit.add(new KeyValue(rowName, family, qualifier,
              System.currentTimeMillis(), column));
          LOG.info("Region " + i + ": " + edit);
          WALKey walKey =  new WALKey(infos[i].getEncodedNameAsBytes(), tableName,
              System.currentTimeMillis(), mvcc);
          log.append(htd, infos[i], walKey, edit, true);
          // NOTE(review): presumably blocks until the append's mvcc write
          // entry is assigned before we sync — confirm against WALKey docs.
          walKey.getWriteEntry();
        }
        // Roll after each region's batch so the split operates over
        // multiple log files.
        log.sync();
        log.rollWriter(true);
      }
    }
    wals.shutdown();
    List<Path> splits = WALSplitter.split(hbaseWALDir, logdir, oldLogDir, fs, conf, wals);
    verifySplits(splits, howmany);
  }
224 
  /**
   * Test new HDFS-265 sync.
   *
   * Exercises hflush/sync visibility: readers opened after a sync (but before
   * close) should see all edits written so far, including across block
   * boundaries. Named Broken_ so it is skipped by JUnit's test-name matching.
   * @throws Exception
   */
  @Test
  public void Broken_testSync() throws Exception {
    TableName tableName = TableName.valueOf(currentTest.getMethodName());
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
    // First verify that using streams all works.
    Path p = new Path(dir, currentTest.getMethodName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(tableName.getName());
    Method syncMethod = null;
    // Locate the flush method reflectively: newer Hadoop exposes hflush(),
    // older versions exposed sync().
    try {
      syncMethod = out.getClass().getMethod("hflush", new Class<?> []{});
    } catch (NoSuchMethodException e) {
      try {
        syncMethod = out.getClass().getMethod("sync", new Class<?> []{});
      } catch (NoSuchMethodException ex) {
        fail("This version of Hadoop supports neither Syncable.sync() " +
            "nor Syncable.hflush().");
      }
    }
    syncMethod.invoke(out, new Object[]{});
    // After the flush, a concurrently opened reader must see the bytes even
    // though the output stream is still open.
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte [] buffer = new byte [1024];
    int read = in.read(buffer);
    assertEquals(tableName.getName().length, read);
    out.close();
    in.close();

    final int total = 20;
    WAL.Reader reader = null;

    try {
      HRegionInfo info = new HRegionInfo(tableName,
                  null,null, false);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor(tableName.getName()));
      final WAL wal = wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace());

      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        wal.append(htd, info, new WALKey(info.getEncodedNameAsBytes(), tableName,
            System.currentTimeMillis(), mvcc), kvs, true);
      }
      // Now call sync and try reading.  Opening a Reader before you sync just
      // gives you EOFE.
      wal.sync();
      // Open a Reader.
      Path walPath = DefaultWALProvider.getCurrentFileName(wal);
      reader = wals.createReader(fs, walPath);
      int count = 0;
      WAL.Entry entry = new WAL.Entry();
      while ((entry = reader.next(entry)) != null) count++;
      assertEquals(total, count);
      reader.close();
      // Add test that checks to see that an open of a Reader works on a file
      // that has had a sync done on it.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
        wal.append(htd, info, new WALKey(info.getEncodedNameAsBytes(), tableName,
            System.currentTimeMillis(), mvcc), kvs, true);
      }
      wal.sync();
      reader = wals.createReader(fs, walPath);
      count = 0;
      // Only a lower bound here: the reader may not see every edit until the
      // extra sync below.
      while((entry = reader.next(entry)) != null) count++;
      assertTrue(count >= total);
      reader.close();
      // If I sync, should see double the edits.
      wal.sync();
      reader = wals.createReader(fs, walPath);
      count = 0;
      while((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 2, count);
      reader.close();
      // Now do a test that ensures stuff works when we go over block boundary,
      // especially that we return good length on file.
      final byte [] value = new byte[1025 * 1024];  // Make a 1M value.
      for (int i = 0; i < total; i++) {
        WALEdit kvs = new WALEdit();
        kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), value));
        wal.append(htd, info, new WALKey(info.getEncodedNameAsBytes(), tableName,
            System.currentTimeMillis(), mvcc), kvs,  true);
      }
      // Now I should have written out lots of blocks.  Sync then read.
      wal.sync();
      reader = wals.createReader(fs, walPath);
      count = 0;
      while((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
      // shutdown and ensure that Reader gets right length also.
      wal.shutdown();
      reader = wals.createReader(fs, walPath);
      count = 0;
      while((entry = reader.next(entry)) != null) count++;
      assertEquals(total * 3, count);
      reader.close();
    } finally {
      if (reader != null) reader.close();
    }
  }
332 
333   private void verifySplits(final List<Path> splits, final int howmany)
334   throws IOException {
335     assertEquals(howmany * howmany, splits.size());
336     for (int i = 0; i < splits.size(); i++) {
337       LOG.info("Verifying=" + splits.get(i));
338       WAL.Reader reader = wals.createReader(fs, splits.get(i));
339       try {
340         int count = 0;
341         String previousRegion = null;
342         long seqno = -1;
343         WAL.Entry entry = new WAL.Entry();
344         while((entry = reader.next(entry)) != null) {
345           WALKey key = entry.getKey();
346           String region = Bytes.toString(key.getEncodedRegionName());
347           // Assert that all edits are for same region.
348           if (previousRegion != null) {
349             assertEquals(previousRegion, region);
350           }
351           LOG.info("oldseqno=" + seqno + ", newseqno=" + key.getLogSeqNum());
352           assertTrue(seqno < key.getLogSeqNum());
353           seqno = key.getLogSeqNum();
354           previousRegion = region;
355           count++;
356         }
357         assertEquals(howmany, count);
358       } finally {
359         reader.close();
360       }
361     }
362   }
363 
364   /*
365    * We pass different values to recoverFileLease() so that different code paths are covered
366    *
367    * For this test to pass, requires:
368    * 1. HDFS-200 (append support)
369    * 2. HDFS-988 (SafeMode should freeze file operations
370    *              [FSNamesystem.nextGenerationStampForBlock])
371    * 3. HDFS-142 (on restart, maintain pendingCreates)
372    */
373   @Test (timeout=300000)
374   public void testAppendClose() throws Exception {
375     TableName tableName =
376         TableName.valueOf(currentTest.getMethodName());
377     HRegionInfo regioninfo = new HRegionInfo(tableName,
378              HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
379 
380     final WAL wal =
381         wals.getWAL(regioninfo.getEncodedNameAsBytes(), regioninfo.getTable().getNamespace());
382     final int total = 20;
383 
384     HTableDescriptor htd = new HTableDescriptor();
385     htd.addFamily(new HColumnDescriptor(tableName.getName()));
386     MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
387     for (int i = 0; i < total; i++) {
388       WALEdit kvs = new WALEdit();
389       kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
390       wal.append(htd, regioninfo, new WALKey(regioninfo.getEncodedNameAsBytes(), tableName,
391           System.currentTimeMillis(), mvcc), kvs,  true);
392     }
393     // Now call sync to send the data to HDFS datanodes
394     wal.sync();
395      int namenodePort = cluster.getNameNodePort();
396     final Path walPath = DefaultWALProvider.getCurrentFileName(wal);
397 
398 
399     // Stop the cluster.  (ensure restart since we're sharing MiniDFSCluster)
400     try {
401       DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
402       dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
403       TEST_UTIL.shutdownMiniDFSCluster();
404       try {
405         // wal.writer.close() will throw an exception,
406         // but still call this since it closes the LogSyncer thread first
407         wal.shutdown();
408       } catch (IOException e) {
409         LOG.info(e);
410       }
411       fs.close(); // closing FS last so DFSOutputStream can't call close
412       LOG.info("STOPPED first instance of the cluster");
413     } finally {
414       // Restart the cluster
415       while (cluster.isClusterUp()){
416         LOG.error("Waiting for cluster to go down");
417         Thread.sleep(1000);
418       }
419       assertFalse(cluster.isClusterUp());
420       cluster = null;
421       for (int i = 0; i < 100; i++) {
422         try {
423           cluster = TEST_UTIL.startMiniDFSClusterForTestWAL(namenodePort);
424           break;
425         } catch (BindException e) {
426           LOG.info("Sleeping.  BindException bringing up new cluster");
427           Threads.sleep(1000);
428         }
429       }
430       cluster.waitActive();
431       fs = cluster.getFileSystem();
432       LOG.info("STARTED second instance.");
433     }
434 
435     // set the lease period to be 1 second so that the
436     // namenode triggers lease recovery upon append request
437     Method setLeasePeriod = cluster.getClass()
438       .getDeclaredMethod("setLeasePeriod", new Class[]{Long.TYPE, Long.TYPE});
439     setLeasePeriod.setAccessible(true);
440     setLeasePeriod.invoke(cluster, 1000L, 1000L);
441     try {
442       Thread.sleep(1000);
443     } catch (InterruptedException e) {
444       LOG.info(e);
445     }
446 
447     // Now try recovering the log, like the HMaster would do
448     final FileSystem recoveredFs = fs;
449     final Configuration rlConf = conf;
450 
451     class RecoverLogThread extends Thread {
452       public Exception exception = null;
453       public void run() {
454           try {
455             FSUtils.getInstance(fs, rlConf)
456               .recoverFileLease(recoveredFs, walPath, rlConf, null);
457           } catch (IOException e) {
458             exception = e;
459           }
460       }
461     }
462 
463     RecoverLogThread t = new RecoverLogThread();
464     t.start();
465     // Timeout after 60 sec. Without correct patches, would be an infinite loop
466     t.join(60 * 1000);
467     if(t.isAlive()) {
468       t.interrupt();
469       throw new Exception("Timed out waiting for WAL.recoverLog()");
470     }
471 
472     if (t.exception != null)
473       throw t.exception;
474 
475     // Make sure you can read all the content
476     WAL.Reader reader = wals.createReader(fs, walPath);
477     int count = 0;
478     WAL.Entry entry = new WAL.Entry();
479     while (reader.next(entry) != null) {
480       count++;
481       assertTrue("Should be one KeyValue per WALEdit",
482                   entry.getEdit().getCells().size() == 1);
483     }
484     assertEquals(total, count);
485     reader.close();
486 
487     // Reset the lease period
488     setLeasePeriod.invoke(cluster, new Object[]{new Long(60000), new Long(3600000)});
489   }
490 
491   /**
492    * Tests that we can write out an edit, close, and then read it back in again.
493    * @throws IOException
494    */
495   @Test
496   public void testEditAdd() throws IOException {
497     final int COL_COUNT = 10;
498     final HTableDescriptor htd =
499         new HTableDescriptor(TableName.valueOf("tablename")).addFamily(new HColumnDescriptor(
500             "column"));
501     final byte [] row = Bytes.toBytes("row");
502     WAL.Reader reader = null;
503     try {
504       final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
505 
506       // Write columns named 1, 2, 3, etc. and then values of single byte
507       // 1, 2, 3...
508       long timestamp = System.currentTimeMillis();
509       WALEdit cols = new WALEdit();
510       for (int i = 0; i < COL_COUNT; i++) {
511         cols.add(new KeyValue(row, Bytes.toBytes("column"),
512             Bytes.toBytes(Integer.toString(i)),
513           timestamp, new byte[] { (byte)(i + '0') }));
514       }
515       HRegionInfo info = new HRegionInfo(htd.getTableName(),
516         row,Bytes.toBytes(Bytes.toString(row) + "1"), false);
517       final WAL log = wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace());
518 
519       final long txid = log.append(htd, info,
520         new WALKey(info.getEncodedNameAsBytes(), htd.getTableName(), System.currentTimeMillis(),
521             mvcc),
522         cols, true);
523       log.sync(txid);
524       log.startCacheFlush(info.getEncodedNameAsBytes(), htd.getFamiliesKeys());
525       log.completeCacheFlush(info.getEncodedNameAsBytes());
526       log.shutdown();
527       Path filename = DefaultWALProvider.getCurrentFileName(log);
528       // Now open a reader on the log and assert append worked.
529       reader = wals.createReader(fs, filename);
530       // Above we added all columns on a single row so we only read one
531       // entry in the below... thats why we have '1'.
532       for (int i = 0; i < 1; i++) {
533         WAL.Entry entry = reader.next(null);
534         if (entry == null) break;
535         WALKey key = entry.getKey();
536         WALEdit val = entry.getEdit();
537         assertTrue(Bytes.equals(info.getEncodedNameAsBytes(), key.getEncodedRegionName()));
538         assertTrue(htd.getTableName().equals(key.getTablename()));
539         Cell cell = val.getCells().get(0);
540         assertTrue(Bytes.equals(row, cell.getRow()));
541         assertEquals((byte)(i + '0'), cell.getValue()[0]);
542         System.out.println(key + " " + val);
543       }
544     } finally {
545       if (reader != null) {
546         reader.close();
547       }
548     }
549   }
550 
551   /**
552    * @throws IOException
553    */
554   @Test
555   public void testAppend() throws IOException {
556     final int COL_COUNT = 10;
557     final HTableDescriptor htd =
558         new HTableDescriptor(TableName.valueOf("tablename")).addFamily(new HColumnDescriptor(
559             "column"));
560     final byte [] row = Bytes.toBytes("row");
561     WAL.Reader reader = null;
562     final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
563     try {
564       // Write columns named 1, 2, 3, etc. and then values of single byte
565       // 1, 2, 3...
566       long timestamp = System.currentTimeMillis();
567       WALEdit cols = new WALEdit();
568       for (int i = 0; i < COL_COUNT; i++) {
569         cols.add(new KeyValue(row, Bytes.toBytes("column"),
570           Bytes.toBytes(Integer.toString(i)),
571           timestamp, new byte[] { (byte)(i + '0') }));
572       }
573       HRegionInfo hri = new HRegionInfo(htd.getTableName(),
574           HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
575       final WAL log = wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());
576       final long txid = log.append(htd, hri,
577         new WALKey(hri.getEncodedNameAsBytes(), htd.getTableName(), System.currentTimeMillis(),
578             mvcc),
579         cols, true);
580       log.sync(txid);
581       log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getFamiliesKeys());
582       log.completeCacheFlush(hri.getEncodedNameAsBytes());
583       log.shutdown();
584       Path filename = DefaultWALProvider.getCurrentFileName(log);
585       // Now open a reader on the log and assert append worked.
586       reader = wals.createReader(fs, filename);
587       WAL.Entry entry = reader.next();
588       assertEquals(COL_COUNT, entry.getEdit().size());
589       int idx = 0;
590       for (Cell val : entry.getEdit().getCells()) {
591         assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
592           entry.getKey().getEncodedRegionName()));
593         assertTrue(htd.getTableName().equals(entry.getKey().getTablename()));
594         assertTrue(Bytes.equals(row, val.getRow()));
595         assertEquals((byte)(idx + '0'), val.getValue()[0]);
596         System.out.println(entry.getKey() + " " + val);
597         idx++;
598       }
599     } finally {
600       if (reader != null) {
601         reader.close();
602       }
603     }
604   }
605 
606   /**
607    * Test that we can visit entries before they are appended
608    * @throws Exception
609    */
610   @Test
611   public void testVisitors() throws Exception {
612     final int COL_COUNT = 10;
613     final TableName tableName =
614         TableName.valueOf("tablename");
615     final byte [] row = Bytes.toBytes("row");
616     final DumbWALActionsListener visitor = new DumbWALActionsListener();
617     final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
618     long timestamp = System.currentTimeMillis();
619     HTableDescriptor htd = new HTableDescriptor();
620     htd.addFamily(new HColumnDescriptor("column"));
621 
622     HRegionInfo hri = new HRegionInfo(tableName,
623         HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
624     final WAL log = wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());
625     log.registerWALActionsListener(visitor);
626     for (int i = 0; i < COL_COUNT; i++) {
627       WALEdit cols = new WALEdit();
628       cols.add(new KeyValue(row, Bytes.toBytes("column"),
629           Bytes.toBytes(Integer.toString(i)),
630           timestamp, new byte[]{(byte) (i + '0')}));
631       log.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), tableName,
632           System.currentTimeMillis(), mvcc), cols, true);
633     }
634     log.sync();
635     assertEquals(COL_COUNT, visitor.increments);
636     log.unregisterWALActionsListener(visitor);
637     WALEdit cols = new WALEdit();
638     cols.add(new KeyValue(row, Bytes.toBytes("column"),
639         Bytes.toBytes(Integer.toString(11)),
640         timestamp, new byte[]{(byte) (11 + '0')}));
641     log.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), tableName,
642         System.currentTimeMillis(), mvcc), cols, true);
643     log.sync();
644     assertEquals(COL_COUNT, visitor.increments);
645   }
646 
647   /**
648    * A loaded WAL coprocessor won't break existing WAL test cases.
649    */
650   @Test
651   public void testWALCoprocessorLoaded() throws Exception {
652     // test to see whether the coprocessor is loaded or not.
653     WALCoprocessorHost host = wals.getWAL(UNSPECIFIED_REGION, null).getCoprocessorHost();
654     Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
655     assertNotNull(c);
656   }
657 
658   /**
659    * @throws IOException
660    */
661   @Test
662   public void testReadLegacyLog() throws IOException {
663     final int columnCount = 5;
664     final int recordCount = 5;
665     final TableName tableName =
666         TableName.valueOf("tablename");
667     final byte[] row = Bytes.toBytes("row");
668     long timestamp = System.currentTimeMillis();
669     Path path = new Path(dir, "tempwal");
670     SequenceFileLogWriter sflw = null;
671     WAL.Reader reader = null;
672     try {
673       HRegionInfo hri = new HRegionInfo(tableName,
674           HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
675       HTableDescriptor htd = new HTableDescriptor(tableName);
676       fs.mkdirs(dir);
677       // Write log in pre-PB format.
678       sflw = new SequenceFileLogWriter();
679       sflw.init(fs, path, conf, false);
680       for (int i = 0; i < recordCount; ++i) {
681         WALKey key = new HLogKey(
682             hri.getEncodedNameAsBytes(), tableName, i, timestamp, HConstants.DEFAULT_CLUSTER_ID);
683         WALEdit edit = new WALEdit();
684         for (int j = 0; j < columnCount; ++j) {
685           if (i == 0) {
686             htd.addFamily(new HColumnDescriptor("column" + j));
687           }
688           String value = i + "" + j;
689           edit.add(new KeyValue(row, row, row, timestamp, Bytes.toBytes(value)));
690         }
691         sflw.append(new WAL.Entry(key, edit));
692       }
693       sflw.sync(false);
694       sflw.close();
695 
696       // Now read the log using standard means.
697       reader = wals.createReader(fs, path);
698       assertTrue(reader instanceof SequenceFileLogReader);
699       for (int i = 0; i < recordCount; ++i) {
700         WAL.Entry entry = reader.next();
701         assertNotNull(entry);
702         assertEquals(columnCount, entry.getEdit().size());
703         assertArrayEquals(hri.getEncodedNameAsBytes(), entry.getKey().getEncodedRegionName());
704         assertEquals(tableName, entry.getKey().getTablename());
705         int idx = 0;
706         for (Cell val : entry.getEdit().getCells()) {
707           assertTrue(Bytes.equals(row, val.getRow()));
708           String value = i + "" + idx;
709           assertArrayEquals(Bytes.toBytes(value), val.getValue());
710           idx++;
711         }
712       }
713       WAL.Entry entry = reader.next();
714       assertNull(entry);
715     } finally {
716       if (sflw != null) {
717         sflw.close();
718       }
719       if (reader != null) {
720         reader.close();
721       }
722     }
723   }
724 
725   @Test
726   public void testReaderClosedOnBadCodec() throws IOException {
727     // Create our own Configuration and WALFactory to avoid breaking other test methods
728     Configuration confWithCodec = new Configuration(conf);
729     confWithCodec.setClass(
730         WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, BrokenWALCellCodec.class, Codec.class);
731     WALFactory customFactory = new WALFactory(confWithCodec, null, currentTest.getMethodName());
732 
733     // Hack a Proxy over the FileSystem so that we can track the InputStreams opened by
734     // the FileSystem and know if close() was called on those InputStreams.
735     final List<InputStreamProxy> openedReaders = new ArrayList<>();
736     FileSystemProxy proxyFs = new FileSystemProxy(fs) {
737       @Override
738       public FSDataInputStream open(Path p) throws IOException {
739         InputStreamProxy is = new InputStreamProxy(super.open(p));
740         openedReaders.add(is);
741         return is;
742       }
743 
744       @Override
745       public FSDataInputStream open(Path p, int blockSize) throws IOException {
746         InputStreamProxy is = new InputStreamProxy(super.open(p, blockSize));
747         openedReaders.add(is);
748         return is;
749       }
750     };
751 
752     final HTableDescriptor htd =
753         new HTableDescriptor(TableName.valueOf(currentTest.getMethodName()));
754     htd.addFamily(new HColumnDescriptor(Bytes.toBytes("column")));
755 
756     HRegionInfo hri = new HRegionInfo(htd.getTableName(),
757         HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
758 
759     NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
760     for (HColumnDescriptor colDesc : htd.getColumnFamilies()) {
761       scopes.put(colDesc.getName(), 0);
762     }
763     byte[] row = Bytes.toBytes("row");
764     WAL.Reader reader = null;
765     final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);
766     try {
767       // Write one column in one edit.
768       WALEdit cols = new WALEdit();
769       cols.add(new KeyValue(row, Bytes.toBytes("column"),
770         Bytes.toBytes("0"), System.currentTimeMillis(), new byte[] { 0 }));
771       final WAL log = customFactory.getWAL(
772           hri.getEncodedNameAsBytes(), hri.getTable().getNamespace());
773       final long txid = log.append(htd, hri,
774         new WALKey(hri.getEncodedNameAsBytes(), htd.getTableName(), System.currentTimeMillis(),
775             mvcc),
776         cols, true);
777       // Sync the edit to the WAL
778       log.sync(txid);
779       log.startCacheFlush(hri.getEncodedNameAsBytes(), htd.getFamiliesKeys());
780       log.completeCacheFlush(hri.getEncodedNameAsBytes());
781       log.shutdown();
782 
783       // Inject our failure, object is constructed via reflection.
784       BrokenWALCellCodec.THROW_FAILURE_ON_INIT.set(true);
785 
786       // Now open a reader on the log which will throw an exception when
787       // we try to instantiate the custom Codec.
788       Path filename = DefaultWALProvider.getCurrentFileName(log);
789       try {
790         reader = customFactory.createReader(proxyFs, filename);
791         fail("Expected to see an exception when creating WAL reader");
792       } catch (Exception e) {
793         // Expected that we get an exception
794       }
795       // We should have exactly one reader
796       assertEquals(1, openedReaders.size());
797       // And that reader should be closed.
798       int numNotClosed = 0;
799       for (InputStreamProxy openedReader : openedReaders) {
800         if (!openedReader.isClosed.get()) {
801           numNotClosed++;
802         }
803       }
804       assertEquals("Should not find any open readers", 0, numNotClosed);
805     } finally {
806       if (reader != null) {
807         reader.close();
808       }
809     }
810   }
811 
812   /**
813    * A proxy around FSDataInputStream which can report if close() was called.
814    */
815   private static class InputStreamProxy extends FSDataInputStream {
816     private final InputStream real;
817     private final AtomicBoolean isClosed = new AtomicBoolean(false);
818 
819     public InputStreamProxy(InputStream real) {
820       super(real);
821       this.real = real;
822     }
823 
824     @Override
825     public void close() throws IOException {
826       isClosed.set(true);
827       real.close();
828     }
829   }
830 
831   /**
832    * A custom WALCellCodec in which we can inject failure.
833    */
834   public static class BrokenWALCellCodec extends WALCellCodec {
835     static final AtomicBoolean THROW_FAILURE_ON_INIT = new AtomicBoolean(false);
836 
837     static void maybeInjectFailure() {
838       if (THROW_FAILURE_ON_INIT.get()) {
839         throw new RuntimeException("Injected instantiation exception");
840       }
841     }
842 
843     public BrokenWALCellCodec() {
844       super();
845       maybeInjectFailure();
846     }
847 
848     public BrokenWALCellCodec(Configuration conf, CompressionContext compression) {
849       super(conf, compression);
850       maybeInjectFailure();
851     }
852   }
853 
854   static class DumbWALActionsListener extends WALActionsListener.Base {
855     int increments = 0;
856 
857     @Override
858     public void visitLogEntryBeforeWrite(HRegionInfo info, WALKey logKey,
859                                          WALEdit logEdit) {
860       increments++;
861     }
862 
863     @Override
864     public void visitLogEntryBeforeWrite(HTableDescriptor htd, WALKey logKey, WALEdit logEdit) {
865       //To change body of implemented methods use File | Settings | File Templates.
866       increments++;
867     }
868   }
869 
  // NOTE(review): empty byte[] — presumably a placeholder region name used by tests
  // that do not target a specific region; usage is not visible in this chunk, confirm
  // at the call sites.
  private static final byte[] UNSPECIFIED_REGION = new byte[]{};
871 
872 }