/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */

19 package org.apache.hadoop.hbase.regionserver.wal;
20
21 import static org.junit.Assert.assertEquals;
22 import static org.junit.Assert.assertFalse;
23 import static org.junit.Assert.assertNotEquals;
24 import static org.junit.Assert.assertNotNull;
25 import static org.junit.Assert.assertTrue;
26 import static org.junit.Assert.fail;
27
28 import java.io.IOException;
29 import java.lang.reflect.Field;
30 import java.util.ArrayList;
31 import java.util.Comparator;
32 import java.util.List;
33 import java.util.Set;
34 import java.util.UUID;
35 import java.util.concurrent.CountDownLatch;
36 import java.util.concurrent.ExecutorService;
37 import java.util.concurrent.Executors;
38 import java.util.concurrent.TimeUnit;
39 import java.util.concurrent.atomic.AtomicBoolean;
40
41 import org.apache.commons.lang.mutable.MutableBoolean;
42 import org.apache.commons.logging.Log;
43 import org.apache.commons.logging.LogFactory;
44 import org.apache.hadoop.conf.Configuration;
45 import org.apache.hadoop.fs.FileStatus;
46 import org.apache.hadoop.fs.FileSystem;
47 import org.apache.hadoop.fs.Path;
48 import org.apache.hadoop.hbase.CellScanner;
49 import org.apache.hadoop.hbase.Coprocessor;
50 import org.apache.hadoop.hbase.HBaseConfiguration;
51 import org.apache.hadoop.hbase.HBaseTestingUtility;
52 import org.apache.hadoop.hbase.HColumnDescriptor;
53 import org.apache.hadoop.hbase.HConstants;
54 import org.apache.hadoop.hbase.HRegionInfo;
55 import org.apache.hadoop.hbase.HTableDescriptor;
56 import org.apache.hadoop.hbase.KeyValue;
57 import org.apache.hadoop.hbase.TableName;
58 import org.apache.hadoop.hbase.Waiter;
59 import org.apache.hadoop.hbase.client.Get;
60 import org.apache.hadoop.hbase.client.Put;
61 import org.apache.hadoop.hbase.client.Result;
62 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
63 import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver;
64 import org.apache.hadoop.hbase.regionserver.HRegion;
65 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
66 import org.apache.hadoop.hbase.regionserver.Region;
67 import org.apache.hadoop.hbase.testclassification.MediumTests;
68 import org.apache.hadoop.hbase.util.Bytes;
69 import org.apache.hadoop.hbase.util.EnvironmentEdge;
70 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
71 import org.apache.hadoop.hbase.util.Threads;
72 import org.apache.hadoop.hbase.wal.DefaultWALProvider;
73 import org.apache.hadoop.hbase.wal.WAL;
74 import org.apache.hadoop.hbase.wal.WALKey;
75 import org.apache.hadoop.hbase.wal.WALProvider;
76 import org.junit.After;
77 import org.junit.AfterClass;
78 import org.junit.Before;
79 import org.junit.BeforeClass;
80 import org.junit.Rule;
81 import org.junit.Test;
82 import org.junit.experimental.categories.Category;
83 import org.junit.rules.TestName;
84
85
86
87
88 @Category(MediumTests.class)
89 public class TestFSHLog {
90 private static final Log LOG = LogFactory.getLog(TestFSHLog.class);
91
92 private static final long TEST_TIMEOUT_MS = 10000;
93
94 protected static Configuration conf;
95 protected static FileSystem fs;
96 protected static Path dir;
97 protected static Path rootDir;
98 protected static Path walRootDir;
99 protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
100
101 @Rule
102 public final TestName currentTest = new TestName();
103
104 @Before
105 public void setUp() throws Exception {
106 FileStatus[] entries = fs.listStatus(new Path("/"));
107 for (FileStatus dir : entries) {
108 fs.delete(dir.getPath(), true);
109 }
110 rootDir = TEST_UTIL.createRootDir();
111 walRootDir = TEST_UTIL.createWALRootDir();
112 dir = new Path(walRootDir, currentTest.getMethodName());
113 assertNotEquals(rootDir, walRootDir);
114 }
115
116 @After
117 public void tearDown() throws Exception {
118 }
119
120 @BeforeClass
121 public static void setUpBeforeClass() throws Exception {
122
123 TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
124
125 TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
126 TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
127 TEST_UTIL.getConfiguration().setInt("dfs.client.socket-timeout", 5000);
128
129
130 TEST_UTIL.getConfiguration()
131 .setInt("hbase.ipc.client.connect.max.retries", 1);
132 TEST_UTIL.getConfiguration().setInt(
133 "dfs.client.block.recovery.retries", 1);
134 TEST_UTIL.getConfiguration().setInt(
135 "hbase.ipc.client.connection.maxidletime", 500);
136 TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
137 SampleRegionWALObserver.class.getName());
138 TEST_UTIL.startMiniDFSCluster(3);
139
140 conf = TEST_UTIL.getConfiguration();
141 fs = TEST_UTIL.getDFSCluster().getFileSystem();
142 }
143
144 @AfterClass
145 public static void tearDownAfterClass() throws Exception {
146 fs.delete(rootDir, true);
147 fs.delete(walRootDir, true);
148 TEST_UTIL.shutdownMiniCluster();
149 }
150
151
152
153
154 @Test
155 public void testWALCoprocessorLoaded() throws Exception {
156
157 FSHLog log = null;
158 try {
159 log = new FSHLog(fs, walRootDir, dir.toString(),
160 HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null);
161 WALCoprocessorHost host = log.getCoprocessorHost();
162 Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName());
163 assertNotNull(c);
164 } finally {
165 if (log != null) {
166 log.close();
167 }
168 }
169 }
170
171
172
173
174 @Test
175 public void testDeadlockWithSyncOverwrites() throws Exception {
176 final CountDownLatch blockBeforeSafePoint = new CountDownLatch(1);
177
178 class FailingWriter implements WALProvider.Writer {
179 @Override public void sync(boolean forceSync) throws IOException {
180 throw new IOException("Injected failure..");
181 }
182
183 @Override public void append(WAL.Entry entry) throws IOException {
184 }
185
186 @Override public long getLength() throws IOException {
187 return 0;
188 }
189 @Override public void close() throws IOException {
190 }
191 }
192
193
194
195
196 class CustomFSHLog extends FSHLog {
197 public CustomFSHLog(FileSystem fs, Path rootDir, String logDir, String archiveDir,
198 Configuration conf, List<WALActionsListener> listeners, boolean failIfWALExists,
199 String prefix, String suffix) throws IOException {
200 super(fs, rootDir, logDir, archiveDir, conf, listeners, failIfWALExists, prefix, suffix);
201 }
202
203 @Override
204 protected void beforeWaitOnSafePoint() {
205 try {
206 assertTrue(blockBeforeSafePoint.await(TEST_TIMEOUT_MS, TimeUnit.MILLISECONDS));
207 } catch (InterruptedException e) {
208 throw new RuntimeException(e);
209 }
210 }
211 }
212
213 try (FSHLog log = new CustomFSHLog(fs, walRootDir, dir.toString(),
214 HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null)) {
215 log.setWriter(new FailingWriter());
216 Field ringBufferEventHandlerField =
217 FSHLog.class.getDeclaredField("ringBufferEventHandler");
218 ringBufferEventHandlerField.setAccessible(true);
219 FSHLog.RingBufferEventHandler ringBufferEventHandler =
220 (FSHLog.RingBufferEventHandler) ringBufferEventHandlerField.get(log);
221
222 final FSHLog.SafePointZigZagLatch latch = ringBufferEventHandler.attainSafePoint();
223 try {
224 final SyncFuture future0 = log.publishSyncOnRingBuffer(null, false);
225
226 Waiter.waitFor(conf, TEST_TIMEOUT_MS, new Waiter.Predicate<Exception>() {
227 @Override
228 public boolean evaluate() throws Exception {
229 return future0.isDone();
230 }
231 });
232
233 SyncFuture future1 = log.publishSyncOnRingBuffer(null, false);
234 assertFalse(future1.isDone());
235
236 blockBeforeSafePoint.countDown();
237
238
239 Waiter.waitFor(conf, TEST_TIMEOUT_MS, new Waiter.Predicate<Exception>() {
240 @Override
241 public boolean evaluate() throws Exception {
242 return latch.isSafePointAttained();
243 }
244 });
245 } finally {
246
247 latch.releaseSafePoint();
248 }
249 }
250 }
251
252 protected void addEdits(WAL log,
253 HRegionInfo hri,
254 HTableDescriptor htd,
255 int times,
256 MultiVersionConcurrencyControl mvcc)
257 throws IOException {
258 final byte[] row = Bytes.toBytes("row");
259 for (int i = 0; i < times; i++) {
260 long timestamp = System.currentTimeMillis();
261 WALEdit cols = new WALEdit();
262 cols.add(new KeyValue(row, row, row, timestamp, row));
263 WALKey key = new WALKey(hri.getEncodedNameAsBytes(), htd.getTableName(),
264 WALKey.NO_SEQUENCE_ID, timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE,
265 HConstants.NO_NONCE, mvcc);
266 log.append(htd, hri, key, cols, true);
267 }
268 log.sync();
269 }
270
271
272
273
274
275
276 protected void flushRegion(WAL wal, byte[] regionEncodedName, Set<byte[]> flushedFamilyNames) {
277 wal.startCacheFlush(regionEncodedName, flushedFamilyNames);
278 wal.completeCacheFlush(regionEncodedName);
279 }
280
281
282
283
284
285
286 @Test
287 public void testWALComparator() throws Exception {
288 FSHLog wal1 = null;
289 FSHLog walMeta = null;
290 try {
291 wal1 = new FSHLog(fs, walRootDir, dir.toString(),
292 HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null, null);
293 LOG.debug("Log obtained is: " + wal1);
294 Comparator<Path> comp = wal1.LOG_NAME_COMPARATOR;
295 Path p1 = wal1.computeFilename(11);
296 Path p2 = wal1.computeFilename(12);
297
298 assertTrue(comp.compare(p1, p1) == 0);
299
300 assertTrue(comp.compare(p1, p2) < 0);
301 walMeta = new FSHLog(fs, walRootDir, dir.toString(),
302 HConstants.HREGION_OLDLOGDIR_NAME, conf, null, true, null,
303 DefaultWALProvider.META_WAL_PROVIDER_ID);
304 Comparator<Path> compMeta = walMeta.LOG_NAME_COMPARATOR;
305
306 Path p1WithMeta = walMeta.computeFilename(11);
307 Path p2WithMeta = walMeta.computeFilename(12);
308 assertTrue(compMeta.compare(p1WithMeta, p1WithMeta) == 0);
309 assertTrue(compMeta.compare(p1WithMeta, p2WithMeta) < 0);
310
311 boolean ex = false;
312 try {
313 comp.compare(p1WithMeta, p2);
314 } catch (IllegalArgumentException e) {
315 ex = true;
316 }
317 assertTrue("Comparator doesn't complain while checking meta log files", ex);
318 boolean exMeta = false;
319 try {
320 compMeta.compare(p1WithMeta, p2);
321 } catch (IllegalArgumentException e) {
322 exMeta = true;
323 }
324 assertTrue("Meta comparator doesn't complain while checking log files", exMeta);
325 } finally {
326 if (wal1 != null) {
327 wal1.close();
328 }
329 if (walMeta != null) {
330 walMeta.close();
331 }
332 }
333 }
334
335
336
337
338
339
340
341
342
343
344 @Test
345 public void testFindMemStoresEligibleForFlush() throws Exception {
346 LOG.debug("testFindMemStoresEligibleForFlush");
347 Configuration conf1 = HBaseConfiguration.create(conf);
348 conf1.setInt("hbase.regionserver.maxlogs", 1);
349 FSHLog wal = new FSHLog(fs, walRootDir, dir.toString(),
350 HConstants.HREGION_OLDLOGDIR_NAME, conf1, null, true, null, null);
351 HTableDescriptor t1 =
352 new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor("row"));
353 HTableDescriptor t2 =
354 new HTableDescriptor(TableName.valueOf("t2")).addFamily(new HColumnDescriptor("row"));
355 HRegionInfo hri1 =
356 new HRegionInfo(t1.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
357 HRegionInfo hri2 =
358 new HRegionInfo(t2.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
359
360 MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
361 try {
362 addEdits(wal, hri1, t1, 2, mvcc);
363 wal.rollWriter();
364
365 addEdits(wal, hri1, t1, 2, mvcc);
366 wal.rollWriter();
367
368 assertTrue(wal.getNumRolledLogFiles() == 2);
369
370
371
372 byte[][] regionsToFlush = wal.findRegionsToForceFlush();
373 assertEquals(1, regionsToFlush.length);
374 assertTrue(Bytes.equals(hri1.getEncodedNameAsBytes(), regionsToFlush[0]));
375
376 addEdits(wal, hri2, t2, 2, mvcc);
377
378 regionsToFlush = wal.findRegionsToForceFlush();
379 assertEquals(regionsToFlush.length, 1);
380 assertTrue(Bytes.equals(hri1.getEncodedNameAsBytes(), regionsToFlush[0]));
381
382
383 flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getFamiliesKeys());
384 wal.rollWriter();
385
386 assertEquals(1, wal.getNumRolledLogFiles());
387
388 flushRegion(wal, hri2.getEncodedNameAsBytes(), t2.getFamiliesKeys());
389 wal.rollWriter(true);
390
391 assertEquals(0, wal.getNumRolledLogFiles());
392
393 addEdits(wal, hri1, t1, 2, mvcc);
394 addEdits(wal, hri2, t2, 2, mvcc);
395 wal.rollWriter();
396
397 assertEquals(1, wal.getNumRolledLogFiles());
398 addEdits(wal, hri1, t1, 2, mvcc);
399 wal.rollWriter();
400
401
402 regionsToFlush = wal.findRegionsToForceFlush();
403 assertEquals(2, regionsToFlush.length);
404
405 flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getFamiliesKeys());
406 flushRegion(wal, hri2.getEncodedNameAsBytes(), t2.getFamiliesKeys());
407 wal.rollWriter(true);
408 assertEquals(0, wal.getNumRolledLogFiles());
409
410 addEdits(wal, hri1, t1, 2, mvcc);
411
412 wal.startCacheFlush(hri1.getEncodedNameAsBytes(), t1.getFamiliesKeys());
413 wal.rollWriter();
414 wal.completeCacheFlush(hri1.getEncodedNameAsBytes());
415 assertEquals(1, wal.getNumRolledLogFiles());
416 } finally {
417 if (wal != null) {
418 wal.close();
419 }
420 }
421 }
422
423 @Test(expected=IOException.class)
424 public void testFailedToCreateWALIfParentRenamed() throws IOException {
425 final String name = "testFailedToCreateWALIfParentRenamed";
426 FSHLog log = new FSHLog(fs, walRootDir, name, HConstants.HREGION_OLDLOGDIR_NAME,
427 conf, null, true, null, null);
428 long filenum = System.currentTimeMillis();
429 Path path = log.computeFilename(filenum);
430 log.createWriterInstance(path);
431 Path parent = path.getParent();
432 path = log.computeFilename(filenum + 1);
433 Path newPath = new Path(parent.getParent(), parent.getName() + "-splitting");
434 fs.rename(parent, newPath);
435 log.createWriterInstance(path);
436 fail("It should fail to create the new WAL");
437 }
438
439
440
441
442
443
444
445
446
447 @Test
448 public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
449 String testName = "testFlushSequenceIdIsGreaterThanAllEditsInHFile";
450 final TableName tableName = TableName.valueOf(testName);
451 final HRegionInfo hri = new HRegionInfo(tableName);
452 final byte[] rowName = tableName.getName();
453 final HTableDescriptor htd = new HTableDescriptor(tableName);
454 htd.addFamily(new HColumnDescriptor("f"));
455 HRegion r = HRegion.createHRegion(hri, rootDir,
456 TEST_UTIL.getConfiguration(), htd);
457 HRegion.closeHRegion(r);
458 final int countPerFamily = 10;
459 final MutableBoolean goslow = new MutableBoolean(false);
460
461 FSHLog wal = new FSHLog(FileSystem.get(conf), walRootDir,
462 testName, conf) {
463 @Override
464 void atHeadOfRingBufferEventHandlerAppend() {
465 if (goslow.isTrue()) {
466 Threads.sleep(100);
467 LOG.debug("Sleeping before appending 100ms");
468 }
469 super.atHeadOfRingBufferEventHandlerAppend();
470 }
471 };
472 HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(),
473 TEST_UTIL.getTestFileSystem(), rootDir, hri, htd, wal);
474 EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
475 try {
476 List<Put> puts = null;
477 for (HColumnDescriptor hcd: htd.getFamilies()) {
478 puts =
479 TestWALReplay.addRegionEdits(rowName, hcd.getName(), countPerFamily, ee, region, "x");
480 }
481
482
483 final Get g = new Get(rowName);
484 Result result = region.get(g);
485 assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
486
487
488 WALEdit edits = new WALEdit();
489 for (Put p: puts) {
490 CellScanner cs = p.cellScanner();
491 while (cs.advance()) {
492 edits.add(cs.current());
493 }
494 }
495
496 List<UUID> clusterIds = new ArrayList<UUID>();
497 clusterIds.add(UUID.randomUUID());
498
499 goslow.setValue(true);
500 for (int i = 0; i < countPerFamily; i++) {
501 final HRegionInfo info = region.getRegionInfo();
502 final WALKey logkey = new WALKey(info.getEncodedNameAsBytes(), tableName,
503 System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC());
504 wal.append(htd, info, logkey, edits, true);
505 region.getMVCC().completeAndWait(logkey.getWriteEntry());
506 }
507 region.flush(true);
508
509 long currentSequenceId = region.getSequenceId();
510
511 goslow.setValue(false);
512 synchronized (goslow) {
513 goslow.notifyAll();
514 }
515 assertTrue(currentSequenceId >= region.getSequenceId());
516 } finally {
517 region.close(true);
518 wal.close();
519 }
520 }
521
522 @Test
523 public void testSyncRunnerIndexOverflow() throws IOException, NoSuchFieldException,
524 SecurityException, IllegalArgumentException, IllegalAccessException {
525 final String name = "testSyncRunnerIndexOverflow";
526 FSHLog log =
527 new FSHLog(fs, walRootDir, name, HConstants.HREGION_OLDLOGDIR_NAME, conf,
528 null, true, null, null);
529 try {
530 Field ringBufferEventHandlerField = FSHLog.class.getDeclaredField("ringBufferEventHandler");
531 ringBufferEventHandlerField.setAccessible(true);
532 FSHLog.RingBufferEventHandler ringBufferEventHandler =
533 (FSHLog.RingBufferEventHandler) ringBufferEventHandlerField.get(log);
534 Field syncRunnerIndexField =
535 FSHLog.RingBufferEventHandler.class.getDeclaredField("syncRunnerIndex");
536 syncRunnerIndexField.setAccessible(true);
537 syncRunnerIndexField.set(ringBufferEventHandler, Integer.MAX_VALUE - 1);
538 HTableDescriptor htd =
539 new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor("row"));
540 HRegionInfo hri =
541 new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
542 MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
543 for (int i = 0; i < 10; i++) {
544 addEdits(log, hri, htd, 1, mvcc);
545 }
546 } finally {
547 log.close();
548 }
549 }
550
551
552
553
554 @Test (timeout = 30000)
555 public void testUnflushedSeqIdTracking() throws IOException, InterruptedException {
556 final String name = "testSyncRunnerIndexOverflow";
557 final byte[] b = Bytes.toBytes("b");
558
559 final AtomicBoolean startHoldingForAppend = new AtomicBoolean(false);
560 final CountDownLatch holdAppend = new CountDownLatch(1);
561 final CountDownLatch flushFinished = new CountDownLatch(1);
562 final CountDownLatch putFinished = new CountDownLatch(1);
563
564 try (FSHLog log =
565 new FSHLog(fs, walRootDir, name, HConstants.HREGION_OLDLOGDIR_NAME, conf,
566 null, true, null, null)) {
567
568 log.registerWALActionsListener(new WALActionsListener.Base() {
569 @Override
570 public void visitLogEntryBeforeWrite(HTableDescriptor htd, WALKey logKey, WALEdit logEdit)
571 throws IOException {
572 if (startHoldingForAppend.get()) {
573 try {
574 holdAppend.await();
575 } catch (InterruptedException e) {
576 LOG.error(e);
577 }
578 }
579 }
580 });
581
582
583 HTableDescriptor htd =
584 new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor(b));
585 HRegionInfo hri =
586 new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
587
588 final HRegion region = TEST_UTIL.createLocalHRegion(hri, conf, htd, log);
589 ExecutorService exec = Executors.newFixedThreadPool(2);
590
591
592 region.put(new Put(b).addColumn(b, b,b));
593
594 startHoldingForAppend.set(true);
595 exec.submit(new Runnable() {
596 @Override
597 public void run() {
598 try {
599 region.put(new Put(b).addColumn(b, b,b));
600 putFinished.countDown();
601 } catch (IOException e) {
602 LOG.error(e);
603 }
604 }
605 });
606
607
608 Threads.sleep(3000);
609
610 exec.submit(new Runnable() {
611 @Override
612 public void run() {
613 try {
614 Region.FlushResult flushResult = region.flush(true);
615 LOG.info("Flush result:" + flushResult.getResult());
616 LOG.info("Flush succeeded:" + flushResult.isFlushSucceeded());
617 flushFinished.countDown();
618 } catch (IOException e) {
619 LOG.error(e);
620 }
621 }
622 });
623
624
625
626 Threads.sleep(3000);
627
628
629 holdAppend.countDown();
630 putFinished.await();
631 flushFinished.await();
632
633
634 assertEquals("Region did not flush?", 1, region.getStoreFileList(new byte[][]{b}).size());
635
636
637 long seqId = log.getEarliestMemstoreSeqNum(hri.getEncodedNameAsBytes());
638 assertEquals("Found seqId for the region which is already flushed",
639 HConstants.NO_SEQNUM, seqId);
640
641 region.close();
642 }
643 }
644 }