1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18 package org.apache.hadoop.hbase.replication;
19
20 import static org.junit.Assert.assertArrayEquals;
21 import static org.junit.Assert.assertEquals;
22 import static org.junit.Assert.assertNotNull;
23 import static org.junit.Assert.fail;
24
25 import com.google.protobuf.ServiceException;
26
27 import java.io.Closeable;
28 import java.io.IOException;
29 import java.util.Arrays;
30 import java.util.List;
31 import java.util.Random;
32 import java.util.concurrent.CountDownLatch;
33
34 import org.apache.commons.logging.Log;
35 import org.apache.commons.logging.LogFactory;
36 import org.apache.hadoop.conf.Configuration;
37 import org.apache.hadoop.fs.FileSystem;
38 import org.apache.hadoop.fs.Path;
39 import org.apache.hadoop.hbase.Cell;
40 import org.apache.hadoop.hbase.ClusterStatus;
41 import org.apache.hadoop.hbase.HBaseConfiguration;
42 import org.apache.hadoop.hbase.HBaseTestingUtility;
43 import org.apache.hadoop.hbase.HColumnDescriptor;
44 import org.apache.hadoop.hbase.HConstants;
45 import org.apache.hadoop.hbase.HTableDescriptor;
46 import org.apache.hadoop.hbase.KeyValue;
47 import org.apache.hadoop.hbase.MiniHBaseCluster;
48 import org.apache.hadoop.hbase.ServerLoad;
49 import org.apache.hadoop.hbase.ServerName;
50 import org.apache.hadoop.hbase.TableName;
51 import org.apache.hadoop.hbase.Waiter;
52 import org.apache.hadoop.hbase.client.Admin;
53 import org.apache.hadoop.hbase.client.Delete;
54 import org.apache.hadoop.hbase.client.Durability;
55 import org.apache.hadoop.hbase.client.Get;
56 import org.apache.hadoop.hbase.client.HBaseAdmin;
57 import org.apache.hadoop.hbase.client.HTable;
58 import org.apache.hadoop.hbase.client.Put;
59 import org.apache.hadoop.hbase.client.Result;
60 import org.apache.hadoop.hbase.client.Table;
61 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
62 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
63 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
64 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
65 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
66 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
67 import org.apache.hadoop.hbase.regionserver.HRegion;
68 import org.apache.hadoop.hbase.regionserver.HRegionServer;
69 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
70 import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
71 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
72 import org.apache.hadoop.hbase.replication.regionserver.TestSourceFSConfigurationProvider;
73 import org.apache.hadoop.hbase.testclassification.LargeTests;
74 import org.apache.hadoop.hbase.util.Bytes;
75 import org.apache.hadoop.hbase.util.HFileTestUtil;
76 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
77 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
78 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
79 import org.junit.After;
80 import org.junit.Before;
81 import org.junit.Test;
82 import org.junit.experimental.categories.Category;
83
84 @Category(LargeTests.class)
85 public class TestMasterReplication {
86
87 private static final Log LOG = LogFactory.getLog(TestReplicationBase.class);
88
89 private Configuration baseConfiguration;
90
91 private HBaseTestingUtility[] utilities;
92 private Configuration[] configurations;
93 private MiniZooKeeperCluster miniZK;
94
95 private static final long SLEEP_TIME = 1000;
96 private static final int NB_RETRIES = 120;
97
98 private static final TableName tableName = TableName.valueOf("test");
99 private static final byte[] famName = Bytes.toBytes("f");
100 private static final byte[] famName1 = Bytes.toBytes("f1");
101 private static final byte[] row = Bytes.toBytes("row");
102 private static final byte[] row1 = Bytes.toBytes("row1");
103 private static final byte[] row2 = Bytes.toBytes("row2");
104 private static final byte[] row3 = Bytes.toBytes("row3");
105 private static final byte[] row4 = Bytes.toBytes("row4");
106 private static final byte[] noRepfamName = Bytes.toBytes("norep");
107
108 private static final byte[] count = Bytes.toBytes("count");
109 private static final byte[] put = Bytes.toBytes("put");
110 private static final byte[] delete = Bytes.toBytes("delete");
111
112 private HTableDescriptor table;
113
  @Before
  public void setUp() throws Exception {
    // Build the base configuration that every mini cluster inherits.
    baseConfiguration = HBaseConfiguration.create();

    // Small WAL blocks and source capacity so replication ships edits quickly.
    baseConfiguration.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20);
    baseConfiguration.setInt("replication.source.size.capacity", 1024);
    baseConfiguration.setLong("replication.source.sleepforretries", 100);
    baseConfiguration.setInt("hbase.regionserver.maxlogs", 10);
    baseConfiguration.setLong("hbase.master.logcleaner.ttl", 10);
    baseConfiguration.setBoolean(HConstants.REPLICATION_ENABLE_KEY,
        HConstants.REPLICATION_ENABLE_DEFAULT);
    // Enable bulk-load (HFile) replication, exercised by the HFile tests below.
    baseConfiguration.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);
    baseConfiguration.set("hbase.replication.source.fs.conf.provider",
        TestSourceFSConfigurationProvider.class.getCanonicalName());
    baseConfiguration.set(HConstants.REPLICATION_CLUSTER_ID, "12345");
    baseConfiguration.setBoolean("dfs.support.append", true);
    baseConfiguration.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    // Install the counting coprocessor on every user region so tests can
    // verify how many puts/deletes each cluster actually applied.
    baseConfiguration.setStrings(
        CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
        CoprocessorCounter.class.getName());

    // Test table: two globally-replicated families plus one non-replicated one.
    table = new HTableDescriptor(tableName);
    HColumnDescriptor fam = new HColumnDescriptor(famName);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    fam = new HColumnDescriptor(famName1);
    fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    table.addFamily(fam);
    fam = new HColumnDescriptor(noRepfamName);
    table.addFamily(fam);
  }
146
147
148
149
150
151
152
153 @Test(timeout = 300000)
154 public void testCyclicReplication1() throws Exception {
155 LOG.info("testSimplePutDelete");
156 int numClusters = 2;
157 Table[] htables = null;
158 try {
159 htables = setUpClusterTablesAndPeers(numClusters);
160
161 int[] expectedCounts = new int[] { 2, 2 };
162
163
164
165 putAndWait(row, famName, htables[0], htables[1]);
166 putAndWait(row1, famName, htables[1], htables[0]);
167 validateCounts(htables, put, expectedCounts);
168
169 deleteAndWait(row, htables[0], htables[1]);
170 deleteAndWait(row1, htables[1], htables[0]);
171 validateCounts(htables, delete, expectedCounts);
172 } finally {
173 close(htables);
174 shutDownMiniClusters();
175 }
176 }
177
178
179
180
181
182
183 @Test(timeout = 300000)
184 public void testLoopedReplication() throws Exception {
185 LOG.info("testLoopedReplication");
186 startMiniClusters(1);
187 createTableOnClusters(table);
188 addPeer("1", 0, 0);
189 Thread.sleep(SLEEP_TIME);
190
191
192 final ServerName rsName = utilities[0].getHBaseCluster().getRegionServer(0).getServerName();
193 Waiter.waitFor(baseConfiguration, 10000, new Waiter.Predicate<Exception>() {
194 @Override
195 public boolean evaluate() throws Exception {
196 ClusterStatus clusterStatus = utilities[0].getHBaseAdmin().getClusterStatus();
197 ServerLoad serverLoad = clusterStatus.getLoad(rsName);
198 List<ReplicationLoadSource> replicationLoadSourceList =
199 serverLoad.getReplicationLoadSourceList();
200 return replicationLoadSourceList.size() == 0;
201 }
202 });
203
204 Table[] htables = getHTablesOnClusters(tableName);
205 putAndWait(row, famName, htables[0], htables[0]);
206 rollWALAndWait(utilities[0], table.getTableName(), row);
207 ZooKeeperWatcher zkw = utilities[0].getZooKeeperWatcher();
208 String queuesZnode =
209 ZKUtil.joinZNode(zkw.baseZNode, ZKUtil.joinZNode("replication", "rs"));
210 List<String> listChildrenNoWatch =
211 ZKUtil.listChildrenNoWatch(zkw, ZKUtil.joinZNode(queuesZnode, rsName.toString()));
212 assertEquals(0, listChildrenNoWatch.size());
213 }
214
215
216
217
218
  /**
   * Bulk-loaded HFiles must replicate in both directions between two
   * master-master clusters: a load on cluster 0 shows up on cluster 1 and
   * vice versa, without looping back and double-counting rows.
   */
  @Test(timeout = 300000)
  public void testHFileCyclicReplication() throws Exception {
    LOG.info("testHFileCyclicReplication");
    int numClusters = 2;
    Table[] htables = null;
    try {
      htables = setUpClusterTablesAndPeers(numClusters);

      // Load two HFiles (two key ranges) of 100 rows each into cluster 0 and
      // expect both clusters to end up with the same row count.
      byte[][][] hfileRanges =
          new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
              new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, };
      int numOfRows = 100;
      int[] expectedCounts =
          new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows };

      loadAndValidateHFileReplication("testHFileCyclicReplication_01", 0, new int[] { 1 }, row,
          famName, htables, hfileRanges, numOfRows, expectedCounts, true);

      // Now load disjoint ranges into cluster 1; counts accumulate on top of
      // the previous load on both sides.
      hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("gggg"), Bytes.toBytes("iiii") },
          new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, };
      numOfRows = 200;
      int[] newExpectedCounts = new int[] { hfileRanges.length * numOfRows + expectedCounts[0],
          hfileRanges.length * numOfRows + expectedCounts[1] };

      loadAndValidateHFileReplication("testHFileCyclicReplication_10", 1, new int[] { 0 }, row,
          famName, htables, hfileRanges, numOfRows, newExpectedCounts, true);

    } finally {
      close(htables);
      shutDownMiniClusters();
    }
  }
255
256 private Table[] setUpClusterTablesAndPeers(int numClusters) throws Exception {
257 Table[] htables;
258 startMiniClusters(numClusters);
259 createTableOnClusters(table);
260
261 htables = getHTablesOnClusters(tableName);
262
263 addPeer("1", 0, 1);
264 addPeer("1", 1, 0);
265 return htables;
266 }
267
268
269
270
271
272
273
274
  /**
   * Three clusters replicating in a ring (0 -> 1 -> 2 -> 0): every edit must
   * travel the whole ring exactly once (each cluster applies it three times in
   * total across local + replicated hops, never more), and edits made while a
   * peer is disabled must not loop back to their origin once it is re-enabled.
   */
  @Test(timeout = 300000)
  public void testCyclicReplication2() throws Exception {
    LOG.info("testCyclicReplication2");
    int numClusters = 3;
    Table[] htables = null;
    try {
      startMiniClusters(numClusters);
      createTableOnClusters(table);

      // Ring topology: 0 -> 1 -> 2 -> 0.
      addPeer("1", 0, 1);
      addPeer("1", 1, 2);
      addPeer("1", 2, 0);

      htables = getHTablesOnClusters(tableName);

      // Write on each cluster and wait for the edit on its upstream neighbour,
      // i.e. after it has traversed the entire ring.
      putAndWait(row, famName, htables[0], htables[2]);
      putAndWait(row1, famName, htables[1], htables[0]);
      putAndWait(row2, famName, htables[2], htables[1]);

      deleteAndWait(row, htables[0], htables[2]);
      deleteAndWait(row1, htables[1], htables[0]);
      deleteAndWait(row2, htables[2], htables[1]);

      // Each cluster applied each of the three mutations exactly once.
      int[] expectedCounts = new int[] { 3, 3, 3 };
      validateCounts(htables, put, expectedCounts);
      validateCounts(htables, delete, expectedCounts);

      // Break the ring at 2 -> 0.
      disablePeer("1", 2);

      // row3 still flows 0 -> 1 while the ring is broken.
      putAndWait(row3, famName, htables[0], htables[1]);
      // row4 is written on cluster 1 while 2 -> 0 is down.
      htables[1].put(new Put(row4).add(famName, row4, row4));

      enablePeer("1", 2);

      // After re-enabling, row4 must eventually arrive on cluster 0 via 2 -> 0
      // (it must not have been dropped while the peer was disabled).
      wait(row4, htables[0], false);
    } finally {
      close(htables);
      shutDownMiniClusters();
    }
  }
322
323
324
325
326
  /**
   * Bulk-load replication to multiple slaves: cluster 0 first replicates only
   * to cluster 1; after a WAL roll a second peer (cluster 2) is added, and a
   * subsequent bulk load must reach both slaves while cluster 2 does not
   * receive the pre-peer data.
   */
  @Test(timeout = 300000)
  public void testHFileMultiSlaveReplication() throws Exception {
    LOG.info("testHFileMultiSlaveReplication");
    int numClusters = 3;
    Table[] htables = null;
    try {
      startMiniClusters(numClusters);
      createTableOnClusters(table);

      // Initially only 0 -> 1 replicates.
      addPeer("1", 0, 1);

      htables = getHTablesOnClusters(tableName);

      // First load: 2 HFiles x 100 rows into cluster 0, validated on
      // clusters 0 and 1 only.
      byte[][][] hfileRanges =
          new byte[][][] { new byte[][] { Bytes.toBytes("mmmm"), Bytes.toBytes("oooo") },
              new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("rrr") }, };
      int numOfRows = 100;

      int[] expectedCounts =
          new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows };

      loadAndValidateHFileReplication("testHFileCyclicReplication_0", 0, new int[] { 1 }, row,
          famName, htables, hfileRanges, numOfRows, expectedCounts, true);

      // Cluster 2 is not yet a peer, so it must still be empty.
      assertEquals(0, utilities[2].countRows(htables[2]));

      // Roll the WAL so the new peer added below starts from a fresh log and
      // does not pick up the first load.
      rollWALAndWait(utilities[0], htables[0].getName(), row);

      // Add the second slave: 0 -> 2.
      addPeer("2", 0, 2);

      // Second load: 2 HFiles x 200 rows; clusters 0 and 1 accumulate on top
      // of the first load, cluster 2 only gets the second load.
      hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("ssss"), Bytes.toBytes("uuuu") },
          new byte[][] { Bytes.toBytes("vvv"), Bytes.toBytes("xxx") }, };
      numOfRows = 200;

      int[] newExpectedCounts = new int[] { hfileRanges.length * numOfRows + expectedCounts[0],
          hfileRanges.length * numOfRows + expectedCounts[1], hfileRanges.length * numOfRows };

      loadAndValidateHFileReplication("testHFileCyclicReplication_1", 0, new int[] { 1, 2 }, row,
          famName, htables, hfileRanges, numOfRows, newExpectedCounts, true);

    } finally {
      close(htables);
      shutDownMiniClusters();
    }
  }
379
380
381
382
383
384
  /**
   * Bulk-load replication with a table-CFs restricted peer: only family
   * {@code famName} is configured for replication, so a bulk load into
   * {@code famName1} must replicate to the slave for f but never for f1.
   */
  @Test(timeout = 300000)
  public void testHFileReplicationForConfiguredTableCfs() throws Exception {
    LOG.info("testHFileReplicationForConfiguredTableCfs");
    int numClusters = 2;
    Table[] htables = null;
    try {
      startMiniClusters(numClusters);
      createTableOnClusters(table);

      htables = getHTablesOnClusters(tableName);

      // Peer 0 -> 1 restricted to table:f only.
      addPeer("1", 0, 1, tableName.getNameAsString() + ":" + Bytes.toString(famName));

      // Load into the replicated family f; both clusters must converge.
      byte[][][] hfileRanges =
          new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
              new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("fff") }, };
      int numOfRows = 100;
      int[] expectedCounts =
          new int[] { hfileRanges.length * numOfRows, hfileRanges.length * numOfRows };

      loadAndValidateHFileReplication("load_f", 0, new int[] { 1 }, row, famName, htables,
          hfileRanges, numOfRows, expectedCounts, true);

      // Load into f1, which is NOT in the peer's table-CFs list.
      hfileRanges = new byte[][][] { new byte[][] { Bytes.toBytes("gggg"), Bytes.toBytes("iiii") },
          new byte[][] { Bytes.toBytes("jjj"), Bytes.toBytes("lll") }, };
      numOfRows = 100;

      // Only the source cluster's count grows; the slave must stay unchanged.
      int[] newExpectedCounts =
          new int[] { hfileRanges.length * numOfRows + expectedCounts[0], expectedCounts[1] };

      loadAndValidateHFileReplication("load_f1", 0, new int[] { 1 }, row, famName1, htables,
          hfileRanges, numOfRows, newExpectedCounts, false);

      // The source saw the f1 rows immediately.
      wait(0, htables[0], hfileRanges.length * numOfRows + expectedCounts[0]);

      // Give replication ample time to (incorrectly) ship the f1 data before
      // asserting the slave count did not move.
      Thread.sleep((NB_RETRIES / 2) * SLEEP_TIME);

      wait(1, htables[1], expectedCounts[1]);
    } finally {
      close(htables);
      shutDownMiniClusters();
    }
  }
435
436
437
438
439 @Test(timeout = 300000)
440 public void testCyclicReplication3() throws Exception {
441 LOG.info("testCyclicReplication2");
442 int numClusters = 3;
443 Table[] htables = null;
444 try {
445 startMiniClusters(numClusters);
446 createTableOnClusters(table);
447
448
449 addPeer("1", 0, 1);
450 addPeer("1", 1, 2);
451 addPeer("1", 2, 1);
452
453 htables = getHTablesOnClusters(tableName);
454
455
456 putAndWait(row, famName, htables[0], htables[2]);
457 putAndWait(row1, famName, htables[1], htables[2]);
458 putAndWait(row2, famName, htables[2], htables[1]);
459
460 deleteAndWait(row, htables[0], htables[2]);
461 deleteAndWait(row1, htables[1], htables[2]);
462 deleteAndWait(row2, htables[2], htables[1]);
463
464 int[] expectedCounts = new int[] { 1, 3, 3 };
465 validateCounts(htables, put, expectedCounts);
466 validateCounts(htables, delete, expectedCounts);
467 } finally {
468 close(htables);
469 shutDownMiniClusters();
470 }
471 }
472
473
474
475
476
477 @Test(timeout = 180000, expected = ServiceException.class)
478 public void testReplicateWALEntryWhenReplicationIsDisabled() throws Exception {
479 LOG.info("testSimplePutDelete");
480 baseConfiguration.setBoolean(HConstants.REPLICATION_ENABLE_KEY, false);
481 Table[] htables = null;
482 try {
483 startMiniClusters(1);
484 createTableOnClusters(table);
485 htables = getHTablesOnClusters(tableName);
486
487 HRegionServer rs = utilities[0].getRSForFirstRegionInTable(tableName);
488 RSRpcServices rsrpc = new RSRpcServices(rs);
489 rsrpc.replicateWALEntry(null, null);
490 } finally {
491 close(htables);
492 shutDownMiniClusters();
493 }
494 }
495
496
497
498
499
500
  /**
   * Exercises HBASE_REPLICATION_PEER_BASE_CONFIG semantics: base config
   * entries are merged into every peer on creation; per-peer overrides survive
   * a cluster restart; and base entries added before a restart are merged into
   * existing peers without clobbering their explicit overrides.
   */
  @Test
  public void testBasePeerConfigsForPeerMutations()
      throws Exception {
    LOG.info("testBasePeerConfigsForPeerMutations");
    String firstCustomPeerConfigKey = "hbase.xxx.custom_config";
    String firstCustomPeerConfigValue = "test";
    String firstCustomPeerConfigUpdatedValue = "test_updated";

    String secondCustomPeerConfigKey = "hbase.xxx.custom_second_config";
    String secondCustomPeerConfigValue = "testSecond";
    String secondCustomPeerConfigUpdatedValue = "testSecondUpdated";
    try {
      // Seed the base peer config with key1=test before the clusters start.
      baseConfiguration.set(ReplicationPeerConfig.HBASE_REPLICATION_PEER_BASE_CONFIG,
          firstCustomPeerConfigKey.concat("=").concat(firstCustomPeerConfigValue));
      startMiniClusters(2);
      addPeer("1", 0, 1);
      addPeer("2", 0, 1);

      ReplicationAdmin replicationAdmin = new ReplicationAdmin(configurations[0]);
      ReplicationPeerConfig replicationPeerConfig1 = replicationAdmin.getPeerConfig("1");
      ReplicationPeerConfig replicationPeerConfig2 = replicationAdmin.getPeerConfig("2");

      // Both newly created peers inherit key1 from the base config.
      assertEquals(firstCustomPeerConfigValue, replicationPeerConfig1.
          getConfiguration().get(firstCustomPeerConfigKey));
      assertEquals(firstCustomPeerConfigValue, replicationPeerConfig2.
          getConfiguration().get(firstCustomPeerConfigKey));

      // Peer 1: override the base-provided key1.
      replicationPeerConfig1.getConfiguration().put(firstCustomPeerConfigKey,
          firstCustomPeerConfigUpdatedValue);

      // Peer 2: set key2, which is not (yet) in the base config.
      replicationPeerConfig2.getConfiguration().put(secondCustomPeerConfigKey,
          secondCustomPeerConfigUpdatedValue);

      replicationAdmin.updatePeerConfig("1", replicationPeerConfig1);
      replicationAdmin.updatePeerConfig("2", replicationPeerConfig2);

      // Both explicit updates are visible immediately.
      assertEquals(firstCustomPeerConfigUpdatedValue, replicationAdmin.getPeerConfig("1").
          getConfiguration().get(firstCustomPeerConfigKey));
      assertEquals(secondCustomPeerConfigUpdatedValue, replicationAdmin.getPeerConfig("2").
          getConfiguration().get(secondCustomPeerConfigKey));

      // Grow the base config to key1=test;key2=testSecond and restart, so the
      // new base entries get merged into the already-existing peers.
      utilities[0].getConfiguration().set(ReplicationPeerConfig.
          HBASE_REPLICATION_PEER_BASE_CONFIG, firstCustomPeerConfigKey.concat("=").
          concat(firstCustomPeerConfigValue).concat(";").concat(secondCustomPeerConfigKey)
          .concat("=").concat(secondCustomPeerConfigValue));

      utilities[0].shutdownMiniHBaseCluster();
      utilities[0].restartHBaseCluster(1);
      replicationAdmin = new ReplicationAdmin(configurations[0]);

      // Peer 1 keeps its explicit key1 override; peer 2 falls back to the base
      // value for key1 (it never overrode it).
      assertEquals(firstCustomPeerConfigUpdatedValue, replicationAdmin.getPeerConfig("1").
          getConfiguration().get(firstCustomPeerConfigKey));
      assertEquals(firstCustomPeerConfigValue, replicationAdmin.getPeerConfig("2").
          getConfiguration().get(firstCustomPeerConfigKey));

      // Peer 1 picks up key2 from the base config...
      assertEquals(secondCustomPeerConfigValue, replicationAdmin.getPeerConfig("1").
          getConfiguration().get(secondCustomPeerConfigKey));
      // ...while peer 2 keeps its explicit key2 override.
      assertEquals(secondCustomPeerConfigUpdatedValue, replicationAdmin.getPeerConfig("2").
          getConfiguration().get(secondCustomPeerConfigKey));
    } finally {
      shutDownMiniClusters();
      baseConfiguration.unset(ReplicationPeerConfig.HBASE_REPLICATION_PEER_BASE_CONFIG);
    }
  }
574
575
576 @After
577 public void tearDown() throws IOException {
578 configurations = null;
579 utilities = null;
580 }
581
  /**
   * Starts {@code numClusters} mini HBase clusters that all share one mini
   * ZooKeeper cluster, each under a randomized znode parent so they do not
   * collide. Populates {@link #utilities} and {@link #configurations}.
   */
  @SuppressWarnings("resource")
  private void startMiniClusters(int numClusters) throws Exception {
    Random random = new Random();
    utilities = new HBaseTestingUtility[numClusters];
    configurations = new Configuration[numClusters];
    for (int i = 0; i < numClusters; i++) {
      Configuration conf = new Configuration(baseConfiguration);
      // Unique znode parent per cluster within the shared ZK ensemble.
      conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/" + i + random.nextInt());
      HBaseTestingUtility utility = new HBaseTestingUtility(conf);
      if (i == 0) {
        // The first cluster hosts the single ZK ensemble everyone shares.
        utility.startMiniZKCluster();
        miniZK = utility.getZkCluster();
      } else {
        utility.setZkCluster(miniZK);
      }
      utility.startMiniCluster();
      utilities[i] = utility;
      configurations[i] = conf;
      // NOTE(review): this watcher is deliberately never closed or referenced
      // (hence the @SuppressWarnings("resource") above); presumably it is kept
      // alive for the duration of the test — confirm before "fixing".
      new ZooKeeperWatcher(conf, "cluster" + i, null, true);
    }
  }
603
604 private void shutDownMiniClusters() throws Exception {
605 int numClusters = utilities.length;
606 for (int i = numClusters - 1; i >= 0; i--) {
607 if (utilities[i] != null) {
608 utilities[i].shutdownMiniCluster();
609 }
610 }
611 miniZK.shutdown();
612 }
613
614 private void createTableOnClusters(HTableDescriptor table) throws Exception {
615 int numClusters = configurations.length;
616 for (int i = 0; i < numClusters; i++) {
617 Admin hbaseAdmin = null;
618 try {
619 hbaseAdmin = new HBaseAdmin(configurations[i]);
620 hbaseAdmin.createTable(table);
621 } finally {
622 close(hbaseAdmin);
623 }
624 }
625 }
626
627 private void addPeer(String id, int masterClusterNumber,
628 int slaveClusterNumber) throws Exception {
629 ReplicationAdmin replicationAdmin = null;
630 try {
631 replicationAdmin = new ReplicationAdmin(
632 configurations[masterClusterNumber]);
633 ReplicationPeerConfig rpc = new ReplicationPeerConfig();
634 rpc.setClusterKey(utilities[slaveClusterNumber].getClusterKey());
635 replicationAdmin.addPeer(id, rpc);
636 } finally {
637 close(replicationAdmin);
638 }
639 }
640
641 private void addPeer(String id, int masterClusterNumber, int slaveClusterNumber, String tableCfs)
642 throws Exception {
643 ReplicationAdmin replicationAdmin = null;
644 try {
645 replicationAdmin = new ReplicationAdmin(configurations[masterClusterNumber]);
646 ReplicationPeerConfig rpc = new ReplicationPeerConfig();
647 rpc.setClusterKey(utilities[slaveClusterNumber].getClusterKey());
648 replicationAdmin.addPeer(id, rpc, ReplicationSerDeHelper.parseTableCFsFromConfig(tableCfs));
649 } finally {
650 close(replicationAdmin);
651 }
652 }
653
654 private void disablePeer(String id, int masterClusterNumber) throws Exception {
655 ReplicationAdmin replicationAdmin = null;
656 try {
657 replicationAdmin = new ReplicationAdmin(
658 configurations[masterClusterNumber]);
659 replicationAdmin.disablePeer(id);
660 } finally {
661 close(replicationAdmin);
662 }
663 }
664
665 private void enablePeer(String id, int masterClusterNumber) throws Exception {
666 ReplicationAdmin replicationAdmin = null;
667 try {
668 replicationAdmin = new ReplicationAdmin(
669 configurations[masterClusterNumber]);
670 replicationAdmin.enablePeer(id);
671 } finally {
672 close(replicationAdmin);
673 }
674 }
675
676 private void close(Closeable... closeables) {
677 try {
678 if (closeables != null) {
679 for (Closeable closeable : closeables) {
680 closeable.close();
681 }
682 }
683 } catch (Exception e) {
684 LOG.warn("Exception occured while closing the object:", e);
685 }
686 }
687
688 @SuppressWarnings("resource")
689 private Table[] getHTablesOnClusters(TableName tableName) throws Exception {
690 int numClusters = utilities.length;
691 Table[] htables = new Table[numClusters];
692 for (int i = 0; i < numClusters; i++) {
693 Table htable = new HTable(configurations[i], tableName);
694 htable.setWriteBufferSize(1024);
695 htables[i] = htable;
696 }
697 return htables;
698 }
699
700 private void validateCounts(Table[] htables, byte[] type,
701 int[] expectedCounts) throws IOException {
702 for (int i = 0; i < htables.length; i++) {
703 assertEquals(Bytes.toString(type) + " were replicated back ",
704 expectedCounts[i], getCount(htables[i], type));
705 }
706 }
707
708 private int getCount(Table t, byte[] type) throws IOException {
709 Get test = new Get(row);
710 test.setAttribute("count", new byte[] {});
711 Result res = t.get(test);
712 return Bytes.toInt(res.getValue(count, type));
713 }
714
715 private void deleteAndWait(byte[] row, Table source, Table target)
716 throws Exception {
717 Delete del = new Delete(row);
718 source.delete(del);
719 wait(row, target, true);
720 }
721
722 private void putAndWait(byte[] row, byte[] fam, Table source, Table target)
723 throws Exception {
724 Put put = new Put(row);
725 put.add(fam, row, row);
726 source.put(put);
727 wait(row, target, false);
728 }
729
  /**
   * Creates one HFile per key range, bulk-loads them into the master cluster's
   * table via LoadIncrementalHFiles, and (optionally) waits until each listed
   * slave cluster's table reaches its expected row count.
   */
  private void loadAndValidateHFileReplication(String testName, int masterNumber,
      int[] slaveNumbers, byte[] row, byte[] fam, Table[] tables, byte[][][] hfileRanges,
      int numOfRows, int[] expectedCounts, boolean toValidate) throws Exception {
    HBaseTestingUtility util = utilities[masterNumber];

    // Stage the HFiles under <testdir>/<family> on the master cluster's FS.
    Path dir = util.getDataTestDirOnTestFS(testName);
    FileSystem fs = util.getTestFileSystem();
    dir = dir.makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(fam));

    // One HFile of numOfRows rows per [from, to] key range.
    int hfileIdx = 0;
    for (byte[][] range : hfileRanges) {
      byte[] from = range[0];
      byte[] to = range[1];
      HFileTestUtil.createHFile(util.getConfiguration(), fs,
          new Path(familyDir, "hfile_" + hfileIdx++), fam, row, from, to, numOfRows);
    }

    // Bulk-load the staged directory into the master cluster's table.
    Table source = tables[masterNumber];
    final TableName tableName = source.getName();
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
    String[] args = { dir.toString(), tableName.toString() };
    loader.run(args);

    // Optionally wait for the bulk-loaded rows to replicate to each slave.
    if (toValidate) {
      for (int slaveClusterNumber : slaveNumbers) {
        wait(slaveClusterNumber, tables[slaveClusterNumber], expectedCounts[slaveClusterNumber]);
      }
    }
  }
760
761 private void wait(int slaveNumber, Table target, int expectedCount)
762 throws IOException, InterruptedException {
763 int count = 0;
764 for (int i = 0; i < NB_RETRIES; i++) {
765 if (i == NB_RETRIES - 1) {
766 fail("Waited too much time for bulkloaded data replication. Current count=" + count
767 + ", expected count=" + expectedCount);
768 }
769 count = utilities[slaveNumber].countRows(target);
770 if (count != expectedCount) {
771 LOG.info("Waiting more time for bulkloaded data replication.");
772 Thread.sleep(SLEEP_TIME);
773 } else {
774 break;
775 }
776 }
777 }
778
779 private void wait(byte[] row, Table target, boolean isDeleted) throws Exception {
780 Get get = new Get(row);
781 for (int i = 0; i < NB_RETRIES; i++) {
782 if (i == NB_RETRIES - 1) {
783 fail("Waited too much time for replication. Row:" + Bytes.toString(row)
784 + ". IsDeleteReplication:" + isDeleted);
785 }
786 Result res = target.get(get);
787 boolean sleep = isDeleted ? res.size() > 0 : res.size() == 0;
788 if (sleep) {
789 LOG.info("Waiting for more time for replication. Row:"
790 + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted);
791 Thread.sleep(SLEEP_TIME);
792 } else {
793 if (!isDeleted) {
794 assertArrayEquals(res.value(), row);
795 }
796 LOG.info("Obtained row:"
797 + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted);
798 break;
799 }
800 }
801 }
802
  /**
   * Rolls the WAL of the region server hosting {@code row} in {@code table}
   * and blocks until the roll has actually completed, using a WAL listener and
   * a latch to observe the postLogRoll callback.
   */
  private void rollWALAndWait(final HBaseTestingUtility utility, final TableName table,
      final byte[] row) throws IOException {
    final Admin admin = utility.getHBaseAdmin();
    final MiniHBaseCluster cluster = utility.getMiniHBaseCluster();

    // Find the region whose key range contains the row.
    HRegion region = null;
    for (HRegion candidate : cluster.getRegions(table)) {
      if (HRegion.rowIsInRange(candidate.getRegionInfo(), row)) {
        region = candidate;
        break;
      }
    }
    assertNotNull("Couldn't find the region for row '" + Arrays.toString(row) + "'", region);

    final CountDownLatch latch = new CountDownLatch(1);

    // Listener trips the latch when the region's WAL finishes rolling.
    final WALActionsListener listener = new WALActionsListener.Base() {
      @Override
      public void postLogRoll(final Path oldPath, final Path newPath) throws IOException {
        latch.countDown();
      }
    };
    region.getWAL().registerWALActionsListener(listener);

    // Ask the server hosting the region to roll its WAL writer.
    admin.rollWALWriter(cluster.getServerHoldingRegion(region.getTableDesc().getTableName(),
        region.getRegionInfo().getRegionName()));

    // Block until the roll completes; on interrupt, restore the interrupt flag
    // and continue — later tests may then fail because the roll never finished.
    try {
      latch.await();
    } catch (InterruptedException exception) {
      LOG.warn("Interrupted while waiting for the wal of '" + region + "' to roll. If later " +
          "replication tests fail, it's probably because we should still be waiting.");
      Thread.currentThread().interrupt();
    }
    region.getWAL().unregisterWALActionsListener(listener);
  }
843
844
845
846
847
848 public static class CoprocessorCounter extends BaseRegionObserver {
849 private int nCount = 0;
850 private int nDelete = 0;
851
852 @Override
853 public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
854 final WALEdit edit, final Durability durability) throws IOException {
855 nCount++;
856 }
857
858 @Override
859 public void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
860 final Delete delete, final WALEdit edit, final Durability durability) throws IOException {
861 nDelete++;
862 }
863
864 @Override
865 public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
866 final Get get, final List<Cell> result) throws IOException {
867 if (get.getAttribute("count") != null) {
868 result.clear();
869
870 result.add(new KeyValue(count, count, delete, Bytes.toBytes(nDelete)));
871 result.add(new KeyValue(count, count, put, Bytes.toBytes(nCount)));
872 c.bypass();
873 }
874 }
875 }
876
877 }