/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 package org.apache.hadoop.hbase.regionserver;
19
20 import static org.apache.hadoop.hbase.HConstants.REPLICATION_CLUSTER_ID;
21 import static org.apache.hadoop.hbase.HConstants.REPLICATION_CONF_DIR;
22 import static org.junit.Assert.assertEquals;
23 import static org.junit.Assert.assertTrue;
24 import java.io.File;
25 import java.io.FileOutputStream;
26 import java.io.IOException;
27 import java.util.List;
28 import java.util.concurrent.CountDownLatch;
29 import java.util.concurrent.TimeUnit;
30 import java.util.concurrent.atomic.AtomicInteger;
31 import org.apache.hadoop.conf.Configuration;
32 import org.apache.hadoop.fs.FSDataOutputStream;
33 import org.apache.hadoop.fs.Path;
34 import org.apache.hadoop.hbase.Cell;
35 import org.apache.hadoop.hbase.HBaseConfiguration;
36 import org.apache.hadoop.hbase.HBaseTestingUtility;
37 import org.apache.hadoop.hbase.HColumnDescriptor;
38 import org.apache.hadoop.hbase.HConstants;
39 import org.apache.hadoop.hbase.HTableDescriptor;
40 import org.apache.hadoop.hbase.KeyValue;
41 import org.apache.hadoop.hbase.client.Admin;
42 import org.apache.hadoop.hbase.client.Connection;
43 import org.apache.hadoop.hbase.client.ConnectionFactory;
44 import org.apache.hadoop.hbase.client.Get;
45 import org.apache.hadoop.hbase.client.Result;
46 import org.apache.hadoop.hbase.client.Table;
47 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
48 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
49 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
50 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
51 import org.apache.hadoop.hbase.io.hfile.HFile;
52 import org.apache.hadoop.hbase.io.hfile.HFileContext;
53 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
54 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
55 import org.apache.hadoop.hbase.replication.TestReplicationBase;
56 import org.apache.hadoop.hbase.testclassification.MediumTests;
57 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
58 import org.apache.hadoop.hbase.util.Bytes;
59 import org.apache.hadoop.hbase.util.Pair;
60 import org.apache.hadoop.hdfs.MiniDFSCluster;
61 import org.junit.After;
62 import org.junit.Before;
63 import org.junit.BeforeClass;
64 import org.junit.ClassRule;
65 import org.junit.Rule;
66 import org.junit.Test;
67 import org.junit.experimental.categories.Category;
68 import org.junit.rules.TemporaryFolder;
69 import org.junit.rules.TestName;
70 import org.slf4j.Logger;
71 import org.slf4j.LoggerFactory;
72
73
74
75
76
77
78
79
80
81
82
83
84
85 @Category({ ReplicationTests.class, MediumTests.class})
86 public class TestBulkLoadReplication extends TestReplicationBase {
87
88 protected static final Logger LOG =
89 LoggerFactory.getLogger(TestBulkLoadReplication.class);
90
91 private static final String PEER1_CLUSTER_ID = "peer1";
92 private static final String PEER4_CLUSTER_ID = "peer4";
93 private static final String PEER3_CLUSTER_ID = "peer3";
94
95 private static final String PEER_ID1 = "1";
96 private static final String PEER_ID3 = "3";
97 private static final String PEER_ID4 = "4";
98
99 private static final AtomicInteger BULK_LOADS_COUNT = new AtomicInteger(0);
100 private static CountDownLatch BULK_LOAD_LATCH;
101
102 private static HBaseTestingUtility utility3;
103 private static HBaseTestingUtility utility4;
104 private static Configuration conf3;
105 private static Configuration conf4;
106
107
108
109 @Rule
110 public TestName name = new TestName();
111
112 @ClassRule
113 public static TemporaryFolder testFolder = new TemporaryFolder();
114
115 @BeforeClass
116 public static void setUpBeforeClass() throws Exception {
117 setupBulkLoadConfigsForCluster(conf1, PEER1_CLUSTER_ID);
118 conf3 = HBaseConfiguration.create(conf1);
119 setupBulkLoadConfigsForCluster(conf3, PEER3_CLUSTER_ID);
120 conf3.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");
121 utility3 = new HBaseTestingUtility(conf3);
122 conf4 = HBaseConfiguration.create(conf1);
123 setupBulkLoadConfigsForCluster(conf4, PEER4_CLUSTER_ID);
124 conf4.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/4");
125 utility4 = new HBaseTestingUtility(conf4);
126 TestReplicationBase.setUpBeforeClass();
127 startCluster(utility3, conf3);
128 startCluster(utility4, conf4);
129 }
130
131 private static void startCluster(HBaseTestingUtility util, Configuration configuration)
132 throws Exception {
133 LOG.info("Setup Zk to same one from utility1 and utility4");
134 util.setZkCluster(utility1.getZkCluster());
135 util.startMiniCluster(2);
136
137 HTableDescriptor tableDesc = new HTableDescriptor(tableName);
138 HColumnDescriptor columnDesc = new HColumnDescriptor(famName);
139 columnDesc.setScope(1);
140 tableDesc.addFamily(columnDesc);
141
142 Connection connection = ConnectionFactory.createConnection(configuration);
143 try (Admin admin = connection.getAdmin()) {
144 admin.createTable(tableDesc, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
145 }
146 util.waitUntilAllRegionsAssigned(tableName);
147 }
148
149 @Before
150 public void setUpBase() throws Exception {
151 ReplicationPeerConfig peer1Config = getPeerConfigForCluster(utility1);
152 ReplicationPeerConfig peer4Config = getPeerConfigForCluster(utility4);
153 ReplicationPeerConfig peer3Config = getPeerConfigForCluster(utility3);
154
155 getReplicationAdmin(utility1.getConfiguration()).addPeer(PEER_ID4, peer4Config);
156
157 ReplicationAdmin admin4 = getReplicationAdmin(utility4.getConfiguration());
158 admin4.addPeer(PEER_ID1, peer1Config);
159
160 admin4.addPeer(PEER_ID3, peer3Config);
161
162 getReplicationAdmin(utility3.getConfiguration()).addPeer(PEER_ID4, peer4Config);
163 setupCoprocessor(utility1);
164 setupCoprocessor(utility4);
165 setupCoprocessor(utility3);
166 }
167
168 private ReplicationAdmin getReplicationAdmin(Configuration configuration) throws IOException {
169 return new ReplicationAdmin(configuration);
170 }
171
172 private ReplicationPeerConfig getPeerConfigForCluster(HBaseTestingUtility util) {
173 ReplicationPeerConfig config = new ReplicationPeerConfig();
174 config.setClusterKey(util.getClusterKey());
175 return config;
176 }
177
178 private void setupCoprocessor(HBaseTestingUtility cluster) throws IOException {
179 for(HRegion region : cluster.getHBaseCluster().getRegions(tableName)){
180 region.getCoprocessorHost().load(TestBulkLoadReplication.BulkReplicationTestObserver.class,
181 0, cluster.getConfiguration());
182 }
183 }
184
185 @After
186 public void tearDownBase() throws Exception {
187 getReplicationAdmin(utility4.getConfiguration()).removePeer(PEER_ID1);
188 getReplicationAdmin(utility4.getConfiguration()).removePeer(PEER_ID3);
189 getReplicationAdmin(utility3.getConfiguration()).removePeer(PEER_ID4);
190 }
191
192 private static void setupBulkLoadConfigsForCluster(Configuration config,
193 String clusterReplicationId) throws Exception {
194 config.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);
195 config.set(REPLICATION_CLUSTER_ID, clusterReplicationId);
196 File sourceConfigFolder = testFolder.newFolder(clusterReplicationId);
197 File sourceConfigFile = new File(sourceConfigFolder.getAbsolutePath()
198 + "/hbase-site.xml");
199 config.writeXml(new FileOutputStream(sourceConfigFile));
200 config.set(REPLICATION_CONF_DIR, testFolder.getRoot().getAbsolutePath());
201 }
202
203 @Test
204 public void testBulkLoadReplicationActiveActive() throws Exception {
205 Table peer1TestTable = utility1.getConnection().getTable(TestReplicationBase.tableName);
206 Table peer4TestTable = utility4.getConnection().getTable(TestReplicationBase.tableName);
207 Table peer3TestTable = utility3.getConnection().getTable(TestReplicationBase.tableName);
208 byte[] row = Bytes.toBytes("001");
209 byte[] value = Bytes.toBytes("v1");
210 assertBulkLoadConditions(row, value, utility1, peer1TestTable, peer4TestTable, peer3TestTable);
211 row = Bytes.toBytes("002");
212 value = Bytes.toBytes("v2");
213 assertBulkLoadConditions(row, value, utility4, peer4TestTable, peer1TestTable, peer3TestTable);
214 row = Bytes.toBytes("003");
215 value = Bytes.toBytes("v3");
216 assertBulkLoadConditions(row, value, utility3, peer3TestTable, peer4TestTable, peer1TestTable);
217
218 Thread.sleep(400);
219
220
221
222 assertEquals(9, BULK_LOADS_COUNT.get());
223 }
224
225 private void assertBulkLoadConditions(byte[] row, byte[] value,
226 HBaseTestingUtility utility, Table...tables) throws Exception {
227 BULK_LOAD_LATCH = new CountDownLatch(3);
228 bulkLoadOnCluster(row, value, utility);
229 assertTrue(BULK_LOAD_LATCH.await(1, TimeUnit.MINUTES));
230 assertTableHasValue(tables[0], row, value);
231 assertTableHasValue(tables[1], row, value);
232 assertTableHasValue(tables[2], row, value);
233 }
234
235 private void bulkLoadOnCluster(byte[] row, byte[] value,
236 HBaseTestingUtility cluster) throws Exception {
237 String bulkLoadFile = createHFileForFamilies(row, value, cluster.getConfiguration());
238 copyToHdfs(bulkLoadFile, cluster.getDFSCluster());
239 LoadIncrementalHFiles bulkLoadHFilesTool =
240 new LoadIncrementalHFiles(cluster.getConfiguration());
241 bulkLoadHFilesTool.run(new String[]{"/bulk_dir/region1/", tableName.getNameAsString()});
242 }
243
244 private void copyToHdfs(String bulkLoadFilePath, MiniDFSCluster cluster) throws Exception {
245 Path bulkLoadDir = new Path("/bulk_dir/region1/f");
246 cluster.getFileSystem().mkdirs(bulkLoadDir);
247 cluster.getFileSystem().copyFromLocalFile(new Path(bulkLoadFilePath), bulkLoadDir);
248 }
249
250 private void assertTableHasValue(Table table, byte[] row, byte[] value) throws Exception {
251 Get get = new Get(row);
252 Result result = table.get(get);
253 assertTrue(result.advance());
254 assertEquals(Bytes.toString(value), Bytes.toString(result.value()));
255 }
256
257 private String createHFileForFamilies(byte[] row, byte[] value,
258 Configuration clusterConfig) throws IOException {
259 final KeyValue kv = new KeyValue(row, famName, Bytes.toBytes("1"), System.currentTimeMillis(),
260 KeyValue.Type.Put, value);
261 final HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(clusterConfig);
262
263 final File hFileLocation = testFolder.newFile();
264 final FSDataOutputStream out =
265 new FSDataOutputStream(new FileOutputStream(hFileLocation), null);
266 try {
267 hFileFactory.withOutputStream(out);
268 hFileFactory.withFileContext(new HFileContext());
269 HFile.Writer writer = hFileFactory.create();
270 try {
271 writer.append(kv);
272 } finally {
273 writer.close();
274 }
275 } finally {
276 out.close();
277 }
278 return hFileLocation.getAbsoluteFile().getAbsolutePath();
279 }
280
281 public static class BulkReplicationTestObserver extends BaseRegionObserver {
282
283 @Override
284 public void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
285 List<Pair<byte[], String>> familyPaths) throws IOException {
286 BULK_LOADS_COUNT.incrementAndGet();
287 }
288
289 @Override
290 public boolean postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
291 List<Pair<byte[], String>> familyPaths, boolean hasLoaded) throws IOException {
292 if(hasLoaded) {
293 BULK_LOAD_LATCH.countDown();
294 }
295 return true;
296 }
297
298 }
299 }