View Javadoc

1   /*
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.client;
20  
21  import static org.junit.Assert.assertEquals;
22  import static org.junit.Assert.assertFalse;
23  import static org.junit.Assert.assertTrue;
24  import static org.junit.Assert.fail;
25  
26  import java.io.IOException;
27  import java.util.ArrayList;
28  import java.util.Collections;
29  import java.util.HashSet;
30  import java.util.List;
31  import java.util.concurrent.CountDownLatch;
32  import java.util.concurrent.ThreadPoolExecutor;
33  
34  import org.apache.commons.logging.Log;
35  import org.apache.commons.logging.LogFactory;
36  import org.apache.hadoop.hbase.Cell;
37  import org.apache.hadoop.hbase.CellUtil;
38  import org.apache.hadoop.hbase.HBaseTestingUtility;
39  import org.apache.hadoop.hbase.HConstants;
40  import org.apache.hadoop.hbase.HRegionLocation;
41  import org.apache.hadoop.hbase.testclassification.MediumTests;
42  import org.apache.hadoop.hbase.ServerName;
43  import org.apache.hadoop.hbase.TableName;
44  import org.apache.hadoop.hbase.Waiter;
45  import org.apache.hadoop.hbase.codec.KeyValueCodec;
46  import org.apache.hadoop.hbase.exceptions.OperationConflictException;
47  import org.apache.hadoop.hbase.util.Bytes;
48  import org.apache.hadoop.hbase.util.JVMClusterUtil;
49  import org.apache.hadoop.hbase.util.Threads;
50  import org.junit.AfterClass;
51  import org.junit.Assert;
52  import org.junit.Before;
53  import org.junit.BeforeClass;
54  import org.junit.Ignore;
55  import org.junit.Test;
56  import org.junit.experimental.categories.Category;
57  
@Category(MediumTests.class)
public class TestMultiParallel {
  private static final Log LOG = LogFactory.getLog(TestMultiParallel.class);

  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
  // Fixed value/qualifier written by every Put and checked on read-back.
  private static final byte[] VALUE = Bytes.toBytes("value");
  private static final byte[] QUALIFIER = Bytes.toBytes("qual");
  private static final String FAMILY = "family";
  private static final TableName TEST_TABLE = TableName.valueOf("multi_test_table");
  // Second table used only by the row-lock contention test (HBASE-18233).
  private static final TableName TEST_TABLE2 = TableName.valueOf("multi_test_table2");
  private static final byte[] BYTES_FAMILY = Bytes.toBytes(FAMILY);
  // Single shared row key used by the "many columns in one row" style tests.
  private static final byte[] ONE_ROW = Bytes.toBytes("xxx");
  // Non-uniform, unsorted, partially duplicated key set; see makeKeys().
  private static final byte [][] KEYS = makeKeys();

  private static final int slaves = 5; // also used for testing HTable pool size
  // Shared connection, created in beforeClass() and closed in afterClass().
  private static Connection CONNECTION;
  @BeforeClass public static void beforeClass() throws Exception {
    // Uncomment the following lines if more verbosity is needed for
    // debugging (see HBASE-12285 for details).
    //((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
    //((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
    //((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
    // Use the KeyValue codec for RPC so the batch paths exercise it.
    UTIL.getConfiguration().set(HConstants.RPC_CODEC_CONF_KEY,
        KeyValueCodec.class.getCanonicalName());
    UTIL.startMiniCluster(slaves);
    // Pre-split table so batches span multiple regions and region servers.
    HTable t = UTIL.createMultiRegionTable(TEST_TABLE, Bytes.toBytes(FAMILY));
    UTIL.waitTableEnabled(TEST_TABLE);
    t.close();
    CONNECTION = ConnectionFactory.createConnection(UTIL.getConfiguration());
  }
89  
  @AfterClass public static void afterClass() throws Exception {
    // Close the shared connection before tearing down the mini cluster.
    CONNECTION.close();
    UTIL.shutdownMiniCluster();
  }
94  
  @Before public void before() throws Exception {
    LOG.info("before");
    if (UTIL.ensureSomeRegionServersAvailable(slaves)) {
      // A server was (re)started; redistribute regions across the cluster.
      UTIL.getMiniHBaseCluster().getMaster().balance();

      // Wait until the balance completes (no regions in transition).
      UTIL.waitFor(15 * 1000, UTIL.predicateNoRegionsInTransition());
    }
    LOG.info("before done");
  }
106 
107   private static byte[][] makeKeys() {
108     byte [][] starterKeys = HBaseTestingUtility.KEYS;
109     // Create a "non-uniform" test set with the following characteristics:
110     // a) Unequal number of keys per region
111 
112     // Don't use integer as a multiple, so that we have a number of keys that is
113     // not a multiple of the number of regions
114     int numKeys = (int) ((float) starterKeys.length * 10.33F);
115 
116     List<byte[]> keys = new ArrayList<byte[]>();
117     for (int i = 0; i < numKeys; i++) {
118       int kIdx = i % starterKeys.length;
119       byte[] k = starterKeys[kIdx];
120       byte[] cp = new byte[k.length + 1];
121       System.arraycopy(k, 0, cp, 0, k.length);
122       cp[k.length] = new Integer(i % 256).byteValue();
123       keys.add(cp);
124     }
125 
126     // b) Same duplicate keys (showing multiple Gets/Puts to the same row, which
127     // should work)
128     // c) keys are not in sorted order (within a region), to ensure that the
129     // sorting code and index mapping doesn't break the functionality
130     for (int i = 0; i < 100; i++) {
131       int kIdx = i % starterKeys.length;
132       byte[] k = starterKeys[kIdx];
133       byte[] cp = new byte[k.length + 1];
134       System.arraycopy(k, 0, cp, 0, k.length);
135       cp[k.length] = new Integer(i % 256).byteValue();
136       keys.add(cp);
137     }
138     return keys.toArray(new byte [][] {new byte [] {}});
139   }
140 
141 
  /**
   * This is for testing the active number of threads that were used while
   * doing a batch operation. It inserts one row per region via the batch
   * operation, and then checks the number of active threads.
   * For HBASE-3553
   * @throws IOException
   * @throws InterruptedException
   * @throws NoSuchFieldException
   * @throws SecurityException
   */
  @Ignore ("Nice bug flakey... expected 5 but was 4..") @Test(timeout=300000)
  public void testActiveThreadsCount() throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration())) {
      ThreadPoolExecutor executor = HTable.getDefaultExecutor(UTIL.getConfiguration());
      try {
        try (Table t = connection.getTable(TEST_TABLE, executor)) {
          List<Put> puts = constructPutRequests(); // creates a Put for every region
          t.batch(puts);
          // One pool thread per distinct region server touched by the batch,
          // so the executor's largest pool size must equal the server count.
          HashSet<ServerName> regionservers = new HashSet<ServerName>();
          try (RegionLocator locator = connection.getRegionLocator(TEST_TABLE)) {
            for (Row r : puts) {
              HRegionLocation location = locator.getRegionLocation(r.getRow());
              regionservers.add(location.getServerName());
            }
          }
          assertEquals(regionservers.size(), executor.getLargestPoolSize());
        }
      } finally {
        // The executor was created here, not by the table; shut it down.
        executor.shutdownNow();
      }
    }
  }
174 
  /**
   * Loads all KEYS, reads them back both via a batch of Gets and via
   * individual Table#get calls, and checks the two APIs agree cell for cell.
   */
  @Test(timeout=300000)
  public void testBatchWithGet() throws Exception {
    LOG.info("test=testBatchWithGet");
    Table table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // load test data
    List<Put> puts = constructPutRequests();
    table.batch(puts);

    // create a list of gets and run it
    List<Row> gets = new ArrayList<Row>();
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      gets.add(get);
    }
    Result[] multiRes = new Result[gets.size()];
    table.batch(gets, multiRes);

    // Same gets using individual call API
    List<Result> singleRes = new ArrayList<Result>();
    for (Row get : gets) {
      singleRes.add(table.get((Get) get));
    }
    // Compare results: the batch answers must match the single-Get answers
    // at the same index, both as Cell objects and byte-for-byte values.
    Assert.assertEquals(singleRes.size(), multiRes.length);
    for (int i = 0; i < singleRes.size(); i++) {
      Assert.assertTrue(singleRes.get(i).containsColumn(BYTES_FAMILY, QUALIFIER));
      Cell[] singleKvs = singleRes.get(i).rawCells();
      Cell[] multiKvs = multiRes[i].rawCells();
      for (int j = 0; j < singleKvs.length; j++) {
        Assert.assertEquals(singleKvs[j], multiKvs[j]);
        Assert.assertEquals(0, Bytes.compareTo(CellUtil.cloneValue(singleKvs[j]),
            CellUtil.cloneValue(multiKvs[j])));
      }
    }
    table.close();
  }
213 
214   @Test
215   public void testBadFam() throws Exception {
216     LOG.info("test=testBadFam");
217     Table table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
218 
219     List<Row> actions = new ArrayList<Row>();
220     Put p = new Put(Bytes.toBytes("row1"));
221     p.add(Bytes.toBytes("bad_family"), Bytes.toBytes("qual"), Bytes.toBytes("value"));
222     actions.add(p);
223     p = new Put(Bytes.toBytes("row2"));
224     p.add(BYTES_FAMILY, Bytes.toBytes("qual"), Bytes.toBytes("value"));
225     actions.add(p);
226 
227     // row1 and row2 should be in the same region.
228 
229     Object [] r = new Object[actions.size()];
230     try {
231       table.batch(actions, r);
232       fail();
233     } catch (RetriesExhaustedWithDetailsException ex) {
234       LOG.debug(ex);
235       // good!
236       assertFalse(ex.mayHaveClusterIssues());
237     }
238     assertEquals(2, r.length);
239     assertTrue(r[0] instanceof Throwable);
240     assertTrue(r[1] instanceof Result);
241     table.close();
242   }
243 
244   @Test (timeout=300000)
245   public void testFlushCommitsNoAbort() throws Exception {
246     LOG.info("test=testFlushCommitsNoAbort");
247     doTestFlushCommits(false);
248   }
249 
250   /**
251    * Only run one Multi test with a forced RegionServer abort. Otherwise, the
252    * unit tests will take an unnecessarily long time to run.
253    *
254    * @throws Exception
255    */
256   @Test (timeout=360000)
257   public void testFlushCommitsWithAbort() throws Exception {
258     LOG.info("test=testFlushCommitsWithAbort");
259     doTestFlushCommits(true);
260   }
261 
  /**
   * Set table auto flush to false and test flushing commits
   * @param doAbort true if abort one regionserver in the testing
   * @throws Exception
   */
  private void doTestFlushCommits(boolean doAbort) throws Exception {
    // Load the data
    LOG.info("get new table");
    Table table = UTIL.getConnection().getTable(TEST_TABLE);
    table.setWriteBufferSize(10 * 1024 * 1024);

    LOG.info("constructPutRequests");
    List<Put> puts = constructPutRequests();
    table.put(puts);
    LOG.info("puts");
    final int liveRScount = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads()
        .size();
    assert liveRScount > 0;
    JVMClusterUtil.RegionServerThread liveRS = UTIL.getMiniHBaseCluster()
        .getLiveRegionServerThreads().get(0);
    if (doAbort) {
      liveRS.getRegionServer().abort("Aborting for tests",
          new Exception("doTestFlushCommits"));
      // If we wait for no regions being online after we abort the server, we
      // could ensure the master has re-assigned the regions on killed server
      // after writing successfully. It means the server we aborted is dead
      // and detected by master.
      while (liveRS.getRegionServer().getNumberOfOnlineRegions() != 0) {
        Thread.sleep(100);
      }
      // try putting more keys after the abort. same key/qual... just validating
      // no exceptions thrown
      puts = constructPutRequests();
      table.put(puts);
    }

    LOG.info("validating loaded data");
    validateLoadedData(table);

    // Validate server and region count
    List<JVMClusterUtil.RegionServerThread> liveRSs = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads();
    int count = 0;
    for (JVMClusterUtil.RegionServerThread t: liveRSs) {
      count++;
      LOG.info("Count=" + count + ", Alive=" + t.getRegionServer());
    }
    LOG.info("Count=" + count);
    Assert.assertEquals("Server count=" + count + ", abort=" + doAbort,
        (doAbort ? (liveRScount - 1) : liveRScount), count);
    if (doAbort) {
      UTIL.getMiniHBaseCluster().waitOnRegionServer(0);
      // Wait until the master's cluster status reflects the dead server and
      // no regions remain in transition before the next test runs.
      UTIL.waitFor(15 * 1000, new Waiter.Predicate<Exception>() {
        @Override
        public boolean evaluate() throws Exception {
          return UTIL.getMiniHBaseCluster().getMaster()
              .getClusterStatus().getServersSize() == (liveRScount - 1);
        }
      });
      UTIL.waitFor(15 * 1000, UTIL.predicateNoRegionsInTransition());
    }

    table.close();
    LOG.info("done");
  }
326 
327   @Test (timeout=300000)
328   public void testBatchWithPut() throws Exception {
329     LOG.info("test=testBatchWithPut");
330     Table table = CONNECTION.getTable(TEST_TABLE);
331     // put multiple rows using a batch
332     List<Put> puts = constructPutRequests();
333 
334     Object[] results = table.batch(puts);
335     validateSizeAndEmpty(results, KEYS.length);
336 
337     if (true) {
338       int liveRScount = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size();
339       assert liveRScount > 0;
340       JVMClusterUtil.RegionServerThread liveRS =
341         UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().get(0);
342       liveRS.getRegionServer().abort("Aborting for tests", new Exception("testBatchWithPut"));
343       puts = constructPutRequests();
344       try {
345         results = table.batch(puts);
346       } catch (RetriesExhaustedWithDetailsException ree) {
347         LOG.info(ree.getExhaustiveDescription());
348         table.close();
349         throw ree;
350       }
351       validateSizeAndEmpty(results, KEYS.length);
352     }
353 
354     validateLoadedData(table);
355     table.close();
356   }
357 
  /**
   * Loads all KEYS, deletes every row's family through the generic batch API,
   * and verifies no row remains.
   */
  @Test(timeout=300000)
  public void testBatchWithDelete() throws Exception {
    LOG.info("test=testBatchWithDelete");
    Table table = new HTable(UTIL.getConfiguration(), TEST_TABLE);

    // Load some data
    List<Put> puts = constructPutRequests();
    Object[] results = table.batch(puts);
    validateSizeAndEmpty(results, KEYS.length);

    // Deletes
    List<Row> deletes = new ArrayList<Row>();
    for (int i = 0; i < KEYS.length; i++) {
      Delete delete = new Delete(KEYS[i]);
      delete.addFamily(BYTES_FAMILY);
      deletes.add(delete);
    }
    results = table.batch(deletes);
    validateSizeAndEmpty(results, KEYS.length);

    // Get to make sure the rows are really gone.
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      Assert.assertFalse(table.exists(get));
    }
    table.close();
  }
386 
387   @Test(timeout=300000)
388   public void testHTableDeleteWithList() throws Exception {
389     LOG.info("test=testHTableDeleteWithList");
390     Table table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
391 
392     // Load some data
393     List<Put> puts = constructPutRequests();
394     Object[] results = table.batch(puts);
395     validateSizeAndEmpty(results, KEYS.length);
396 
397     // Deletes
398     ArrayList<Delete> deletes = new ArrayList<Delete>();
399     for (int i = 0; i < KEYS.length; i++) {
400       Delete delete = new Delete(KEYS[i]);
401       delete.deleteFamily(BYTES_FAMILY);
402       deletes.add(delete);
403     }
404     table.delete(deletes);
405     Assert.assertTrue(deletes.isEmpty());
406 
407     // Get to make sure ...
408     for (byte[] k : KEYS) {
409       Get get = new Get(k);
410       get.addColumn(BYTES_FAMILY, QUALIFIER);
411       Assert.assertFalse(table.exists(get));
412     }
413     table.close();
414   }
415 
416   @Test(timeout=300000)
417   public void testBatchWithManyColsInOneRowGetAndPut() throws Exception {
418     LOG.info("test=testBatchWithManyColsInOneRowGetAndPut");
419     Table table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
420 
421     List<Row> puts = new ArrayList<Row>();
422     for (int i = 0; i < 100; i++) {
423       Put put = new Put(ONE_ROW);
424       byte[] qual = Bytes.toBytes("column" + i);
425       put.add(BYTES_FAMILY, qual, VALUE);
426       puts.add(put);
427     }
428     Object[] results = table.batch(puts);
429 
430     // validate
431     validateSizeAndEmpty(results, 100);
432 
433     // get the data back and validate that it is correct
434     List<Row> gets = new ArrayList<Row>();
435     for (int i = 0; i < 100; i++) {
436       Get get = new Get(ONE_ROW);
437       byte[] qual = Bytes.toBytes("column" + i);
438       get.addColumn(BYTES_FAMILY, qual);
439       gets.add(get);
440     }
441 
442     Object[] multiRes = table.batch(gets);
443 
444     int idx = 0;
445     for (Object r : multiRes) {
446       byte[] qual = Bytes.toBytes("column" + idx);
447       validateResult(r, qual, VALUE);
448       idx++;
449     }
450     table.close();
451   }
452 
  /**
   * Runs an Increment and an Append against the same row in one batch and
   * verifies both per-action results, which are positioned in the result
   * array in the order the actions were added.
   */
  @Test(timeout=300000)
  public void testBatchWithIncrementAndAppend() throws Exception {
    LOG.info("test=testBatchWithIncrementAndAppend");
    final byte[] QUAL1 = Bytes.toBytes("qual1");
    final byte[] QUAL2 = Bytes.toBytes("qual2");
    final byte[] QUAL3 = Bytes.toBytes("qual3");
    final byte[] QUAL4 = Bytes.toBytes("qual4");
    Table table = new HTable(UTIL.getConfiguration(), TEST_TABLE);
    // Start from a clean row so earlier tests against ONE_ROW don't leak in.
    Delete d = new Delete(ONE_ROW);
    table.delete(d);
    Put put = new Put(ONE_ROW);
    put.add(BYTES_FAMILY, QUAL1, Bytes.toBytes("abc"));
    put.add(BYTES_FAMILY, QUAL2, Bytes.toBytes(1L));
    table.put(put);

    Increment inc = new Increment(ONE_ROW);
    inc.addColumn(BYTES_FAMILY, QUAL2, 1);
    inc.addColumn(BYTES_FAMILY, QUAL3, 1);

    Append a = new Append(ONE_ROW);
    a.add(BYTES_FAMILY, QUAL1, Bytes.toBytes("def"));
    a.add(BYTES_FAMILY, QUAL4, Bytes.toBytes("xyz"));
    List<Row> actions = new ArrayList<Row>();
    actions.add(inc);
    actions.add(a);

    Object[] multiRes = table.batch(actions);
    // multiRes[0] is the Increment result, multiRes[1] the Append result.
    validateResult(multiRes[1], QUAL1, Bytes.toBytes("abcdef"));
    validateResult(multiRes[1], QUAL4, Bytes.toBytes("xyz"));
    validateResult(multiRes[0], QUAL2, Bytes.toBytes(2L));
    validateResult(multiRes[0], QUAL3, Bytes.toBytes(1L));
    table.close();
  }
486 
487   @Test(timeout=300000)
488   public void testNonceCollision() throws Exception {
489     LOG.info("test=testNonceCollision");
490     final Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
491     Table table = connection.getTable(TEST_TABLE);
492     Put put = new Put(ONE_ROW);
493     put.add(BYTES_FAMILY, QUALIFIER, Bytes.toBytes(0L));
494 
495     // Replace nonce manager with the one that returns each nonce twice.
496     NonceGenerator cnm = new PerClientRandomNonceGenerator() {
497       long lastNonce = -1;
498       @Override
499       public synchronized long newNonce() {
500         long nonce = 0;
501         if (lastNonce == -1) {
502           lastNonce = nonce = super.newNonce();
503         } else {
504           nonce = lastNonce;
505           lastNonce = -1L;
506         }
507         return nonce;
508       }
509     };
510 
511     NonceGenerator oldCnm =
512       ConnectionUtils.injectNonceGeneratorForTesting((ClusterConnection)connection, cnm);
513 
514     // First test sequential requests.
515     try {
516       Increment inc = new Increment(ONE_ROW);
517       inc.addColumn(BYTES_FAMILY, QUALIFIER, 1L);
518       table.increment(inc);
519       inc = new Increment(ONE_ROW);
520       inc.addColumn(BYTES_FAMILY, QUALIFIER, 1L);
521 
522       // duplicate increment
523       Result result = table.increment(inc);
524       validateResult(result, QUALIFIER, Bytes.toBytes(1L));
525 
526       Get get = new Get(ONE_ROW);
527       get.addColumn(BYTES_FAMILY, QUALIFIER);
528       result = table.get(get);
529       validateResult(result, QUALIFIER, Bytes.toBytes(1L));
530 
531       // Now run a bunch of requests in parallel, exactly half should succeed.
532       int numRequests = 40;
533       final CountDownLatch startedLatch = new CountDownLatch(numRequests);
534       final CountDownLatch startLatch = new CountDownLatch(1);
535       final CountDownLatch doneLatch = new CountDownLatch(numRequests);
536       for (int i = 0; i < numRequests; ++i) {
537         Runnable r = new Runnable() {
538           @Override
539           public void run() {
540             Table table = null;
541             try {
542               table = connection.getTable(TEST_TABLE);
543             } catch (IOException e) {
544               fail("Not expected");
545             }
546             Increment inc = new Increment(ONE_ROW);
547             inc.addColumn(BYTES_FAMILY, QUALIFIER, 1L);
548             startedLatch.countDown();
549             try {
550               startLatch.await();
551             } catch (InterruptedException e) {
552               fail("Not expected");
553             }
554             try {
555               table.increment(inc);
556             } catch (IOException ioEx) {
557               fail("Not expected");
558             }
559             doneLatch.countDown();
560           }
561         };
562         Threads.setDaemonThreadRunning(new Thread(r));
563       }
564       startedLatch.await(); // Wait until all threads are ready...
565       startLatch.countDown(); // ...and unleash the herd!
566       doneLatch.await();
567       // Now verify
568       get = new Get(ONE_ROW);
569       get.addColumn(BYTES_FAMILY, QUALIFIER);
570       result = table.get(get);
571       validateResult(result, QUALIFIER, Bytes.toBytes((numRequests / 2) + 1L));
572       table.close();
573     } finally {
574       ConnectionManager.injectNonceGeneratorForTesting((ClusterConnection)connection, oldCnm);
575     }
576   }
577 
  /**
   * Runs a batch mixing Gets, Puts, a Delete and a RowMutations, checks each
   * slot of the result array, then re-reads the mutations outside the batch
   * to confirm they were applied.
   */
  @Test(timeout=300000)
  public void testBatchWithMixedActions() throws Exception {
    LOG.info("test=testBatchWithMixedActions");
    Table table = UTIL.getConnection().getTable(TEST_TABLE);

    // Load some data to start
    List<Put> puts = constructPutRequests();
    Object[] results = new Object[puts.size()];
    table.batch(puts, results);
    validateSizeAndEmpty(results, KEYS.length);

    // Batch: get, get, put(new col), delete, get, get of put, get of deleted,
    // put
    List<Row> actions = new ArrayList<Row>();

    byte[] qual2 = Bytes.toBytes("qual2");
    byte[] val2 = Bytes.toBytes("putvalue2");

    // 0 get
    Get get = new Get(KEYS[10]);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    actions.add(get);

    // 1 get
    get = new Get(KEYS[11]);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    actions.add(get);

    // 2 put of new column
    Put put = new Put(KEYS[10]);
    put.addColumn(BYTES_FAMILY, qual2, val2);
    actions.add(put);

    // 3 delete
    Delete delete = new Delete(KEYS[20]);
    delete.addFamily(BYTES_FAMILY);
    actions.add(delete);

    // 4 get
    get = new Get(KEYS[30]);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    actions.add(get);

    // There used to be a 'get' of a previous put here, but removed
    // since this API really cannot guarantee order in terms of mixed
    // get/puts.

    // 5 put of new column
    put = new Put(KEYS[40]);
    put.addColumn(BYTES_FAMILY, qual2, val2);
    actions.add(put);

    // 6 RowMutations: two Puts to the same row, applied atomically
    RowMutations rm = new RowMutations(KEYS[50]);
    put = new Put(KEYS[50]);
    put.addColumn(BYTES_FAMILY, qual2, val2);
    rm.add(put);
    byte[] qual3 = Bytes.toBytes("qual3");
    byte[] val3 = Bytes.toBytes("putvalue3");
    put = new Put(KEYS[50]);
    put.addColumn(BYTES_FAMILY, qual3, val3);
    rm.add(put);
    actions.add(rm);

    // 7 Add another Get to the mixed sequence after RowMutations
    get = new Get(KEYS[10]);
    get.addColumn(BYTES_FAMILY, QUALIFIER);
    actions.add(get);

    results = new Object[actions.size()];
    table.batch(actions, results);

    // Validation: Gets return data, mutations return empty Results.

    validateResult(results[0]);
    validateResult(results[1]);
    validateEmpty(results[3]);
    validateResult(results[4]);
    validateEmpty(results[5]);
    validateEmpty(results[6]);
    validateResult(results[7]);

    // validate last put, externally from the batch
    get = new Get(KEYS[40]);
    get.addColumn(BYTES_FAMILY, qual2);
    Result r = table.get(get);
    validateResult(r, qual2, val2);

    // validate last RowMutations, externally from the batch
    get = new Get(KEYS[50]);
    get.addColumn(BYTES_FAMILY, qual2);
    r = table.get(get);
    validateResult(r, qual2, val2);

    get = new Get(KEYS[50]);
    get.addColumn(BYTES_FAMILY, qual3);
    r = table.get(get);
    validateResult(r, qual3, val3);

    table.close();
  }
679 
  // ---- Helper methods ----
681 
  // Validates a batch result slot against the default QUALIFIER/VALUE pair.
  private void validateResult(Object r) {
    validateResult(r, QUALIFIER, VALUE);
  }
685 
686   private void validateResult(Object r1, byte[] qual, byte[] val) {
687     Result r = (Result)r1;
688     Assert.assertTrue(r.containsColumn(BYTES_FAMILY, qual));
689     byte[] value = r.getValue(BYTES_FAMILY, qual);
690     if (0 != Bytes.compareTo(val, value)) {
691       fail("Expected [" + Bytes.toStringBinary(val)
692           + "] but got [" + Bytes.toStringBinary(value) + "]");
693     }
694   }
695 
696   private List<Put> constructPutRequests() {
697     List<Put> puts = new ArrayList<>();
698     for (byte[] k : KEYS) {
699       Put put = new Put(k);
700       put.add(BYTES_FAMILY, QUALIFIER, VALUE);
701       puts.add(put);
702     }
703     return puts;
704   }
705 
  /**
   * Reads every key back and checks FAMILY:QUALIFIER == VALUE, retrying for
   * a short while because rows may not be readable immediately (e.g. right
   * after a region server abort and region re-assignment).
   * @param table the table to validate
   * @throws IOException if a read fails
   */
  private void validateLoadedData(Table table) throws IOException {
    // get the data back and validate that it is correct
    LOG.info("Validating data on " + table);
    List<Get> gets = new ArrayList<Get>();
    for (byte[] k : KEYS) {
      Get get = new Get(k);
      get.addColumn(BYTES_FAMILY, QUALIFIER);
      gets.add(get);
    }
    int retryNum = 10;
    Result[] results = null;
    do  {
      boolean finished = false;
      results = table.get(gets);
      if (results != null) {
        finished = true;
        for (Result result : results) {
          // An empty result means the row is not readable yet; retry.
          if (result.isEmpty()) {
            finished = false;
            break;
          }
        }
      }
      if (finished) {
        break;
      }
      try {
        Thread.sleep(10);
      } catch (InterruptedException e) {
      }
      retryNum--;
    } while (retryNum > 0);

    // retryNum == 0 means we fell out of the loop without a complete read.
    if (retryNum == 0) {
      fail("Timeout for validate data");
    } else {
      if (results != null) {
        for (Result r : results) {
          Assert.assertTrue(r.containsColumn(BYTES_FAMILY, QUALIFIER));
          Assert.assertEquals(0, Bytes.compareTo(VALUE, r
            .getValue(BYTES_FAMILY, QUALIFIER)));
        }
        LOG.info("Validating data on " + table + " successfully!");
      }
    }
  }
752 
753   private void validateEmpty(Object r1) {
754     Result result = (Result)r1;
755     Assert.assertTrue(result != null);
756     Assert.assertTrue(result.isEmpty());
757   }
758 
759   private void validateSizeAndEmpty(Object[] results, int expectedSize) {
760     // Validate got back the same number of Result objects, all empty
761     Assert.assertEquals(expectedSize, results.length);
762     for (Object result : results) {
763       validateEmpty(result);
764     }
765   }
766 
  /**
   * Worker that repeatedly writes its Put list through a buffered
   * (auto-flush disabled) HTable, flushing after each pass. Any failure is
   * captured in {@link #throwable} for the main test thread to assert on.
   */
  private static class MultiThread extends Thread {
    public Throwable throwable = null;
    private CountDownLatch endLatch;
    private CountDownLatch beginLatch;
    List<Put> puts;
    public MultiThread(List<Put> puts, CountDownLatch beginLatch, CountDownLatch endLatch) {
      this.puts = puts;
      this.beginLatch = beginLatch;
      this.endLatch = endLatch;
    }
    @Override
    public void run() {
      LOG.info("Start multi");
      HTable table = null;
      try {
        table = new HTable(UTIL.getConfiguration(), TEST_TABLE2);
        // Buffer client-side so each flushCommits() sends one multi request.
        table.setAutoFlush(false);
        beginLatch.await();
        for (int i = 0; i < 100; i++) {
          for(Put put : puts) {
            table.put(put);
          }
          table.flushCommits();
        }
      } catch (Throwable t) {
        // Remember the failure; the test asserts throwable == null later.
        throwable = t;
        LOG.warn("Error when put:", t);
      } finally {
        endLatch.countDown();
        if(table != null) {
          try {
            table.close();
          } catch (IOException ioe) {
            LOG.error("Error when close table", ioe);
          }
        }
      }
      LOG.info("End multi");
    }
  }
807 
808 
  /**
   * Worker that repeatedly increments the same rows the MultiThread workers
   * are writing, to contend for the same row locks. Any failure is captured
   * in {@link #throwable} for the main test thread to assert on.
   */
  private static class IncrementThread extends Thread {
    public Throwable throwable = null;
    private CountDownLatch endLatch;
    private CountDownLatch beginLatch;
    List<Put> puts;
    public IncrementThread(List<Put> puts, CountDownLatch beginLatch, CountDownLatch endLatch) {
      this.puts = puts;
      this.beginLatch = beginLatch;
      this.endLatch = endLatch;
    }
    @Override
    public void run() {
      LOG.info("Start inc");
      HTable table = null;
      try {
        table = new HTable(UTIL.getConfiguration(), TEST_TABLE2);
        beginLatch.await();
        for (int i = 0; i < 100; i++) {
          for(Put put : puts) {
            // Increment the same row keys the Put workers are mutating.
            Increment inc = new Increment(put.getRow());
            inc.addColumn(BYTES_FAMILY, BYTES_FAMILY, 1);
            table.increment(inc);
          }
        }
      } catch (Throwable t) {
        // Remember the failure; the test asserts throwable == null later.
        throwable = t;
        LOG.warn("Error when incr:", t);
      } finally {
        endLatch.countDown();
        if(table != null) {
          try {
            table.close();
          } catch (IOException ioe) {
            LOG.error("Error when close table", ioe);
          }
        }
      }
      LOG.info("End inc");
    }
  }
849 
850   /**
851    * UT for HBASE-18233, test for disordered batch mutation thread and
852    * increment won't lock each other
853    * @throws Exception if any error occurred
854    */
855   @Test(timeout=300000)
856   public void testMultiThreadWithRowLocks() throws Exception {
857     //set a short timeout to get timeout exception when getting row lock fail
858     UTIL.getConfiguration().setInt("hbase.rpc.timeout", 2000);
859     UTIL.getConfiguration().setInt("hbase.client.operation.timeout", 4000);
860     UTIL.getConfiguration().setInt("hbase.client.retries.number", 10);
861 
862     UTIL.createTable(TEST_TABLE2, BYTES_FAMILY);
863     List<Put> puts = new ArrayList<>();
864     for(int i = 0; i < 10; i++) {
865       Put put = new Put(Bytes.toBytes(i));
866       put.add(BYTES_FAMILY, BYTES_FAMILY, Bytes.toBytes((long)0));
867       puts.add(put);
868     }
869     List<Put> reversePuts = new ArrayList<>(puts);
870     Collections.reverse(reversePuts);
871     int NUM_OF_THREAD = 12;
872     CountDownLatch latch = new CountDownLatch(NUM_OF_THREAD);
873     CountDownLatch beginLatch = new CountDownLatch(1);
874     int threadNum = NUM_OF_THREAD / 4;
875     List<MultiThread> multiThreads = new ArrayList<>();
876     List<IncrementThread> incThreads = new ArrayList<>();
877     for(int i = 0; i < threadNum; i ++) {
878       MultiThread thread = new MultiThread(reversePuts, beginLatch, latch);
879       thread.start();
880       multiThreads.add(thread);
881     }
882     for(int i = 0; i < threadNum; i++) {
883       MultiThread thread = new MultiThread(puts, beginLatch, latch);
884       thread.start();
885       multiThreads.add(thread);
886     }
887     for(int i = 0; i < threadNum; i ++) {
888       IncrementThread thread = new IncrementThread(reversePuts, beginLatch, latch);
889       thread.start();
890       incThreads.add(thread);
891     }
892     for(int i = 0; i < threadNum; i++) {
893       IncrementThread thread = new IncrementThread(puts, beginLatch, latch);
894       thread.start();
895       incThreads.add(thread);
896     }
897     long timeBegin = System.currentTimeMillis();
898     beginLatch.countDown();
899     latch.await();
900     LOG.error("Time took:" + (System.currentTimeMillis() - timeBegin));
901     for(MultiThread thread : multiThreads) {
902       if (thread != null && thread.throwable != null) {
903         LOG.error(thread.throwable);
904       }
905       Assert.assertTrue(thread.throwable == null);
906     }
907     for(IncrementThread thread : incThreads) {
908       Assert.assertTrue(thread.throwable == null);
909     }
910 
911   }
912 }