View Javadoc

1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase.regionserver;
19  import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
20  import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
21  import static org.junit.Assert.assertEquals;
22  import static org.junit.Assert.assertNotNull;
23  import static org.junit.Assert.assertNull;
24  import static org.junit.Assert.assertTrue;
25  import static org.junit.Assert.fail;
26  
27  import java.io.IOException;
28  import java.util.ArrayList;
29  import java.util.Arrays;
30  import java.util.List;
31  import java.util.Random;
32  import java.util.concurrent.CountDownLatch;
33  import java.util.concurrent.atomic.AtomicInteger;
34  import java.util.concurrent.atomic.AtomicLong;
35  
36  import org.apache.commons.logging.Log;
37  import org.apache.commons.logging.LogFactory;
38  import org.apache.hadoop.conf.Configuration;
39  import org.apache.hadoop.fs.FileSystem;
40  import org.apache.hadoop.fs.Path;
41  import org.apache.hadoop.hbase.Cell;
42  import org.apache.hadoop.hbase.CellUtil;
43  import org.apache.hadoop.hbase.HBaseTestingUtility;
44  import org.apache.hadoop.hbase.HColumnDescriptor;
45  import org.apache.hadoop.hbase.HConstants;
46  import org.apache.hadoop.hbase.HRegionInfo;
47  import org.apache.hadoop.hbase.HTableDescriptor;
48  import org.apache.hadoop.hbase.MultithreadedTestUtil;
49  import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
50  import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
51  import org.apache.hadoop.hbase.TableName;
52  import org.apache.hadoop.hbase.client.Append;
53  import org.apache.hadoop.hbase.client.Delete;
54  import org.apache.hadoop.hbase.client.Durability;
55  import org.apache.hadoop.hbase.client.Get;
56  import org.apache.hadoop.hbase.client.Increment;
57  import org.apache.hadoop.hbase.client.Mutation;
58  import org.apache.hadoop.hbase.client.Put;
59  import org.apache.hadoop.hbase.client.Result;
60  import org.apache.hadoop.hbase.client.RowMutations;
61  import org.apache.hadoop.hbase.client.Scan;
62  import org.apache.hadoop.hbase.filter.BinaryComparator;
63  import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
64  import org.apache.hadoop.hbase.io.HeapSize;
65  import org.apache.hadoop.hbase.io.hfile.BlockCache;
66  import org.apache.hadoop.hbase.testclassification.MediumTests;
67  import org.apache.hadoop.hbase.util.Bytes;
68  import org.apache.hadoop.hbase.wal.WAL;
69  import org.junit.After;
70  import org.junit.Before;
71  import org.junit.Rule;
72  import org.junit.Test;
73  import org.junit.experimental.categories.Category;
74  import org.junit.rules.TestName;
75  
/**
 * Testing of HRegion.incrementColumnValue, HRegion.increment,
 * and HRegion.append
 */
@Category(MediumTests.class) // Starts 100 threads
public class TestAtomicOperation {
  private static final Log LOG = LogFactory.getLog(TestAtomicOperation.class);
  @Rule public TestName name = new TestName();

  // Region under test; created per-test via initHRegion() and torn down in teardown().
  HRegion region = null;
  private HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();

  // Test names
  // Table name derived from the currently running test method (set in setup()).
  static  byte[] tableName;
  // Column qualifiers, cell values and row keys shared across the tests below.
  static final byte[] qual1 = Bytes.toBytes("qual1");
  static final byte[] qual2 = Bytes.toBytes("qual2");
  static final byte[] qual3 = Bytes.toBytes("qual3");
  static final byte[] value1 = Bytes.toBytes("value1");
  static final byte[] value2 = Bytes.toBytes("value2");
  static final byte [] row = Bytes.toBytes("rowA");
  static final byte [] row2 = Bytes.toBytes("rowB");
  @Before
  public void setup() {
    // Name the table after the test method so each test works on its own table.
    tableName = Bytes.toBytes(name.getMethodName());
  }
102 
103   @After
104   public void teardown() throws IOException {
105     if (region != null) {
106       BlockCache bc = region.getStores().get(0).getCacheConfig().getBlockCache();
107       ((HRegion)region).close();
108       WAL wal = ((HRegion)region).getWAL();
109       if (wal != null) wal.close();
110       if (bc != null) bc.shutdown();
111       region = null;
112     }
113   }
114   //////////////////////////////////////////////////////////////////////////////
115   // New tests that doesn't spin up a mini cluster but rather just test the
116   // individual code pieces in the HRegion.
117   //////////////////////////////////////////////////////////////////////////////
118 
119   /**
120    * Test basic append operation.
121    * More tests in
122    * @see org.apache.hadoop.hbase.client.TestFromClientSide#testAppend()
123    */
124   @Test
125   public void testAppend() throws IOException {
126     initHRegion(tableName, name.getMethodName(), fam1);
127     String v1 = "Ultimate Answer to the Ultimate Question of Life,"+
128     " The Universe, and Everything";
129     String v2 = " is... 42.";
130     Append a = new Append(row);
131     a.setReturnResults(false);
132     a.add(fam1, qual1, Bytes.toBytes(v1));
133     a.add(fam1, qual2, Bytes.toBytes(v2));
134     assertNull(region.append(a, HConstants.NO_NONCE, HConstants.NO_NONCE));
135     a = new Append(row);
136     a.add(fam1, qual1, Bytes.toBytes(v2));
137     a.add(fam1, qual2, Bytes.toBytes(v1));
138     Result result = region.append(a, HConstants.NO_NONCE, HConstants.NO_NONCE);
139     assertEquals(0, Bytes.compareTo(Bytes.toBytes(v1+v2), result.getValue(fam1, qual1)));
140     assertEquals(0, Bytes.compareTo(Bytes.toBytes(v2+v1), result.getValue(fam1, qual2)));
141   }
142 
143   @Test
144   public void testAppendWithMultipleFamilies() throws IOException {
145     final byte[] fam3 = Bytes.toBytes("colfamily31");
146     initHRegion(tableName, name.getMethodName(), fam1, fam2, fam3);
147     String v1 = "Appended";
148     String v2 = "Value";
149 
150     Append a = new Append(row);
151     a.setReturnResults(false);
152     a.add(fam1, qual1, Bytes.toBytes(v1));
153     a.add(fam2, qual2, Bytes.toBytes(v2));
154     assertNull(region.append(a, HConstants.NO_NONCE, HConstants.NO_NONCE));
155 
156     a = new Append(row);
157     a.add(fam2, qual2, Bytes.toBytes(v1));
158     a.add(fam1, qual1, Bytes.toBytes(v2));
159     a.add(fam3, qual3, Bytes.toBytes(v2));
160     a.add(fam1, qual2, Bytes.toBytes(v1));
161 
162     Result result = region.append(a, HConstants.NO_NONCE, HConstants.NO_NONCE);
163 
164     byte[] actualValue1 = result.getValue(fam1, qual1);
165     byte[] actualValue2 = result.getValue(fam2, qual2);
166     byte[] actualValue3 = result.getValue(fam3, qual3);
167     byte[] actualValue4 = result.getValue(fam1, qual2);
168 
169     assertNotNull("Value1 should bot be null", actualValue1);
170     assertNotNull("Value2 should bot be null", actualValue2);
171     assertNotNull("Value3 should bot be null", actualValue3);
172     assertNotNull("Value4 should bot be null", actualValue4);
173     assertEquals(0, Bytes.compareTo(Bytes.toBytes(v1 + v2), actualValue1));
174     assertEquals(0, Bytes.compareTo(Bytes.toBytes(v2 + v1), actualValue2));
175     assertEquals(0, Bytes.compareTo(Bytes.toBytes(v2), actualValue3));
176     assertEquals(0, Bytes.compareTo(Bytes.toBytes(v1), actualValue4));
177   }
178 
179   @Test
180   public void testAppendWithNonExistingFamily() throws IOException {
181     initHRegion(tableName, name.getMethodName(), fam1);
182     final String v1 = "Value";
183     final Append a = new Append(row);
184     a.add(fam1, qual1, Bytes.toBytes(v1));
185     a.add(fam2, qual2, Bytes.toBytes(v1));
186     Result result = null;
187     try {
188       result = region.append(a, HConstants.NO_NONCE, HConstants.NO_NONCE);
189       fail("Append operation should fail with NoSuchColumnFamilyException.");
190     } catch (NoSuchColumnFamilyException e) {
191       assertEquals(null, result);
192     } catch (Exception e) {
193       fail("Append operation should fail with NoSuchColumnFamilyException.");
194     }
195   }
196 
197   @Test
198   public void testIncrementWithNonExistingFamily() throws IOException {
199     initHRegion(tableName, name.getMethodName(), fam1);
200     final Increment inc = new Increment(row);
201     inc.addColumn(fam1, qual1, 1);
202     inc.addColumn(fam2, qual2, 1);
203     inc.setDurability(Durability.ASYNC_WAL);
204     try {
205       region.increment(inc, HConstants.NO_NONCE, HConstants.NO_NONCE);
206     } catch (NoSuchColumnFamilyException e) {
207       final Get g = new Get(row);
208       final Result result = region.get(g);
209       assertEquals(null, result.getValue(fam1, qual1));
210       assertEquals(null, result.getValue(fam2, qual2));
211     } catch (Exception e) {
212       fail("Increment operation should fail with NoSuchColumnFamilyException.");
213     }
214   }
215 
216   /**
217    * Test multi-threaded increments.
218    */
219   @Test
220   public void testIncrementMultiThreads() throws IOException {
221     LOG.info("Starting test testIncrementMultiThreads");
222     // run a with mixed column families (1 and 3 versions)
223     initHRegion(tableName, name.getMethodName(), new int[] {1,3}, fam1, fam2);
224 
225     // Create 100 threads, each will increment by its own quantity
226     int numThreads = 100;
227     int incrementsPerThread = 1000;
228     Incrementer[] all = new Incrementer[numThreads];
229     int expectedTotal = 0;
230     // create all threads
231     for (int i = 0; i < numThreads; i++) {
232       all[i] = new Incrementer(region, i, i, incrementsPerThread);
233       expectedTotal += (i * incrementsPerThread);
234     }
235 
236     // run all threads
237     for (int i = 0; i < numThreads; i++) {
238       all[i].start();
239     }
240 
241     // wait for all threads to finish
242     for (int i = 0; i < numThreads; i++) {
243       try {
244         all[i].join();
245       } catch (InterruptedException e) {
246         LOG.info("Ignored", e);
247       }
248     }
249     assertICV(row, fam1, qual1, expectedTotal);
250     assertICV(row, fam1, qual2, expectedTotal*2);
251     assertICV(row, fam2, qual3, expectedTotal*3);
252     LOG.info("testIncrementMultiThreads successfully verified that total is " + expectedTotal);
253   }
254 
255 
256   private void assertICV(byte [] row,
257                          byte [] familiy,
258                          byte[] qualifier,
259                          long amount) throws IOException {
260     // run a get and see?
261     Get get = new Get(row);
262     get.addColumn(familiy, qualifier);
263     Result result = region.get(get);
264     assertEquals(1, result.size());
265 
266     Cell kv = result.rawCells()[0];
267     long r = Bytes.toLong(CellUtil.cloneValue(kv));
268     assertEquals(amount, r);
269   }
270 
  /**
   * Creates {@link #region} with the given families, one version each.
   * Convenience overload that delegates with {@code maxVersions == null}.
   */
  private void initHRegion (byte [] tableName, String callingMethod,
      byte[] ... families)
    throws IOException {
    initHRegion(tableName, callingMethod, null, families);
  }
276 
277   private void initHRegion (byte [] tableName, String callingMethod, int [] maxVersions,
278     byte[] ... families)
279   throws IOException {
280     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
281     int i=0;
282     for(byte [] family : families) {
283       HColumnDescriptor hcd = new HColumnDescriptor(family);
284       hcd.setMaxVersions(maxVersions != null ? maxVersions[i++] : 1);
285       htd.addFamily(hcd);
286     }
287     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
288     region = TEST_UTIL.createLocalHRegion(info, htd);
289   }
290 
  /**
   * A thread that makes a few increment calls
   */
  public static class Incrementer extends Thread {

    private final Region region;
    // Number of increment operations this thread performs.
    private final int numIncrements;
    // Base amount; qual1 grows by amount, qual2 by 2*amount, qual3 by 3*amount.
    private final int amount;


    public Incrementer(Region region,
        int threadNumber, int amount, int numIncrements) {
      super("incrementer." + threadNumber);
      this.region = region;
      this.numIncrements = numIncrements;
      this.amount = amount;
      setDaemon(true);
    }

    @Override
    public void run() {
      for (int i = 0; i < numIncrements; i++) {
        try {
          Increment inc = new Increment(row);
          inc.addColumn(fam1, qual1, amount);
          inc.addColumn(fam1, qual2, amount*2);
          inc.addColumn(fam2, qual3, amount*3);
          inc.setDurability(Durability.ASYNC_WAL);
          region.increment(inc, HConstants.NO_NONCE, HConstants.NO_NONCE);

          // verify: Make sure we only see completed increments
          // The 1:2:3 ratio between the three columns must hold at every
          // read; a violation would mean a partially-applied increment
          // became visible to a concurrent reader.
          Get g = new Get(row);
          Result result = region.get(g);
          if (result != null) {
            assertTrue(result.getValue(fam1, qual1) != null);
            assertTrue(result.getValue(fam1, qual2) != null);
            assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*2,
              Bytes.toLong(result.getValue(fam1, qual2)));
            assertTrue(result.getValue(fam2, qual3) != null);
            assertEquals(Bytes.toLong(result.getValue(fam1, qual1))*3,
              Bytes.toLong(result.getValue(fam2, qual3)));
          }
        } catch (IOException e) {
          // NOTE(review): failures here are only printed, not recorded; a
          // lost increment is still caught by the owning test's final totals.
          e.printStackTrace();
        }
      }
    }
  }
339 
340   @Test
341   public void testAppendMultiThreads() throws IOException {
342     LOG.info("Starting test testAppendMultiThreads");
343     // run a with mixed column families (1 and 3 versions)
344     initHRegion(tableName, name.getMethodName(), new int[] {1,3}, fam1, fam2);
345 
346     int numThreads = 100;
347     int opsPerThread = 100;
348     AtomicOperation[] all = new AtomicOperation[numThreads];
349     final byte[] val = new byte[]{1};
350 
351     AtomicInteger failures = new AtomicInteger(0);
352     // create all threads
353     for (int i = 0; i < numThreads; i++) {
354       all[i] = new AtomicOperation(region, opsPerThread, null, failures) {
355         @Override
356         public void run() {
357           for (int i=0; i<numOps; i++) {
358             try {
359               Append a = new Append(row);
360               a.add(fam1, qual1, val);
361               a.add(fam1, qual2, val);
362               a.add(fam2, qual3, val);
363               a.setDurability(Durability.ASYNC_WAL);
364               region.append(a, HConstants.NO_NONCE, HConstants.NO_NONCE);
365 
366               Get g = new Get(row);
367               Result result = region.get(g);
368               assertEquals(result.getValue(fam1, qual1).length,
369                   result.getValue(fam1, qual2).length);
370               assertEquals(result.getValue(fam1, qual1).length,
371                   result.getValue(fam2, qual3).length);
372             } catch (IOException e) {
373               e.printStackTrace();
374               failures.incrementAndGet();
375               fail();
376             }
377           }
378         }
379       };
380     }
381 
382     // run all threads
383     for (int i = 0; i < numThreads; i++) {
384       all[i].start();
385     }
386 
387     // wait for all threads to finish
388     for (int i = 0; i < numThreads; i++) {
389       try {
390         all[i].join();
391       } catch (InterruptedException e) {
392       }
393     }
394     assertEquals(0, failures.get());
395     Get g = new Get(row);
396     Result result = region.get(g);
397     assertEquals(result.getValue(fam1, qual1).length, 10000);
398     assertEquals(result.getValue(fam1, qual2).length, 10000);
399     assertEquals(result.getValue(fam2, qual3).length, 10000);
400   }
  /**
   * Test multi-threaded row mutations.
   * Ten threads alternate between (put qual1, delete qual2) and
   * (delete qual1, put qual2) via mutateRow(); if mutateRow is atomic,
   * every read in between sees exactly one column.
   */
  @Test
  public void testRowMutationMultiThreads() throws IOException {
    LOG.info("Starting test testRowMutationMultiThreads");
    initHRegion(tableName, name.getMethodName(), fam1);

    // create 10 threads, each will alternate between adding and
    // removing a column
    int numThreads = 10;
    int opsPerThread = 250;
    AtomicOperation[] all = new AtomicOperation[numThreads];

    // Shared monotonic timestamp source so every mutation gets a distinct ts.
    AtomicLong timeStamps = new AtomicLong(0);
    AtomicInteger failures = new AtomicInteger(0);
    // create all threads
    for (int i = 0; i < numThreads; i++) {
      all[i] = new AtomicOperation(region, opsPerThread, timeStamps, failures) {
        @Override
        public void run() {
          boolean op = true;
          for (int i=0; i<numOps; i++) {
            try {
              // throw in some flushes (and occasional compactions) to
              // exercise atomicity across memstore/HFile boundaries
              if (i%10==0) {
                synchronized(region) {
                  LOG.debug("flushing");
                  region.flush(true);
                  if (i%100==0) {
                    region.compact(false);
                  }
                }
              }
              long ts = timeStamps.incrementAndGet();
              RowMutations rm = new RowMutations(row);
              if (op) {
                Put p = new Put(row, ts);
                p.add(fam1, qual1, value1);
                p.setDurability(Durability.ASYNC_WAL);
                rm.add(p);
                Delete d = new Delete(row);
                d.deleteColumns(fam1, qual2, ts);
                d.setDurability(Durability.ASYNC_WAL);
                rm.add(d);
              } else {
                Delete d = new Delete(row);
                d.deleteColumns(fam1, qual1, ts);
                d.setDurability(Durability.ASYNC_WAL);
                rm.add(d);
                Put p = new Put(row, ts);
                p.add(fam1, qual2, value2);
                p.setDurability(Durability.ASYNC_WAL);
                rm.add(p);
              }
              region.mutateRow(rm);
              op ^= true;
              // check: should always see exactly one column
              Get g = new Get(row);
              Result r = region.get(g);
              if (r.size() != 1) {
                LOG.debug(r);
                failures.incrementAndGet();
                fail();
              }
            } catch (IOException e) {
              e.printStackTrace();
              failures.incrementAndGet();
              fail();
            }
          }
        }
      };
    }

    // run all threads
    for (int i = 0; i < numThreads; i++) {
      all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
      try {
        all[i].join();
      } catch (InterruptedException e) {
        // NOTE(review): interrupt is swallowed here; consider re-interrupting.
      }
    }
    assertEquals(0, failures.get());
  }
490 
491 
  /**
   * Test multi-threaded region mutations.
   * Like testRowMutationMultiThreads, but the put/delete pair spans TWO rows
   * and goes through mutateRowsWithLocks(); a scan must always see exactly
   * one cell across the locked rows.
   */
  @Test
  public void testMultiRowMutationMultiThreads() throws IOException {

    LOG.info("Starting test testMultiRowMutationMultiThreads");
    initHRegion(tableName, name.getMethodName(), fam1);

    // create 10 threads, each will alternate between adding and
    // removing a column
    int numThreads = 10;
    int opsPerThread = 250;
    AtomicOperation[] all = new AtomicOperation[numThreads];

    // Shared monotonic timestamp source so every mutation gets a distinct ts.
    AtomicLong timeStamps = new AtomicLong(0);
    AtomicInteger failures = new AtomicInteger(0);
    // Both rows are locked together for each multi-row mutation.
    final List<byte[]> rowsToLock = Arrays.asList(row, row2);
    // create all threads
    for (int i = 0; i < numThreads; i++) {
      all[i] = new AtomicOperation(region, opsPerThread, timeStamps, failures) {
        @Override
        public void run() {
          boolean op = true;
          for (int i=0; i<numOps; i++) {
            try {
              // throw in some flushes (and occasional compactions)
              if (i%10==0) {
                synchronized(region) {
                  LOG.debug("flushing");
                  region.flush(true);
                  if (i%100==0) {
                    region.compact(false);
                  }
                }
              }
              long ts = timeStamps.incrementAndGet();
              List<Mutation> mrm = new ArrayList<Mutation>();
              // Alternate which row holds the single live cell.
              if (op) {
                Put p = new Put(row2, ts);
                p.add(fam1, qual1, value1);
                p.setDurability(Durability.ASYNC_WAL);
                mrm.add(p);
                Delete d = new Delete(row);
                d.deleteColumns(fam1, qual1, ts);
                d.setDurability(Durability.ASYNC_WAL);
                mrm.add(d);
              } else {
                Delete d = new Delete(row2);
                d.deleteColumns(fam1, qual1, ts);
                d.setDurability(Durability.ASYNC_WAL);
                mrm.add(d);
                Put p = new Put(row, ts);
                p.setDurability(Durability.ASYNC_WAL);
                p.add(fam1, qual1, value2);
                mrm.add(p);
              }
              region.mutateRowsWithLocks(mrm, rowsToLock, HConstants.NO_NONCE, HConstants.NO_NONCE);
              op ^= true;
              // check: should always see exactly one column
              // (scan from `row`, which sorts before `row2`, draining the
              // scanner completely before counting)
              Scan s = new Scan(row);
              RegionScanner rs = region.getScanner(s);
              List<Cell> r = new ArrayList<Cell>();
              while (rs.next(r))
                ;
              rs.close();
              if (r.size() != 1) {
                LOG.debug(r);
                failures.incrementAndGet();
                fail();
              }
            } catch (IOException e) {
              e.printStackTrace();
              failures.incrementAndGet();
              fail();
            }
          }
        }
      };
    }

    // run all threads
    for (int i = 0; i < numThreads; i++) {
      all[i].start();
    }

    // wait for all threads to finish
    for (int i = 0; i < numThreads; i++) {
      try {
        all[i].join();
      } catch (InterruptedException e) {
        // NOTE(review): interrupt is swallowed here; consider re-interrupting.
      }
    }
    assertEquals(0, failures.get());
  }
587 
  /**
   * Base class for the worker threads used by the multi-threaded tests;
   * subclasses supply run(). Shared counters are owned by the calling test.
   */
  public static class AtomicOperation extends Thread {
    protected final HRegion region;
    // Number of operations this thread performs.
    protected final int numOps;
    // Shared monotonic timestamp source; may be null when a test doesn't need it.
    protected final AtomicLong timeStamps;
    // Shared failure counter, checked by the owning test after join().
    protected final AtomicInteger failures;
    protected final Random r = new Random();

    public AtomicOperation(HRegion region, int numOps, AtomicLong timeStamps,
        AtomicInteger failures) {
      this.region = region;
      this.numOps = numOps;
      this.timeStamps = timeStamps;
      this.failures = failures;
    }
  }
603 
  // Used by MockHRegion to signal the put thread that the checkAndPut thread
  // has reached row-lock acquisition (see WrappedRowLock.release()).
  private static CountDownLatch latch = new CountDownLatch(1);
  // Steps of the HBASE-7051 interleaving, advanced by the two racing threads.
  private enum TestStep {
    INIT,                  // initial put of 10 to set value of the cell
    PUT_STARTED,           // began doing a put of 50 to cell
    PUT_COMPLETED,         // put complete (released RowLock, but may not have advanced MVCC).
    CHECKANDPUT_STARTED,   // began checkAndPut: if 10 -> 11
    CHECKANDPUT_COMPLETED  // completed checkAndPut
    // NOTE: at the end of these steps, the value of the cell should be 50, not 11!
  }
  // volatile: written and read by multiple test threads without locking.
  private static volatile TestStep testStep = TestStep.INIT;
  private final String family = "f1";
615 
  /**
   * Test written as a verifier for HBASE-7051, CheckAndPut should properly read
   * MVCC.
   *
   * Moved into TestAtomicOperation from its original location, TestHBase7051
   */
  @Test
  public void testPutAndCheckAndPutInParallel() throws Exception {

    final String tableName = "testPutAndCheckAndPut";
    Configuration conf = TEST_UTIL.getConfiguration();
    // Use MockHRegion so the test can hook row-lock release and force the
    // problematic interleaving (see MockHRegion.WrappedRowLock).
    conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class);
    final MockHRegion region = (MockHRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
        null, null, tableName, conf, false, Durability.SYNC_WAL, null, Bytes.toBytes(family));

    // Seed the cell with "10" while testStep is still INIT.
    Put[] puts = new Put[1];
    Put put = new Put(Bytes.toBytes("r1"));
    put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10"));
    puts[0] = put;

    region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE);
    MultithreadedTestUtil.TestContext ctx =
      new MultithreadedTestUtil.TestContext(conf);
    // Race a put of "50" against a checkAndPut expecting "10" -> "11".
    ctx.addThread(new PutThread(ctx, region));
    ctx.addThread(new CheckAndPutThread(ctx, region));
    ctx.startThreads();
    // Poll until the choreographed sequence has run to completion.
    while (testStep != TestStep.CHECKANDPUT_COMPLETED) {
      Thread.sleep(100);
    }
    ctx.stop();
    Scan s = new Scan();
    RegionScanner scanner = region.getScanner(s);
    List<Cell> results = new ArrayList<Cell>();
    ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(2).build();
    scanner.next(results, scannerContext);
    // If checkAndPut read MVCC correctly it must have failed its check,
    // leaving the put's "50" (not "11") in the cell.
    for (Cell keyValue : results) {
      assertEquals("50",Bytes.toString(CellUtil.cloneValue(keyValue)));
    }
  }
655 
656   private class PutThread extends TestThread {
657     private HRegion region;
658     PutThread(TestContext ctx, HRegion region) {
659       super(ctx);
660       this.region = region;
661     }
662 
663     public void doWork() throws Exception {
664       Put[] puts = new Put[1];
665       Put put = new Put(Bytes.toBytes("r1"));
666       put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("50"));
667       puts[0] = put;
668       testStep = TestStep.PUT_STARTED;
669       region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE);
670     }
671   }
672 
673   private class CheckAndPutThread extends TestThread {
674     private HRegion region;
675     CheckAndPutThread(TestContext ctx, HRegion region) {
676       super(ctx);
677       this.region = region;
678    }
679 
680     public void doWork() throws Exception {
681       Put[] puts = new Put[1];
682       Put put = new Put(Bytes.toBytes("r1"));
683       put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("11"));
684       puts[0] = put;
685       while (testStep != TestStep.PUT_COMPLETED) {
686         Thread.sleep(100);
687       }
688       testStep = TestStep.CHECKANDPUT_STARTED;
689       region.checkAndMutate(Bytes.toBytes("r1"), Bytes.toBytes(family), Bytes.toBytes("q1"),
690         CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("10")), put, true);
691       testStep = TestStep.CHECKANDPUT_COMPLETED;
692     }
693   }
694 
  /**
   * HRegion subclass that wraps row locks so the test can force the exact
   * HBASE-7051 interleaving between a put and a concurrent checkAndPut.
   */
  public static class MockHRegion extends HRegion {

    public MockHRegion(Path tableDir, WAL log, FileSystem fs, Configuration conf,
        final HRegionInfo regionInfo, final HTableDescriptor htd, RegionServerServices rsServices) {
      super(tableDir, log, fs, conf, regionInfo, htd, rsServices);
    }

    @Override
    public RowLock getRowLockInternal(final byte[] row, boolean readLock, boolean waitForLock,
        final RowLock prevRowLock) throws IOException {
      // Signal the put thread (blocked in WrappedRowLock.release() below)
      // that the checkAndPut has reached lock acquisition.
      if (testStep == TestStep.CHECKANDPUT_STARTED) {
        latch.countDown();
      }
      return new WrappedRowLock(super.getRowLockInternal(row, readLock, waitForLock, prevRowLock));
    }

    // Delegating RowLock whose release() stalls the put thread at the exact
    // point where the row lock is free but the MVCC has not yet advanced.
    public class WrappedRowLock implements RowLock {

      private final RowLock rowLock;

      private WrappedRowLock(RowLock rowLock) {
        this.rowLock = rowLock;
      }


      @Override
      public void release() {
        // During seeding (INIT) behave like a plain lock.
        if (testStep == TestStep.INIT) {
          this.rowLock.release();
          return;
        }

        if (testStep == TestStep.PUT_STARTED) {
          try {
            testStep = TestStep.PUT_COMPLETED;
            this.rowLock.release();
            // put has been written to the memstore and the row lock has been released, but the
            // MVCC has not been advanced.  Prior to fixing HBASE-7051, the following order of
            // operations would cause the non-atomicity to show up:
            // 1) Put releases row lock (where we are now)
            // 2) CheckAndPut grabs row lock and reads the value prior to the put (10)
            //    because the MVCC has not advanced
            // 3) Put advances MVCC
            // So, in order to recreate this order, we wait for the checkAndPut to grab the rowLock
            // (see below), and then wait some more to give the checkAndPut time to read the old
            // value.
            latch.await();
            Thread.sleep(1000);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
          }
        }
        else if (testStep == TestStep.CHECKANDPUT_STARTED) {
          this.rowLock.release();
        }
      }
    }
  }
753 }