View Javadoc

1   /*
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.regionserver;
21  
22  import static org.junit.Assert.assertArrayEquals;
23  import static org.junit.Assert.assertEquals;
24  import static org.junit.Assert.assertNull;
25  import static org.junit.Assert.assertTrue;
26  import static org.mockito.Matchers.any;
27  import static org.mockito.Mockito.spy;
28  import static org.mockito.Mockito.times;
29  import static org.mockito.Mockito.verify;
30  
31  import com.google.common.collect.Lists;
32  import java.io.IOException;
33  import java.lang.ref.SoftReference;
34  import java.security.PrivilegedExceptionAction;
35  import java.util.ArrayList;
36  import java.util.Arrays;
37  import java.util.Collection;
38  import java.util.Collections;
39  import java.util.Iterator;
40  import java.util.List;
41  import java.util.ListIterator;
42  import java.util.NavigableSet;
43  import java.util.TreeSet;
44  import java.util.concurrent.ConcurrentSkipListSet;
45  import java.util.concurrent.ExecutorService;
46  import java.util.concurrent.Executors;
47  import java.util.concurrent.TimeUnit;
48  import java.util.concurrent.atomic.AtomicBoolean;
49  import org.apache.commons.logging.Log;
50  import org.apache.commons.logging.LogFactory;
51  import org.apache.hadoop.conf.Configuration;
52  import org.apache.hadoop.fs.FSDataOutputStream;
53  import org.apache.hadoop.fs.FileStatus;
54  import org.apache.hadoop.fs.FileSystem;
55  import org.apache.hadoop.fs.FilterFileSystem;
56  import org.apache.hadoop.fs.LocalFileSystem;
57  import org.apache.hadoop.fs.Path;
58  import org.apache.hadoop.fs.permission.FsPermission;
59  import org.apache.hadoop.hbase.Cell;
60  import org.apache.hadoop.hbase.CellUtil;
61  import org.apache.hadoop.hbase.HBaseConfiguration;
62  import org.apache.hadoop.hbase.HBaseTestingUtility;
63  import org.apache.hadoop.hbase.HColumnDescriptor;
64  import org.apache.hadoop.hbase.HRegionInfo;
65  import org.apache.hadoop.hbase.HTableDescriptor;
66  import org.apache.hadoop.hbase.KeyValue;
67  import org.apache.hadoop.hbase.KeyValue.KVComparator;
68  import org.apache.hadoop.hbase.KeyValueUtil;
69  import org.apache.hadoop.hbase.TableName;
70  import org.apache.hadoop.hbase.client.Get;
71  import org.apache.hadoop.hbase.client.Scan;
72  import org.apache.hadoop.hbase.filter.Filter;
73  import org.apache.hadoop.hbase.filter.FilterBase;
74  import org.apache.hadoop.hbase.io.compress.Compression;
75  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
76  import org.apache.hadoop.hbase.io.hfile.CacheConfig;
77  import org.apache.hadoop.hbase.io.hfile.HFile;
78  import org.apache.hadoop.hbase.io.hfile.HFileContext;
79  import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
80  import org.apache.hadoop.hbase.monitoring.MonitoredTask;
81  import static org.apache.hadoop.hbase.regionserver.MemStoreChunkPool.CHUNK_POOL_MAXSIZE_KEY;
82  import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
83  import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
84  import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher;
85  import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
86  import org.apache.hadoop.hbase.security.User;
87  import org.apache.hadoop.hbase.testclassification.MediumTests;
88  import org.apache.hadoop.hbase.util.Bytes;
89  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
90  import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
91  import org.apache.hadoop.hbase.util.FSUtils;
92  import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
93  import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
94  import org.apache.hadoop.hbase.wal.DefaultWALProvider;
95  import org.apache.hadoop.hbase.wal.WALFactory;
96  import org.apache.hadoop.util.Progressable;
97  import org.junit.After;
98  import org.junit.Assert;
99  import static org.junit.Assert.assertEquals;
100 import static org.junit.Assert.assertTrue;
101 import org.junit.Before;
102 import org.junit.Rule;
103 import org.junit.Test;
104 import org.junit.experimental.categories.Category;
105 import org.junit.rules.TestName;
106 import org.mockito.Mockito;
107 
108 /**
109  * Test class for the Store
110  */
111 @Category(MediumTests.class)
112 public class TestStore {
113   private static final Log LOG = LogFactory.getLog(TestStore.class);
114   @Rule public TestName name = new TestName();
115 
116   HStore store;
117   byte [] table = Bytes.toBytes("table");
118   byte [] family = Bytes.toBytes("family");
119 
120   byte [] row = Bytes.toBytes("row");
121   byte [] row2 = Bytes.toBytes("row2");
122   byte [] qf1 = Bytes.toBytes("qf1");
123   byte [] qf2 = Bytes.toBytes("qf2");
124   byte [] qf3 = Bytes.toBytes("qf3");
125   byte [] qf4 = Bytes.toBytes("qf4");
126   byte [] qf5 = Bytes.toBytes("qf5");
127   byte [] qf6 = Bytes.toBytes("qf6");
128 
129   NavigableSet<byte[]> qualifiers =
130     new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
131 
132   List<Cell> expected = new ArrayList<Cell>();
133   List<Cell> result = new ArrayList<Cell>();
134 
135   long id = System.currentTimeMillis();
136   Get get = new Get(row);
137 
138   private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
139   private final String DIR = TEST_UTIL.getDataTestDir("TestStore").toString();
140 
141 
142   /**
143    * Setup
144    * @throws IOException
145    */
146   @Before
147   public void setUp() throws IOException {
148     qualifiers.clear();
149     qualifiers.add(qf1);
150     qualifiers.add(qf3);
151     qualifiers.add(qf5);
152 
153     Iterator<byte[]> iter = qualifiers.iterator();
154     while(iter.hasNext()){
155       byte [] next = iter.next();
156       expected.add(new KeyValue(row, family, next, 1, (byte[])null));
157       get.addColumn(family, next);
158     }
159   }
160 
161   private void init(String methodName) throws IOException {
162     init(methodName, TEST_UTIL.getConfiguration());
163   }
164 
165   private void init(String methodName, Configuration conf)
166   throws IOException {
167     HColumnDescriptor hcd = new HColumnDescriptor(family);
168     // some of the tests write 4 versions and then flush
169     // (with HBASE-4241, lower versions are collected on flush)
170     hcd.setMaxVersions(4);
171     init(methodName, conf, hcd);
172   }
173 
174   private void init(String methodName, Configuration conf,
175       HColumnDescriptor hcd) throws IOException {
176     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
177     init(methodName, conf, htd, hcd);
178   }
179 
180   private Store init(String methodName, Configuration conf, HTableDescriptor htd,
181       HColumnDescriptor hcd) throws IOException {
182     return init(methodName, conf, htd, hcd, null);
183   }
184 
185   @SuppressWarnings("deprecation")
186   private Store init(String methodName, Configuration conf, HTableDescriptor htd,
187       HColumnDescriptor hcd, MyStoreHook hook) throws IOException {
188     //Setting up a Store
189     Path basedir = new Path(DIR+methodName);
190     Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
191     final Path logdir = new Path(basedir, DefaultWALProvider.getWALDirectoryName(methodName));
192 
193     FileSystem fs = FileSystem.get(conf);
194 
195     fs.delete(logdir, true);
196 
197     if (htd.hasFamily(hcd.getName())) {
198       htd.modifyFamily(hcd);
199     } else {
200       htd.addFamily(hcd);
201     }
202     HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
203     final Configuration walConf = new Configuration(conf);
204     FSUtils.setRootDir(walConf, basedir);
205     final WALFactory wals = new WALFactory(walConf, null, methodName);
206     HRegion region = new HRegion(tableDir, wals.getWAL(info.getEncodedNameAsBytes(),
207             info.getTable().getNamespace()), fs, conf, info, htd, null);
208     if (hook == null) {
209       store = new HStore(region, hcd, conf);
210     } else {
211       store = new MyStore(region, hcd, conf, hook);
212     }
213     return store;
214   }
215 
216   /**
217    * Test we do not lose data if we fail a flush and then close.
218    * Part of HBase-10466
219    * @throws Exception
220    */
221   @Test
222   public void testFlushSizeAccounting() throws Exception {
223     LOG.info("Setting up a faulty file system that cannot write in " +
224       this.name.getMethodName());
225     final Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
226     // Only retry once.
227     conf.setInt("hbase.hstore.flush.retries.number", 1);
228     User user = User.createUserForTesting(conf, this.name.getMethodName(),
229       new String[]{"foo"});
230     // Inject our faulty LocalFileSystem
231     conf.setClass("fs.file.impl", FaultyFileSystem.class, FileSystem.class);
232     user.runAs(new PrivilegedExceptionAction<Object>() {
233       @Override
234       public Object run() throws Exception {
235         // Make sure it worked (above is sensitive to caching details in hadoop core)
236         FileSystem fs = FileSystem.get(conf);
237         Assert.assertEquals(FaultyFileSystem.class, fs.getClass());
238         FaultyFileSystem ffs = (FaultyFileSystem)fs;
239 
240         // Initialize region
241         init(name.getMethodName(), conf);
242 
243         long size = store.memstore.getFlushableSize();
244         Assert.assertEquals(0, size);
245         LOG.info("Adding some data");
246         long kvSize = store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
247         size = store.memstore.getFlushableSize();
248         Assert.assertEquals(kvSize, size);
249         // Flush.  Bug #1 from HBASE-10466.  Make sure size calculation on failed flush is right.
250         try {
251           LOG.info("Flushing");
252           flushStore(store, id++);
253           Assert.fail("Didn't bubble up IOE!");
254         } catch (IOException ioe) {
255           Assert.assertTrue(ioe.getMessage().contains("Fault injected"));
256         }
257         size = store.memstore.getFlushableSize();
258         Assert.assertEquals(kvSize, size);
259         store.add(new KeyValue(row, family, qf2, 2, (byte[])null));
260         // Even though we add a new kv, we expect the flushable size to be 'same' since we have
261         // not yet cleared the snapshot -- the above flush failed.
262         Assert.assertEquals(kvSize, size);
263         ffs.fault.set(false);
264         flushStore(store, id++);
265         size = store.memstore.getFlushableSize();
266         // Size should be the foreground kv size.
267         Assert.assertEquals(kvSize, size);
268         flushStore(store, id++);
269         size = store.memstore.getFlushableSize();
270         Assert.assertEquals(0, size);
271         return null;
272       }
273     });
274   }
275 
276   /**
277    * Verify that compression and data block encoding are respected by the
278    * Store.createWriterInTmp() method, used on store flush.
279    */
280   @Test
281   public void testCreateWriter() throws Exception {
282     Configuration conf = HBaseConfiguration.create();
283     FileSystem fs = FileSystem.get(conf);
284 
285     HColumnDescriptor hcd = new HColumnDescriptor(family);
286     hcd.setCompressionType(Compression.Algorithm.GZ);
287     hcd.setDataBlockEncoding(DataBlockEncoding.DIFF);
288     init(name.getMethodName(), conf, hcd);
289 
290     // Test createWriterInTmp()
291     StoreFile.Writer writer = store.createWriterInTmp(4, hcd.getCompression(), false, true, false);
292     Path path = writer.getPath();
293     writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
294     writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
295     writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
296     writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
297     writer.close();
298 
299     // Verify that compression and encoding settings are respected
300     HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
301     Assert.assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
302     Assert.assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
303     reader.close();
304   }
305 
306   @Test
307   public void testDeleteExpiredStoreFiles() throws Exception {
308     testDeleteExpiredStoreFiles(0);
309     testDeleteExpiredStoreFiles(1);
310   }
311 
  /*
   * Writes four store files whose max timestamps are spread evenly across the
   * TTL window, then verifies each compaction request deletes exactly the
   * fully-expired store files (or none when MIN_VERSIONS forces retention),
   * leaving the last expired file in place.
   * @param minVersions the MIN_VERSIONS for the column family
   */
  public void testDeleteExpiredStoreFiles(int minVersions) throws Exception {
    int storeFileNum = 4;
    int ttl = 4;
    // Manual clock so file ages advance deterministically under test control.
    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
    EnvironmentEdgeManagerTestHelper.injectEdge(edge);

    Configuration conf = HBaseConfiguration.create();
    // Enable the expired store file deletion
    conf.setBoolean("hbase.store.delete.expired.storefile", true);
    // Set the compaction threshold higher to avoid normal compactions.
    conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 5);

    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setMinVersions(minVersions);
    hcd.setTimeToLive(ttl);
    init(name.getMethodName() + "-" + minVersions, conf, hcd);

    long storeTtl = this.store.getScanInfo().getTtl();
    long sleepTime = storeTtl / storeFileNum;
    long timeStamp;
    // There are 4 store files and the max time stamp difference among these
    // store files will be (this.store.ttl / storeFileNum)
    for (int i = 1; i <= storeFileNum; i++) {
      LOG.info("Adding some data for the store file #" + i);
      timeStamp = EnvironmentEdgeManager.currentTime();
      this.store.add(new KeyValue(row, family, qf1, timeStamp, (byte[]) null));
      this.store.add(new KeyValue(row, family, qf2, timeStamp, (byte[]) null));
      this.store.add(new KeyValue(row, family, qf3, timeStamp, (byte[]) null));
      flush(i);
      // Advance the clock so the next file carries a later timestamp.
      edge.incrementTime(sleepTime);
    }

    // Verify the total number of store files
    Assert.assertEquals(storeFileNum, this.store.getStorefiles().size());

     // Each call will find one expired store file and delete it before compaction happens.
     // There will be no compaction due to threshold above. Last file will not be replaced.
    for (int i = 1; i <= storeFileNum - 1; i++) {
      // verify the expired store file.
      assertNull(this.store.requestCompaction());
      Collection<StoreFile> sfs = this.store.getStorefiles();
      // Ensure i files are gone.
      if (minVersions == 0) {
        assertEquals(storeFileNum - i, sfs.size());
        // Ensure only non-expired files remain.
        for (StoreFile sf : sfs) {
          assertTrue(sf.getReader().getMaxTimestamp() >= (edge.currentTime() - storeTtl));
        }
      } else {
        // With MIN_VERSIONS > 0, expired files must be retained.
        assertEquals(storeFileNum, sfs.size());
      }
      // Let the next store file expired.
      edge.incrementTime(sleepTime);
    }
    assertNull(this.store.requestCompaction());

    Collection<StoreFile> sfs = this.store.getStorefiles();
    // Assert the last expired file is not removed.
    if (minVersions == 0) {
      assertEquals(1, sfs.size());
    }
    long ts = sfs.iterator().next().getReader().getMaxTimestamp();
    // The surviving file is itself past its TTL; only deletion stops at the last file.
    assertTrue(ts < (edge.currentTime() - storeTtl));

    for (StoreFile sf : sfs) {
      sf.closeReader(true);
    }
  }
383 
384   @Test
385   public void testRollback() throws IOException {
386     Configuration conf = HBaseConfiguration.create();
387     FileSystem fs = FileSystem.get(conf);
388     // Initialize region
389     init(name.getMethodName(), conf);
390     Cell cell = CellUtil.createCell(row, family, qf1);
391     int len = KeyValueUtil.length(cell);
392     int offset = 77;
393     byte[] buf = new byte[offset + len];
394     KeyValueUtil.appendToByteArray(cell, buf, offset);
395     KeyValue newKv = new KeyValue(buf, offset, len);
396     newKv.setSequenceId(cell.getSequenceId());
397     List<Cell> testCells = Arrays.asList(cell, cell, newKv);
398     for (Cell c : testCells) {
399       long sizeBeforeRollback = store.heapSize();
400       store.add(cell);
401       store.rollback(cell);
402       long sizeAeforeRollback = store.heapSize();
403       assertEquals(sizeBeforeRollback, sizeAeforeRollback);
404     }
405   }
406 
407   @Test
408   public void testLowestModificationTime() throws Exception {
409     Configuration conf = HBaseConfiguration.create();
410     FileSystem fs = FileSystem.get(conf);
411     // Initialize region
412     init(name.getMethodName(), conf);
413 
414     int storeFileNum = 4;
415     for (int i = 1; i <= storeFileNum; i++) {
416       LOG.info("Adding some data for the store file #"+i);
417       this.store.add(new KeyValue(row, family, qf1, i, (byte[])null));
418       this.store.add(new KeyValue(row, family, qf2, i, (byte[])null));
419       this.store.add(new KeyValue(row, family, qf3, i, (byte[])null));
420       flush(i);
421     }
422     // after flush; check the lowest time stamp
423     long lowestTimeStampFromManager = StoreUtils.getLowestTimestamp(store.getStorefiles());
424     long lowestTimeStampFromFS = getLowestTimeStampFromFS(fs, store.getStorefiles());
425     Assert.assertEquals(lowestTimeStampFromManager,lowestTimeStampFromFS);
426 
427     // after compact; check the lowest time stamp
428     store.compact(store.requestCompaction(), NoLimitThroughputController.INSTANCE);
429     lowestTimeStampFromManager = StoreUtils.getLowestTimestamp(store.getStorefiles());
430     lowestTimeStampFromFS = getLowestTimeStampFromFS(fs, store.getStorefiles());
431     Assert.assertEquals(lowestTimeStampFromManager, lowestTimeStampFromFS);
432   }
433 
434   private static long getLowestTimeStampFromFS(FileSystem fs,
435       final Collection<StoreFile> candidates) throws IOException {
436     long minTs = Long.MAX_VALUE;
437     if (candidates.isEmpty()) {
438       return minTs;
439     }
440     Path[] p = new Path[candidates.size()];
441     int i = 0;
442     for (StoreFile sf : candidates) {
443       p[i] = sf.getPath();
444       ++i;
445     }
446 
447     FileStatus[] stats = fs.listStatus(p);
448     if (stats == null || stats.length == 0) {
449       return minTs;
450     }
451     for (FileStatus s : stats) {
452       minTs = Math.min(minTs, s.getModificationTime());
453     }
454     return minTs;
455   }
456 
457   //////////////////////////////////////////////////////////////////////////////
458   // Get tests
459   //////////////////////////////////////////////////////////////////////////////
460 
461   private static final int BLOCKSIZE_SMALL = 8192;
462   /**
463    * Test for hbase-1686.
464    * @throws IOException
465    */
466   @Test
467   public void testEmptyStoreFile() throws IOException {
468     init(this.name.getMethodName());
469     // Write a store file.
470     this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
471     this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null));
472     flush(1);
473     // Now put in place an empty store file.  Its a little tricky.  Have to
474     // do manually with hacked in sequence id.
475     StoreFile f = this.store.getStorefiles().iterator().next();
476     Path storedir = f.getPath().getParent();
477     long seqid = f.getMaxSequenceId();
478     Configuration c = HBaseConfiguration.create();
479     FileSystem fs = FileSystem.get(c);
480     HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
481     StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c),
482         fs)
483             .withOutputDir(storedir)
484             .withFileContext(meta)
485             .build();
486     w.appendMetadata(seqid + 1, false);
487     w.close();
488     this.store.close();
489     // Reopen it... should pick up two files
490     this.store = new HStore(this.store.getHRegion(), this.store.getFamily(), c);
491     Assert.assertEquals(2, this.store.getStorefilesCount());
492 
493     result = HBaseTestingUtility.getFromStoreFile(store,
494         get.getRow(),
495         qualifiers);
496     Assert.assertEquals(1, result.size());
497   }
498 
499   /**
500    * Getting data from memstore only
501    * @throws IOException
502    */
503   @Test
504   public void testGet_FromMemStoreOnly() throws IOException {
505     init(this.name.getMethodName());
506 
507     //Put data in memstore
508     this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
509     this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null));
510     this.store.add(new KeyValue(row, family, qf3, 1, (byte[])null));
511     this.store.add(new KeyValue(row, family, qf4, 1, (byte[])null));
512     this.store.add(new KeyValue(row, family, qf5, 1, (byte[])null));
513     this.store.add(new KeyValue(row, family, qf6, 1, (byte[])null));
514 
515     //Get
516     result = HBaseTestingUtility.getFromStoreFile(store,
517         get.getRow(), qualifiers);
518 
519     //Compare
520     assertCheck();
521   }
522 
523   /**
524    * Getting data from files only
525    * @throws IOException
526    */
527   @Test
528   public void testGet_FromFilesOnly() throws IOException {
529     init(this.name.getMethodName());
530 
531     //Put data in memstore
532     this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
533     this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null));
534     //flush
535     flush(1);
536 
537     //Add more data
538     this.store.add(new KeyValue(row, family, qf3, 1, (byte[])null));
539     this.store.add(new KeyValue(row, family, qf4, 1, (byte[])null));
540     //flush
541     flush(2);
542 
543     //Add more data
544     this.store.add(new KeyValue(row, family, qf5, 1, (byte[])null));
545     this.store.add(new KeyValue(row, family, qf6, 1, (byte[])null));
546     //flush
547     flush(3);
548 
549     //Get
550     result = HBaseTestingUtility.getFromStoreFile(store,
551         get.getRow(),
552         qualifiers);
553     //this.store.get(get, qualifiers, result);
554 
555     //Need to sort the result since multiple files
556     Collections.sort(result, KeyValue.COMPARATOR);
557 
558     //Compare
559     assertCheck();
560   }
561 
562   /**
563    * Getting data from memstore and files
564    * @throws IOException
565    */
566   @Test
567   public void testGet_FromMemStoreAndFiles() throws IOException {
568     init(this.name.getMethodName());
569 
570     //Put data in memstore
571     this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
572     this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null));
573     //flush
574     flush(1);
575 
576     //Add more data
577     this.store.add(new KeyValue(row, family, qf3, 1, (byte[])null));
578     this.store.add(new KeyValue(row, family, qf4, 1, (byte[])null));
579     //flush
580     flush(2);
581 
582     //Add more data
583     this.store.add(new KeyValue(row, family, qf5, 1, (byte[])null));
584     this.store.add(new KeyValue(row, family, qf6, 1, (byte[])null));
585 
586     //Get
587     result = HBaseTestingUtility.getFromStoreFile(store,
588         get.getRow(), qualifiers);
589 
590     //Need to sort the result since multiple files
591     Collections.sort(result, KeyValue.COMPARATOR);
592 
593     //Compare
594     assertCheck();
595   }
596 
597   private void flush(int storeFilessize) throws IOException{
598     this.store.snapshot();
599     flushStore(store, id++);
600     Assert.assertEquals(storeFilessize, this.store.getStorefiles().size());
601     Assert.assertEquals(0, ((DefaultMemStore)this.store.memstore)
602       .activeSection.getCellSkipListSet().sizeForTests());
603   }
604 
605   private void assertCheck() {
606     Assert.assertEquals(expected.size(), result.size());
607     for(int i=0; i<expected.size(); i++) {
608       Assert.assertEquals(expected.get(i), result.get(i));
609     }
610   }
611 
  //////////////////////////////////////////////////////////////////////////////
  // IncrementColumnValue tests
  //////////////////////////////////////////////////////////////////////////////
  /*
   * test the internal details of how ICV works, especially during a flush scenario.
   * A put is snapshotted, qf1 is incremented while the snapshot is still
   * outstanding, then after the flush both the flushed value and the
   * in-memstore update must be visible as two distinct versions.
   */
  @Test
  public void testIncrementColumnValue_ICVDuringFlush()
      throws IOException, InterruptedException {
    init(this.name.getMethodName());

    long oldValue = 1L;
    long newValue = 3L;
    this.store.add(new KeyValue(row, family, qf1,
        System.currentTimeMillis(),
        Bytes.toBytes(oldValue)));

    // snapshot the store.
    this.store.snapshot();

    // add other things:
    this.store.add(new KeyValue(row, family, qf2,
        System.currentTimeMillis(),
        Bytes.toBytes(oldValue)));

    // update during the snapshot.
    long ret = this.store.updateColumnValue(row, family, qf1, newValue);

    // memstore should have grown by some amount.
    Assert.assertTrue(ret > 0);

    // then flush.
    flushStore(store, id++);
    Assert.assertEquals(1, this.store.getStorefiles().size());
    // from the one we inserted up there, and a new one
    Assert.assertEquals(2, ((DefaultMemStore)this.store.memstore)
      .activeSection.getCellSkipListSet().sizeForTests());

    // how many key/values for this row are there?
    Get get = new Get(row);
    get.addColumn(family, qf1);
    get.setMaxVersions(); // all versions.
    List<Cell> results = new ArrayList<Cell>();

    results = HBaseTestingUtility.getFromStoreFile(store, get);
    Assert.assertEquals(2, results.size());

    long ts1 = results.get(0).getTimestamp();
    long ts2 = results.get(1).getTimestamp();

    // Newest version first: the ICV update must carry the later timestamp.
    Assert.assertTrue(ts1 > ts2);

    Assert.assertEquals(newValue, Bytes.toLong(CellUtil.cloneValue(results.get(0))));
    Assert.assertEquals(oldValue, Bytes.toLong(CellUtil.cloneValue(results.get(1))));
  }
667 
  /** Restores the real environment edge; several tests inject manual clocks. */
  @After
  public void tearDown() throws Exception {
    EnvironmentEdgeManagerTestHelper.reset();
  }
672 
  /**
   * Repeated in-place increments must never report a negative memstore-size
   * delta, and the incrementally tracked size must equal the heap size
   * recomputed from every cell remaining in the active section.
   */
  @Test
  public void testICV_negMemstoreSize()  throws IOException {
      init(this.name.getMethodName());

    long time = 100;
    ManualEnvironmentEdge ee = new ManualEnvironmentEdge();
    ee.setValue(time);
    EnvironmentEdgeManagerTestHelper.injectEdge(ee);
    long newValue = 3L;
    // Running total of the size deltas reported by add()/updateColumnValue().
    long size = 0;


    size += this.store.add(new KeyValue(Bytes.toBytes("200909091000"), family, qf1,
        System.currentTimeMillis(), Bytes.toBytes(newValue)));
    size += this.store.add(new KeyValue(Bytes.toBytes("200909091200"), family, qf1,
        System.currentTimeMillis(), Bytes.toBytes(newValue)));
    size += this.store.add(new KeyValue(Bytes.toBytes("200909091300"), family, qf1,
        System.currentTimeMillis(), Bytes.toBytes(newValue)));
    size += this.store.add(new KeyValue(Bytes.toBytes("200909091400"), family, qf1,
        System.currentTimeMillis(), Bytes.toBytes(newValue)));
    size += this.store.add(new KeyValue(Bytes.toBytes("200909091500"), family, qf1,
        System.currentTimeMillis(), Bytes.toBytes(newValue)));


    for ( int i = 0 ; i < 10000 ; ++i) {
      newValue++;

      long ret = this.store.updateColumnValue(row, family, qf1, newValue);
      long ret2 = this.store.updateColumnValue(row2, family, qf1, newValue);

      if (ret != 0) System.out.println("ret: " + ret);
      if (ret2 != 0) System.out.println("ret2: " + ret2);

      // A negative delta would mean size accounting went backwards.
      Assert.assertTrue("ret: " + ret, ret >= 0);
      size += ret;
      Assert.assertTrue("ret2: " + ret2, ret2 >= 0);
      size += ret2;


      // Nudge the manual clock forward occasionally so timestamps vary.
      if (i % 1000 == 0)
        ee.setValue(++time);
    }

    long computedSize=0;
    // Recompute the heap size from scratch over the surviving cells.
    for (Cell cell : ((DefaultMemStore)this.store.memstore).activeSection.getCellSkipListSet()) {
      long kvsize = DefaultMemStore.heapSizeChange(cell, true);
      //System.out.println(kv + " size= " + kvsize + " kvsize= " + kv.heapSize());
      computedSize += kvsize;
    }
    Assert.assertEquals(computedSize, size);
  }
724 
  /**
   * ICV combined with snapshot + flush: increments issued while a snapshot is
   * outstanding — and while a manual clock is pinned — must still produce
   * strictly increasing timestamps so each new value reads as the newest
   * version.
   */
  @Test
  public void testIncrementColumnValue_SnapshotFlushCombo() throws Exception {
    ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
    EnvironmentEdgeManagerTestHelper.injectEdge(mee);
    init(this.name.getMethodName());

    long oldValue = 1L;
    long newValue = 3L;
    this.store.add(new KeyValue(row, family, qf1,
        EnvironmentEdgeManager.currentTime(),
        Bytes.toBytes(oldValue)));

    // snapshot the store.
    this.store.snapshot();

    // update during the snapshot, the exact same TS as the Put (lololol)
    long ret = this.store.updateColumnValue(row, family, qf1, newValue);

    // memstore should have grown by some amount.
    Assert.assertTrue(ret > 0);

    // then flush.
    flushStore(store, id++);
    Assert.assertEquals(1, this.store.getStorefiles().size());
    // Only the ICV update remains in the active section after the flush.
    Assert.assertEquals(1, ((DefaultMemStore)this.store.memstore)
      .activeSection.getCellSkipListSet().sizeForTests());

    // now increment again:
    newValue += 1;
    this.store.updateColumnValue(row, family, qf1, newValue);

    // at this point we have a TS=1 in snapshot, and a TS=2 in kvset, so increment again:
    newValue += 1;
    this.store.updateColumnValue(row, family, qf1, newValue);

    // the second TS should be TS=2 or higher., even though 'time=1' right now.


    // how many key/values for this row are there?
    Get get = new Get(row);
    get.addColumn(family, qf1);
    get.setMaxVersions(); // all versions.
    List<Cell> results = new ArrayList<Cell>();

    results = HBaseTestingUtility.getFromStoreFile(store, get);
    Assert.assertEquals(2, results.size());

    long ts1 = results.get(0).getTimestamp();
    long ts2 = results.get(1).getTimestamp();

    // Newest first; the latest increment must be the newest version.
    Assert.assertTrue(ts1 > ts2);
    Assert.assertEquals(newValue, Bytes.toLong(CellUtil.cloneValue(results.get(0))));
    Assert.assertEquals(oldValue, Bytes.toLong(CellUtil.cloneValue(results.get(1))));

    mee.setValue(2); // time goes up slightly
    newValue += 1;
    this.store.updateColumnValue(row, family, qf1, newValue);

    results = HBaseTestingUtility.getFromStoreFile(store, get);
    Assert.assertEquals(2, results.size());

    ts1 = results.get(0).getTimestamp();
    ts2 = results.get(1).getTimestamp();

    // Still two versions, newest first, carrying the latest value.
    Assert.assertTrue(ts1 > ts2);
    Assert.assertEquals(newValue, Bytes.toLong(CellUtil.cloneValue(results.get(0))));
    Assert.assertEquals(oldValue, Bytes.toLong(CellUtil.cloneValue(results.get(1))));
  }
793 
  /**
   * Verifies that an IOException thrown by the file system during a flush
   * bubbles up to the caller and that the failed flush leaves no store files
   * behind. Uses {@link FaultyFileSystem} injected via "fs.file.impl".
   */
  @Test
  public void testHandleErrorsInFlush() throws Exception {
    LOG.info("Setting up a faulty file system that cannot write");

    final Configuration conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
    // Run under a dedicated test user so FileSystem caching (keyed per UGI)
    // gives us a fresh FileSystem instance using the faulty implementation.
    User user = User.createUserForTesting(conf,
        "testhandleerrorsinflush", new String[]{"foo"});
    // Inject our faulty LocalFileSystem
    conf.setClass("fs.file.impl", FaultyFileSystem.class,
        FileSystem.class);
    user.runAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        // Make sure it worked (above is sensitive to caching details in hadoop core)
        FileSystem fs = FileSystem.get(conf);
        Assert.assertEquals(FaultyFileSystem.class, fs.getClass());

        // Initialize region
        init(name.getMethodName(), conf);

        LOG.info("Adding some data");
        store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
        store.add(new KeyValue(row, family, qf2, 1, (byte[])null));
        store.add(new KeyValue(row, family, qf3, 1, (byte[])null));

        LOG.info("Before flush, we should have no files");

        Collection<StoreFileInfo> files =
          store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
        Assert.assertEquals(0, files != null ? files.size() : 0);

        //flush: the faulty FS must make this fail with "Fault injected"
        try {
          LOG.info("Flushing");
          flush(1);
          Assert.fail("Didn't bubble up IOE!");
        } catch (IOException ioe) {
          Assert.assertTrue(ioe.getMessage().contains("Fault injected"));
        }

        LOG.info("After failed flush, we should still have no files!");
        files = store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
        Assert.assertEquals(0, files != null ? files.size() : 0);
        store.getHRegion().getWAL().close();
        return null;
      }
    });
    FileSystem.closeAllForUGI(user.getUGI());
  }
843 
844   /**
845    * Faulty file system that will fail if you write past its fault position the FIRST TIME
846    * only; thereafter it will succeed.  Used by {@link TestHRegion} too.
847    */
848   static class FaultyFileSystem extends FilterFileSystem {
849     List<SoftReference<FaultyOutputStream>> outStreams =
850       new ArrayList<SoftReference<FaultyOutputStream>>();
851     private long faultPos = 200;
852     AtomicBoolean fault = new AtomicBoolean(true);
853 
854     public FaultyFileSystem() {
855       super(new LocalFileSystem());
856       System.err.println("Creating faulty!");
857     }
858 
859     @Override
860     public FSDataOutputStream create(Path p) throws IOException {
861       return new FaultyOutputStream(super.create(p), faultPos, fault);
862     }
863 
864     @Override
865     public FSDataOutputStream create(Path f, FsPermission permission,
866         boolean overwrite, int bufferSize, short replication, long blockSize,
867         Progressable progress) throws IOException {
868       return new FaultyOutputStream(super.create(f, permission,
869           overwrite, bufferSize, replication, blockSize, progress), faultPos, fault);
870     }
871 
872     @Override
873     public FSDataOutputStream createNonRecursive(Path f, boolean overwrite,
874         int bufferSize, short replication, long blockSize, Progressable progress)
875     throws IOException {
876       // Fake it.  Call create instead.  The default implementation throws an IOE
877       // that this is not supported.
878       return create(f, overwrite, bufferSize, replication, blockSize, progress);
879     }
880   }
881 
  /**
   * FSDataOutputStream wrapper that throws an IOException on any write
   * starting at or past {@code faultPos} while the shared {@code fault}
   * flag is true. Created by {@link FaultyFileSystem}.
   */
  static class FaultyOutputStream extends FSDataOutputStream {
    // Byte offset at or beyond which writes fail while fault is enabled.
    volatile long faultPos = Long.MAX_VALUE;
    // Shared on/off switch for fault injection (owned by FaultyFileSystem).
    private final AtomicBoolean fault;

    public FaultyOutputStream(FSDataOutputStream out, long faultPos, final AtomicBoolean fault)
    throws IOException {
      super(out, null);
      this.faultPos = faultPos;
      this.fault = fault;
    }

    @Override
    public void write(byte[] buf, int offset, int length) throws IOException {
      System.err.println("faulty stream write at pos " + getPos());
      // Check BEFORE delegating so the underlying stream never sees the bytes.
      injectFault();
      super.write(buf, offset, length);
    }

    // Throws iff fault injection is enabled and the stream position has
    // reached the configured fault position.
    private void injectFault() throws IOException {
      if (this.fault.get() && getPos() >= faultPos) {
        throw new IOException("Fault injected");
      }
    }
  }
906 
907   private static void flushStore(HStore store, long id) throws IOException {
908     StoreFlushContext storeFlushCtx = store.createFlushContext(id);
909     storeFlushCtx.prepare();
910     storeFlushCtx.flushCache(Mockito.mock(MonitoredTask.class));
911     storeFlushCtx.commit(Mockito.mock(MonitoredTask.class));
912   }
913 
914   /**
915    * Generate a list of KeyValues for testing based on given parameters
916    * @param timestamps
917    * @param numRows
918    * @param qualifier
919    * @param family
920    * @return
921    */
922   List<Cell> getKeyValueSet(long[] timestamps, int numRows,
923       byte[] qualifier, byte[] family) {
924     List<Cell> kvList = new ArrayList<Cell>();
925     for (int i=1;i<=numRows;i++) {
926       byte[] b = Bytes.toBytes(i);
927       for (long timestamp: timestamps) {
928         kvList.add(new KeyValue(b, family, qualifier, timestamp, b));
929       }
930     }
931     return kvList;
932   }
933 
934   /**
935    * Test to ensure correctness when using Stores with multiple timestamps
936    * @throws IOException
937    */
938   @Test
939   public void testMultipleTimestamps() throws IOException {
940     int numRows = 1;
941     long[] timestamps1 = new long[] {1,5,10,20};
942     long[] timestamps2 = new long[] {30,80};
943 
944     init(this.name.getMethodName());
945 
946     List<Cell> kvList1 = getKeyValueSet(timestamps1,numRows, qf1, family);
947     for (Cell kv : kvList1) {
948       this.store.add(KeyValueUtil.ensureKeyValue(kv));
949     }
950 
951     this.store.snapshot();
952     flushStore(store, id++);
953 
954     List<Cell> kvList2 = getKeyValueSet(timestamps2,numRows, qf1, family);
955     for(Cell kv : kvList2) {
956       this.store.add(KeyValueUtil.ensureKeyValue(kv));
957     }
958 
959     List<Cell> result;
960     Get get = new Get(Bytes.toBytes(1));
961     get.addColumn(family,qf1);
962 
963     get.setTimeRange(0,15);
964     result = HBaseTestingUtility.getFromStoreFile(store, get);
965     Assert.assertTrue(result.size()>0);
966 
967     get.setTimeRange(40,90);
968     result = HBaseTestingUtility.getFromStoreFile(store, get);
969     Assert.assertTrue(result.size()>0);
970 
971     get.setTimeRange(10,45);
972     result = HBaseTestingUtility.getFromStoreFile(store, get);
973     Assert.assertTrue(result.size()>0);
974 
975     get.setTimeRange(80,145);
976     result = HBaseTestingUtility.getFromStoreFile(store, get);
977     Assert.assertTrue(result.size()>0);
978 
979     get.setTimeRange(1,2);
980     result = HBaseTestingUtility.getFromStoreFile(store, get);
981     Assert.assertTrue(result.size()>0);
982 
983     get.setTimeRange(90,200);
984     result = HBaseTestingUtility.getFromStoreFile(store, get);
985     Assert.assertTrue(result.size()==0);
986   }
987 
988   /**
989    * Test for HBASE-3492 - Test split on empty colfam (no store files).
990    *
991    * @throws IOException When the IO operations fail.
992    */
993   @Test
994   public void testSplitWithEmptyColFam() throws IOException {
995     init(this.name.getMethodName());
996     Assert.assertNull(store.getSplitPoint());
997     store.getHRegion().forceSplit(null);
998     Assert.assertNull(store.getSplitPoint());
999     store.getHRegion().clearSplit();
1000   }
1001 
1002   @Test
1003   public void testStoreUsesConfigurationFromHcdAndHtd() throws Exception {
1004     final String CONFIG_KEY = "hbase.regionserver.thread.compaction.throttle";
1005     long anyValue = 10;
1006 
1007     // We'll check that it uses correct config and propagates it appropriately by going thru
1008     // the simplest "real" path I can find - "throttleCompaction", which just checks whether
1009     // a number we pass in is higher than some config value, inside compactionPolicy.
1010     Configuration conf = HBaseConfiguration.create();
1011     conf.setLong(CONFIG_KEY, anyValue);
1012     init(name.getMethodName() + "-xml", conf);
1013     Assert.assertTrue(store.throttleCompaction(anyValue + 1));
1014     Assert.assertFalse(store.throttleCompaction(anyValue));
1015 
1016     // HTD overrides XML.
1017     --anyValue;
1018     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
1019     HColumnDescriptor hcd = new HColumnDescriptor(family);
1020     htd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
1021     init(name.getMethodName() + "-htd", conf, htd, hcd);
1022     Assert.assertTrue(store.throttleCompaction(anyValue + 1));
1023     Assert.assertFalse(store.throttleCompaction(anyValue));
1024 
1025     // HCD overrides them both.
1026     --anyValue;
1027     hcd.setConfiguration(CONFIG_KEY, Long.toString(anyValue));
1028     init(name.getMethodName() + "-hcd", conf, htd, hcd);
1029     Assert.assertTrue(store.throttleCompaction(anyValue + 1));
1030     Assert.assertFalse(store.throttleCompaction(anyValue));
1031   }
1032 
  /**
   * DefaultStoreEngine that records the compactor built by the most recent
   * createComponents() call, so tests can assert that a store engine
   * configured via STORE_ENGINE_CLASS_KEY was actually instantiated and used.
   */
  public static class DummyStoreEngine extends DefaultStoreEngine {
    // Last compactor created by any instance; read by the override test.
    public static DefaultCompactor lastCreatedCompactor = null;
    @Override
    protected void createComponents(
        Configuration conf, Store store, KVComparator comparator) throws IOException {
      super.createComponents(conf, store, comparator);
      lastCreatedCompactor = this.compactor;
    }
  }
1042 
1043   @Test
1044   public void testStoreUsesSearchEngineOverride() throws Exception {
1045     Configuration conf = HBaseConfiguration.create();
1046     conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DummyStoreEngine.class.getName());
1047     init(this.name.getMethodName(), conf);
1048     Assert.assertEquals(DummyStoreEngine.lastCreatedCompactor,
1049       this.store.storeEngine.getCompactor());
1050   }
1051 
1052   private void addStoreFile() throws IOException {
1053     StoreFile f = this.store.getStorefiles().iterator().next();
1054     Path storedir = f.getPath().getParent();
1055     long seqid = this.store.getMaxSequenceId();
1056     Configuration c = TEST_UTIL.getConfiguration();
1057     FileSystem fs = FileSystem.get(c);
1058     HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
1059     StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c),
1060         fs)
1061             .withOutputDir(storedir)
1062             .withFileContext(fileContext)
1063             .build();
1064     w.appendMetadata(seqid + 1, false);
1065     w.close();
1066     LOG.info("Added store file:" + w.getPath());
1067   }
1068 
1069   private void archiveStoreFile(int index) throws IOException {
1070     Collection<StoreFile> files = this.store.getStorefiles();
1071     StoreFile sf = null;
1072     Iterator<StoreFile> it = files.iterator();
1073     for (int i = 0; i <= index; i++) {
1074       sf = it.next();
1075     }
1076     store.getRegionFileSystem().removeStoreFiles(store.getColumnFamilyName(), Lists.newArrayList(sf));
1077   }
1078 
1079   private void closeCompactedFile(int index) throws IOException {
1080     Collection<StoreFile> files =
1081         this.store.getStoreEngine().getStoreFileManager().getCompactedfiles();
1082     StoreFile sf = null;
1083     Iterator<StoreFile> it = files.iterator();
1084     for (int i = 0; i <= index; i++) {
1085       sf = it.next();
1086     }
1087     sf.closeReader(true);
1088     store.getStoreEngine().getStoreFileManager().removeCompactedFiles(Lists.newArrayList(sf));
1089   }
1090 
  /**
   * Verifies refreshStoreFiles() notices files added or removed on the file
   * system behind the store's back: the in-memory count only changes after a
   * refresh, in both the grow and shrink directions.
   */
  @Test
  public void testRefreshStoreFiles() throws Exception {
    init(name.getMethodName());

    assertEquals(0, this.store.getStorefilesCount());

    // Test refreshing store files when no store files are there
    store.refreshStoreFiles();
    assertEquals(0, this.store.getStorefilesCount());

    // add some data, flush
    this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
    flush(1);
    assertEquals(1, this.store.getStorefilesCount());

    // add one more file (directly on the FS; the store can't see it yet)
    addStoreFile();

    assertEquals(1, this.store.getStorefilesCount());
    store.refreshStoreFiles();
    assertEquals(2, this.store.getStorefilesCount());

    // add three more files
    addStoreFile();
    addStoreFile();
    addStoreFile();

    assertEquals(2, this.store.getStorefilesCount());
    store.refreshStoreFiles();
    assertEquals(5, this.store.getStorefilesCount());

    // Now shrink: remove a file behind the store's back; the count must
    // stay stale until the next refresh.
    closeCompactedFile(0);
    archiveStoreFile(0);

    assertEquals(5, this.store.getStorefilesCount());
    store.refreshStoreFiles();
    assertEquals(4, this.store.getStorefilesCount());

    archiveStoreFile(0);
    archiveStoreFile(1);
    archiveStoreFile(2);

    assertEquals(4, this.store.getStorefilesCount());
    store.refreshStoreFiles();
    assertEquals(1, this.store.getStorefilesCount());

    archiveStoreFile(0);
    store.refreshStoreFiles();
    assertEquals(0, this.store.getStorefilesCount());
  }
1141 
1142   @SuppressWarnings("unchecked")
1143   @Test
1144   public void testRefreshStoreFilesNotChanged() throws IOException {
1145     init(name.getMethodName());
1146 
1147     assertEquals(0, this.store.getStorefilesCount());
1148 
1149     // add some data, flush
1150     this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
1151     flush(1);
1152     // add one more file
1153     addStoreFile();
1154 
1155     HStore spiedStore = spy(store);
1156 
1157     // call first time after files changed
1158     spiedStore.refreshStoreFiles();
1159     assertEquals(2, this.store.getStorefilesCount());
1160     verify(spiedStore, times(1)).replaceStoreFiles(any(Collection.class), any(Collection.class));
1161 
1162     // call second time
1163     spiedStore.refreshStoreFiles();
1164 
1165     //ensure that replaceStoreFiles is not called if files are not refreshed
1166     verify(spiedStore, times(0)).replaceStoreFiles(null, null);
1167   }
1168 
  /** Creates a Put cell for the fixture's default {@code row}; see the row-taking overload. */
  private Cell createCell(byte[] qualifier, long ts, long sequenceId, byte[] value) throws IOException {
    return createCell(row, qualifier, ts, sequenceId, value);
  }
1172 
  /**
   * Creates a Put-type cell stamped with an explicit memstore sequence id.
   * @param row row key
   * @param qualifier column qualifier (family comes from the test fixture)
   * @param ts cell timestamp
   * @param sequenceId memstore sequence id to set on the cell
   * @param value cell value
   */
  private Cell createCell(byte[] row, byte[] qualifier, long ts, long sequenceId, byte[] value) throws IOException {
    Cell c = CellUtil.createCell(row, family, qualifier, ts, KeyValue.Type.Put.getCode(), value);
    CellUtil.setSequenceId(c, sequenceId);
    return c;
  }
1178 
  /**
   * Races a flush against scanner creation: a scan started before the flush
   * commits must still see the current (post-snapshot) data, never the stale
   * snapshot. Phases are numbered (1/4)..(4/4) in the comments below; the
   * MyStoreHook fires phase 4 from a background thread while the scanner is
   * being assembled.
   */
  @Test
  public void testScanWithDoubleFlush() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Initialize region
    MyStore myStore = initMyStore(name.getMethodName(), conf, new MyStoreHook() {
      @Override
      public void getScanners(final MyStore store) throws IOException {
        final long tmpId = id++;
        ExecutorService s = Executors.newSingleThreadExecutor();
        s.submit(new Runnable() {
          @Override
          public void run() {
            try {
              // flush the store before storescanner updates the scanners from store.
              // The current data will be flushed into files and the memstore will
              // be clear.
              // -- phase (4/4)
              flushStore(store, tmpId);
            } catch (IOException ex) {
              throw new RuntimeException(ex);
            }
          }
        });
        s.shutdown();
        try {
          // wait for the flush, the thread will be blocked in HStore#notifyChangedReadersObservers.
          s.awaitTermination(500, TimeUnit.MILLISECONDS);
        } catch (InterruptedException ex) {
          // best-effort wait only; the scan proceeds regardless
        }
      }
    });
    byte[] oldValue = Bytes.toBytes("oldValue");
    byte[] currentValue = Bytes.toBytes("currentValue");
    long ts = EnvironmentEdgeManager.currentTime();
    long seqId = 100;
    // older data which shouldn't be "seen" by client
    myStore.add(createCell(qf1, ts, seqId, oldValue));
    myStore.add(createCell(qf2, ts, seqId, oldValue));
    myStore.add(createCell(qf3, ts, seqId, oldValue));
    long snapshotId = id++;
    // push older data into snapshot -- phase (1/4)
    StoreFlushContext storeFlushCtx = store.createFlushContext(snapshotId);
    storeFlushCtx.prepare();

    // insert current data into active -- phase (2/4)
    myStore.add(createCell(qf1, ts + 1, seqId + 1, currentValue));
    myStore.add(createCell(qf2, ts + 1, seqId + 1, currentValue));
    myStore.add(createCell(qf3, ts + 1, seqId + 1, currentValue));
    TreeSet<byte[]> quals = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    quals.add(qf1);
    quals.add(qf2);
    quals.add(qf3);
    try (InternalScanner scanner = (InternalScanner) myStore.getScanner(
            new Scan(new Get(row)), quals, seqId + 1)) {
      // complete the flush -- phase (3/4)
      storeFlushCtx.flushCache(Mockito.mock(MonitoredTask.class));
      storeFlushCtx.commit(Mockito.mock(MonitoredTask.class));

      // Despite the interleaved flush, the scan must return only current data.
      List<Cell> results = new ArrayList<>();
      scanner.next(results);
      assertEquals(3, results.size());
      for (Cell c : results) {
        byte[] actualValue = CellUtil.cloneValue(c);
        assertTrue("expected:" + Bytes.toStringBinary(currentValue)
                + ", actual:" + Bytes.toStringBinary(actualValue),
                 Bytes.equals(actualValue, currentValue));
      }
    }

  }
1249 
  /**
   * Flush-during-scan scenario without any filter effect: the MyList hook
   * flushes the store after the second cell of a row has been collected,
   * and the scan must still return the full, correct row. The filter here
   * always INCLUDEs, so only the flush interleaving is exercised.
   */
  @Test
  public void testFlushBeforeCompletingScanWoFilter() throws IOException, InterruptedException {
    // Set by the hook after flushing; unused by this always-INCLUDE filter
    // (the flag matters in the sibling WithFilter/WithFilterHint variants).
    final AtomicBoolean timeToGoNextRow = new AtomicBoolean(false);
    testFlushBeforeCompletingScan(new MyListHook() {
      @Override
      public void hook(int currentSize) {
        // Flush mid-row, right after the second cell was added to the result.
        if (currentSize == 2) {
          try {
            flushStore(store, id++);
            timeToGoNextRow.set(true);
          } catch (IOException e) {
            throw new RuntimeException(e);
          }
        }
      }
    }, new FilterBase() {
      @Override
      public Filter.ReturnCode filterKeyValue(Cell v) throws IOException {
        return Filter.ReturnCode.INCLUDE;
      }
    });
  }
1272 
  /**
   * Flush-during-scan scenario where, right after the mid-row flush, the
   * filter answers NEXT_ROW exactly once. The scan must survive the
   * flush + row-skip combination and still produce correct rows.
   */
  @Test
  public void testFlushBeforeCompletingScanWithFilter() throws IOException, InterruptedException {
    final AtomicBoolean timeToGoNextRow = new AtomicBoolean(false);
    testFlushBeforeCompletingScan(new MyListHook() {
      @Override
      public void hook(int currentSize) {
        // Flush mid-row, then arm the filter to jump to the next row once.
        if (currentSize == 2) {
          try {
            flushStore(store, id++);
            timeToGoNextRow.set(true);
          } catch (IOException e) {
            throw new RuntimeException(e);
          }
        }
      }
    }, new FilterBase() {
      @Override
      public Filter.ReturnCode filterKeyValue(Cell v) throws IOException {
        // One-shot: NEXT_ROW immediately after the flush, INCLUDE otherwise.
        if (timeToGoNextRow.get()) {
          timeToGoNextRow.set(false);
          return Filter.ReturnCode.NEXT_ROW;
        } else {
          return Filter.ReturnCode.INCLUDE;
        }
      }
    });
  }
1300 
  /**
   * Flush-during-scan scenario where, right after the mid-row flush, the
   * filter answers SEEK_NEXT_USING_HINT exactly once, hinting the current
   * cell (i.e. re-seek in place). The scan must survive the flush + seek-hint
   * combination and still produce correct rows.
   */
  @Test
  public void testFlushBeforeCompletingScanWithFilterHint() throws IOException, InterruptedException {
    final AtomicBoolean timeToGetHint = new AtomicBoolean(false);
    testFlushBeforeCompletingScan(new MyListHook() {
      @Override
      public void hook(int currentSize) {
        // Flush mid-row, then arm the filter to emit a seek hint once.
        if (currentSize == 2) {
          try {
            flushStore(store, id++);
            timeToGetHint.set(true);
          } catch (IOException e) {
            throw new RuntimeException(e);
          }
        }
      }
    }, new FilterBase() {
      @Override
      public Filter.ReturnCode filterKeyValue(Cell v) throws IOException {
        // One-shot: seek-using-hint immediately after the flush.
        if (timeToGetHint.get()) {
          timeToGetHint.set(false);
          return Filter.ReturnCode.SEEK_NEXT_USING_HINT;
        } else {
          return Filter.ReturnCode.INCLUDE;
        }
      }
      @Override
      public Cell getNextCellHint(Cell currentCell) throws IOException {
        // Hint the current cell: effectively "re-seek to where we are".
        return currentCell;
      }
    });
  }
1332 
  /**
   * Shared driver for the flush-before-completing-scan tests. Loads three
   * rows (r0 superseded by newer r1 versions under max-versions=1), then
   * scans from r1 with the given filter while the MyList hook interleaves a
   * flush after the second cell of r1. Asserts both r1 and r2 come back
   * complete and with the expected values.
   * @param hook invoked with the result-list size before each cell is added
   * @param filter scan filter supplied by the individual test variant
   */
  private void testFlushBeforeCompletingScan(MyListHook hook, Filter filter)
          throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setMaxVersions(1);
    byte[] r0 = Bytes.toBytes("row0");
    byte[] r1 = Bytes.toBytes("row1");
    byte[] r2 = Bytes.toBytes("row2");
    byte[] value0 = Bytes.toBytes("value0");
    byte[] value1 = Bytes.toBytes("value1");
    byte[] value2 = Bytes.toBytes("value2");
    long ts = EnvironmentEdgeManager.currentTime();
    final long seqId = 100;
    // Pin the smallest read point above every cell's seqId so the flush can
    // drop superseded versions.
    init(name.getMethodName(), conf, new HTableDescriptor(TableName.valueOf(table)), hcd, new MyStoreHook() {
      @Override
      public long getSmallestReadPoint(HStore store) {
        return seqId + 3;
      }
    });
    // The cells having the value0 won't be flushed to disk because the value of max version is 1
    store.add(createCell(r0, qf1, ts, seqId, value0));
    store.add(createCell(r0, qf2, ts, seqId, value0));
    store.add(createCell(r0, qf3, ts, seqId, value0));
    store.add(createCell(r1, qf1, ts + 1, seqId + 1, value1));
    store.add(createCell(r1, qf2, ts + 1, seqId + 1, value1));
    store.add(createCell(r1, qf3, ts + 1, seqId + 1, value1));
    store.add(createCell(r2, qf1, ts + 2, seqId + 2, value2));
    store.add(createCell(r2, qf2, ts + 2, seqId + 2, value2));
    store.add(createCell(r2, qf3, ts + 2, seqId + 2, value2));
    // Newer r1 versions supersede the ts+1 ones under max-versions=1.
    store.add(createCell(r1, qf1, ts + 3, seqId + 3, value1));
    store.add(createCell(r1, qf2, ts + 3, seqId + 3, value1));
    store.add(createCell(r1, qf3, ts + 3, seqId + 3, value1));
    // MyList fires the hook (which flushes mid-row) on each add.
    List<Cell> myList = new MyList<>(hook);
    Scan scan = new Scan()
            .withStartRow(r1)
            .setFilter(filter);
    try (InternalScanner scanner = (InternalScanner) store.getScanner(
          scan, null, seqId + 3)){
      // r1
      scanner.next(myList);
      assertEquals(3, myList.size());
      for (Cell c : myList) {
        byte[] actualValue = CellUtil.cloneValue(c);
        assertTrue("expected:" + Bytes.toStringBinary(value1)
          + ", actual:" + Bytes.toStringBinary(actualValue)
          , Bytes.equals(actualValue, value1));
      }
      List<Cell> normalList = new ArrayList<>(3);
      // r2
      scanner.next(normalList);
      assertEquals(3, normalList.size());
      for (Cell c : normalList) {
        byte[] actualValue = CellUtil.cloneValue(c);
        assertTrue("expected:" + Bytes.toStringBinary(value2)
          + ", actual:" + Bytes.toStringBinary(actualValue)
          , Bytes.equals(actualValue, value2));
      }
    }
  }
1392 
  /**
   * Scans while the single memstore chunk is flushed (hence eligible for
   * reclaim) and then refilled with newer cells. The in-flight scan must
   * still return the original values, proving scanned cells are not
   * corrupted by chunk reuse. (Method name typo is historical; kept to
   * avoid churning the public test name.)
   */
  @Test
  public void testReclaimChunkWhenScaning() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Give the chunk pool everything so chunks are pooled and reused.
    conf.setFloat(CHUNK_POOL_MAXSIZE_KEY, 1);
    init("testReclaimChunkWhenScaning", conf);
    final long ts = EnvironmentEdgeManager.currentTime();
    final long seqId = 100;
    byte[] value = Bytes.toBytes("value");
    // older data which shouldn't be "seen" by client
    store.add(createCell(qf1, ts, seqId, value));
    store.add(createCell(qf2, ts, seqId, value));
    store.add(createCell(qf3, ts, seqId, value));
    TreeSet<byte[]> quals = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    quals.add(qf1);
    quals.add(qf2);
    quals.add(qf3);
    try (InternalScanner scanner = (InternalScanner) store.getScanner(
        new Scan(new Get(row)), quals, seqId)) {
      List<Cell> results = new MyList<>(new MyListHook() {
        @Override
        public void hook(int size) {
          switch (size) {
            // 1) we get the first cell (qf1)
            // 2) flush the data to have StoreScanner update inner scanners
            // 3) the chunk will be reclaimed after updating
            case 1:
              try {
                flushStore(store, id++);
              } catch (IOException e) {
                throw new RuntimeException(e);
              }
              break;
            // 1) we get the second cell (qf2)
            // 2) add some cell to fill some byte into the chunk (we have only one chunk)
            case 2:
              try {
                byte[] newValue = Bytes.toBytes("newValue");
                // newer data which shouldn't be "seen" by this scan
                store.add(createCell(qf1, ts + 1, seqId + 1, newValue));
                store.add(createCell(qf2, ts + 1, seqId + 1, newValue));
                store.add(createCell(qf3, ts + 1, seqId + 1, newValue));
              } catch (IOException e) {
                throw new RuntimeException(e);
              }
              break;
            default:
              break;
          }
        }
      });
      scanner.next(results);
      assertEquals(3, results.size());
      // Every returned cell must carry the ORIGINAL value, untouched by
      // the chunk being reclaimed and refilled mid-scan.
      for (Cell c : results) {
        byte[] actualValue = CellUtil.cloneValue(c);
        assertTrue("expected:" + Bytes.toStringBinary(value)
          + ", actual:" + Bytes.toStringBinary(actualValue)
          , Bytes.equals(actualValue, value));
      }
    }
  }
1453 
1454   @Test
1455   public void testHFileContextSetWithCFAndTable() throws Exception {
1456     init(this.name.getMethodName());
1457     StoreFile.Writer writer = store.createWriterInTmp(10000L,
1458         Compression.Algorithm.NONE, false, true, false, true, -1);
1459     HFileContext hFileContext = writer.getHFileWriter().getFileContext();
1460     assertArrayEquals(family, hFileContext.getColumnFamily());
1461     assertArrayEquals(table, hFileContext.getTableName());
1462   }
1463 
1464   private MyStore initMyStore(String methodName, Configuration conf, MyStoreHook hook) throws IOException {
1465     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
1466     HColumnDescriptor hcd = new HColumnDescriptor(family);
1467     hcd.setMaxVersions(5);
1468     return (MyStore) init(methodName, conf, htd, hcd, hook);
1469   }
1470 
  /**
   * HStore subclass that routes scanner creation and the smallest-read-point
   * query through a {@link MyStoreHook}, letting tests inject work (e.g. a
   * concurrent flush) at the exact moment scanners are being built.
   */
  private static class MyStore extends HStore {

    private final MyStoreHook hook;

    MyStore(final HRegion region, final HColumnDescriptor family,
            final Configuration confParam, MyStoreHook hook) throws IOException {
      super(region, family, confParam);
      this.hook = hook;
    }

    @Override
    public List<KeyValueScanner> getScanners(List<StoreFile> files, boolean cacheBlocks,
            boolean isGet, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher,
            byte[] startRow, byte[] stopRow, long readPt, boolean includeMemstoreScanner) throws IOException {
      // Let the hook run (e.g. trigger a racing flush) BEFORE the real
      // scanners are created.
      hook.getScanners(this);
      return super.getScanners(files, cacheBlocks, isGet, usePread,
              isCompaction, matcher, startRow, stopRow, readPt, includeMemstoreScanner);
    }

    @Override
    public long getSmallestReadPoint() {
      return hook.getSmallestReadPoint(this);
    }
  }
1495 
  /**
   * Hook points for {@link MyStore}: observe scanner creation and/or override
   * the smallest read point. Defaults are a no-op and a pass-through.
   */
  private abstract class MyStoreHook {
    // Called just before MyStore builds its scanners; default does nothing.
    void getScanners(MyStore store) throws IOException {
    }
    // Default: delegate to the region's real smallest read point.
    long getSmallestReadPoint(HStore store) {
      return store.getHRegion().getSmallestReadPoint();
    }
  }
1503 
  /** Callback fired by {@link MyList#add(Object)} with the list size BEFORE the add. */
  interface MyListHook {
    void hook(int currentSize);
  }
1507 
1508   private static class MyList<T> implements List<T> {
1509     private final List<T> delegatee = new ArrayList<>();
1510     private final MyListHook hookAtAdd;
1511     MyList(final MyListHook hookAtAdd) {
1512       this.hookAtAdd = hookAtAdd;
1513     }
1514     @Override
1515     public int size() {return delegatee.size();}
1516 
1517     @Override
1518     public boolean isEmpty() {return delegatee.isEmpty();}
1519 
1520     @Override
1521     public boolean contains(Object o) {return delegatee.contains(o);}
1522 
1523     @Override
1524     public Iterator<T> iterator() {return delegatee.iterator();}
1525 
1526     @Override
1527     public Object[] toArray() {return delegatee.toArray();}
1528 
1529     @Override
1530     public <T> T[] toArray(T[] a) {return delegatee.toArray(a);}
1531 
1532     @Override
1533     public boolean add(T e) {
1534       hookAtAdd.hook(size());
1535       return delegatee.add(e);
1536     }
1537 
1538     @Override
1539     public boolean remove(Object o) {return delegatee.remove(o);}
1540 
1541     @Override
1542     public boolean containsAll(Collection<?> c) {return delegatee.containsAll(c);}
1543 
1544     @Override
1545     public boolean addAll(Collection<? extends T> c) {return delegatee.addAll(c);}
1546 
1547     @Override
1548     public boolean addAll(int index, Collection<? extends T> c) {return delegatee.addAll(index, c);}
1549 
1550     @Override
1551     public boolean removeAll(Collection<?> c) {return delegatee.removeAll(c);}
1552 
1553     @Override
1554     public boolean retainAll(Collection<?> c) {return delegatee.retainAll(c);}
1555 
1556     @Override
1557     public void clear() {delegatee.clear();}
1558 
1559     @Override
1560     public T get(int index) {return delegatee.get(index);}
1561 
1562     @Override
1563     public T set(int index, T element) {return delegatee.set(index, element);}
1564 
1565     @Override
1566     public void add(int index, T element) {delegatee.add(index, element);}
1567 
1568     @Override
1569     public T remove(int index) {return delegatee.remove(index);}
1570 
1571     @Override
1572     public int indexOf(Object o) {return delegatee.indexOf(o);}
1573 
1574     @Override
1575     public int lastIndexOf(Object o) {return delegatee.lastIndexOf(o);}
1576 
1577     @Override
1578     public ListIterator<T> listIterator() {return delegatee.listIterator();}
1579 
1580     @Override
1581     public ListIterator<T> listIterator(int index) {return delegatee.listIterator(index);}
1582 
1583     @Override
1584     public List<T> subList(int fromIndex, int toIndex) {return delegatee.subList(fromIndex, toIndex);}
1585   }
1586 }