View Javadoc

1   /*
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.io.hfile;
21  
22  import static org.junit.Assert.assertEquals;
23  import static org.junit.Assert.assertFalse;
24  import static org.junit.Assert.assertNotEquals;
25  import static org.junit.Assert.assertTrue;
26  
27  import java.io.IOException;
28  import java.util.ArrayList;
29  import java.util.Collection;
30  import java.util.EnumMap;
31  import java.util.List;
32  import java.util.Random;
33  import java.util.Set;
34  
35  import com.google.common.collect.ImmutableSet;
36  import org.apache.commons.logging.Log;
37  import org.apache.commons.logging.LogFactory;
38  import org.apache.hadoop.conf.Configuration;
39  import org.apache.hadoop.fs.FileSystem;
40  import org.apache.hadoop.fs.Path;
41  import org.apache.hadoop.hbase.HBaseTestingUtility;
42  import org.apache.hadoop.hbase.HColumnDescriptor;
43  import org.apache.hadoop.hbase.HConstants;
44  import org.apache.hadoop.hbase.KeyValue;
45  import org.apache.hadoop.hbase.testclassification.MediumTests;
46  import org.apache.hadoop.hbase.Tag;
47  import org.apache.hadoop.hbase.client.Durability;
48  import org.apache.hadoop.hbase.client.Put;
49  import org.apache.hadoop.hbase.fs.HFileSystem;
50  import org.apache.hadoop.hbase.io.compress.Compression;
51  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
52  import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
53  import org.apache.hadoop.hbase.regionserver.BloomType;
54  import org.apache.hadoop.hbase.regionserver.HRegion;
55  import org.apache.hadoop.hbase.regionserver.Region;
56  import org.apache.hadoop.hbase.regionserver.StoreFile;
57  import org.apache.hadoop.hbase.util.BloomFilterFactory;
58  import org.apache.hadoop.hbase.util.Bytes;
59  import org.apache.hadoop.hbase.util.ChecksumType;
60  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
61  import org.junit.After;
62  import org.junit.AfterClass;
63  import org.junit.Before;
64  import org.junit.Test;
65  import org.junit.experimental.categories.Category;
66  import org.junit.runner.RunWith;
67  import org.junit.runners.Parameterized;
68  import org.junit.runners.Parameterized.Parameters;
69  
70  import com.google.common.collect.Lists;
71  
72  /**
73   * Tests {@link HFile} cache-on-write functionality for the following block
74   * types: data blocks, non-root index blocks, and Bloom filter blocks.
75   */
76  @RunWith(Parameterized.class)
77  @Category(MediumTests.class)
78  public class TestCacheOnWrite {
79  
  private static final Log LOG = LogFactory.getLog(TestCacheOnWrite.class);

  // Local (no mini-cluster) test utility; supplies the Configuration and temp dirs.
  private static final HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();
  private Configuration conf;
  private CacheConfig cacheConf;
  private FileSystem fs;
  // Fixed seed so the generated keys/values — and hence the exact block counts
  // asserted in readStoreFile() — are reproducible across runs.
  private Random rand = new Random(12983177L);
  private Path storeFilePath;
  private BlockCache blockCache;
  private String testDescription;

  // Parameterized inputs; one combination per run (see getParameters()).
  private final CacheOnWriteType cowType;
  private final Compression.Algorithm compress;
  private final boolean cacheCompressedData;

  private static final int DATA_BLOCK_SIZE = 2048;
  private static final int NUM_KV = 25000;
  private static final int INDEX_BLOCK_SIZE = 512;
  private static final int BLOOM_BLOCK_SIZE = 4096;
  private static final BloomType BLOOM_TYPE = BloomType.ROWCOL;
  // Bytes per checksum chunk used when writing/reading store files.
  private static final int CKBYTES = 512;


  // Block-type groupings used to classify cached blocks in the compaction tests.
  private static final Set<BlockType> INDEX_BLOCK_TYPES = ImmutableSet.of(
    BlockType.INDEX_V1,
    BlockType.INTERMEDIATE_INDEX,
    BlockType.ROOT_INDEX,
    BlockType.LEAF_INDEX
  );
  private static final Set<BlockType> BLOOM_BLOCK_TYPES = ImmutableSet.of(
    BlockType.BLOOM_CHUNK,
    BlockType.GENERAL_BLOOM_META,
    BlockType.DELETE_FAMILY_BLOOM_META
  );
  private static final Set<BlockType> DATA_BLOCK_TYPES = ImmutableSet.of(
    BlockType.ENCODED_DATA,
    BlockType.DATA
  );

  // All test cases are supposed to generate files for compaction within this range
  private static final long CACHE_COMPACTION_LOW_THRESHOLD = 10L;
  private static final long CACHE_COMPACTION_HIGH_THRESHOLD = 1 * 1024 * 1024 * 1024L;

  /** The number of valid key types possible in a store file
   *  (all KeyValue.Type values except Minimum and Maximum). */
  private static final int NUM_VALID_KEY_TYPES =
      KeyValue.Type.values().length - 2;
127   private static enum CacheOnWriteType {
128     DATA_BLOCKS(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
129         BlockType.DATA, BlockType.ENCODED_DATA),
130     BLOOM_BLOCKS(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
131         BlockType.BLOOM_CHUNK),
132     INDEX_BLOCKS(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
133         BlockType.LEAF_INDEX, BlockType.INTERMEDIATE_INDEX);
134 
135     private final String confKey;
136     private final BlockType blockType1;
137     private final BlockType blockType2;
138 
139     private CacheOnWriteType(String confKey, BlockType blockType) {
140       this(confKey, blockType, blockType);
141     }
142 
143     private CacheOnWriteType(String confKey, BlockType blockType1,
144         BlockType blockType2) {
145       this.blockType1 = blockType1;
146       this.blockType2 = blockType2;
147       this.confKey = confKey;
148     }
149 
150     public boolean shouldBeCached(BlockType blockType) {
151       return blockType == blockType1 || blockType == blockType2;
152     }
153 
154     public void modifyConf(Configuration conf) {
155       for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
156         conf.setBoolean(cowType.confKey, cowType == this);
157       }
158     }
159   }
160 
161   public TestCacheOnWrite(CacheOnWriteType cowType, Compression.Algorithm compress,
162       boolean cacheCompressedData, BlockCache blockCache) {
163     this.cowType = cowType;
164     this.compress = compress;
165     this.cacheCompressedData = cacheCompressedData;
166     this.blockCache = blockCache;
167     testDescription = "[cacheOnWrite=" + cowType + ", compress=" + compress +
168         ", cacheCompressedData=" + cacheCompressedData + "]";
169     LOG.info(testDescription);
170   }
171 
172   private static List<BlockCache> getBlockCaches() throws IOException {
173     Configuration conf = TEST_UTIL.getConfiguration();
174     List<BlockCache> blockcaches = new ArrayList<BlockCache>();
175     // default
176     blockcaches.add(new CacheConfig(conf).getBlockCache());
177 
178     //set LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME to 2.0f due to HBASE-16287
179     TEST_UTIL.getConfiguration().setFloat(LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, 2.0f);
180     // memory
181     BlockCache lru = new LruBlockCache(128 * 1024 * 1024, 64 * 1024, TEST_UTIL.getConfiguration());
182     blockcaches.add(lru);
183 
184     // bucket cache
185     FileSystem.get(conf).mkdirs(TEST_UTIL.getDataTestDir());
186     int[] bucketSizes =
187         { INDEX_BLOCK_SIZE, DATA_BLOCK_SIZE, BLOOM_BLOCK_SIZE, 64 * 1024, 128 * 1024 };
188     BlockCache bucketcache =
189         new BucketCache("offheap", 128 * 1024 * 1024, 64 * 1024, bucketSizes, 5, 64 * 100, null);
190     blockcaches.add(bucketcache);
191     return blockcaches;
192   }
193 
194   @Parameters
195   public static Collection<Object[]> getParameters() throws IOException {
196     List<Object[]> params = new ArrayList<Object[]>();
197     for (BlockCache blockCache : getBlockCaches()) {
198       for (CacheOnWriteType cowType : CacheOnWriteType.values()) {
199         for (Compression.Algorithm compress : HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
200           for (boolean cacheCompressedData : new boolean[] { false, true }) {
201             params.add(new Object[] { cowType, compress, cacheCompressedData, blockCache });
202           }
203         }
204       }
205     }
206     return params;
207   }
208 
  /**
   * Empties the given cache. An LruBlockCache supports a direct clear; any other
   * implementation is drained by evicting every visible block, retrying (with a
   * short sleep) until the reported block count reaches zero.
   */
  private void clearBlockCache(BlockCache blockCache) throws InterruptedException {
    if (blockCache instanceof LruBlockCache) {
      ((LruBlockCache) blockCache).clearCache();
    } else {
      // BucketCache may not return all cached blocks(blocks in write queue), so check it here.
      for (int clearCount = 0; blockCache.getBlockCount() > 0; clearCount++) {
        if (clearCount > 0) {
          LOG.warn("clear block cache " + blockCache + " " + clearCount + " times, "
              + blockCache.getBlockCount() + " blocks remaining");
          // Give the cache's write queue a moment to drain before retrying.
          Thread.sleep(10);
        }
        // Snapshot the iterator to avoid mutating the cache while iterating it.
        for (CachedBlock block : Lists.newArrayList(blockCache)) {
          BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset());
          // CombinedBucketCache may need evict two times.
          for (int evictCount = 0; blockCache.evictBlock(key); evictCount++) {
            if (evictCount > 1) {
              LOG.warn("evict block " + block + " in " + blockCache + " " + evictCount
                  + " times, maybe a bug here");
            }
          }
        }
      }
    }
  }
233 
  /**
   * Configures block/index/bloom sizes, enables cache-on-write only for the
   * block types this run exercises, and installs the parameterized block cache
   * as the global cache before building the CacheConfig used by writers/readers.
   */
  @Before
  public void setUp() throws IOException {
    conf = TEST_UTIL.getConfiguration();
    this.conf.set("dfs.datanode.data.dir.perm", "700");
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
    // Small index/bloom chunks force multi-level indexes and several bloom chunks.
    conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
    conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE,
        BLOOM_BLOCK_SIZE);
    conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
    // Turn on cache-on-write for exactly this run's block types.
    cowType.modifyConf(conf);
    fs = HFileSystem.get(conf);
    // Make the parameterized cache the process-wide instance so store writers use it.
    CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache;
    cacheConf =
        new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
        cowType.shouldBeCached(BlockType.LEAF_INDEX),
        cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData,
            false, false, false, false);
  }
252 
  @After
  public void tearDown() throws IOException, InterruptedException {
    // The cache instance is shared between parameter runs; leave it empty.
    clearBlockCache(blockCache);
  }
257 
  @AfterClass
  public static void afterClass() throws IOException {
    // Remove the on-disk test directory created by the utility.
    TEST_UTIL.cleanupTestDir();
  }
262 
  // Writes a store file and then verifies cache-on-write behavior for it;
  // called once without and once with tags by testStoreFileCacheOnWrite().
  private void testStoreFileCacheOnWriteInternals(boolean useTags) throws IOException {
    writeStoreFile(useTags);
    readStoreFile(useTags);
  }
267 
  /**
   * Walks every block of the store file written by writeStoreFile() and checks,
   * per block, that it is present in the cache exactly when the current
   * cache-on-write setting says it should be. Also verifies cached blocks match
   * the on-disk blocks and that the per-type block counts equal the values the
   * fixed RNG seed is known to produce.
   */
  private void readStoreFile(boolean useTags) throws IOException {
    AbstractHFileReader reader;
    if (useTags) {
        // Tags imply HFile v3; without tags the file was written as v2.
        reader = (HFileReaderV3) HFile.createReader(fs, storeFilePath, cacheConf, conf);
    } else {
        reader = (HFileReaderV2) HFile.createReader(fs, storeFilePath, cacheConf, conf);
    }
    LOG.info("HFile information: " + reader);
    // Context mirroring the writer's settings; needed to unpack cached compressed blocks.
    HFileContext meta = new HFileContextBuilder().withCompression(compress)
      .withBytesPerCheckSum(CKBYTES).withChecksumType(ChecksumType.NULL)
      .withBlockSize(DATA_BLOCK_SIZE)
      .withDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding())
      .withIncludesTags(useTags).build();
    final boolean cacheBlocks = false;
    final boolean pread = false;
    HFileScanner scanner = reader.getScanner(cacheBlocks, pread);
    assertTrue(testDescription, scanner.seekTo());

    long offset = 0;
    EnumMap<BlockType, Integer> blockCountByType =
        new EnumMap<BlockType, Integer>(BlockType.class);

    DataBlockEncoding encodingInCache = NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding();
    // Scan all blocks up to the load-on-open section (i.e. everything cacheable).
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      // Flags: don't cache the block, use pread, this is not a compaction.
      // Also, pass null for expected block type to avoid checking it.
      HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null,
          encodingInCache);
      BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
          offset);
      HFileBlock fromCache = (HFileBlock) blockCache.getBlock(blockCacheKey, true, false, true);
      boolean isCached = fromCache != null;
      boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
      assertTrue("shouldBeCached: " + shouldBeCached+ "\n" +
          "isCached: " + isCached + "\n" +
          "Test description: " + testDescription + "\n" +
          "block: " + block + "\n" +
          "encodingInCache: " + encodingInCache + "\n" +
          "blockCacheKey: " + blockCacheKey,
        shouldBeCached == isCached);
      if (isCached) {
        if (cacheConf.shouldCacheCompressed(fromCache.getBlockType().getCategory())) {
          // Caching compressed data: the cached block must still be packed
          // (unless the algorithm is NONE, where packed == unpacked).
          if (compress != Compression.Algorithm.NONE) {
            assertFalse(fromCache.isUnpacked());
          }
          fromCache = fromCache.unpack(meta, reader.getUncachedBlockReader());
        } else {
          assertTrue(fromCache.isUnpacked());
        }
        // block we cached at write-time and block read from file should be identical
        assertEquals(block.getChecksumType(), fromCache.getChecksumType());
        assertEquals(block.getBlockType(), fromCache.getBlockType());
        assertNotEquals(block.getBlockType(), BlockType.ENCODED_DATA);
        assertEquals(block.getOnDiskSizeWithHeader(), fromCache.getOnDiskSizeWithHeader());
        assertEquals(block.getOnDiskSizeWithoutHeader(), fromCache.getOnDiskSizeWithoutHeader());
        assertEquals(
          block.getUncompressedSizeWithoutHeader(), fromCache.getUncompressedSizeWithoutHeader());
      }
      offset += block.getOnDiskSizeWithHeader();
      BlockType bt = block.getBlockType();
      Integer count = blockCountByType.get(bt);
      blockCountByType.put(bt, (count == null ? 0 : count) + 1);
    }

    LOG.info("Block count by type: " + blockCountByType);
    String countByType = blockCountByType.toString();
    // Exact counts depend on the fixed RNG seed and the configured block sizes.
    if (useTags) {
      assertEquals("{" + BlockType.DATA
          + "=2663, LEAF_INDEX=297, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=32}", countByType);
    } else {
      assertEquals("{" + BlockType.DATA
          + "=2498, LEAF_INDEX=278, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=31}", countByType);
    }

    // iterate all the keyvalue from hfile
    while (scanner.next()) {
      scanner.getKeyValue();
    }
    reader.close();
  }
348 
349   public static KeyValue.Type generateKeyType(Random rand) {
350     if (rand.nextBoolean()) {
351       // Let's make half of KVs puts.
352       return KeyValue.Type.Put;
353     } else {
354       KeyValue.Type keyType = KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)];
355       if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) {
356         throw new RuntimeException("Generated an invalid key type: " + keyType + ". "
357             + "Probably the layout of KeyValue.Type has changed.");
358       }
359       return keyType;
360     }
361   }
362 
363   private void writeStoreFile(boolean useTags) throws IOException {
364     if(useTags) {
365       TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
366     } else {
367       TEST_UTIL.getConfiguration().setInt("hfile.format.version", 2);
368     }
369     Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(),
370         "test_cache_on_write");
371     HFileContext meta = new HFileContextBuilder().withCompression(compress)
372         .withBytesPerCheckSum(CKBYTES).withChecksumType(ChecksumType.NULL)
373         .withBlockSize(DATA_BLOCK_SIZE)
374         .withDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding())
375         .withIncludesTags(useTags).build();
376     StoreFile.Writer sfw = new StoreFile.WriterBuilder(conf, cacheConf, fs)
377         .withOutputDir(storeFileParentDir).withComparator(KeyValue.COMPARATOR)
378         .withFileContext(meta)
379         .withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build();
380     byte[] cf = Bytes.toBytes("fam");
381     for (int i = 0; i < NUM_KV; ++i) {
382       byte[] row = TestHFileWriterV2.randomOrderedKey(rand, i);
383       byte[] qualifier = TestHFileWriterV2.randomRowOrQualifier(rand);
384       byte[] value = TestHFileWriterV2.randomValue(rand);
385       KeyValue kv;
386       if(useTags) {
387         Tag t = new Tag((byte) 1, "visibility");
388         List<Tag> tagList = new ArrayList<Tag>();
389         tagList.add(t);
390         Tag[] tags = new Tag[1];
391         tags[0] = t;
392         kv =
393             new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length,
394                 Math.abs(rand.nextLong()), generateKeyType(rand), value, 0, value.length, tagList);
395       } else {
396         kv =
397             new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length,
398                 Math.abs(rand.nextLong()), generateKeyType(rand), value, 0, value.length);
399       }
400       sfw.append(kv);
401     }
402 
403     sfw.close();
404     storeFilePath = sfw.getPath();
405   }
406 
  /**
   * Loads a region with five flushed store files, clears the cache, runs a major
   * compaction, and then checks which block types ended up cached, as a function
   * of the cache-compacted-blocks-on-write flag and (optionally) its size
   * threshold. Saves and restores the related configuration keys around the test.
   *
   * @param useTags whether generated KeyValues carry a visibility tag
   * @param cacheBlocksOnCompaction value for CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY
   * @param cacheBlocksOnCompactionThreshold size threshold to apply, or &lt;= 0 to
   *        leave the configured default in place
   */
  private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags,
      boolean cacheBlocksOnCompaction, long cacheBlocksOnCompactionThreshold)
      throws IOException, InterruptedException {
    // create a localConf: remember current values so they can be restored in finally.
    boolean localValue = conf.getBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, false);
    long localCacheCompactedBlocksThreshold = conf
      .getLong(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY,
        CacheConfig.DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD);
    boolean localCacheBloomBlocksValue = conf
      .getBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
        CacheConfig.DEFAULT_CACHE_BLOOMS_ON_WRITE);
    boolean localCacheIndexBlocksValue = conf
      .getBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
        CacheConfig.DEFAULT_CACHE_INDEXES_ON_WRITE);

    try {
      // Set the conf if testing caching compacted blocks on write
      conf.setBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY,
        cacheBlocksOnCompaction);

      // set size threshold if testing compaction size threshold
      if (cacheBlocksOnCompactionThreshold > 0) {
        conf.setLong(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY,
          cacheBlocksOnCompactionThreshold);
      }

      // TODO: need to change this test if we add a cache size threshold for
      // compactions, or if we implement some other kind of intelligent logic for
      // deciding what blocks to cache-on-write on compaction.
      final String table = "CompactionCacheOnWrite";
      final String cf = "myCF";
      final byte[] cfBytes = Bytes.toBytes(cf);
      final int maxVersions = 3;
      Region region = TEST_UTIL.createTestRegion(table,
        new HColumnDescriptor(cf)
          .setCompressionType(compress)
          .setBloomFilterType(BLOOM_TYPE)
          .setMaxVersions(maxVersions)
          .setDataBlockEncoding(NoOpDataBlockEncoder.INSTANCE.getDataBlockEncoding())
      );
      int rowIdx = 0;
      long ts = EnvironmentEdgeManager.currentTime();
      // Write 5 store files x 500 rows x 10 columns x 5 versions, flushing each batch.
      for (int iFile = 0; iFile < 5; ++iFile) {
        for (int iRow = 0; iRow < 500; ++iRow) {
          String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" + iRow;
          Put p = new Put(Bytes.toBytes(rowStr));
          ++rowIdx;
          for (int iCol = 0; iCol < 10; ++iCol) {
            String qualStr = "col" + iCol;
            String valueStr = "value_" + rowStr + "_" + qualStr;
            for (int iTS = 0; iTS < 5; ++iTS) {
              if (useTags) {
                Tag t = new Tag((byte) 1, "visibility");
                Tag[] tags = new Tag[1];
                tags[0] = t;
                KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr),
                  HConstants.LATEST_TIMESTAMP, Bytes.toBytes(valueStr), tags);
                p.add(kv);
              } else {
                KeyValue kv = new KeyValue(Bytes.toBytes(rowStr), cfBytes, Bytes.toBytes(qualStr),
                  ts++, Bytes.toBytes(valueStr));
                p.add(kv);
              }
            }
          }
          p.setDurability(Durability.ASYNC_WAL);
          region.put(p);
        }
        region.flush(true);
      }

      // Start from an empty cache so everything found afterwards came from compaction.
      clearBlockCache(blockCache);
      assertEquals(0, blockCache.getBlockCount());

      region.compact(false);
      LOG.debug("compactStores() returned");

      boolean dataBlockCached = false;
      boolean bloomBlockCached = false;
      boolean indexBlockCached = false;

      // Classify every cached block into data / bloom / index.
      for (CachedBlock block : blockCache) {
        if (DATA_BLOCK_TYPES.contains(block.getBlockType())) {
          dataBlockCached = true;
        } else if (BLOOM_BLOCK_TYPES.contains(block.getBlockType())) {
          bloomBlockCached = true;
        } else if (INDEX_BLOCK_TYPES.contains(block.getBlockType())) {
          indexBlockCached = true;
        }
      }
      // Data blocks should be cached in instances where we are caching blocks on write. In the case
      // of testing
      // BucketCache, we cannot verify block type as it is not stored in the cache.
      boolean cacheOnCompactAndNonBucketCache = cacheBlocksOnCompaction
        && !(blockCache instanceof BucketCache);

      String assertErrorMessage = "\nTest description: " + testDescription +
        "\ncacheBlocksOnCompaction: "
        + cacheBlocksOnCompaction + "\n";

      if (cacheOnCompactAndNonBucketCache && cacheBlocksOnCompactionThreshold > 0) {
        if (cacheBlocksOnCompactionThreshold == CACHE_COMPACTION_HIGH_THRESHOLD) {
          // Files fall under the high threshold, so everything gets cached.
          assertTrue(assertErrorMessage, dataBlockCached);
          assertTrue(assertErrorMessage, bloomBlockCached);
          assertTrue(assertErrorMessage, indexBlockCached);
        } else {
          // Over the low threshold: data blocks are skipped; bloom/index follow
          // their own cache-on-write settings.
          assertFalse(assertErrorMessage, dataBlockCached);

          if (localCacheBloomBlocksValue) {
            assertTrue(assertErrorMessage, bloomBlockCached);
          } else {
            assertFalse(assertErrorMessage, bloomBlockCached);
          }

          if (localCacheIndexBlocksValue) {
            assertTrue(assertErrorMessage, indexBlockCached);
          } else {
            assertFalse(assertErrorMessage, indexBlockCached);
          }
        }
      } else {
        // No threshold under test: caching follows the compaction flag directly.
        assertEquals(assertErrorMessage, cacheOnCompactAndNonBucketCache, dataBlockCached);

        if (cacheOnCompactAndNonBucketCache) {
          assertTrue(assertErrorMessage, bloomBlockCached);
          assertTrue(assertErrorMessage, indexBlockCached);
        }
      }

      ((HRegion)region).close();
    } finally {
      // reset back
      conf.setBoolean(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_KEY, localValue);
      conf.setLong(CacheConfig.CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY,
        localCacheCompactedBlocksThreshold);
      conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, localCacheBloomBlocksValue);
      conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, localCacheIndexBlocksValue);
    }
  }
546 
  // Exercises cache-on-write for a directly-written store file, without and with tags.
  @Test
  public void testStoreFileCacheOnWrite() throws IOException {
    testStoreFileCacheOnWriteInternals(false);
    testStoreFileCacheOnWriteInternals(true);
  }
552 
  // Compaction caching with no size threshold: flag off then flag on.
  @Test
  public void testNotCachingDataBlocksDuringCompaction() throws IOException, InterruptedException {
    testNotCachingDataBlocksDuringCompactionInternals(false, false, -1);
    testNotCachingDataBlocksDuringCompactionInternals(true, true, -1);
  }
558 
  // Compaction caching with a size threshold: high threshold (files below it,
  // blocks cached) and low threshold (files above it, data blocks skipped).
  @Test
  public void testCachingDataBlocksThresholdDuringCompaction()
      throws IOException, InterruptedException {
    testNotCachingDataBlocksDuringCompactionInternals(false, true,
      CACHE_COMPACTION_HIGH_THRESHOLD);
    testNotCachingDataBlocksDuringCompactionInternals(false, true, CACHE_COMPACTION_LOW_THRESHOLD);
  }
566 }