View Javadoc

1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase.regionserver;
19  
20  
21  import java.io.IOException;
22  import java.util.ArrayList;
23  import java.util.List;
24  
25  import org.apache.hadoop.hbase.HConstants;
26  import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
27  import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy;
28  import org.apache.hadoop.hbase.testclassification.SmallTests;
29  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
30  import org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge;
31  import org.junit.Assert;
32  import org.junit.Test;
33  import org.junit.experimental.categories.Category;
34  
/**
 * Regression tests for the default (ratio-based) compaction file-selection policy.
 * <p>
 * The {@code sfCreate(...)} and {@code compactEquals(...)} helpers are inherited from
 * {@link TestCompactionPolicy}: {@code sfCreate} builds mock store files with the given sizes
 * (a leading {@code boolean true} presumably marks them as reference files — confirm in the
 * superclass), and {@code compactEquals} asserts that the policy selects exactly the files
 * with the listed sizes. Calls with no expected sizes assert that nothing is selected.
 */
@Category(SmallTests.class)
public class TestDefaultCompactSelection extends TestCompactionPolicy {

  /**
   * Exercises minor- and major-compaction selection across size ratios, file-count limits,
   * maxCompactSize handling, forced majors, aged majors, and reference files.
   * NOTE(review): the injected {@link TimeOffsetEnvironmentEdge} is never removed in this
   * method (no {@code EnvironmentEdgeManager.reset()} visible) — later tests in the same JVM
   * may observe the offset clock; confirm this is intended.
   */
  @Test
  public void testCompactionRatio() throws IOException {
    // Inject a controllable clock so we can age files past the major-compaction period below.
    TimeOffsetEnvironmentEdge edge = new TimeOffsetEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(edge);
    /**
     * NOTE: these tests are specific to describe the implementation of the
     * current compaction algorithm.  Developed to ensure that refactoring
     * doesn't implicitly alter this.
     */
    // One byte over maxSize: guaranteed to fail any maxCompactSize check.
    long tooBig = maxSize + 1;

    // default case. preserve user ratio on size
    compactEquals(sfCreate(100,50,23,12,12), 23, 12, 12);
    // less than compact threshold = don't compact
    compactEquals(sfCreate(100,50,25,12,12) /* empty */);
    // greater than compact size = skip those
    compactEquals(sfCreate(tooBig, tooBig, 700, 700, 700), 700, 700, 700);
    // big size + threshold
    compactEquals(sfCreate(tooBig, tooBig, 700,700) /* empty */);
    // small files = don't care about ratio
    compactEquals(sfCreate(7,1,1), 7,1,1);

    // don't exceed max file compact threshold
    // note:  file selection starts with largest to smallest.
    compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1);

    // The ratio-violating 50 file is excluded whether it sits first or last.
    compactEquals(sfCreate(50, 10, 10 ,10, 10), 10, 10, 10, 10);

    compactEquals(sfCreate(10, 10, 10, 10, 50), 10, 10, 10, 10);

    // A file just under maxSize is still skipped here; the remaining three are selected.
    compactEquals(sfCreate(251, 253, 251, maxSize -1), 251, 253, 251);

    // All files just under maxSize, but too few remain after ratio filtering => no compaction.
    compactEquals(sfCreate(maxSize -1,maxSize -1,maxSize -1) /* empty */);

    // Always try and compact something to get below blocking storefile count
    // Lower min.size to 1 so the smallest tail of files becomes eligible, then restore it.
    this.conf.setLong("hbase.hstore.compaction.min.size", 1);
    store.storeEngine.getCompactionPolicy().setConf(conf);
    compactEquals(sfCreate(512,256,128,64,32,16,8,4,2,1), 4,2,1);
    this.conf.setLong("hbase.hstore.compaction.min.size", minSize);
    store.storeEngine.getCompactionPolicy().setConf(conf);

    /* MAJOR COMPACTION */
    // if a major compaction has been forced, then compact everything
    compactEquals(sfCreate(50,25,12,12), true, 50, 25, 12, 12);
    // also choose files < threshold on major compaction
    compactEquals(sfCreate(12,12), true, 12, 12);
    // even if one of those files is too big
    compactEquals(sfCreate(tooBig, 12,12), true, tooBig, 12, 12);
    // don't exceed max file compact threshold, even with major compaction
    store.forceMajor = true;
    compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1);
    store.forceMajor = false;
    // if we exceed maxCompactSize, downgrade to minor
    // if not, it creates a 'snowball effect' when files >> maxCompactSize:
    // the last file in compaction is the aggregate of all previous compactions
    compactEquals(sfCreate(100,50,23,12,12), true, 23, 12, 12);
    // Shrink the major-compaction period to 1 ms with zero jitter so a tiny clock
    // advance is enough to make files "due" for an aged major compaction.
    conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0);
    store.storeEngine.getCompactionPolicy().setConf(conf);
    try {
      // The modTime of the mocked store file is currentTimeMillis, so we need to increase the
      // timestamp a bit to make sure that now - lowestModTime is greater than major compaction
      // period(1ms).
      // trigger an aged major compaction
      List<StoreFile> candidates = sfCreate(50, 25, 12, 12);
      edge.increment(2);
      compactEquals(candidates, 50, 25, 12, 12);
      // make sure exceeding maxCompactSize also downgrades aged minors
      candidates = sfCreate(100, 50, 23, 12, 12);
      edge.increment(2);
      compactEquals(candidates, 23, 12, 12);
    } finally {
      // Restore the default period/jitter so later tests are unaffected.
      // NOTE(review): setConf is not re-invoked on the policy here, so the policy object may
      // still hold the 1 ms period for the assertions below — confirm this is intended.
      conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24);
      conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
    }

    /* REFERENCES == file is from a region that was split */
    // treat storefiles that have references like a major compaction
    compactEquals(sfCreate(true, 100,50,25,12,12), 100, 50, 25, 12, 12);
    // reference files shouldn't obey max threshold
    compactEquals(sfCreate(true, tooBig, 12,12), tooBig, 12, 12);
    // reference files should obey max file compact to avoid OOM
    compactEquals(sfCreate(true, 7, 6, 5, 4, 3, 2, 1), 7, 6, 5, 4, 3);

    // empty case
    compactEquals(new ArrayList<StoreFile>() /* empty */);
    // empty case (because all files are too big)
    compactEquals(sfCreate(tooBig, tooBig) /* empty */);
  }

  /**
   * Verifies that the off-peak compaction ratio widens selection only when the
   * off-peak flag is passed.
   */
  @Test
  public void testOffPeakCompactionRatio() throws IOException {
    /*
     * NOTE: these tests are specific to describe the implementation of the
     * current compaction algorithm.  Developed to ensure that refactoring
     * doesn't implicitly alter this.
     */
    // set an off-peak compaction threshold
    this.conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);
    store.storeEngine.getCompactionPolicy().setConf(this.conf);
    // Test with and without the flag.
    // Off-peak (ratio 5.0): the 50 file now passes the ratio check and is included.
    compactEquals(sfCreate(999, 50, 12, 12, 1), false, true, 50, 12, 12, 1);
    // Peak (default ratio): only the small tail is selected.
    compactEquals(sfCreate(999, 50, 12, 12, 1), 12, 12, 1);
  }

  /**
   * Verifies selection behavior when the store is "stuck" (at/over the blocking
   * storefile count): the policy must always compact something, preferring the
   * cheapest useful selection.
   */
  @Test
  public void testStuckStoreCompaction() throws IOException {
    // Select the smallest compaction if the store is stuck.
    compactEquals(sfCreate(99,99,99,99,99,99, 30,30,30,30), 30, 30, 30);
    // If not stuck, standard policy applies.
    compactEquals(sfCreate(99,99,99,99,99, 30,30,30,30), 99, 30, 30, 30, 30);

    // Add sufficiently small files to compaction, though
    compactEquals(sfCreate(99,99,99,99,99,99, 30,30,30,15), 30, 30, 30, 15);
    // Prefer earlier compaction to latter if the benefit is not significant
    compactEquals(sfCreate(99,99,99,99, 30,26,26,29,25,25), 30, 26, 26);
    // Prefer later compaction if the benefit is significant.
    compactEquals(sfCreate(99,99,99,99, 27,27,27,20,20,20), 20, 20, 20);
  }

  /**
   * Verifies that a store file with zero entries (and an unset time range) is not
   * selected for compaction even when a TTL is configured.
   */
  @Test
  public void testCompactionEmptyHFile() throws IOException {
    // Set TTL
    // Rebuild ScanInfo with a 600 (presumably seconds — confirm) TTL, keeping every
    // other setting from the current ScanInfo; restored at the end of the test.
    ScanInfo oldScanInfo = store.getScanInfo();
    ScanInfo newScanInfo = new ScanInfo(oldScanInfo.getConfiguration(), oldScanInfo.getFamily(),
        oldScanInfo.getMinVersions(), oldScanInfo.getMaxVersions(), 600,
        oldScanInfo.getKeepDeletedCells(), oldScanInfo.getTimeToPurgeDeletes(),
        oldScanInfo.getComparator());
    store.setScanInfo(newScanInfo);
    // Do not compact empty store file
    // Mark the mock file as empty: no entries and an uninitialized (-1,-1) time range.
    List<StoreFile> candidates = sfCreate(0);
    for (StoreFile file : candidates) {
      if (file instanceof MockStoreFile) {
        MockStoreFile mockFile = (MockStoreFile) file;
        mockFile.setTimeRangeTracker(new TimeRangeTracker(-1, -1));
        mockFile.setEntries(0);
      }
    }
    // Test Default compactions
    // Invoke the policy directly with no files currently compacting and all selection
    // flags disabled; the empty file must yield an empty selection.
    CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine
        .getCompactionPolicy()).selectCompaction(candidates,
        new ArrayList<StoreFile>(), false, false, false);
    Assert.assertTrue(result.getFiles().size() == 0);
    store.setScanInfo(oldScanInfo);
  }
}