/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
11 package org.apache.hadoop.hbase.regionserver.throttle;
12
13 import static org.junit.Assert.assertEquals;
14 import static org.junit.Assert.assertTrue;
15
16 import java.io.IOException;
17 import java.util.List;
18 import java.util.Random;
19 import java.util.concurrent.TimeUnit;
20
21 import org.apache.commons.logging.Log;
22 import org.apache.commons.logging.LogFactory;
23 import org.apache.hadoop.conf.Configuration;
24 import org.apache.hadoop.hbase.HBaseTestingUtility;
25 import org.apache.hadoop.hbase.HColumnDescriptor;
26 import org.apache.hadoop.hbase.HTableDescriptor;
27 import org.apache.hadoop.hbase.MiniHBaseCluster;
28 import org.apache.hadoop.hbase.TableName;
29 import org.apache.hadoop.hbase.client.Connection;
30 import org.apache.hadoop.hbase.client.ConnectionFactory;
31 import org.apache.hadoop.hbase.client.Put;
32 import org.apache.hadoop.hbase.client.Table;
33 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
34 import org.apache.hadoop.hbase.regionserver.HRegionServer;
35 import org.apache.hadoop.hbase.regionserver.Region;
36 import org.apache.hadoop.hbase.regionserver.Store;
37 import org.apache.hadoop.hbase.regionserver.StoreEngine;
38 import org.apache.hadoop.hbase.regionserver.StripeStoreEngine;
39 import org.apache.hadoop.hbase.testclassification.MediumTests;
40 import org.apache.hadoop.hbase.util.Bytes;
41 import org.apache.hadoop.hbase.util.Pair;
42 import org.apache.hadoop.hbase.util.JVMClusterUtil;
43 import org.junit.After;
44 import org.junit.Before;
45 import org.junit.Rule;
46 import org.junit.Test;
47 import org.junit.experimental.categories.Category;
48 import org.junit.rules.TestName;
49
50 @Category(MediumTests.class)
51 public class TestFlushWithThroughputController {
52 private static final Log LOG = LogFactory.getLog(TestFlushWithThroughputController.class);
53 private static final double EPSILON = 1E-6;
54
55 private HBaseTestingUtility hbtu;
56 @Rule public TestName testName = new TestName();
57 private TableName tableName;
58 private final byte[] family = Bytes.toBytes("f");
59 private final byte[] qualifier = Bytes.toBytes("q");
60
61 @Before
62 public void setUp() {
63 hbtu = new HBaseTestingUtility();
64 tableName = TableName.valueOf("Table-" + testName.getMethodName());
65 hbtu.getConfiguration().set(
66 FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY,
67 PressureAwareFlushThroughputController.class.getName());
68 }
69
70 @After
71 public void tearDown() throws Exception {
72 hbtu.shutdownMiniCluster();
73 }
74
75 private Store getStoreWithName(TableName tableName) {
76 MiniHBaseCluster cluster = hbtu.getMiniHBaseCluster();
77 List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
78 for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
79 HRegionServer hrs = rsts.get(i).getRegionServer();
80 for (Region region : hrs.getOnlineRegions(tableName)) {
81 return region.getStores().iterator().next();
82 }
83 }
84 return null;
85 }
86
87 private void setMaxMinThroughputs(long max, long min) {
88 Configuration conf = hbtu.getConfiguration();
89 conf.setLong(
90 PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_LOWER_BOUND, min);
91 conf.setLong(
92 PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_MAX_THROUGHPUT_UPPER_BOUND, max);
93 }
94
95
96
97
98
99 private Pair<Double, Long> generateAndFlushData(Table table) throws IOException {
100
101
102 final int NUM_FLUSHES = 3, NUM_PUTS = 50, VALUE_SIZE = 200 * 1024;
103 Random rand = new Random();
104 long duration = 0;
105 for (int i = 0; i < NUM_FLUSHES; i++) {
106
107 for (int j = 0; j < NUM_PUTS; j++) {
108 byte[] value = new byte[VALUE_SIZE];
109 rand.nextBytes(value);
110 table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
111 }
112 long startTime = System.nanoTime();
113 hbtu.getHBaseAdmin().flush(tableName);
114 duration += System.nanoTime() - startTime;
115 }
116 Store store = getStoreWithName(tableName);
117 assertEquals(NUM_FLUSHES, store.getStorefilesCount());
118 double throughput = (double)store.getStorefilesSize()
119 / TimeUnit.NANOSECONDS.toSeconds(duration);
120 return new Pair<>(throughput, duration);
121 }
122
123 private long testFlushWithThroughputLimit() throws Exception {
124 final long throughputLimit = 1024 * 1024;
125 setMaxMinThroughputs(throughputLimit, throughputLimit);
126 Configuration conf = hbtu.getConfiguration();
127 conf.setLong(
128 PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_THROUGHPUT_CONTROL_CHECK_INTERVAL,
129 throughputLimit);
130 hbtu.startMiniCluster(1);
131 Table table = hbtu.createTable(tableName, family);
132 Pair<Double, Long> result = generateAndFlushData(table);
133 hbtu.deleteTable(tableName);
134 LOG.debug("Throughput is: " + (result.getFirst() / 1024 / 1024) + " MB/s");
135
136
137 assertTrue(result.getFirst() < throughputLimit * 1.2);
138 assertTrue(result.getFirst() > throughputLimit * 0.8);
139 return result.getSecond();
140 }
141
142 @Test
143 public void testFlushControl() throws Exception {
144 testFlushWithThroughputLimit();
145 }
146
147
148
149
150 @Test
151 public void testFlushThroughputTuning() throws Exception {
152 Configuration conf = hbtu.getConfiguration();
153 setMaxMinThroughputs(20L * 1024 * 1024, 10L * 1024 * 1024);
154 conf.set(StoreEngine.STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName());
155 conf.setInt(PressureAwareFlushThroughputController.HBASE_HSTORE_FLUSH_THROUGHPUT_TUNE_PERIOD,
156 3000);
157 hbtu.startMiniCluster(1);
158 Connection conn = ConnectionFactory.createConnection(conf);
159 HTableDescriptor htd = new HTableDescriptor(tableName);
160 htd.addFamily(new HColumnDescriptor(family));
161 htd.setCompactionEnabled(false);
162 hbtu.getHBaseAdmin().createTable(htd);
163 hbtu.waitTableAvailable(tableName);
164 HRegionServer regionServer = hbtu.getRSForFirstRegionInTable(tableName);
165 PressureAwareFlushThroughputController throughputController =
166 (PressureAwareFlushThroughputController) regionServer.getFlushThroughputController();
167 for (Region region : regionServer.getOnlineRegions()) {
168 region.flush(true);
169 }
170 assertEquals(0.0, regionServer.getFlushPressure(), EPSILON);
171 Thread.sleep(5000);
172 assertEquals(10L * 1024 * 1024, throughputController.getMaxThroughput(), EPSILON);
173 Table table = conn.getTable(tableName);
174 Random rand = new Random();
175 for (int i = 0; i < 10; i++) {
176 for (int j = 0; j < 10; j++) {
177 byte[] value = new byte[256 * 1024];
178 rand.nextBytes(value);
179 table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
180 }
181 }
182 Thread.sleep(5000);
183 double expectedThroughPut = 10L * 1024 * 1024 * (1 + regionServer.getFlushPressure());
184 assertEquals(expectedThroughPut, throughputController.getMaxThroughput(), EPSILON);
185
186 conf.set(FlushThroughputControllerFactory.HBASE_FLUSH_THROUGHPUT_CONTROLLER_KEY,
187 NoLimitThroughputController.class.getName());
188 regionServer.onConfigurationChange(conf);
189 assertTrue(throughputController.isStopped());
190 assertTrue(regionServer.getFlushThroughputController() instanceof NoLimitThroughputController);
191 conn.close();
192 }
193
194
195
196
197 @Test
198 public void testFlushControlForStripedStore() throws Exception {
199 hbtu.getConfiguration().set(StoreEngine.STORE_ENGINE_CLASS_KEY,
200 StripeStoreEngine.class.getName());
201 testFlushWithThroughputLimit();
202 }
203 }