/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18  package org.apache.hadoop.hbase.hbtop;
19  
20  import static org.junit.Assert.assertEquals;
21  import static org.junit.Assert.fail;
22  
23  import com.google.common.collect.Lists;
24  import com.google.common.collect.Maps;
25  import com.google.common.collect.Sets;
26  import com.google.protobuf.ByteString;
27  import java.text.ParseException;
28  import java.util.List;
29  import java.util.Map;
30  import java.util.Set;
31  import org.apache.commons.lang3.time.FastDateFormat;
32  import org.apache.hadoop.hbase.ClusterStatus;
33  import org.apache.hadoop.hbase.HRegionInfo;
34  import org.apache.hadoop.hbase.ServerLoad;
35  import org.apache.hadoop.hbase.ServerName;
36  import org.apache.hadoop.hbase.TableName;
37  import org.apache.hadoop.hbase.hbtop.field.Field;
38  import org.apache.hadoop.hbase.hbtop.field.Size;
39  import org.apache.hadoop.hbase.hbtop.field.Size.Unit;
40  import org.apache.hadoop.hbase.hbtop.screen.top.Summary;
41  import org.apache.hadoop.hbase.master.RegionState;
42  import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
43  import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
44  
45  public final class TestUtils {
46  
47    static final String HBASE_VERSION = "1.5.0-SNAPSHOT";
48    static final String CLUSTER_UUID = "01234567-89ab-cdef-0123-456789abcdef";
49  
50    private TestUtils() { }
51  
52    public static ClusterStatus createDummyClusterStatus() {
53      Map<ServerName, ServerLoad> serverLoads = Maps.newHashMap();
54      List<ServerName> deadServers = Lists.newArrayList();
55      Set<RegionState> rit = Sets.newHashSet();
56  
57      ServerName host1 = ServerName.valueOf("host1.apache.com", 1000, 1);
58  
59      serverLoads.put(host1,
60        createServerLoad(100,
61          new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 100,
62          Lists.newArrayList(
63            createRegionLoad("table1,,1.00000000000000000000000000000000.", 100, 100,
64              new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1,
65              new Size(100, Size.Unit.MEGABYTE), 0.1f, 100, 100, "2019-07-22 00:00:00"),
66            createRegionLoad("table2,1,2.00000000000000000000000000000001.", 200, 200,
67              new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2,
68              new Size(200, Size.Unit.MEGABYTE), 0.2f, 50, 200, "2019-07-22 00:00:01"),
69            createRegionLoad(
70              "namespace:table3,,3_0001.00000000000000000000000000000002.", 300, 300,
71              new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3,
72              new Size(300, Size.Unit.MEGABYTE), 0.3f, 100, 300, "2019-07-22 00:00:02"))));
73  
74      ServerName host2 = ServerName.valueOf("host2.apache.com", 1001, 2);
75  
76      serverLoads.put(host2,
77        createServerLoad(200,
78          new Size(16, Size.Unit.GIGABYTE), new Size(32, Size.Unit.GIGABYTE), 200,
79          Lists.newArrayList(
80            createRegionLoad("table1,1,4.00000000000000000000000000000003.", 100, 100,
81              new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1,
82              new Size(100, Size.Unit.MEGABYTE), 0.4f, 50, 100, "2019-07-22 00:00:03"),
83            createRegionLoad("table2,,5.00000000000000000000000000000004.", 200, 200,
84              new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2,
85              new Size(200, Size.Unit.MEGABYTE), 0.5f, 150, 200, "2019-07-22 00:00:04"),
86            createRegionLoad("namespace:table3,,6.00000000000000000000000000000005.", 300, 300,
87              new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3,
88              new Size(300, Size.Unit.MEGABYTE), 0.6f, 200, 300, "2019-07-22 00:00:05"))));
89  
90      ServerName host3 = ServerName.valueOf("host3.apache.com", 1002, 3);
91  
92      deadServers.add(host3);
93  
94      rit.add(new RegionState(new HRegionInfo(0, TableName.valueOf("table4"), 0),
95        RegionState.State.OFFLINE, host3));
96  
97      return new ClusterStatus(HBASE_VERSION, CLUSTER_UUID, serverLoads, deadServers, null, null,
98        rit, new String[0], true);
99    }
100 
101   private static ClusterStatusProtos.RegionLoad createRegionLoad(String regionName,
102     long readRequestCount, long writeRequestCount, Size storeFileSize,
103     Size uncompressedStoreFileSize, int storeFileCount, Size memStoreSize, float locality,
104     long compactedCellCount, long compactingCellCount, String lastMajorCompactionTime) {
105     FastDateFormat df = FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss");
106     try {
107       return ClusterStatusProtos.RegionLoad.newBuilder()
108         .setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder()
109           .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
110           .setValue(ByteString.copyFromUtf8(regionName)).build())
111         .setReadRequestsCount(readRequestCount)
112         .setWriteRequestsCount(writeRequestCount)
113         .setStorefileSizeMB((int)storeFileSize.get(Unit.MEGABYTE))
114         .setStoreUncompressedSizeMB((int)uncompressedStoreFileSize.get(Unit.MEGABYTE))
115         .setStorefiles(storeFileCount)
116         .setMemstoreSizeMB((int)memStoreSize.get(Unit.MEGABYTE))
117         .setDataLocality(locality)
118         .setCurrentCompactedKVs(compactedCellCount)
119         .setTotalCompactingKVs(compactingCellCount)
120         .setLastMajorCompactionTs(df.parse(lastMajorCompactionTime).getTime())
121         .build();
122     } catch (ParseException e) {
123       throw new IllegalArgumentException(e);
124     }
125   }
126 
127   private static ServerLoad createServerLoad(long reportTimestamp,
128     Size usedHeapSize, Size maxHeapSize, long requestCountPerSecond,
129     List<ClusterStatusProtos.RegionLoad> regionLoads) {
130     return new ServerLoad(ClusterStatusProtos.ServerLoad.newBuilder()
131         .setReportStartTime(reportTimestamp)
132         .setReportEndTime(reportTimestamp)
133         .setUsedHeapMB((int)usedHeapSize.get(Unit.MEGABYTE))
134         .setMaxHeapMB((int)maxHeapSize.get(Unit.MEGABYTE))
135         .setNumberOfRequests(requestCountPerSecond)
136         .addAllRegionLoads(regionLoads)
137         .build());
138   }
139 
140   public static void assertRecordsInRegionMode(List<Record> records) {
141     assertEquals(6, records.size());
142 
143     for (Record record : records) {
144       switch (record.get(Field.REGION_NAME).asString()) {
145         case "table1,,1.00000000000000000000000000000000.":
146           assertRecordInRegionMode(record, "default", "1", "", "table1",
147             "00000000000000000000000000000000", "host1:1000", "host1.apache.com,1000,1",
148             0L, 0L, 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1,
149             new Size(100, Size.Unit.MEGABYTE), 0.1f, "", 100L, 100L, 100f,
150             "2019-07-22 00:00:00");
151           break;
152 
153         case "table1,1,4.00000000000000000000000000000003.":
154           assertRecordInRegionMode(record, "default", "4", "", "table1",
155             "00000000000000000000000000000003", "host2:1001", "host2.apache.com,1001,2",
156             0L, 0L, 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1,
157             new Size(100, Size.Unit.MEGABYTE), 0.4f, "1", 100L, 50L, 50f,
158             "2019-07-22 00:00:03");
159           break;
160 
161         case "table2,,5.00000000000000000000000000000004.":
162           assertRecordInRegionMode(record, "default", "5", "", "table2",
163             "00000000000000000000000000000004", "host2:1001", "host2.apache.com,1001,2",
164             0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2,
165             new Size(200, Size.Unit.MEGABYTE), 0.5f, "", 200L, 150L, 75f,
166             "2019-07-22 00:00:04");
167           break;
168 
169         case "table2,1,2.00000000000000000000000000000001.":
170           assertRecordInRegionMode(record, "default", "2", "", "table2",
171             "00000000000000000000000000000001", "host1:1000", "host1.apache.com,1000,1",
172             0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2,
173             new Size(200, Size.Unit.MEGABYTE), 0.2f, "1", 200L, 50L, 25f,
174             "2019-07-22 00:00:01");
175           break;
176 
177         case "namespace:table3,,6.00000000000000000000000000000005.":
178           assertRecordInRegionMode(record, "namespace", "6", "", "table3",
179             "00000000000000000000000000000005", "host2:1001", "host2.apache.com,1001,2",
180             0L, 0L, 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3,
181             new Size(300, Size.Unit.MEGABYTE), 0.6f, "", 300L, 200L, 66.66667f,
182             "2019-07-22 00:00:05");
183           break;
184 
185         case "namespace:table3,,3_0001.00000000000000000000000000000002.":
186           assertRecordInRegionMode(record, "namespace", "3", "1", "table3",
187             "00000000000000000000000000000002", "host1:1000", "host1.apache.com,1000,1",
188             0L, 0L, 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3,
189             new Size(300, Size.Unit.MEGABYTE), 0.3f, "", 300L, 100L, 33.333336f,
190             "2019-07-22 00:00:02");
191           break;
192 
193         default:
194           fail();
195       }
196     }
197   }
198 
199   private static void assertRecordInRegionMode(Record record, String namespace, String startCode,
200     String replicaId, String table, String region, String regionServer, String longRegionServer,
201     long requestCountPerSecond, long readRequestCountPerSecond, long writeCountRequestPerSecond,
202     Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles,
203     Size memStoreSize, float locality, String startKey, long compactingCellCount,
204     long compactedCellCount, float compactionProgress, String lastMajorCompactionTime) {
205     assertEquals(21, record.size());
206     assertEquals(namespace, record.get(Field.NAMESPACE).asString());
207     assertEquals(startCode, record.get(Field.START_CODE).asString());
208     assertEquals(replicaId, record.get(Field.REPLICA_ID).asString());
209     assertEquals(table, record.get(Field.TABLE).asString());
210     assertEquals(region, record.get(Field.REGION).asString());
211     assertEquals(regionServer, record.get(Field.REGION_SERVER).asString());
212     assertEquals(longRegionServer, record.get(Field.LONG_REGION_SERVER).asString());
213     assertEquals(requestCountPerSecond, record.get(Field.REQUEST_COUNT_PER_SECOND).asLong());
214     assertEquals(readRequestCountPerSecond,
215       record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong());
216     assertEquals(writeCountRequestPerSecond,
217       record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong());
218     assertEquals(storeFileSize, record.get(Field.STORE_FILE_SIZE).asSize());
219     assertEquals(uncompressedStoreFileSize,
220       record.get(Field.UNCOMPRESSED_STORE_FILE_SIZE).asSize());
221     assertEquals(numStoreFiles, record.get(Field.NUM_STORE_FILES).asInt());
222     assertEquals(record.get(Field.MEM_STORE_SIZE).asSize(), memStoreSize);
223     assertEquals(locality, record.get(Field.LOCALITY).asFloat(), 0.001);
224     assertEquals(startKey, record.get(Field.START_KEY).asString());
225     assertEquals(compactingCellCount, record.get(Field.COMPACTING_CELL_COUNT).asLong());
226     assertEquals(compactedCellCount, record.get(Field.COMPACTED_CELL_COUNT).asLong());
227     assertEquals(compactionProgress, record.get(Field.COMPACTION_PROGRESS).asFloat(), 0.001);
228     assertEquals(lastMajorCompactionTime,
229       record.get(Field.LAST_MAJOR_COMPACTION_TIME).asString());
230   }
231 
232   public static void assertRecordsInNamespaceMode(List<Record> records) {
233     assertEquals(2, records.size());
234 
235     for (Record record : records) {
236       switch (record.get(Field.NAMESPACE).asString()) {
237         case "default":
238           assertRecordInNamespaceMode(record, 0L, 0L, 0L, new Size(600, Size.Unit.MEGABYTE),
239             new Size(1200, Size.Unit.MEGABYTE), 6, new Size(600, Size.Unit.MEGABYTE), 4);
240           break;
241 
242         case "namespace":
243           assertRecordInNamespaceMode(record, 0L, 0L, 0L, new Size(600, Size.Unit.MEGABYTE),
244             new Size(1200, Size.Unit.MEGABYTE), 6, new Size(600, Size.Unit.MEGABYTE), 2);
245           break;
246 
247         default:
248           fail();
249       }
250     }
251   }
252 
253   private static void assertRecordInNamespaceMode(Record record, long requestCountPerSecond,
254     long readRequestCountPerSecond, long writeCountRequestPerSecond, Size storeFileSize,
255     Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, int regionCount) {
256     assertEquals(9, record.size());
257     assertEquals(requestCountPerSecond, record.get(Field.REQUEST_COUNT_PER_SECOND).asLong());
258     assertEquals(readRequestCountPerSecond,
259       record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong());
260     assertEquals(writeCountRequestPerSecond,
261       record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong());
262     assertEquals(storeFileSize, record.get(Field.STORE_FILE_SIZE).asSize());
263     assertEquals(uncompressedStoreFileSize,
264       record.get(Field.UNCOMPRESSED_STORE_FILE_SIZE).asSize());
265     assertEquals(numStoreFiles, record.get(Field.NUM_STORE_FILES).asInt());
266     assertEquals(memStoreSize, record.get(Field.MEM_STORE_SIZE).asSize());
267     assertEquals(regionCount, record.get(Field.REGION_COUNT).asInt());
268   }
269 
270   public static void assertRecordsInTableMode(List<Record> records) {
271     assertEquals(3, records.size());
272 
273     for (Record record : records) {
274       String tableName = String.format("%s:%s", record.get(Field.NAMESPACE).asString(),
275         record.get(Field.TABLE).asString());
276 
277       switch (tableName) {
278         case "default:table1":
279           assertRecordInTableMode(record, 0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE),
280             new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 2);
281           break;
282 
283         case "default:table2":
284           assertRecordInTableMode(record, 0L, 0L, 0L, new Size(400, Size.Unit.MEGABYTE),
285             new Size(800, Size.Unit.MEGABYTE), 4, new Size(400, Size.Unit.MEGABYTE), 2);
286           break;
287 
288         case "namespace:table3":
289           assertRecordInTableMode(record, 0L, 0L, 0L, new Size(600, Size.Unit.MEGABYTE),
290             new Size(1200, Size.Unit.MEGABYTE), 6, new Size(600, Size.Unit.MEGABYTE), 2);
291           break;
292 
293         default:
294           fail();
295       }
296     }
297   }
298 
299   private static void assertRecordInTableMode(Record record, long requestCountPerSecond,
300     long readRequestCountPerSecond,  long writeCountRequestPerSecond, Size storeFileSize,
301     Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, int regionCount) {
302     assertEquals(10, record.size());
303     assertEquals(requestCountPerSecond, record.get(Field.REQUEST_COUNT_PER_SECOND).asLong());
304     assertEquals(readRequestCountPerSecond,
305       record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong());
306     assertEquals(writeCountRequestPerSecond,
307       record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong());
308     assertEquals(storeFileSize, record.get(Field.STORE_FILE_SIZE).asSize());
309     assertEquals(uncompressedStoreFileSize,
310       record.get(Field.UNCOMPRESSED_STORE_FILE_SIZE).asSize());
311     assertEquals(numStoreFiles, record.get(Field.NUM_STORE_FILES).asInt());
312     assertEquals(memStoreSize, record.get(Field.MEM_STORE_SIZE).asSize());
313     assertEquals(regionCount, record.get(Field.REGION_COUNT).asInt());
314   }
315 
316   public static void assertRecordsInRegionServerMode(List<Record> records) {
317     assertEquals(2, records.size());
318 
319     for (Record record : records) {
320       switch (record.get(Field.REGION_SERVER).asString()) {
321         case "host1:1000":
322           assertRecordInRegionServerMode(record, "host1.apache.com,1000,1", 0L, 0L, 0L,
323             new Size(600, Size.Unit.MEGABYTE), new Size(1200, Size.Unit.MEGABYTE), 6,
324             new Size(600, Size.Unit.MEGABYTE), 3, new Size(100, Size.Unit.MEGABYTE),
325             new Size(200, Size.Unit.MEGABYTE));
326           break;
327 
328         case "host2:1001":
329           assertRecordInRegionServerMode(record, "host2.apache.com,1001,2", 0L, 0L, 0L,
330             new Size(600, Size.Unit.MEGABYTE), new Size(1200, Size.Unit.MEGABYTE), 6,
331             new Size(600, Size.Unit.MEGABYTE), 3, new Size(16, Size.Unit.GIGABYTE),
332             new Size(32, Size.Unit.GIGABYTE));
333           break;
334 
335         default:
336           fail();
337       }
338     }
339   }
340 
341   private static void assertRecordInRegionServerMode(Record record, String longRegionServer,
342     long requestCountPerSecond, long readRequestCountPerSecond, long writeCountRequestPerSecond,
343     Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles,
344     Size memStoreSize, int regionCount, Size usedHeapSize, Size maxHeapSize) {
345     assertEquals(12, record.size());
346     assertEquals(longRegionServer, record.get(Field.LONG_REGION_SERVER).asString());
347     assertEquals(requestCountPerSecond, record.get(Field.REQUEST_COUNT_PER_SECOND).asLong());
348     assertEquals(readRequestCountPerSecond,
349       record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong());
350     assertEquals(writeCountRequestPerSecond,
351       record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong());
352     assertEquals(storeFileSize, record.get(Field.STORE_FILE_SIZE).asSize());
353     assertEquals(uncompressedStoreFileSize,
354       record.get(Field.UNCOMPRESSED_STORE_FILE_SIZE).asSize());
355     assertEquals(numStoreFiles, record.get(Field.NUM_STORE_FILES).asInt());
356     assertEquals(memStoreSize, record.get(Field.MEM_STORE_SIZE).asSize());
357     assertEquals(regionCount, record.get(Field.REGION_COUNT).asInt());
358     assertEquals(usedHeapSize, record.get(Field.USED_HEAP_SIZE).asSize());
359     assertEquals(maxHeapSize, record.get(Field.MAX_HEAP_SIZE).asSize());
360   }
361 
362   public static void assertSummary(Summary summary) {
363     assertEquals(HBASE_VERSION, summary.getVersion());
364     assertEquals(CLUSTER_UUID, summary.getClusterId());
365     assertEquals(3, summary.getServers());
366     assertEquals(2, summary.getLiveServers());
367     assertEquals(1, summary.getDeadServers());
368     assertEquals(6, summary.getRegionCount());
369     assertEquals(1, summary.getRitCount());
370     assertEquals(3.0, summary.getAverageLoad(), 0.001);
371     assertEquals(300L, summary.getAggregateRequestPerSecond());
372   }
373 }