View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.tool;
21  
22  import static org.junit.Assert.assertEquals;
23  import static org.junit.Assert.assertFalse;
24  import static org.junit.Assert.assertNotEquals;
25  import static org.junit.Assert.assertNotNull;
26  import static org.junit.Assert.assertTrue;
27  import static org.mockito.Matchers.anyLong;
28  import static org.mockito.Matchers.argThat;
29  import static org.mockito.Matchers.eq;
30  import static org.mockito.Matchers.isA;
31  import static org.mockito.Mockito.atLeastOnce;
32  import static org.mockito.Mockito.never;
33  import static org.mockito.Mockito.spy;
34  import static org.mockito.Mockito.times;
35  import static org.mockito.Mockito.verify;
36  
37  import java.util.List;
38  import java.util.Map;
39  import java.util.concurrent.ExecutorService;
40  import java.util.concurrent.ScheduledThreadPoolExecutor;
41  
42  import org.apache.hadoop.hbase.HBaseTestingUtility;
43  import org.apache.hadoop.hbase.HColumnDescriptor;
44  import org.apache.hadoop.hbase.HConstants;
45  import org.apache.hadoop.hbase.HRegionInfo;
46  import org.apache.hadoop.hbase.ServerName;
47  import org.apache.hadoop.hbase.TableName;
48  import org.apache.hadoop.hbase.client.HTable;
49  import org.apache.hadoop.hbase.client.Put;
50  import org.apache.hadoop.hbase.client.Table;
51  import org.apache.hadoop.hbase.testclassification.MediumTests;
52  import org.apache.hadoop.hbase.util.Bytes;
53  import org.apache.hadoop.util.ToolRunner;
54  import org.apache.log4j.Appender;
55  import org.apache.log4j.LogManager;
56  import org.apache.log4j.spi.LoggingEvent;
57  import org.junit.After;
58  import org.junit.Before;
59  import org.junit.Ignore;
60  import org.junit.Test;
61  import org.junit.experimental.categories.Category;
62  import org.junit.runner.RunWith;
63  import org.mockito.ArgumentMatcher;
64  import org.mockito.Mock;
65  import org.mockito.runners.MockitoJUnitRunner;
66  
67  import com.google.common.collect.Iterables;
68  
@RunWith(MockitoJUnitRunner.class)
@Category({MediumTests.class})
public class TestCanaryTool {

  // Fresh mini-cluster per test; started in setUp(), stopped in tearDown().
  private HBaseTestingUtility testingUtility;
  // Column family / qualifier used by every table these tests create.
  private static final byte[] FAMILY = Bytes.toBytes("f");
  private static final byte[] COLUMN = Bytes.toBytes("col");

  @Before
  public void setUp() throws Exception {
    testingUtility = new HBaseTestingUtility();
    testingUtility.startMiniCluster();
    // Attach the Mockito-mocked appender to the root logger so individual
    // tests can verify the log messages the canary emits.
    LogManager.getRootLogger().addAppender(mockAppender);
  }

  @After
  public void tearDown() throws Exception {
    testingUtility.shutdownMiniCluster();
    // Detach the mock so captured events do not leak across test classes.
    LogManager.getRootLogger().removeAppender(mockAppender);
  }

  // Injected by MockitoJUnitRunner; captures log4j events for verify() calls.
  @Mock
  Appender mockAppender;

  /** Zookeeper mode with only a timeout flag should complete successfully. */
  @Test
  public void testBasicZookeeperCanaryWorks() throws Exception {
    final String[] args = { "-t", "10000", "-zookeeper" };
    testZookeeperCanaryWithArgs(args);
  }

  /** -permittedZookeeperFailures must be accepted alongside -treatFailureAsError. */
  @Test
  public void testZookeeperCanaryPermittedFailuresArgumentWorks() throws Exception {
    final String[] args = { "-t", "10000", "-zookeeper", "-treatFailureAsError", "-permittedZookeeperFailures", "1" };
    testZookeeperCanaryWithArgs(args);
  }

  /**
   * End-to-end smoke test: run the canary in region mode (with write sniffing)
   * against a freshly populated table and expect zero read/write failures and
   * at least one published read timing.
   */
  @Test
  public void testBasicCanaryWorks() throws Exception {
    TableName tableName = TableName.valueOf("testTable");
    HTable table = testingUtility.createTable(tableName, new byte[][] { FAMILY });
    // insert some test rows
    for (int i=0; i<1000; i++) {
      byte[] iBytes = Bytes.toBytes(i);
      Put p = new Put(iBytes);
      p.addColumn(FAMILY, COLUMN, iBytes);
      table.put(p);
    }
    ExecutorService executor = new ScheduledThreadPoolExecutor(1);
    // Spy on the real sink so the test can verify publishReadTiming was hit.
    CanaryTool.RegionStdOutSink sink = spy(new CanaryTool.RegionStdOutSink());
    CanaryTool canary = new CanaryTool(executor, sink);
    String[] args = { "-writeSniffing", "-t", "10000", tableName.getNameAsString() };
    // ToolRunner exit code 0 == canary ran without error.
    assertEquals(0, ToolRunner.run(testingUtility.getConfiguration(), canary, args));
    assertEquals("verify no read error count", 0, canary.getReadFailures().size());
    assertEquals("verify no write error count", 0, canary.getWriteFailures().size());
    verify(sink, atLeastOnce()).publishReadTiming(isA(ServerName.class), isA(HRegionInfo.class), isA(HColumnDescriptor.class), anyLong());
  }

  /**
   * Verifies the per-region task-result bookkeeping exposed by the sink:
   * expected-region counts, success counters, published timings, and that
   * every RegionTaskResult in the region map is fully populated.
   */
  @Test
  public void testCanaryRegionTaskResult() throws Exception {
    TableName tableName = TableName.valueOf("testCanaryRegionTaskResult");
    HTable table = testingUtility.createTable(tableName, new byte[][]{FAMILY});
    // insert some test rows
    for (int i = 0; i < 1000; i++) {
      byte[] iBytes = Bytes.toBytes(i);
      Put p = new Put(iBytes);
      p.addColumn(FAMILY, COLUMN, iBytes);
      table.put(p);
    }
    ExecutorService executor = new ScheduledThreadPoolExecutor(1);
    CanaryTool.RegionStdOutSink sink = spy(new CanaryTool.RegionStdOutSink());
    CanaryTool canary = new CanaryTool(executor, sink);
    String[] args = { "-writeSniffing", "-t", "10000", "testCanaryRegionTaskResult" };
    assertEquals(0, ToolRunner.run(testingUtility.getConfiguration(), canary, args));

    assertTrue("canary should expect to scan at least 1 region",
      sink.getTotalExpectedRegions() > 0);
    assertTrue("there should be no read failures", sink.getReadFailureCount() == 0);
    assertTrue("there should be no write failures", sink.getWriteFailureCount() == 0);
    assertTrue("verify read success count > 0", sink.getReadSuccessCount() > 0);
    assertTrue("verify write success count > 0", sink.getWriteSuccessCount() > 0);
    verify(sink, atLeastOnce()).publishReadTiming(isA(ServerName.class), isA(HRegionInfo.class),
      isA(HColumnDescriptor.class), anyLong());
    verify(sink, atLeastOnce()).publishWriteTiming(isA(ServerName.class), isA(HRegionInfo.class),
      isA(HColumnDescriptor.class), anyLong());

    // Every expected region should have been accounted for as either a read
    // or a write success.
    assertEquals("canary region success count should equal total expected regions",
      sink.getReadSuccessCount() + sink.getWriteSuccessCount(), sink.getTotalExpectedRegions());
    Map<String, List<CanaryTool.RegionTaskResult>> regionMap = sink.getRegionMap();
    assertFalse("verify region map has size > 0", regionMap.isEmpty());

    for (String regionName : regionMap.keySet()) {
      for (CanaryTool.RegionTaskResult res: regionMap.get(regionName)) {
        assertNotNull("verify getRegionNameAsString()", regionName);
        assertNotNull("verify getRegionInfo()", res.getRegionInfo());
        assertNotNull("verify getTableName()", res.getTableName());
        assertNotNull("verify getTableNameAsString()", res.getTableNameAsString());
        assertNotNull("verify getServerName()", res.getServerName());
        assertNotNull("verify getServerNameAsString()", res.getServerNameAsString());
        assertNotNull("verify getColumnFamily()", res.getColumnFamily());
        assertNotNull("verify getColumnFamilyNameAsString()", res.getColumnFamilyNameAsString());

        // Regions of the canary's own write table are exercised by writes;
        // everything else (the user table) is exercised by reads.
        if (regionName.contains(CanaryTool.DEFAULT_WRITE_TABLE_NAME.getNameAsString())) {
          assertTrue("write to region " + regionName + " succeeded", res.isWriteSuccess());
          assertTrue("write took some time", res.getWriteLatency() > -1);
        } else {
          assertTrue("read from region " + regionName + " succeeded", res.isReadSuccess());
          assertTrue("read took some time", res.getReadLatency() > -1);
        }
      }
    }
  }

  /**
   * Exercises -readTableTimeouts with two tables: one with an effectively
   * infinite timeout (Long.MAX_VALUE) and one with a 0 ms timeout that must
   * trigger an "exceeded the configured read timeout" log line.
   */
  @Test
  @Ignore("Intermittent argument matching failures, see HBASE-18813")
  public void testReadTableTimeouts() throws Exception {
    final TableName [] tableNames = new TableName[2];
    tableNames[0] = TableName.valueOf("testReadTableTimeouts1");
    tableNames[1] = TableName.valueOf("testReadTableTimeouts2");
    // Create 2 test tables.
    for (int j = 0; j<2; j++) {
      Table table = testingUtility.createTable(tableNames[j], new byte[][] { FAMILY });
      // insert some test rows
      for (int i=0; i<1000; i++) {
        byte[] iBytes = Bytes.toBytes(i + j);
        Put p = new Put(iBytes);
        p.addColumn(FAMILY, COLUMN, iBytes);
        table.put(p);
      }
    }
    ExecutorService executor = new ScheduledThreadPoolExecutor(1);
    CanaryTool.RegionStdOutSink sink = spy(new CanaryTool.RegionStdOutSink());
    CanaryTool canary = new CanaryTool(executor, sink);
    // Format: table1=<timeout>,table2=<timeout>
    String configuredTimeoutStr = tableNames[0].getNameAsString() + "=" + Long.MAX_VALUE + "," +
      tableNames[1].getNameAsString() + "=0";
    String[] args = { "-readTableTimeouts", configuredTimeoutStr, tableNames[0].getNameAsString(), tableNames[1].getNameAsString()};
    assertEquals(0, ToolRunner.run(testingUtility.getConfiguration(), canary, args));
    verify(sink, times(tableNames.length)).initializeAndGetReadLatencyForTable(isA(String.class));
    for (int i=0; i<2; i++) {
      assertNotEquals("verify non-null read latency", null, sink.getReadLatencyMap().get(tableNames[i].getNameAsString()));
      // NOTE(review): this compares the map value against a boxed 0L; if the
      // map's value type is not Long (e.g. a LongAdder), assertNotEquals is
      // trivially true and the check is vacuous — confirm the value type.
      assertNotEquals("verify non-zero read latency", 0L, sink.getReadLatencyMap().get(tableNames[i].getNameAsString()));
    }
    // One table's timeout is set for 0 ms and thus, should lead to an error.
    verify(mockAppender, times(1)).doAppend(argThat(new ArgumentMatcher<LoggingEvent>() {
      @Override
      public boolean matches(Object argument) {
        return ((LoggingEvent) argument).getRenderedMessage().contains("exceeded the configured read timeout.");
      }
    }));
    // Both tables should log their configured read timeout.
    verify(mockAppender, times(2)).doAppend(argThat(new ArgumentMatcher<LoggingEvent>() {
      @Override
      public boolean matches(Object argument) {
        return ((LoggingEvent) argument).getRenderedMessage().contains("Configured read timeout");
      }
    }));
  }

  /**
   * Exercises -writeTableTimeout with an effectively infinite timeout and
   * verifies the timeout value is logged exactly once.
   */
  @Test
  @Ignore("Intermittent argument matching failures, see HBASE-18813")
  public void testWriteTableTimeout() throws Exception {
    ExecutorService executor = new ScheduledThreadPoolExecutor(1);
    CanaryTool.RegionStdOutSink sink = spy(new CanaryTool.RegionStdOutSink());
    CanaryTool canary = new CanaryTool(executor, sink);
    String[] args = { "-writeSniffing", "-writeTableTimeout", String.valueOf(Long.MAX_VALUE)};
    assertEquals(0, ToolRunner.run(testingUtility.getConfiguration(), canary, args));
    assertNotEquals("verify non-null write latency", null, sink.getWriteLatency());
    // NOTE(review): same caveat as the read-latency check above — if
    // getWriteLatency() does not return a Long, comparing against a boxed 0L
    // can never be equal and the assertion is vacuous; confirm the type.
    assertNotEquals("verify non-zero write latency", 0L, sink.getWriteLatency());
    verify(mockAppender, times(1)).doAppend(argThat(
        new ArgumentMatcher<LoggingEvent>() {
          @Override
          public boolean matches(Object argument) {
            return ((LoggingEvent) argument).getRenderedMessage().contains("Configured write timeout");
          }
        }));
  }

  //no table created, so there should be no regions
  @Test
  public void testRegionserverNoRegions() throws Exception {
    runRegionserverCanary();
    // Expect the "not serving any regions" warning exactly once (default verify count).
    verify(mockAppender).doAppend(argThat(new ArgumentMatcher<LoggingEvent>() {
      @Override
      public boolean matches(Object argument) {
        return ((LoggingEvent) argument).getRenderedMessage().contains("Regionserver not serving any regions");
      }
    }));
  }

  //by creating a table, there shouldn't be any region servers not serving any regions
  @Test
  public void testRegionserverWithRegions() throws Exception {
    TableName tableName = TableName.valueOf("testTable");
    testingUtility.createTable(tableName, new byte[][] { FAMILY });
    runRegionserverCanary();
    // The warning must never fire once at least one region is assigned.
    verify(mockAppender, never()).doAppend(argThat(new ArgumentMatcher<LoggingEvent>() {
      @Override
      public boolean matches(Object argument) {
        return ((LoggingEvent) argument).getRenderedMessage().contains("Regionserver not serving any regions");
      }
    }));
  }

  /**
   * Verifies the canary honors the raw-scan configuration key: with
   * HBASE_CANARY_READ_RAW_SCAN_KEY=true the read path still publishes
   * timings and records no read failures.
   */
  @Test
  public void testRawScanConfig() throws Exception {
    TableName tableName = TableName.valueOf("testTableRawScan");
    Table table = testingUtility.createTable(tableName, new byte[][] { FAMILY });
    // insert some test rows
    for (int i=0; i<1000; i++) {
      byte[] iBytes = Bytes.toBytes(i);
      Put p = new Put(iBytes);
      p.addColumn(FAMILY, COLUMN, iBytes);
      table.put(p);
    }
    ExecutorService executor = new ScheduledThreadPoolExecutor(1);
    CanaryTool.RegionStdOutSink sink = spy(new CanaryTool.RegionStdOutSink());
    CanaryTool canary = new CanaryTool(executor, sink);
    String[] args = { "-t", "10000", "testTableRawScan" };
    // Copy the config so the raw-scan flag doesn't leak into other tests.
    org.apache.hadoop.conf.Configuration conf =
      new org.apache.hadoop.conf.Configuration(testingUtility.getConfiguration());
    conf.setBoolean(HConstants.HBASE_CANARY_READ_RAW_SCAN_KEY, true);
    assertEquals(0, ToolRunner.run(conf, canary, args));
    verify(sink, atLeastOnce())
        .publishReadTiming(isA(ServerName.class), isA(HRegionInfo.class), isA(HColumnDescriptor.class), anyLong());
    assertEquals("verify no read error count", 0, canary.getReadFailures().size());
  }

  /**
   * Shared driver for the regionserver-mode tests: runs the canary in
   * -regionserver mode and asserts a clean exit with no read failures.
   */
  private void runRegionserverCanary() throws Exception {
    ExecutorService executor = new ScheduledThreadPoolExecutor(1);
    CanaryTool canary = new CanaryTool(executor, new CanaryTool.RegionServerStdOutSink());
    String[] args = { "-t", "10000", "-regionserver"};
    assertEquals(0, ToolRunner.run(testingUtility.getConfiguration(), canary, args));
    assertEquals("verify no read error count", 0, canary.getReadFailures().size());
  }

  /**
   * Shared driver for the zookeeper-mode tests: points the quorum config at
   * the mini-cluster's single ZK port, runs the canary with the given args,
   * and verifies a read timing was published for the base znode.
   */
  private void testZookeeperCanaryWithArgs(String[] args) throws Exception {
    // getOnlyElement: the mini ZK cluster is expected to expose exactly one
    // client port (null default if the list is empty).
    Integer port =
      Iterables.getOnlyElement(testingUtility.getZkCluster().getClientPortList(), null);
    testingUtility.getConfiguration().set(HConstants.ZOOKEEPER_QUORUM,
      "localhost:" + port + "/hbase");
    ExecutorService executor = new ScheduledThreadPoolExecutor(2);
    CanaryTool.ZookeeperStdOutSink sink = spy(new CanaryTool.ZookeeperStdOutSink());
    CanaryTool canary = new CanaryTool(executor, sink);
    assertEquals(0, ToolRunner.run(testingUtility.getConfiguration(), canary, args));

    String baseZnode = testingUtility.getConfiguration()
      .get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
    verify(sink, atLeastOnce())
      .publishReadTiming(eq(baseZnode), eq("localhost:" + port), anyLong());
  }
}
317 }