View Javadoc

1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  
19  package org.apache.hadoop.hbase.regionserver;
20  
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
50  
51  @Category(LargeTests.class)
52  public class TestScannerRetriableFailure {
53    private static final Log LOG = LogFactory.getLog(TestScannerRetriableFailure.class);
54  
55    private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
56  
57    private static final String FAMILY_NAME_STR = "f";
58    private static final byte[] FAMILY_NAME = Bytes.toBytes(FAMILY_NAME_STR);
59  
60    @Rule public TableNameTestRule testTable = new TableNameTestRule();
61  
62    public static class FaultyScannerObserver extends BaseRegionObserver {
63      private int faults = 0;
64  
65      @Override
66      public boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> e,
67          final InternalScanner s, final List<Result> results,
68          final int limit, final boolean hasMore) throws IOException {
69        final TableName tableName = e.getEnvironment().getRegionInfo().getTable();
70        if (!tableName.isSystemTable() && (faults++ % 2) == 0) {
71          LOG.debug(" Injecting fault in table=" + tableName + " scanner");
72          throw new IOException("injected fault");
73        }
74        return hasMore;
75      }
76    }
77  
78    private static void setupConf(Configuration conf) {
79      conf.setLong("hbase.hstore.compaction.min", 20);
80      conf.setLong("hbase.hstore.compaction.max", 39);
81      conf.setLong("hbase.hstore.blockingStoreFiles", 40);
82  
83      conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, FaultyScannerObserver.class.getName());
84    }
85  
86    @BeforeClass
87    public static void setup() throws Exception {
88      setupConf(UTIL.getConfiguration());
89      UTIL.startMiniCluster(1);
90    }
91  
92    @AfterClass
93    public static void tearDown() throws Exception {
94      try {
95        UTIL.shutdownMiniCluster();
96      } catch (Exception e) {
97        LOG.warn("failure shutting down cluster", e);
98      }
99    }
100 
101   @Test(timeout=180000)
102   public void testFaultyScanner() throws Exception {
103     TableName tableName = testTable.getTableName();
104     Table table = UTIL.createTable(tableName, FAMILY_NAME);
105     try {
106       final int NUM_ROWS = 100;
107       loadTable(table, NUM_ROWS);
108       checkTableRows(table, NUM_ROWS);
109     } finally {
110       table.close();
111     }
112   }
113 
114   // ==========================================================================
115   //  Helpers
116   // ==========================================================================
117   private FileSystem getFileSystem() {
118     return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
119   }
120 
121   private Path getRootDir() {
122     return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
123   }
124 
125   public void loadTable(final Table table, int numRows) throws IOException {
126     for (int i = 0; i < numRows; ++i) {
127       byte[] row = Bytes.toBytes(String.format("%09d", i));
128       Put put = new Put(row);
129       put.setDurability(Durability.SKIP_WAL);
130       put.add(FAMILY_NAME, null, row);
131       table.put(put);
132     }
133   }
134 
135   private void checkTableRows(final Table table, int numRows) throws Exception {
136     Scan scan = new Scan();
137     scan.setCaching(1);
138     scan.setCacheBlocks(false);
139     ResultScanner scanner = table.getScanner(scan);
140     try {
141       int count = 0;
142       for (int i = 0; i < numRows; ++i) {
143         byte[] row = Bytes.toBytes(String.format("%09d", i));
144         Result result = scanner.next();
145         assertTrue(result != null);
146         assertTrue(Bytes.equals(row, result.getRow()));
147         count++;
148       }
149 
150       while (true) {
151         Result result = scanner.next();
152         if (result == null) {
153           break;
154         }
155         count++;
156       }
157       assertEquals(numRows, count);
158     } finally {
159       scanner.close();
160     }
161   }
162 }