// View Javadoc (source cross-reference rendering artifact; not part of the original file)

1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  
19  package org.apache.hadoop.hbase.regionserver;
20  
21  import static org.junit.Assert.assertEquals;
22  import static org.junit.Assert.assertTrue;
23  import static org.junit.Assert.fail;
24  import static org.mockito.Mockito.mock;
25  import static org.mockito.Mockito.when;
26  
27  import java.io.IOException;
28  import java.util.ArrayList;
29  import java.util.Collection;
30  import java.util.List;
31  
32  import org.apache.hadoop.conf.Configuration;
33  import org.apache.hadoop.fs.FileSystem;
34  import org.apache.hadoop.fs.Path;
35  import org.apache.hadoop.hbase.Cell;
36  import org.apache.hadoop.hbase.CellUtil;
37  import org.apache.hadoop.hbase.HBaseTestingUtility;
38  import org.apache.hadoop.hbase.HColumnDescriptor;
39  import org.apache.hadoop.hbase.HConstants;
40  import org.apache.hadoop.hbase.HRegionInfo;
41  import org.apache.hadoop.hbase.HTableDescriptor;
42  import org.apache.hadoop.hbase.testclassification.SmallTests;
43  import org.apache.hadoop.hbase.Stoppable;
44  import org.apache.hadoop.hbase.TableName;
45  import org.apache.hadoop.hbase.client.Durability;
46  import org.apache.hadoop.hbase.client.Get;
47  import org.apache.hadoop.hbase.client.Put;
48  import org.apache.hadoop.hbase.client.Result;
49  import org.apache.hadoop.hbase.wal.WALFactory;
50  import org.apache.hadoop.hbase.util.Bytes;
51  import org.apache.hadoop.hbase.util.FSUtils;
52  import org.apache.hadoop.hbase.util.StoppableImplementation;
53  import org.junit.Before;
54  import org.junit.Test;
55  import org.junit.experimental.categories.Category;
56  
/**
 * Tests for {@link StorefileRefresherChore}: verifies that a secondary region replica
 * picks up store files flushed by the primary when the chore runs, that a filesystem
 * failure during refresh leaves the replica serving its last-known files, and that a
 * replica marked stale refuses reads entirely.
 */
@Category(SmallTests.class)
public class TestStoreFileRefresherChore {

  private HBaseTestingUtility TEST_UTIL;
  private Path testDir;

  /** Creates a fresh test directory and points the HBase root dir at it. */
  @Before
  public void setUp() throws IOException {
    TEST_UTIL = new HBaseTestingUtility();
    testDir = TEST_UTIL.getDataTestDir("TestStoreFileRefresherChore");
    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), testDir);
  }

  /**
   * Builds a table descriptor containing one column family per entry in
   * {@code families}.
   *
   * @param tableName name of the table to describe
   * @param families  column family names to add
   * @return the populated descriptor
   */
  private HTableDescriptor getTableDesc(TableName tableName, byte[]... families) {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // Keep every version so no cell written by the test is ever pruned.
      hcd.setMaxVersions(Integer.MAX_VALUE);
      htd.addFamily(hcd);
    }
    return htd;
  }

  /**
   * An {@link HRegionFileSystem} whose {@code getStoreFiles} can be switched to throw,
   * simulating a filesystem outage during the refresher chore's store-file listing.
   */
  static class FailingHRegionFileSystem extends HRegionFileSystem {
    // When true, getStoreFiles throws instead of listing; toggled by the test.
    boolean fail = false;
    FailingHRegionFileSystem(Configuration conf, FileSystem fs, Path tableDir, HRegionInfo regionInfo) {
      super(conf, fs, tableDir, regionInfo);
    }

    @Override
    public Collection<StoreFileInfo> getStoreFiles(String familyName) throws IOException {
      if (fail) {
        throw new IOException("simulating FS failure");
      }
      return super.getStoreFiles(familyName);
    }
  }

  /**
   * Creates and initializes a region (primary or replica, per {@code replicaId}) backed
   * by a {@link FailingHRegionFileSystem} and its own WAL instance.
   *
   * @param htd       table descriptor for the region
   * @param startKey  region start key
   * @param stopKey   region end key
   * @param replicaId 0 for the primary, >0 for a read replica; also suffixes the WAL name
   *                  so each region gets a distinct log
   * @return the initialized region
   * @throws IOException if region creation or initialization fails
   */
  private Region initHRegion(HTableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId)
      throws IOException {
    Configuration conf = TEST_UTIL.getConfiguration();
    Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());

    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false, 0, replicaId);

    HRegionFileSystem fs = new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir,
      info);
    final Configuration walConf = new Configuration(conf);
    FSUtils.setRootDir(walConf, tableDir);
    final WALFactory wals = new WALFactory(walConf, null, "log_" + replicaId);
    HRegion region =
        new HRegion(fs, wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()),
            conf, htd, null);

    region.initialize();

    return region;
  }

  /**
   * Writes rows [startRow, startRow + numRows) to the region, one cell per family under
   * qualifier {@code qf}. Skips the WAL: durability is irrelevant here and this keeps
   * the test fast.
   */
  private void putData(Region region, int startRow, int numRows, byte[] qf, byte[]... families)
      throws IOException {
    for (int i = startRow; i < startRow + numRows; i++) {
      Put put = new Put(Bytes.toBytes("" + i));
      put.setDurability(Durability.SKIP_WAL);
      for (byte[] family : families) {
        put.add(family, qf, null);
      }
      region.put(put);
    }
  }

  /**
   * Asserts that {@link #verifyData} FAILS for the given range, i.e. the region does not
   * yet see the data. Catches the AssertionError thrown by the inner verification and
   * fails only if none was thrown.
   */
  private void verifyDataExpectFail(Region newReg, int startRow, int numRows, byte[] qf,
      byte[]... families) throws IOException {
    boolean threw = false;
    try {
      verifyData(newReg, startRow, numRows, qf, families);
    } catch (AssertionError e) {
      threw = true;
    }
    if (!threw) {
      fail("Expected data verification to fail");
    }
  }

  /**
   * Reads rows [startRow, startRow + numRows) back from the region and asserts each Get
   * returns exactly one matching cell per requested family. Relies on families being
   * returned in the same order they were requested.
   */
  private void verifyData(Region newReg, int startRow, int numRows, byte[] qf, byte[]... families)
      throws IOException {
    for (int i = startRow; i < startRow + numRows; i++) {
      byte[] row = Bytes.toBytes("" + i);
      Get get = new Get(row);
      for (byte[] family : families) {
        get.addColumn(family, qf);
      }
      Result result = newReg.get(get);
      Cell[] raw = result.rawCells();
      assertEquals(families.length, result.size());
      for (int j = 0; j < families.length; j++) {
        assertTrue(CellUtil.matchingRow(raw[j], row));
        assertTrue(CellUtil.matchingFamily(raw[j], families[j]));
        assertTrue(CellUtil.matchingQualifier(raw[j], qf));
      }
    }
  }

  /**
   * A {@link StorefileRefresherChore} whose staleness decision is controlled by the test
   * via the {@code isStale} flag instead of elapsed time.
   */
  static class StaleStorefileRefresherChore extends StorefileRefresherChore {
    // Test-controlled staleness; real chore derives this from refresh timestamps.
    boolean isStale = false;
    public StaleStorefileRefresherChore(int period, HRegionServer regionServer,
        Stoppable stoppable) {
      super(period, false, regionServer, stoppable);
    }
    @Override
    protected boolean isRegionStale(String encodedName, long time) {
      return isStale;
    }
  }

  /**
   * End-to-end scenario:
   * <ol>
   *   <li>primary writes+flushes; replica sees nothing until the chore runs;</li>
   *   <li>the replica's filesystem is made to fail, the primary flushes more data, and
   *       the chore must neither throw nor advance the replica past its old files;</li>
   *   <li>the region is flagged stale, after which any read on the replica throws
   *       IOException.</li>
   * </ol>
   */
  @Test
  public void testIsStale() throws IOException {
    int period = 0;
    byte[][] families = new byte[][] {Bytes.toBytes("cf")};
    byte[] qf = Bytes.toBytes("cq");

    // Mock only what the chore touches: the online-regions list and configuration.
    HRegionServer regionServer = mock(HRegionServer.class);
    List<Region> regions = new ArrayList<Region>();
    when(regionServer.getOnlineRegionsLocalContext()).thenReturn(regions);
    when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());

    HTableDescriptor htd = getTableDesc(TableName.valueOf("testIsStale"), families);
    htd.setRegionReplication(2);

    Region primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0);
    Region replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1);
    regions.add(primary);
    regions.add(replica1);

    StaleStorefileRefresherChore chore = new StaleStorefileRefresherChore(period, regionServer, new StoppableImplementation());

    // write some data to primary and flush
    putData(primary, 0, 100, qf, families);
    primary.flush(true);
    verifyData(primary, 0, 100, qf, families);

    // Replica must not see the data before a refresh, and must see it right after.
    verifyDataExpectFail(replica1, 0, 100, qf, families);
    chore.chore();
    verifyData(replica1, 0, 100, qf, families);

    // simulate an fs failure where we cannot refresh the store files for the replica
    ((FailingHRegionFileSystem)((HRegion)replica1).getRegionFileSystem()).fail = true;

    // write some more data to primary and flush
    putData(primary, 100, 100, qf, families);
    primary.flush(true);
    verifyData(primary, 0, 200, qf, families);

    chore.chore(); // should not throw ex, but we cannot refresh the store files

    // Replica still serves only the first batch.
    verifyData(replica1, 0, 100, qf, families);
    verifyDataExpectFail(replica1, 100, 100, qf, families);

    chore.isStale = true;
    chore.chore(); //now after this, we cannot read back any value
    try {
      verifyData(replica1, 0, 100, qf, families);
      fail("should have failed with IOException");
    } catch(IOException ex) {
      // expected
    }
  }
}