View Javadoc

1   /*
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  
19  package org.apache.hadoop.hbase.regionserver;
20  
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;

import com.google.common.collect.ImmutableList;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.FailedArchiveException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
58  
59  /**
60   * Tests that archiving compacted files behaves correctly when encountering exceptions.
61   */
62  @Category(MediumTests.class)
63  public class TestCompactionArchiveIOException {
64    private static final String ERROR_FILE = "fffffffffffffffffdeadbeef";
65  
66    public HBaseTestingUtility testUtil;
67  
68    private Path testDir;
69  
70    @Before
71    public void setup() throws Exception {
72      testUtil = new HBaseTestingUtility();
73      testUtil.startMiniDFSCluster(1);
74      testDir = testUtil.getDataTestDirOnTestFS();
75      FSUtils.setRootDir(testUtil.getConfiguration(), testDir);
76    }
77  
78    @After
79    public void tearDown() throws Exception {
80      testUtil.cleanupTestDir();
81      testUtil.shutdownMiniDFSCluster();
82    }
83  
84    @Test
85    public void testRemoveCompactedFilesWithException() throws Exception {
86      byte[] fam = Bytes.toBytes("f");
87      byte[] col = Bytes.toBytes("c");
88      byte[] val = Bytes.toBytes("val");
89  
90      TableName tableName = TableName.valueOf(getClass().getSimpleName());
91      HTableDescriptor htd = new HTableDescriptor(tableName);
92      htd.addFamily(new HColumnDescriptor(fam));
93      HRegionInfo info = new HRegionInfo(tableName, null, null, false);
94      final HRegion region = initHRegion(htd, info);
95      RegionServerServices rss = mock(RegionServerServices.class);
96      List<Region> regions = new ArrayList<Region>();
97      regions.add(region);
98      when(rss.getOnlineRegions()).thenReturn(regions);
99  
100     // Create the cleaner object
101     final CompactedHFilesDischarger cleaner =
102         new CompactedHFilesDischarger(1000, (Stoppable) null, rss, false);
103     // Add some data to the region and do some flushes
104     int batchSize = 10;
105     int fileCount = 10;
106     for (int f = 0; f < fileCount; f++) {
107       int start = f * batchSize;
108       for (int i = start; i < start + batchSize; i++) {
109         Put p = new Put(Bytes.toBytes("row" + i));
110         p.addColumn(fam, col, val);
111         region.put(p);
112       }
113       // flush them
114       region.flush(true);
115     }
116 
117     HStore store = (HStore) region.getStore(fam);
118     assertEquals(fileCount, store.getStorefilesCount());
119 
120     Collection<StoreFile> storefiles = store.getStorefiles();
121     // None of the files should be in compacted state.
122     for (StoreFile file : storefiles) {
123       assertFalse(file.isCompactedAway());
124     }
125 
126     StoreFileManager fileManager = store.getStoreEngine().getStoreFileManager();
127     Collection<StoreFile> initialCompactedFiles = fileManager.getCompactedfiles();
128     assertTrue(initialCompactedFiles == null || initialCompactedFiles.isEmpty());
129 
130     // Do compaction
131     region.compact(true);
132 
133     // all prior store files should now be compacted
134     Collection<StoreFile> compactedFilesPreClean = fileManager.getCompactedfiles();
135     assertNotNull(compactedFilesPreClean);
136     assertTrue(compactedFilesPreClean.size() > 0);
137 
138     // add the dummy file to the store directory
139     HRegionFileSystem regionFS = region.getRegionFileSystem();
140     Path errFile = regionFS.getStoreFilePath(Bytes.toString(fam), ERROR_FILE);
141     FSDataOutputStream out = regionFS.getFileSystem().create(errFile);
142     out.writeInt(1);
143     out.close();
144 
145     StoreFile errStoreFile = new MockStoreFile(testUtil, errFile, 1, 0, false, 1);
146     fileManager.addCompactionResults(
147         ImmutableList.of(errStoreFile), ImmutableList.<StoreFile>of());
148 
149     // cleanup compacted files
150     cleaner.chore();
151 
152     // make sure the compacted files are cleared
153     Collection<StoreFile> compactedFilesPostClean = fileManager.getCompactedfiles();
154     assertEquals(1, compactedFilesPostClean.size());
155     for (StoreFile origFile : compactedFilesPreClean) {
156       assertFalse(compactedFilesPostClean.contains(origFile));
157     }
158 
159     // close the region
160     try {
161       region.close();
162     } catch (FailedArchiveException e) {
163       // expected due to errorfile
164       assertEquals(1, e.getFailedFiles().size());
165       assertEquals(ERROR_FILE, e.getFailedFiles().iterator().next().getName());
166     }
167   }
168 
169   private HRegion initHRegion(HTableDescriptor htd, HRegionInfo info)
170       throws IOException {
171     Configuration conf = testUtil.getConfiguration();
172     Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());
173     Path regionDir = new Path(tableDir, info.getEncodedName());
174     Path storeDir = new Path(regionDir, htd.getColumnFamilies()[0].getNameAsString());
175 
176 
177     FileSystem errFS = spy(testUtil.getTestFileSystem());
178     // Prior to HBASE-16964, when an exception is thrown archiving any compacted file,
179     // none of the other files are cleared from the compactedfiles list.
180     // Simulate this condition with a dummy file
181     doThrow(new IOException("Error for test"))
182         .when(errFS).rename(eq(new Path(storeDir, ERROR_FILE)), any(Path.class));
183 
184     HRegionFileSystem fs = new HRegionFileSystem(conf, errFS, tableDir, info);
185     final Configuration walConf = new Configuration(conf);
186     FSUtils.setRootDir(walConf, tableDir);
187     final WALFactory wals = new WALFactory(walConf, null, "log_" + info.getEncodedName());
188     HRegion region =
189         new HRegion(fs, wals.getWAL(info.getEncodedNameAsBytes(), info.getTable().getNamespace()),
190             conf, htd, null);
191 
192     region.initialize();
193 
194     return region;
195   }
196 }