
1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase.fs;
19  
20  import java.lang.reflect.Field;
21  import org.apache.hadoop.conf.Configuration;
22  import org.apache.hadoop.fs.FSDataOutputStream;
23  import org.apache.hadoop.fs.FileSystem;
24  import org.apache.hadoop.fs.Path;
25  import org.apache.hadoop.hbase.HBaseTestingUtility;
26  import org.apache.hadoop.hbase.HConstants;
27  import org.apache.hadoop.hbase.MiniHBaseCluster;
28  import org.apache.hadoop.hbase.testclassification.LargeTests;
29  import org.apache.hadoop.hbase.testclassification.MiscTests;
30  import org.apache.hadoop.hbase.wal.DefaultWALProvider;
31  import org.apache.hadoop.hdfs.DFSClient;
32  import org.apache.hadoop.hdfs.DistributedFileSystem;
33  import org.apache.hadoop.hdfs.MiniDFSCluster;
34  import org.apache.hadoop.hdfs.protocol.ClientProtocol;
35  import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
36  import org.junit.After;
37  import org.junit.Assert;
38  import org.junit.Before;
39  import org.junit.Rule;
40  import org.junit.Test;
41  import org.junit.experimental.categories.Category;
42  import org.junit.rules.TestName;
43  
44  /**
45   * Tests for the hdfs fix from HBASE-6435.
46   *
47   * Please don't add new subtest which involves starting / stopping MiniDFSCluster in this class.
48   * When stopping MiniDFSCluster, shutdown hooks would be cleared in hadoop's ShutdownHookManager
49   *   in hadoop 3.
50   * This leads to 'Failed suppression of fs shutdown hook' error in region server.
51   */
52  @Category({MiscTests.class, LargeTests.class})
53  public class TestBlockReorderBlockLocation {
54  
55    private Configuration conf;
56    private MiniDFSCluster cluster;
57    private HBaseTestingUtility htu;
58    private DistributedFileSystem dfs;
59    private static final String host1 = "host1";
60    private static final String host2 = "host2";
61    private static final String host3 = "host3";
62  
63    @Rule
64    public TestName name = new TestName();
65  
66    @Before
67    public void setUp() throws Exception {
68      htu = new HBaseTestingUtility();
69      htu.getConfiguration().setInt("dfs.blocksize", 1024);// For the test with multiple blocks
70      htu.getConfiguration().setInt("dfs.replication", 3);
71      htu.startMiniDFSCluster(3,
72          new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});
73  
74      conf = htu.getConfiguration();
75      cluster = htu.getDFSCluster();
76      dfs = (DistributedFileSystem) FileSystem.get(conf);
77    }
78  
79    @After
80    public void tearDownAfterClass() throws Exception {
81      htu.shutdownMiniCluster();
82    }
83  
84  
85    private static ClientProtocol getNamenode(DFSClient dfsc) throws Exception {
86      Field nf = DFSClient.class.getDeclaredField("namenode");
87      nf.setAccessible(true);
88      return (ClientProtocol) nf.get(dfsc);
89    }
90  
91    /**
92     * Test that the reorder algo works as we expect.
93     */
94    @Test
95    public void testBlockLocation() throws Exception {
96      // We need to start HBase to get  HConstants.HBASE_DIR set in conf
97      htu.startMiniZKCluster();
98      MiniHBaseCluster hbm = htu.startMiniHBaseCluster(1, 1);
99      conf = hbm.getConfiguration();
100 
101 
102     // The "/" is mandatory, without it we've got a null pointer exception on the namenode
103     final String fileName = "/helloWorld";
104     Path p = new Path(fileName);
105 
106     final int repCount = 3;
107     Assert.assertTrue((short) cluster.getDataNodes().size() >= repCount);
108 
109     // Let's write the file
110     FSDataOutputStream fop = dfs.create(p, (short) repCount);
111     final double toWrite = 875.5613;
112     fop.writeDouble(toWrite);
113     fop.close();
114 
115     for (int i=0; i<10; i++){
116       // The interceptor is not set in this test, so we get the raw list at this point
117       LocatedBlocks l;
118       final long max = System.currentTimeMillis() + 10000;
119       do {
120         l = getNamenode(dfs.getClient()).getBlockLocations(fileName, 0, 1);
121         Assert.assertNotNull(l.getLocatedBlocks());
122         Assert.assertEquals(1, l.getLocatedBlocks().size());
123         Assert.assertTrue("Expecting " + repCount + " , got " + l.get(0).getLocations().length,
124             System.currentTimeMillis() < max);
125       } while (l.get(0).getLocations().length != repCount);
126 
127       // Should be filtered, the name is different => The order won't change
128       Object[] originalList = l.getLocatedBlocks().toArray();
129       HFileSystem.ReorderWALBlocks lrb = new HFileSystem.ReorderWALBlocks();
130       lrb.reorderBlocks(conf, l, fileName);
131       Assert.assertArrayEquals(originalList, l.getLocatedBlocks().toArray());
132 
133       // Should be reordered, as we pretend to be a file name with a compliant stuff
134       Assert.assertNotNull(conf.get(HConstants.HBASE_DIR));
135       Assert.assertFalse(conf.get(HConstants.HBASE_DIR).isEmpty());
136       String pseudoLogFile = conf.get(HConstants.HBASE_DIR) + "/" +
137           HConstants.HREGION_LOGDIR_NAME + "/" + host1 + ",6977,6576" + "/mylogfile";
138 
139       // Check that it will be possible to extract a ServerName from our construction
140       Assert.assertNotNull("log= " + pseudoLogFile,
141         DefaultWALProvider.getServerNameFromWALDirectoryName(dfs.getConf(), pseudoLogFile));
142 
143       // And check we're doing the right reorder.
144       lrb.reorderBlocks(conf, l, pseudoLogFile);
145       Assert.assertEquals(host1, l.get(0).getLocations()[2].getHostName());
146 
147       // Check again, it should remain the same.
148       lrb.reorderBlocks(conf, l, pseudoLogFile);
149       Assert.assertEquals(host1, l.get(0).getLocations()[2].getHostName());
150     }
151   }
152 }