/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.mapreduce;

import static java.lang.String.format;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Validate ImportTsv + LoadIncrementalHFiles on a distributed cluster.
 */
@Category(IntegrationTests.class)
public class IntegrationTestImportTsv implements Configurable, Tool {

  private static final String NAME = IntegrationTestImportTsv.class.getSimpleName();
  private static final Log LOG = LogFactory.getLog(IntegrationTestImportTsv.class);

  protected static final String simple_tsv =
      "row1\t1\tc1\tc2\n" +
      "row2\t1\tc1\tc2\n" +
      "row3\t1\tc1\tc2\n" +
      "row4\t1\tc1\tc2\n" +
      "row5\t1\tc1\tc2\n" +
      "row6\t1\tc1\tc2\n" +
      "row7\t1\tc1\tc2\n" +
      "row8\t1\tc1\tc2\n" +
      "row9\t1\tc1\tc2\n" +
      "row10\t1\tc1\tc2\n";

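  // Expected cells for simple_tsv: each input row yields two Puts (columns c1
  // and c2) under column family "d", using the row's second field as the timestamp.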
  protected static final Set<KeyValue> simple_expected =
      new TreeSet<KeyValue>(KeyValue.COMPARATOR) {
    private static final long serialVersionUID = 1L;
    {
      byte[] family = Bytes.toBytes("d");
      for (String line : simple_tsv.split("\n")) {
        String[] row = line.split("\t");
        byte[] key = Bytes.toBytes(row[0]);
        long ts = Long.parseLong(row[1]);
        byte[][] fields = { Bytes.toBytes(row[2]), Bytes.toBytes(row[3]) };
        add(new KeyValue(key, family, fields[0], ts, Type.Put, fields[0]));
        add(new KeyValue(key, family, fields[1], ts, Type.Put, fields[1]));
      }
    }
  };

  // this instance is initialized on first access when the test is run from
  // JUnit/Maven or by main when run from the CLI.
  protected static IntegrationTestingUtility util = null;

  public Configuration getConf() {
    return util.getConfiguration();
  }

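  /** The configuration is owned by {@code util}; replacing it is not supported. */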
  public void setConf(Configuration conf) {
    throw new IllegalArgumentException("setConf not supported");
  }

  @BeforeClass
  public static void provisionCluster() throws Exception {
    if (null == util) {
      util = new IntegrationTestingUtility();
    }
    util.initializeCluster(1);
    if (!util.isDistributedCluster()) {
      // also need MR when running without a real cluster
      util.startMiniMapReduceCluster();
    }
  }

  @AfterClass
  public static void releaseCluster() throws Exception {
    util.restoreCluster();
    if (!util.isDistributedCluster()) {
      util.shutdownMiniMapReduceCluster();
    }
    util = null;
  }

  /**
   * Load the generated HFiles into <code>tableName</code> and verify that the
   * table contents match <code>simple_expected</code>.
   */
  protected void doLoadIncrementalHFiles(Path hfiles, TableName tableName)
      throws Exception {

    String[] args = { hfiles.toString(), tableName.getNameAsString() };
    LOG.info(format("Running LoadIncrementalHFiles with args: %s", Arrays.asList(args)));
    assertEquals("Loading HFiles failed.",
      0, ToolRunner.run(new LoadIncrementalHFiles(new Configuration(getConf())), args));

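    // Scan the freshly loaded table and compare each returned cell, in order,
    // against the expected KeyValues built from simple_tsv.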
    Table table = null;
    Scan scan = new Scan() {{
      setCacheBlocks(false);
      setCaching(1000);
    }};
    try {
      table = new HTable(getConf(), tableName);
      Iterator<Result> resultsIt = table.getScanner(scan).iterator();
      Iterator<KeyValue> expectedIt = simple_expected.iterator();
      while (resultsIt.hasNext() && expectedIt.hasNext()) {
        Result r = resultsIt.next();
        for (Cell actual : r.rawCells()) {
          assertTrue(
            "Ran out of expected values prematurely!",
            expectedIt.hasNext());
          KeyValue expected = expectedIt.next();
          assertTrue(
            format("Scan produced surprising result. expected: <%s>, actual: %s",
              expected, actual),
            KeyValue.COMPARATOR.compare(expected, actual) == 0);
        }
      }
      assertFalse("Did not consume all expected values.", expectedIt.hasNext());
      assertFalse("Did not consume all scan results.", resultsIt.hasNext());
    } finally {
      if (null != table) table.close();
    }
  }

  /**
   * Confirm the absence of the {@link TotalOrderPartitioner} partitions file.
   */
  protected static void validateDeletedPartitionsFile(Configuration conf) throws IOException {
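    // The cleanup check only applies when running against a distributed cluster.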
    if (!conf.getBoolean(IntegrationTestingUtility.IS_DISTRIBUTED_CLUSTER, false))
      return;

    FileSystem fs = FileSystem.get(conf);
    Path partitionsFile = new Path(TotalOrderPartitioner.getPartitionFile(conf));
    assertFalse("Failed to clean up partitions file.", fs.exists(partitionsFile));
  }

  @Test
  public void testGenerateAndLoad() throws Exception {
    LOG.info("Running test testGenerateAndLoad.");
    TableName table = TableName.valueOf(NAME + "-" + UUID.randomUUID());
    String cf = "d";
    Path hfiles = new Path(
        util.getDataTestDirOnTestFS(table.getNameAsString()), "hfiles");

    Map<String, String> args = new HashMap<String, String>();
    args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString());
    args.put(ImportTsv.COLUMNS_CONF_KEY,
        format("HBASE_ROW_KEY,HBASE_TS_KEY,%s:c1,%s:c2", cf, cf));
    // configure the test harness to NOT delete the HFiles after they're
    // generated. We need those for doLoadIncrementalHFiles
    args.put(TestImportTsv.DELETE_AFTER_LOAD_CONF, "false");

    // run the job, complete the load.
    util.createTable(table, new String[]{cf});
    Tool t = TestImportTsv.doMROnTableTest(util, table.getNameAsString(), cf, simple_tsv, args);
    doLoadIncrementalHFiles(hfiles, table);

    // validate post-conditions
    validateDeletedPartitionsFile(t.getConf());

    // clean up after ourselves.
    util.deleteTable(table);
    util.cleanupDataTestDirOnTestFS(table.getNameAsString());
    LOG.info("testGenerateAndLoad completed successfully.");
  }

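  /**
   * Tool entry point; expects no arguments beyond the Hadoop generic options.
   */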
  public int run(String[] args) throws Exception {
    if (args.length != 0) {
      System.err.println(format("%s [genericOptions]", NAME));
      System.err.println("  Runs ImportTsv integration tests against a distributed cluster.");
      System.err.println();
      GenericOptionsParser.printGenericCommandUsage(System.err);
      return 1;
    }

    // adding more test methods? Don't forget to add them here... or consider doing what
    // IntegrationTestsDriver does.
    provisionCluster();
    testGenerateAndLoad();
    releaseCluster();

    return 0;
  }

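  /**
   * CLI entry point; runs the test against a distributed cluster, as selected via
   * {@link IntegrationTestingUtility#setUseDistributedCluster(Configuration)}.
   */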
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    IntegrationTestingUtility.setUseDistributedCluster(conf);
    util = new IntegrationTestingUtility(conf);
    // not using ToolRunner to avoid unnecessary call to setConf()
    args = new GenericOptionsParser(conf, args).getRemainingArgs();
    int status = new IntegrationTestImportTsv().run(args);
    System.exit(status);
  }
}