1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17 package org.apache.hadoop.hbase.util;
18
19 import java.io.IOException;
20 import java.io.InterruptedIOException;
21 import java.lang.reflect.Constructor;
22 import java.net.InetAddress;
23 import java.security.SecureRandom;
24 import java.util.ArrayList;
25 import java.util.Arrays;
26 import java.util.List;
27 import java.util.Locale;
28 import java.util.Properties;
29 import java.util.Random;
30 import java.util.concurrent.atomic.AtomicReference;
31
32 import javax.crypto.spec.SecretKeySpec;
33
34 import org.apache.commons.cli.CommandLine;
35 import org.apache.commons.logging.Log;
36 import org.apache.commons.logging.LogFactory;
37 import org.apache.hadoop.conf.Configuration;
38 import org.apache.hadoop.hbase.HBaseConfiguration;
39 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
40 import org.apache.hadoop.hbase.HBaseTestingUtility;
41 import org.apache.hadoop.hbase.HColumnDescriptor;
42 import org.apache.hadoop.hbase.HConstants;
43 import org.apache.hadoop.hbase.HTableDescriptor;
44 import org.apache.hadoop.hbase.TableName;
45 import org.apache.hadoop.hbase.classification.InterfaceAudience;
46 import org.apache.log4j.Level;
47 import org.apache.log4j.LogManager;
48 import org.apache.zookeeper.ZooKeeper;
49 import org.slf4j.Logger;
50 import org.slf4j.LoggerFactory;
51 import org.apache.hadoop.hbase.client.Admin;
52 import org.apache.hadoop.hbase.client.ConnectionFactory;
53 import org.apache.hadoop.hbase.client.Durability;
54 import org.apache.hadoop.hbase.client.HBaseAdmin;
55 import org.apache.hadoop.hbase.io.compress.Compression;
56 import org.apache.hadoop.hbase.io.crypto.Cipher;
57 import org.apache.hadoop.hbase.io.crypto.Encryption;
58 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
59 import org.apache.hadoop.hbase.regionserver.BloomType;
60 import org.apache.hadoop.hbase.security.EncryptionUtil;
61 import org.apache.hadoop.hbase.security.User;
62 import org.apache.hadoop.hbase.security.access.AccessControlClient;
63 import org.apache.hadoop.hbase.security.access.Permission;
64 import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator;
65 import org.apache.hadoop.hbase.util.test.LoadTestDataGeneratorWithACL;
66 import org.apache.hadoop.security.SecurityUtil;
67 import org.apache.hadoop.security.UserGroupInformation;
68 import org.apache.hadoop.util.ToolRunner;
69
70
71
72
73
74
/**
 * A command-line tool that loads an HBase table with generated data and can
 * concurrently write, update, and read it with multiple threads, checking
 * read results for errors.
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
public class LoadTestTool extends AbstractHBaseTool {

  private static final Log LOG = LogFactory.getLog(LoadTestTool.class);
  // Separator for option sub-values (e.g. -write 5:100:20) and generator args.
  private static final String COLON = ":";

  // Table under test.
  private TableName tableName;

  // Column families to load; defaults to DEFAULT_COLUMN_FAMILIES.
  private byte[][] families;

  // Table name used when -tn is not given.
  protected static final String DEFAULT_TABLE_NAME = "cluster_test";

  // Column family used when -families is not given.
  public static byte[] DEFAULT_COLUMN_FAMILY = Bytes.toBytes("test_cf");

  public static final byte[][] DEFAULT_COLUMN_FAMILIES = { DEFAULT_COLUMN_FAMILY };

  // Default average column data size in bytes.
  protected static final int DEFAULT_DATA_SIZE = 64;

  // Default thread count for writers, updaters, and readers.
  protected static final int DEFAULT_NUM_THREADS = 20;

  // Usage string for the -write option.
  protected static final String OPT_USAGE_LOAD =
      "<avg_cols_per_key>:<avg_data_size>" +
      "[:<#threads=" + DEFAULT_NUM_THREADS + ">]";

  // Usage string for the -read option.
  protected static final String OPT_USAGE_READ =
      "<verify_percent>[:<#threads=" + DEFAULT_NUM_THREADS + ">]";

  // Usage string for the -update option.
  protected static final String OPT_USAGE_UPDATE =
      "<update_percent>[:<#threads=" + DEFAULT_NUM_THREADS
      + ">][:<#whether to ignore nonce collisions=0>]";

  protected static final String OPT_USAGE_BLOOM = "Bloom filter type, one of " +
      Arrays.toString(BloomType.values());

  protected static final String OPT_USAGE_COMPRESSION = "Compression type, " +
      "one of " + Arrays.toString(Compression.Algorithm.values());

  public static final String OPT_DATA_BLOCK_ENCODING_USAGE =
    "Encoding algorithm (e.g. prefix "
        + "compression) to use for data blocks in the test column family, "
        + "one of " + Arrays.toString(DataBlockEncoding.values()) + ".";

  // Command-line option names.
  protected static final String OPT_VERBOSE = "verbose";

  public static final String OPT_BLOOM = "bloom";
  public static final String OPT_COMPRESSION = "compression";
  public static final String OPT_DEFERRED_LOG_FLUSH = "deferredlogflush";
  public static final String OPT_DEFERRED_LOG_FLUSH_USAGE = "Enable deferred log flush.";

  public static final String OPT_DATA_BLOCK_ENCODING =
      HColumnDescriptor.DATA_BLOCK_ENCODING.toLowerCase(Locale.ROOT);

  public static final String OPT_INMEMORY = "in_memory";
  public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " +
      "inmemory as far as possible. Not guaranteed that reads are always served from inmemory";

  public static final String OPT_GENERATOR = "generator";
  public static final String OPT_GENERATOR_USAGE = "The class which generates load for the tool."
      + " Any args for this class can be passed as colon separated after class name";

  public static final String OPT_WRITER = "writer";
  public static final String OPT_WRITER_USAGE = "The class for executing the write requests";

  public static final String OPT_UPDATER = "updater";
  public static final String OPT_UPDATER_USAGE = "The class for executing the update requests";

  public static final String OPT_READER = "reader";
  public static final String OPT_READER_USAGE = "The class for executing the read requests";

  protected static final String OPT_KEY_WINDOW = "key_window";
  protected static final String OPT_WRITE = "write";
  protected static final String OPT_MAX_READ_ERRORS = "max_read_errors";
  public static final String OPT_MULTIPUT = "multiput";
  public static final String OPT_MULTIGET = "multiget_batchsize";
  protected static final String OPT_NUM_KEYS = "num_keys";
  protected static final String OPT_READ = "read";
  protected static final String OPT_START_KEY = "start_key";
  public static final String OPT_TABLE_NAME = "tn";
  public static final String OPT_COLUMN_FAMILIES = "families";
  protected static final String OPT_ZK_QUORUM = "zk";
  protected static final String OPT_ZK_PARENT_NODE = "zk_root";
  protected static final String OPT_SKIP_INIT = "skip_init";
  protected static final String OPT_INIT_ONLY = "init_only";
  protected static final String NUM_TABLES = "num_tables";
  protected static final String OPT_BATCHUPDATE = "batchupdate";
  protected static final String OPT_UPDATE = "update";

  public static final String OPT_ENCRYPTION = "encryption";
  protected static final String OPT_ENCRYPTION_USAGE =
    "Enables transparent encryption on the test table, one of " +
    Arrays.toString(Encryption.getSupportedCiphers());

  public static final String OPT_NUM_REGIONS_PER_SERVER = "num_regions_per_server";
  protected static final String OPT_NUM_REGIONS_PER_SERVER_USAGE
    = "Desired number of regions per region server. Defaults to 5.";
  public static int DEFAULT_NUM_REGIONS_PER_SERVER = 5;

  public static final String OPT_REGION_REPLICATION = "region_replication";
  protected static final String OPT_REGION_REPLICATION_USAGE =
      "Desired number of replicas per region";

  public static final String OPT_REGION_REPLICA_ID = "region_replica_id";
  protected static final String OPT_REGION_REPLICA_ID_USAGE =
      "Region replica id to do the reads from";

  protected static final long DEFAULT_START_KEY = 0;

  // The parsed command line, retained for option look-ups after parsing.
  protected CommandLine cmd;

  // Worker-thread pools for each workload type; null when that workload is off.
  protected MultiThreadedWriter writerThreads = null;
  protected MultiThreadedReader readerThreads = null;
  protected MultiThreadedUpdater updaterThreads = null;

  // Key range [startKey, endKey) to operate on.
  protected long startKey, endKey;

  // Which workloads were requested on the command line.
  protected boolean isVerbose, isWrite, isRead, isUpdate;
  protected boolean deferredLogFlush;

  // Column family options parsed from the command line.
  protected DataBlockEncoding dataBlockEncodingAlgo;
  protected Compression.Algorithm compressAlgo;
  protected BloomType bloomType;
  private boolean inMemoryCF;

  // Owning user when running with ACLs; null otherwise.
  private User userOwner;

  // Writer options.
  protected int numWriterThreads = DEFAULT_NUM_THREADS;
  protected int minColsPerKey, maxColsPerKey;
  protected int minColDataSize = DEFAULT_DATA_SIZE, maxColDataSize = DEFAULT_DATA_SIZE;
  protected boolean isMultiPut;

  // Updater options.
  protected int numUpdaterThreads = DEFAULT_NUM_THREADS;
  protected int updatePercent;
  protected boolean ignoreConflicts = false;
  protected boolean isBatchUpdate;

  // Reader options.
  private int numReaderThreads = DEFAULT_NUM_THREADS;
  private int keyWindow = MultiThreadedReader.DEFAULT_KEY_WINDOW;
  private int multiGetBatchSize = MultiThreadedReader.DEFAULT_BATCH_SIZE;
  private int maxReadErrors = MultiThreadedReader.DEFAULT_MAX_ERRORS;
  private int verifyPercent;

  // Number of tables to load in parallel (see NUM_TABLES).
  private int numTables = 1;

  // ACL-related settings parsed from the -generator arguments.
  private String superUser;

  private String userNames;

  // Property file with keytab/principal settings for secure runs.
  private String authnFileName;

  private int numRegionsPerServer = DEFAULT_NUM_REGIONS_PER_SERVER;
  private int regionReplication = -1; // -1 means "not specified"
  private int regionReplicaId = -1; // -1 means "not specified"

  // Table initialization flags.
  protected boolean isSkipInit = false;
  protected boolean isInitOnly = false;

  // Encryption cipher for the test table; null when -encryption is not given.
  protected Cipher cipher = null;
248
249 protected String[] splitColonSeparated(String option,
250 int minNumCols, int maxNumCols) {
251 String optVal = cmd.getOptionValue(option);
252 String[] cols = optVal.split(COLON);
253 if (cols.length < minNumCols || cols.length > maxNumCols) {
254 throw new IllegalArgumentException("Expected at least "
255 + minNumCols + " columns but no more than " + maxNumCols +
256 " in the colon-separated value '" + optVal + "' of the " +
257 "-" + option + " option");
258 }
259 return cols;
260 }
261
  /** Parses a thread-count option value, constrained to [1, Short.MAX_VALUE]. */
  protected int getNumThreads(String numThreadsStr) {
    return parseInt(numThreadsStr, 1, Short.MAX_VALUE);
  }
265
  /** Returns the column families the tool operates on. */
  public byte[][] getColumnFamilies() {
    return families;
  }
269
270
271
272
273
274 protected void applyColumnFamilyOptions(TableName tableName,
275 byte[][] columnFamilies) throws IOException {
276 Admin admin = new HBaseAdmin(conf);
277 HTableDescriptor tableDesc = admin.getTableDescriptor(tableName);
278 LOG.info("Disabling table " + tableName);
279 admin.disableTable(tableName);
280 for (byte[] cf : columnFamilies) {
281 HColumnDescriptor columnDesc = tableDesc.getFamily(cf);
282 boolean isNewCf = columnDesc == null;
283 if (isNewCf) {
284 columnDesc = new HColumnDescriptor(cf);
285 }
286 if (bloomType != null) {
287 columnDesc.setBloomFilterType(bloomType);
288 }
289 if (compressAlgo != null) {
290 columnDesc.setCompressionType(compressAlgo);
291 }
292 if (dataBlockEncodingAlgo != null) {
293 columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo);
294 }
295 if (inMemoryCF) {
296 columnDesc.setInMemory(inMemoryCF);
297 }
298 if (cipher != null) {
299 byte[] keyBytes = new byte[cipher.getKeyLength()];
300 new SecureRandom().nextBytes(keyBytes);
301 columnDesc.setEncryptionType(cipher.getName());
302 columnDesc.setEncryptionKey(EncryptionUtil.wrapKey(conf,
303 User.getCurrent().getShortName(),
304 new SecretKeySpec(keyBytes, cipher.getName())));
305 }
306 if (isNewCf) {
307 admin.addColumn(tableName, columnDesc);
308 } else {
309 admin.modifyColumn(tableName, columnDesc);
310 }
311 }
312 LOG.info("Enabling table " + tableName);
313 admin.enableTable(tableName);
314 admin.close();
315 }
316
  /** Registers all supported command-line options with their usage strings. */
  @Override
  protected void addOptions() {
    addOptNoArg("v", OPT_VERBOSE, "Will display a full readout of logs, including ZooKeeper");
    addOptWithArg(OPT_ZK_QUORUM, "ZK quorum as comma-separated host names " +
        "without port numbers");
    addOptWithArg(OPT_ZK_PARENT_NODE, "name of parent znode in zookeeper");
    addOptWithArg(OPT_TABLE_NAME, "The name of the table to read or write");
    addOptWithArg(OPT_COLUMN_FAMILIES, "The name of the column families to use separated by comma");
    addOptWithArg(OPT_WRITE, OPT_USAGE_LOAD);
    addOptWithArg(OPT_READ, OPT_USAGE_READ);
    addOptWithArg(OPT_UPDATE, OPT_USAGE_UPDATE);
    addOptNoArg(OPT_INIT_ONLY, "Initialize the test table only, don't do any loading");
    addOptWithArg(OPT_BLOOM, OPT_USAGE_BLOOM);
    addOptWithArg(OPT_COMPRESSION, OPT_USAGE_COMPRESSION);
    addOptWithArg(OPT_DATA_BLOCK_ENCODING, OPT_DATA_BLOCK_ENCODING_USAGE);
    addOptWithArg(OPT_MAX_READ_ERRORS, "The maximum number of read errors " +
        "to tolerate before terminating all reader threads. The default is " +
        MultiThreadedReader.DEFAULT_MAX_ERRORS + ".");
    addOptWithArg(OPT_MULTIGET, "Whether to use multi-gets as opposed to " +
        "separate gets for every column in a row");
    addOptWithArg(OPT_KEY_WINDOW, "The 'key window' to maintain between " +
        "reads and writes for concurrent write/read workload. The default " +
        "is " + MultiThreadedReader.DEFAULT_KEY_WINDOW + ".");

    addOptNoArg(OPT_MULTIPUT, "Whether to use multi-puts as opposed to " +
        "separate puts for every column in a row");
    addOptNoArg(OPT_BATCHUPDATE, "Whether to use batch as opposed to " +
        "separate updates for every column in a row");
    addOptNoArg(OPT_INMEMORY, OPT_USAGE_IN_MEMORY);
    addOptWithArg(OPT_GENERATOR, OPT_GENERATOR_USAGE);
    addOptWithArg(OPT_WRITER, OPT_WRITER_USAGE);
    addOptWithArg(OPT_UPDATER, OPT_UPDATER_USAGE);
    addOptWithArg(OPT_READER, OPT_READER_USAGE);

    addOptWithArg(OPT_NUM_KEYS, "The number of keys to read/write");
    addOptWithArg(OPT_START_KEY, "The first key to read/write " +
        "(a 0-based index). The default value is " +
        DEFAULT_START_KEY + ".");
    addOptNoArg(OPT_SKIP_INIT, "Skip the initialization; assume test table "
        + "already exists");

    addOptWithArg(NUM_TABLES,
      "A positive integer number. When a number n is speicfied, load test "
          + "tool will load n table parallely. -tn parameter value becomes "
          + "table name prefix. Each table name is in format <tn>_1...<tn>_n");

    addOptWithArg(OPT_ENCRYPTION, OPT_ENCRYPTION_USAGE);
    addOptNoArg(OPT_DEFERRED_LOG_FLUSH, OPT_DEFERRED_LOG_FLUSH_USAGE);
    addOptWithArg(OPT_NUM_REGIONS_PER_SERVER, OPT_NUM_REGIONS_PER_SERVER_USAGE);
    addOptWithArg(OPT_REGION_REPLICATION, OPT_REGION_REPLICATION_USAGE);
    addOptWithArg(OPT_REGION_REPLICA_ID, OPT_REGION_REPLICA_ID_USAGE);
  }
369
  /**
   * Parses all command-line options into fields. Validates that at least one
   * of -write/-update/-read/-init_only is present and that -init_only is not
   * combined with a workload option.
   */
  @Override
  protected void processOptions(CommandLine cmd) {
    this.cmd = cmd;

    tableName = TableName.valueOf(cmd.getOptionValue(OPT_TABLE_NAME,
        DEFAULT_TABLE_NAME));

    // Column families: comma-separated list, or the default single family.
    if (cmd.hasOption(OPT_COLUMN_FAMILIES)) {
      String[] list = cmd.getOptionValue(OPT_COLUMN_FAMILIES).split(",");
      families = new byte[list.length][];
      for (int i = 0; i < list.length; i++) {
        families[i] = Bytes.toBytes(list[i]);
      }
    } else {
      families = DEFAULT_COLUMN_FAMILIES;
    }

    isVerbose = cmd.hasOption(OPT_VERBOSE);
    isWrite = cmd.hasOption(OPT_WRITE);
    isRead = cmd.hasOption(OPT_READ);
    isUpdate = cmd.hasOption(OPT_UPDATE);
    isInitOnly = cmd.hasOption(OPT_INIT_ONLY);
    deferredLogFlush = cmd.hasOption(OPT_DEFERRED_LOG_FLUSH);

    // At least one mode (including init-only) must be requested.
    if (!isWrite && !isRead && !isUpdate && !isInitOnly) {
      throw new IllegalArgumentException("Either -" + OPT_WRITE + " or " +
          "-" + OPT_UPDATE + " or -" + OPT_READ + " has to be specified");
    }

    if (isInitOnly && (isRead || isWrite || isUpdate)) {
      throw new IllegalArgumentException(OPT_INIT_ONLY + " cannot be specified with"
          + " either -" + OPT_WRITE + " or -" + OPT_UPDATE + " or -" + OPT_READ);
    }

    if (!isInitOnly) {
      if (!cmd.hasOption(OPT_NUM_KEYS)) {
        throw new IllegalArgumentException(OPT_NUM_KEYS + " must be specified in "
            + "read or write mode");
      }
      startKey = parseLong(cmd.getOptionValue(OPT_START_KEY,
          String.valueOf(DEFAULT_START_KEY)), 0, Long.MAX_VALUE);
      // Cap numKeys so that startKey + numKeys cannot overflow a long.
      long numKeys = parseLong(cmd.getOptionValue(OPT_NUM_KEYS), 1,
          Long.MAX_VALUE - startKey);
      endKey = startKey + numKeys; // exclusive upper bound
      isSkipInit = cmd.hasOption(OPT_SKIP_INIT);
      System.out.println("Key range: [" + startKey + ".." + (endKey - 1) + "]");
    }

    parseColumnFamilyOptions(cmd);

    if (isWrite) {
      // -write value: <avg_cols_per_key>:<avg_data_size>[:<#threads>]
      String[] writeOpts = splitColonSeparated(OPT_WRITE, 2, 3);

      int colIndex = 0;
      minColsPerKey = 1;
      // The given value is an average; actual count ranges over [1, 2*avg].
      maxColsPerKey = 2 * Integer.parseInt(writeOpts[colIndex++]);
      int avgColDataSize =
          parseInt(writeOpts[colIndex++], 1, Integer.MAX_VALUE);
      // Data size varies over [avg/2, avg*3/2].
      minColDataSize = avgColDataSize / 2;
      maxColDataSize = avgColDataSize * 3 / 2;

      if (colIndex < writeOpts.length) {
        numWriterThreads = getNumThreads(writeOpts[colIndex++]);
      }

      isMultiPut = cmd.hasOption(OPT_MULTIPUT);

      System.out.println("Multi-puts: " + isMultiPut);
      System.out.println("Columns per key: " + minColsPerKey + ".."
          + maxColsPerKey);
      System.out.println("Data size per column: " + minColDataSize + ".."
          + maxColDataSize);
    }

    if (isUpdate) {
      // -update value: <update_percent>[:<#threads>[:<ignore_nonce_conflicts>]]
      String[] mutateOpts = splitColonSeparated(OPT_UPDATE, 1, 3);
      int colIndex = 0;
      updatePercent = parseInt(mutateOpts[colIndex++], 0, 100);
      if (colIndex < mutateOpts.length) {
        numUpdaterThreads = getNumThreads(mutateOpts[colIndex++]);
      }
      if (colIndex < mutateOpts.length) {
        ignoreConflicts = parseInt(mutateOpts[colIndex++], 0, 1) == 1;
      }

      isBatchUpdate = cmd.hasOption(OPT_BATCHUPDATE);

      System.out.println("Batch updates: " + isBatchUpdate);
      System.out.println("Percent of keys to update: " + updatePercent);
      System.out.println("Updater threads: " + numUpdaterThreads);
      System.out.println("Ignore nonce conflicts: " + ignoreConflicts);
    }

    if (isRead) {
      // -read value: <verify_percent>[:<#threads>]
      String[] readOpts = splitColonSeparated(OPT_READ, 1, 2);
      int colIndex = 0;
      verifyPercent = parseInt(readOpts[colIndex++], 0, 100);
      if (colIndex < readOpts.length) {
        numReaderThreads = getNumThreads(readOpts[colIndex++]);
      }

      if (cmd.hasOption(OPT_MAX_READ_ERRORS)) {
        maxReadErrors = parseInt(cmd.getOptionValue(OPT_MAX_READ_ERRORS),
            0, Integer.MAX_VALUE);
      }

      if (cmd.hasOption(OPT_KEY_WINDOW)) {
        keyWindow = parseInt(cmd.getOptionValue(OPT_KEY_WINDOW),
            0, Integer.MAX_VALUE);
      }

      if (cmd.hasOption(OPT_MULTIGET)) {
        multiGetBatchSize = parseInt(cmd.getOptionValue(OPT_MULTIGET),
            0, Integer.MAX_VALUE);
      }

      System.out.println("Multi-gets (value of 1 means no multigets): " + multiGetBatchSize);
      System.out.println("Percent of keys to verify: " + verifyPercent);
      System.out.println("Reader threads: " + numReaderThreads);
    }

    numTables = 1;
    if (cmd.hasOption(NUM_TABLES)) {
      numTables = parseInt(cmd.getOptionValue(NUM_TABLES), 1, Short.MAX_VALUE);
    }

    numRegionsPerServer = DEFAULT_NUM_REGIONS_PER_SERVER;
    if (cmd.hasOption(OPT_NUM_REGIONS_PER_SERVER)) {
      numRegionsPerServer = Integer.parseInt(cmd.getOptionValue(OPT_NUM_REGIONS_PER_SERVER));
    }

    regionReplication = 1;
    if (cmd.hasOption(OPT_REGION_REPLICATION)) {
      regionReplication = Integer.parseInt(cmd.getOptionValue(OPT_REGION_REPLICATION));
    }

    regionReplicaId = -1;
    if (cmd.hasOption(OPT_REGION_REPLICA_ID)) {
      regionReplicaId = Integer.parseInt(cmd.getOptionValue(OPT_REGION_REPLICA_ID));
    }
  }
511
512 private void parseColumnFamilyOptions(CommandLine cmd) {
513 String dataBlockEncodingStr = cmd.getOptionValue(OPT_DATA_BLOCK_ENCODING);
514 dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null :
515 DataBlockEncoding.valueOf(dataBlockEncodingStr);
516
517 String compressStr = cmd.getOptionValue(OPT_COMPRESSION);
518 compressAlgo = compressStr == null ? Compression.Algorithm.NONE :
519 Compression.Algorithm.valueOf(compressStr);
520
521 String bloomStr = cmd.getOptionValue(OPT_BLOOM);
522 bloomType = bloomStr == null ? BloomType.ROW :
523 BloomType.valueOf(bloomStr);
524
525 inMemoryCF = cmd.hasOption(OPT_INMEMORY);
526 if (cmd.hasOption(OPT_ENCRYPTION)) {
527 cipher = Encryption.getCipher(conf, cmd.getOptionValue(OPT_ENCRYPTION));
528 }
529
530 }
531
532 public void initTestTable() throws IOException {
533 Durability durability = Durability.USE_DEFAULT;
534 if (deferredLogFlush) {
535 durability = Durability.ASYNC_WAL;
536 }
537
538 HBaseTestingUtility.createPreSplitLoadTestTable(conf, tableName,
539 getColumnFamilies(), compressAlgo, dataBlockEncodingAlgo, numRegionsPerServer,
540 regionReplication, durability);
541 applyColumnFamilyOptions(tableName, getColumnFamilies());
542 }
543
544 @Override
545 protected int doWork() throws IOException {
546 if (!isVerbose) {
547 LogManager.getLogger(ZooKeeper.class.getName()).setLevel(Level.WARN);
548 }
549 if (numTables > 1) {
550 return parallelLoadTables();
551 } else {
552 return loadTable();
553 }
554 }
555
  /**
   * Runs the load test against a single table: initializes the table (unless
   * skipped), builds the data generator, wires up writer/updater/reader
   * thread pools, runs them over [startKey, endKey), and reports success.
   *
   * @return EXIT_SUCCESS when no write/update/read failures occurred
   * @throws IOException on table initialization or configuration errors
   */
  protected int loadTable() throws IOException {
    if (cmd.hasOption(OPT_ZK_QUORUM)) {
      conf.set(HConstants.ZOOKEEPER_QUORUM, cmd.getOptionValue(OPT_ZK_QUORUM));
    }
    if (cmd.hasOption(OPT_ZK_PARENT_NODE)) {
      conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, cmd.getOptionValue(OPT_ZK_PARENT_NODE));
    }

    if (isInitOnly) {
      LOG.info("Initializing only; no reads or writes");
      initTestTable();
      return 0;
    }

    if (!isSkipInit) {
      initTestTable();
    }
    LoadTestDataGenerator dataGen = null;
    if (cmd.hasOption(OPT_GENERATOR)) {
      // -generator value is <class>[:<arg>:<arg>...]; the args after the
      // class name are passed to the generator's initialize().
      String[] clazzAndArgs = cmd.getOptionValue(OPT_GENERATOR).split(COLON);
      dataGen = getLoadGeneratorInstance(clazzAndArgs[0]);
      String[] args;
      if (dataGen instanceof LoadTestDataGeneratorWithACL) {
        LOG.info("Using LoadTestDataGeneratorWithACL");
        if (User.isHBaseSecurityEnabled(conf)) {
          LOG.info("Security is enabled");
          // Secure layout: <class>:<authn_file>:<superuser>:<userlist>...
          authnFileName = clazzAndArgs[1];
          superUser = clazzAndArgs[2];
          userNames = clazzAndArgs[3];
          args = Arrays.copyOfRange(clazzAndArgs, 2, clazzAndArgs.length);
          Properties authConfig = new Properties();
          // NOTE(review): getResourceAsStream may return null if the file is
          // missing from the classpath, and the stream is never closed --
          // confirm whether this should be hardened.
          authConfig.load(this.getClass().getClassLoader().getResourceAsStream(authnFileName));
          try {
            addAuthInfoToConf(authConfig, conf, superUser, userNames);
          } catch (IOException exp) {
            LOG.error(exp);
            return EXIT_FAILURE;
          }
          userOwner = User.create(loginAndReturnUGI(conf, superUser));
        } else {
          // Insecure layout: <class>:<superuser>:<userlist>...
          superUser = clazzAndArgs[1];
          userNames = clazzAndArgs[2];
          args = Arrays.copyOfRange(clazzAndArgs, 1, clazzAndArgs.length);
          userOwner = User.createUserForTesting(conf, superUser, new String[0]);
        }
      } else {
        args = clazzAndArgs.length == 1 ? new String[0] : Arrays.copyOfRange(clazzAndArgs, 1,
            clazzAndArgs.length);
      }
      dataGen.initialize(args);
    } else {
      // No custom generator requested: use the default random generator.
      dataGen = new MultiThreadedAction.DefaultDataGenerator(minColDataSize, maxColDataSize,
          minColsPerKey, maxColsPerKey, families);
    }

    if (userOwner != null) {
      // Grant the owning user full rights on the test table.
      LOG.info("Granting permissions for user " + userOwner.getShortName());
      Permission.Action[] actions = {
        Permission.Action.ADMIN, Permission.Action.CREATE,
        Permission.Action.READ, Permission.Action.WRITE };
      try {
        AccessControlClient.grant(ConnectionFactory.createConnection(conf),
            tableName, userOwner.getShortName(), null, null, actions);
      } catch (Throwable e) {
        LOG.fatal("Error in granting permission for the user " + userOwner.getShortName(), e);
        return EXIT_FAILURE;
      }
    }

    if (userNames != null) {
      // Log in / create each listed user up front.
      // NOTE(review): the created User objects are not retained here; the
      // names are passed to the ACL reader/updater below -- confirm this
      // loop's side effect (login) is what is actually relied upon.
      String users[] = userNames.split(",");
      User user = null;
      for (String userStr : users) {
        if (User.isHBaseSecurityEnabled(conf)) {
          user = User.create(loginAndReturnUGI(conf, userStr));
        } else {
          user = User.createUserForTesting(conf, userStr, new String[0]);
        }
      }
    }

    if (isWrite) {
      if (userOwner != null) {
        writerThreads = new MultiThreadedWriterWithACL(dataGen, conf, tableName, userOwner);
      } else {
        String writerClass = null;
        if (cmd.hasOption(OPT_WRITER)) {
          writerClass = cmd.getOptionValue(OPT_WRITER);
        } else {
          writerClass = MultiThreadedWriter.class.getCanonicalName();
        }

        writerThreads = getMultiThreadedWriterInstance(writerClass, dataGen);
      }
      writerThreads.setMultiPut(isMultiPut);
    }

    if (isUpdate) {
      if (userOwner != null) {
        updaterThreads = new MultiThreadedUpdaterWithACL(dataGen, conf, tableName, updatePercent,
            userOwner, userNames);
      } else {
        String updaterClass = null;
        if (cmd.hasOption(OPT_UPDATER)) {
          updaterClass = cmd.getOptionValue(OPT_UPDATER);
        } else {
          updaterClass = MultiThreadedUpdater.class.getCanonicalName();
        }
        updaterThreads = getMultiThreadedUpdaterInstance(updaterClass, dataGen);
      }
      updaterThreads.setBatchUpdate(isBatchUpdate);
      updaterThreads.setIgnoreNonceConflicts(ignoreConflicts);
    }

    if (isRead) {
      if (userOwner != null) {
        readerThreads = new MultiThreadedReaderWithACL(dataGen, conf, tableName, verifyPercent,
            userNames);
      } else {
        String readerClass = null;
        if (cmd.hasOption(OPT_READER)) {
          readerClass = cmd.getOptionValue(OPT_READER);
        } else {
          readerClass = MultiThreadedReader.class.getCanonicalName();
        }
        readerThreads = getMultiThreadedReaderInstance(readerClass, dataGen);
      }
      readerThreads.setMaxErrors(maxReadErrors);
      readerThreads.setKeyWindow(keyWindow);
      readerThreads.setMultiGetBatchSize(multiGetBatchSize);
      readerThreads.setRegionReplicaId(regionReplicaId);
    }

    // Link concurrent workloads so consumers trail the producer's write point.
    if (isUpdate && isWrite) {
      LOG.info("Concurrent write/update workload: making updaters aware of the " +
          "write point");
      updaterThreads.linkToWriter(writerThreads);
    }

    if (isRead && (isUpdate || isWrite)) {
      LOG.info("Concurrent write/read workload: making readers aware of the " +
          "write point");
      readerThreads.linkToWriter(isUpdate ? updaterThreads : writerThreads);
    }

    if (isWrite) {
      System.out.println("Starting to write data...");
      writerThreads.start(startKey, endKey, numWriterThreads);
    }

    if (isUpdate) {
      LOG.info("Starting to mutate data...");
      System.out.println("Starting to mutate data...");
      // Updaters walk the same key range as writers.
      updaterThreads.start(startKey, endKey, numUpdaterThreads);
    }

    if (isRead) {
      System.out.println("Starting to read data...");
      readerThreads.start(startKey, endKey, numReaderThreads);
    }

    if (isWrite) {
      writerThreads.waitForFinish();
    }

    if (isUpdate) {
      updaterThreads.waitForFinish();
    }

    if (isRead) {
      readerThreads.waitForFinish();
    }

    // Success requires zero failures across every workload that ran.
    boolean success = true;
    if (isWrite) {
      success = success && writerThreads.getNumWriteFailures() == 0;
    }
    if (isUpdate) {
      success = success && updaterThreads.getNumWriteFailures() == 0;
    }
    if (isRead) {
      success = success && readerThreads.getNumReadErrors() == 0
          && readerThreads.getNumReadFailures() == 0;
    }
    return success ? EXIT_SUCCESS : EXIT_FAILURE;
  }
746
747 private LoadTestDataGenerator getLoadGeneratorInstance(String clazzName) throws IOException {
748 try {
749 Class<?> clazz = Class.forName(clazzName);
750 Constructor<?> constructor = clazz.getConstructor(int.class, int.class, int.class, int.class,
751 byte[][].class);
752 return (LoadTestDataGenerator) constructor.newInstance(minColDataSize, maxColDataSize,
753 minColsPerKey, maxColsPerKey, families);
754 } catch (Exception e) {
755 throw new IOException(e);
756 }
757 }
758
759 private MultiThreadedWriter getMultiThreadedWriterInstance(String clazzName
760 , LoadTestDataGenerator dataGen) throws IOException {
761 try {
762 Class<?> clazz = Class.forName(clazzName);
763 Constructor<?> constructor = clazz.getConstructor(
764 LoadTestDataGenerator.class, Configuration.class, TableName.class);
765 return (MultiThreadedWriter) constructor.newInstance(dataGen, conf, tableName);
766 } catch (Exception e) {
767 throw new IOException(e);
768 }
769 }
770
771 private MultiThreadedUpdater getMultiThreadedUpdaterInstance(String clazzName
772 , LoadTestDataGenerator dataGen) throws IOException {
773 try {
774 Class<?> clazz = Class.forName(clazzName);
775 Constructor<?> constructor = clazz.getConstructor(
776 LoadTestDataGenerator.class, Configuration.class, TableName.class, double.class);
777 return (MultiThreadedUpdater) constructor.newInstance(
778 dataGen, conf, tableName, updatePercent);
779 } catch (Exception e) {
780 throw new IOException(e);
781 }
782 }
783
784 private MultiThreadedReader getMultiThreadedReaderInstance(String clazzName
785 , LoadTestDataGenerator dataGen) throws IOException {
786 try {
787 Class<?> clazz = Class.forName(clazzName);
788 Constructor<?> constructor = clazz.getConstructor(
789 LoadTestDataGenerator.class, Configuration.class, TableName.class, double.class);
790 return (MultiThreadedReader) constructor.newInstance(dataGen, conf, tableName, verifyPercent);
791 } catch (Exception e) {
792 throw new IOException(e);
793 }
794 }
795
796 public static byte[] generateData(final Random r, int length) {
797 byte [] b = new byte [length];
798 int i = 0;
799
800 for(i = 0; i < (length-8); i += 8) {
801 b[i] = (byte) (65 + r.nextInt(26));
802 b[i+1] = b[i];
803 b[i+2] = b[i];
804 b[i+3] = b[i];
805 b[i+4] = b[i];
806 b[i+5] = b[i];
807 b[i+6] = b[i];
808 b[i+7] = b[i];
809 }
810
811 byte a = (byte) (65 + r.nextInt(26));
812 for(; i < length; i++) {
813 b[i] = a;
814 }
815 return b;
816 }
  /** Command-line entry point; delegates to the AbstractHBaseTool driver. */
  public static void main(String[] args) {
    new LoadTestTool().doStaticMain(args);
  }
820
821
822
823
824
825
826
827
828
  /**
   * Runs one worker LoadTestTool per table (numTables of them) in parallel,
   * each against a table named <tn>_1 ... <tn>_n, and waits for all workers.
   *
   * @return EXIT_SUCCESS when every worker completes without recording an error
   * @throws IOException if a worker failed or the wait was interrupted
   */
  private int parallelLoadTables()
      throws IOException {
    // Table name prefix; each worker operates on <prefix>_<i>.
    String tableName = cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME);
    String[] newArgs = null;
    if (!cmd.hasOption(LoadTestTool.OPT_TABLE_NAME)) {
      // Ensure a -tn argument is present so the per-worker name can be
      // patched in below.
      newArgs = new String[cmdLineArgs.length + 2];
      newArgs[0] = "-" + LoadTestTool.OPT_TABLE_NAME;
      newArgs[1] = LoadTestTool.DEFAULT_TABLE_NAME;
      System.arraycopy(cmdLineArgs, 0, newArgs, 2, cmdLineArgs.length);
    } else {
      newArgs = cmdLineArgs;
    }

    int tableNameValueIndex = -1;
    for (int j = 0; j < newArgs.length; j++) {
      if (newArgs[j].endsWith(OPT_TABLE_NAME)) {
        tableNameValueIndex = j + 1;
      } else if (newArgs[j].endsWith(NUM_TABLES)) {
        // Workers must not recurse into parallel mode themselves.
        newArgs[j + 1] = "1";
      }
    }

    // Start one worker thread per table.
    List<WorkerThread> workers = new ArrayList<WorkerThread>();
    for (int i = 0; i < numTables; i++) {
      String[] workerArgs = newArgs.clone();
      workerArgs[tableNameValueIndex] = tableName + "_" + (i+1);
      WorkerThread worker = new WorkerThread(i, workerArgs);
      workers.add(worker);
      LOG.info(worker + " starting");
      worker.start();
    }

    // Wait for all workers; surface the first recorded error, if any.
    LOG.info("Waiting for worker threads to finish");
    for (WorkerThread t : workers) {
      try {
        t.join();
      } catch (InterruptedException ie) {
        IOException iie = new InterruptedIOException();
        iie.initCause(ie);
        throw iie;
      }
      checkForErrors();
    }

    return EXIT_SUCCESS;
  }
879
880
881
  // First error seen by any worker thread; set at most once.
  protected AtomicReference<Throwable> thrown = new AtomicReference<Throwable>();

  // Records the first worker-thread failure; later failures are ignored.
  private void workerThreadError(Throwable t) {
    thrown.compareAndSet(null, t);
  }
887
888
889
890
891 private void checkForErrors() throws IOException {
892 Throwable thrown = this.thrown.get();
893 if (thrown == null) return;
894 if (thrown instanceof IOException) {
895 throw (IOException) thrown;
896 } else {
897 throw new RuntimeException(thrown);
898 }
899 }
900
901 class WorkerThread extends Thread {
902 private String[] workerArgs;
903
904 WorkerThread(int i, String[] args) {
905 super("WorkerThread-" + i);
906 workerArgs = args;
907 }
908
909 @Override
910 public void run() {
911 try {
912 int ret = ToolRunner.run(HBaseConfiguration.create(), new LoadTestTool(), workerArgs);
913 if (ret != 0) {
914 throw new RuntimeException("LoadTestTool exit with non-zero return code.");
915 }
916 } catch (Exception ex) {
917 LOG.error("Error in worker thread", ex);
918 workerThreadError(ex);
919 }
920 }
921 }
922
923 private void addAuthInfoToConf(Properties authConfig, Configuration conf, String owner,
924 String userList) throws IOException {
925 List<String> users = new ArrayList(Arrays.asList(userList.split(",")));
926 users.add(owner);
927 for (String user : users) {
928 String keyTabFileConfKey = "hbase." + user + ".keytab.file";
929 String principalConfKey = "hbase." + user + ".kerberos.principal";
930 if (!authConfig.containsKey(keyTabFileConfKey) || !authConfig.containsKey(principalConfKey)) {
931 throw new IOException("Authentication configs missing for user : " + user);
932 }
933 }
934 for (String key : authConfig.stringPropertyNames()) {
935 conf.set(key, authConfig.getProperty(key));
936 }
937 LOG.debug("Added authentication properties to config successfully.");
938 }
939
  /**
   * Performs a Kerberos keytab login for {@code username} using the
   * "hbase.&lt;user&gt;.keytab.file" and "hbase.&lt;user&gt;.kerberos.principal"
   * settings from {@code conf} and returns the resulting UGI.
   *
   * @throws IOException if the local hostname cannot be resolved or login fails
   */
  public static UserGroupInformation loginAndReturnUGI(Configuration conf, String username)
      throws IOException {
    String hostname = InetAddress.getLocalHost().getHostName();
    String keyTabFileConfKey = "hbase." + username + ".keytab.file";
    String keyTabFileLocation = conf.get(keyTabFileConfKey);
    String principalConfKey = "hbase." + username + ".kerberos.principal";
    // getServerPrincipal substitutes a _HOST token with the local hostname.
    String principal = SecurityUtil.getServerPrincipal(conf.get(principalConfKey), hostname);
    if (keyTabFileLocation == null || principal == null) {
      // NOTE(review): only warns and still attempts the login below with the
      // null value -- confirm whether this should fail fast instead.
      LOG.warn("Principal or key tab file null for : " + principalConfKey + ", "
          + keyTabFileConfKey);
    }
    UserGroupInformation ugi =
        UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keyTabFileLocation);
    return ugi;
  }
955 }