001/*
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hbase;
019
020import static org.junit.Assert.assertEquals;
021import static org.junit.Assert.assertTrue;
022import static org.junit.Assert.fail;
023
024import edu.umd.cs.findbugs.annotations.Nullable;
025import java.io.File;
026import java.io.IOException;
027import java.io.OutputStream;
028import java.io.UncheckedIOException;
029import java.lang.reflect.Field;
030import java.lang.reflect.Modifier;
031import java.net.BindException;
032import java.net.DatagramSocket;
033import java.net.InetAddress;
034import java.net.ServerSocket;
035import java.net.Socket;
036import java.net.UnknownHostException;
037import java.nio.charset.StandardCharsets;
038import java.security.MessageDigest;
039import java.util.ArrayList;
040import java.util.Arrays;
041import java.util.Collection;
042import java.util.Collections;
043import java.util.HashSet;
044import java.util.Iterator;
045import java.util.List;
046import java.util.Map;
047import java.util.NavigableSet;
048import java.util.Properties;
049import java.util.Random;
050import java.util.Set;
051import java.util.TreeSet;
052import java.util.concurrent.ThreadLocalRandom;
053import java.util.concurrent.TimeUnit;
054import java.util.concurrent.atomic.AtomicReference;
055import java.util.function.BooleanSupplier;
056import java.util.stream.Collectors;
057import org.apache.commons.io.FileUtils;
058import org.apache.commons.lang3.RandomStringUtils;
059import org.apache.hadoop.conf.Configuration;
060import org.apache.hadoop.fs.FileSystem;
061import org.apache.hadoop.fs.Path;
062import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
063import org.apache.hadoop.hbase.Waiter.Predicate;
064import org.apache.hadoop.hbase.client.Admin;
065import org.apache.hadoop.hbase.client.BufferedMutator;
066import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
067import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
068import org.apache.hadoop.hbase.client.Connection;
069import org.apache.hadoop.hbase.client.ConnectionFactory;
070import org.apache.hadoop.hbase.client.Consistency;
071import org.apache.hadoop.hbase.client.Delete;
072import org.apache.hadoop.hbase.client.Durability;
073import org.apache.hadoop.hbase.client.Get;
074import org.apache.hadoop.hbase.client.HBaseAdmin;
075import org.apache.hadoop.hbase.client.Hbck;
076import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
077import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
078import org.apache.hadoop.hbase.client.MasterRegistry;
079import org.apache.hadoop.hbase.client.Put;
080import org.apache.hadoop.hbase.client.RegionInfo;
081import org.apache.hadoop.hbase.client.RegionInfoBuilder;
082import org.apache.hadoop.hbase.client.RegionLocator;
083import org.apache.hadoop.hbase.client.Result;
084import org.apache.hadoop.hbase.client.ResultScanner;
085import org.apache.hadoop.hbase.client.Scan;
086import org.apache.hadoop.hbase.client.Table;
087import org.apache.hadoop.hbase.client.TableDescriptor;
088import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
089import org.apache.hadoop.hbase.client.TableState;
090import org.apache.hadoop.hbase.fs.HFileSystem;
091import org.apache.hadoop.hbase.io.compress.Compression;
092import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
093import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
094import org.apache.hadoop.hbase.io.hfile.BlockCache;
095import org.apache.hadoop.hbase.io.hfile.ChecksumUtil;
096import org.apache.hadoop.hbase.io.hfile.HFile;
097import org.apache.hadoop.hbase.ipc.RpcServerInterface;
098import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
099import org.apache.hadoop.hbase.logging.Log4jUtils;
100import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
101import org.apache.hadoop.hbase.master.HMaster;
102import org.apache.hadoop.hbase.master.RegionState;
103import org.apache.hadoop.hbase.master.ServerManager;
104import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
105import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
106import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
107import org.apache.hadoop.hbase.master.assignment.RegionStates;
108import org.apache.hadoop.hbase.mob.MobFileCache;
109import org.apache.hadoop.hbase.regionserver.BloomType;
110import org.apache.hadoop.hbase.regionserver.ChunkCreator;
111import org.apache.hadoop.hbase.regionserver.HRegion;
112import org.apache.hadoop.hbase.regionserver.HRegionServer;
113import org.apache.hadoop.hbase.regionserver.HStore;
114import org.apache.hadoop.hbase.regionserver.InternalScanner;
115import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
116import org.apache.hadoop.hbase.regionserver.Region;
117import org.apache.hadoop.hbase.regionserver.RegionScanner;
118import org.apache.hadoop.hbase.regionserver.RegionServerServices;
119import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
120import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
121import org.apache.hadoop.hbase.security.User;
122import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache;
123import org.apache.hadoop.hbase.trace.TraceUtil;
124import org.apache.hadoop.hbase.util.Bytes;
125import org.apache.hadoop.hbase.util.CommonFSUtils;
126import org.apache.hadoop.hbase.util.FSTableDescriptors;
127import org.apache.hadoop.hbase.util.FSUtils;
128import org.apache.hadoop.hbase.util.JVMClusterUtil;
129import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
130import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
131import org.apache.hadoop.hbase.util.Pair;
132import org.apache.hadoop.hbase.util.ReflectionUtils;
133import org.apache.hadoop.hbase.util.RegionSplitter;
134import org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm;
135import org.apache.hadoop.hbase.util.RetryCounter;
136import org.apache.hadoop.hbase.util.Threads;
137import org.apache.hadoop.hbase.wal.WAL;
138import org.apache.hadoop.hbase.wal.WALFactory;
139import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
140import org.apache.hadoop.hbase.zookeeper.ZKConfig;
141import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
142import org.apache.hadoop.hdfs.DFSClient;
143import org.apache.hadoop.hdfs.DistributedFileSystem;
144import org.apache.hadoop.hdfs.MiniDFSCluster;
145import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
146import org.apache.hadoop.mapred.JobConf;
147import org.apache.hadoop.mapred.MiniMRCluster;
148import org.apache.hadoop.mapred.TaskLog;
149import org.apache.hadoop.minikdc.MiniKdc;
150import org.apache.yetus.audience.InterfaceAudience;
151import org.apache.zookeeper.WatchedEvent;
152import org.apache.zookeeper.ZooKeeper;
153import org.apache.zookeeper.ZooKeeper.States;
154
155import org.apache.hbase.thirdparty.com.google.common.io.Closeables;
156
157import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
158
159/**
160 * Facility for testing HBase. Replacement for old HBaseTestCase and HBaseClusterTestCase
161 * functionality. Create an instance and keep it around testing HBase. This class is meant to be
162 * your one-stop shop for anything you might need testing. Manages one cluster at a time only.
163 * Managed cluster can be an in-process {@link MiniHBaseCluster}, or a deployed cluster of type
164 * {@code DistributedHBaseCluster}. Not all methods work with the real cluster. Depends on log4j
165 * being on classpath and hbase-site.xml for logging and test-run configuration. It does not set
166 * logging levels. In the configuration properties, default values for master-info-port and
167 * region-server-port are overridden such that a random port will be assigned (thus avoiding port
168 * contention if another local HBase instance is already running).
169 * <p>
170 * To preserve test data directories, pass the system property "hbase.testing.preserve.testdir"
171 * setting it to true.
172 */
173@InterfaceAudience.Public
174@SuppressWarnings("deprecation")
175public class HBaseTestingUtility extends HBaseZKTestingUtility {
176
177  /**
178   * System property key to get test directory value. Name is as it is because mini dfs has
179   * hard-codings to put test data here. It should NOT be used directly in HBase, as it's a property
180   * used in mini dfs.
181   * @deprecated since 2.0.0 and will be removed in 3.0.0. Can be used only with mini dfs.
182   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19410">HBASE-19410</a>
183   */
184  @Deprecated
185  private static final String TEST_DIRECTORY_KEY = "test.build.data";
186
187  public static final String REGIONS_PER_SERVER_KEY = "hbase.test.regions-per-server";
188  /**
189   * The default number of regions per regionserver when creating a pre-split table.
190   */
191  public static final int DEFAULT_REGIONS_PER_SERVER = 3;
192
193  public static final String PRESPLIT_TEST_TABLE_KEY = "hbase.test.pre-split-table";
194  public static final boolean PRESPLIT_TEST_TABLE = true;
195
196  private MiniDFSCluster dfsCluster = null;
197
198  private volatile HBaseCluster hbaseCluster = null;
199  private MiniMRCluster mrCluster = null;
200
201  /** If there is a mini cluster running for this testing utility instance. */
202  private volatile boolean miniClusterRunning;
203
204  private String hadoopLogDir;
205
206  /**
207   * Directory on test filesystem where we put the data for this instance of HBaseTestingUtility
208   */
209  private Path dataTestDirOnTestFS = null;
210
211  private final AtomicReference<Connection> connection = new AtomicReference<>();
212
213  /** Filesystem URI used for map-reduce mini-cluster setup */
214  private static String FS_URI;
215
216  /** This is for unit tests parameterized with a single boolean. */
217  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
218
219  /**
220   * Checks to see if a specific port is available.
221   * @param port the port number to check for availability
222   * @return <tt>true</tt> if the port is available, or <tt>false</tt> if not
223   */
224  public static boolean available(int port) {
225    ServerSocket ss = null;
226    DatagramSocket ds = null;
227    try {
228      ss = new ServerSocket(port);
229      ss.setReuseAddress(true);
230      ds = new DatagramSocket(port);
231      ds.setReuseAddress(true);
232      return true;
233    } catch (IOException e) {
234      // Do nothing
235    } finally {
236      if (ds != null) {
237        ds.close();
238      }
239
240      if (ss != null) {
241        try {
242          ss.close();
243        } catch (IOException e) {
244          /* should not be thrown */
245        }
246      }
247    }
248
249    return false;
250  }
251
252  /**
253   * Create all combinations of Bloom filters and compression algorithms for testing.
254   */
255  private static List<Object[]> bloomAndCompressionCombinations() {
256    List<Object[]> configurations = new ArrayList<>();
257    for (Compression.Algorithm comprAlgo : HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
258      for (BloomType bloomType : BloomType.values()) {
259        configurations.add(new Object[] { comprAlgo, bloomType });
260      }
261    }
262    return Collections.unmodifiableList(configurations);
263  }
264
265  /**
266   * Create combination of memstoreTS and tags
267   */
268  private static List<Object[]> memStoreTSAndTagsCombination() {
269    List<Object[]> configurations = new ArrayList<>();
270    configurations.add(new Object[] { false, false });
271    configurations.add(new Object[] { false, true });
272    configurations.add(new Object[] { true, false });
273    configurations.add(new Object[] { true, true });
274    return Collections.unmodifiableList(configurations);
275  }
276
277  public static List<Object[]> memStoreTSTagsAndOffheapCombination() {
278    List<Object[]> configurations = new ArrayList<>();
279    configurations.add(new Object[] { false, false, true });
280    configurations.add(new Object[] { false, false, false });
281    configurations.add(new Object[] { false, true, true });
282    configurations.add(new Object[] { false, true, false });
283    configurations.add(new Object[] { true, false, true });
284    configurations.add(new Object[] { true, false, false });
285    configurations.add(new Object[] { true, true, true });
286    configurations.add(new Object[] { true, true, false });
287    return Collections.unmodifiableList(configurations);
288  }
289
  /** All bloom-filter/compression pairings, computed once for parameterized tests. */
  public static final Collection<Object[]> BLOOM_AND_COMPRESSION_COMBINATIONS =
    bloomAndCompressionCombinations();
292
293  /**
294   * <p>
295   * Create an HBaseTestingUtility using a default configuration.
296   * <p>
297   * Initially, all tmp files are written to a local test data directory. Once
298   * {@link #startMiniDFSCluster} is called, either directly or via {@link #startMiniCluster()}, tmp
299   * data will be written to the DFS directory instead.
300   * <p>
301   * Previously, there was a distinction between the type of utility returned by
302   * {@link #createLocalHTU()} and this constructor; this is no longer the case. All
303   * HBaseTestingUtility objects will behave as local until a DFS cluster is started, at which point
304   * they will switch to using mini DFS for storage.
305   */
306  public HBaseTestingUtility() {
307    this(HBaseConfiguration.create());
308  }
309
310  /**
311   * <p>
312   * Create an HBaseTestingUtility using a given configuration.
313   * <p>
314   * Initially, all tmp files are written to a local test data directory. Once
315   * {@link #startMiniDFSCluster} is called, either directly or via {@link #startMiniCluster()}, tmp
316   * data will be written to the DFS directory instead.
317   * <p>
318   * Previously, there was a distinction between the type of utility returned by
319   * {@link #createLocalHTU()} and this constructor; this is no longer the case. All
320   * HBaseTestingUtility objects will behave as local until a DFS cluster is started, at which point
321   * they will switch to using mini DFS for storage.
322   * @param conf The configuration to use for further operations
323   */
324  public HBaseTestingUtility(@Nullable Configuration conf) {
325    super(conf);
326
327    // a hbase checksum verification failure will cause unit tests to fail
328    ChecksumUtil.generateExceptionForChecksumFailureForTest(true);
329
330    // Save this for when setting default file:// breaks things
331    if (this.conf.get("fs.defaultFS") != null) {
332      this.conf.set("original.defaultFS", this.conf.get("fs.defaultFS"));
333    }
334    if (this.conf.get(HConstants.HBASE_DIR) != null) {
335      this.conf.set("original.hbase.dir", this.conf.get(HConstants.HBASE_DIR));
336    }
337    // Every cluster is a local cluster until we start DFS
338    // Note that conf could be null, but this.conf will not be
339    String dataTestDir = getDataTestDir().toString();
340    this.conf.set("fs.defaultFS", "file:///");
341    this.conf.set(HConstants.HBASE_DIR, "file://" + dataTestDir);
342    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);
343    this.conf.setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, false);
344    // If the value for random ports isn't set set it to true, thus making
345    // tests opt-out for random port assignment
346    this.conf.setBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS,
347      this.conf.getBoolean(LocalHBaseCluster.ASSIGN_RANDOM_PORTS, true));
348  }
349
350  /**
351   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #HBaseTestingUtility()}
352   *             instead.
353   * @return a normal HBaseTestingUtility
354   * @see #HBaseTestingUtility()
355   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19841">HBASE-19841</a>
356   */
357  @Deprecated
358  public static HBaseTestingUtility createLocalHTU() {
359    return new HBaseTestingUtility();
360  }
361
362  /**
363   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
364   *             {@link #HBaseTestingUtility(Configuration)} instead.
365   * @return a normal HBaseTestingUtility
366   * @see #HBaseTestingUtility(Configuration)
367   * @see <a href="https://issues.apache.org/jira/browse/HBASE-19841">HBASE-19841</a>
368   */
369  @Deprecated
370  public static HBaseTestingUtility createLocalHTU(Configuration c) {
371    return new HBaseTestingUtility(c);
372  }
373
374  /**
375   * Close both the region {@code r} and it's underlying WAL. For use in tests.
376   */
377  public static void closeRegionAndWAL(final Region r) throws IOException {
378    closeRegionAndWAL((HRegion) r);
379  }
380
381  /**
382   * Close both the HRegion {@code r} and it's underlying WAL. For use in tests.
383   */
384  public static void closeRegionAndWAL(final HRegion r) throws IOException {
385    if (r == null) return;
386    r.close();
387    if (r.getWAL() == null) return;
388    r.getWAL().close();
389  }
390
391  /**
392   * Returns this classes's instance of {@link Configuration}. Be careful how you use the returned
393   * Configuration since {@link Connection} instances can be shared. The Map of Connections is keyed
394   * by the Configuration. If say, a Connection was being used against a cluster that had been
395   * shutdown, see {@link #shutdownMiniCluster()}, then the Connection will no longer be wholesome.
396   * Rather than use the return direct, its usually best to make a copy and use that. Do
397   * <code>Configuration c = new Configuration(INSTANCE.getConfiguration());</code>
398   * @return Instance of Configuration.
399   */
400  @Override
401  public Configuration getConfiguration() {
402    return super.getConfiguration();
403  }
404
  /**
   * Sets the HBaseCluster instance this utility operates against.
   * @param hbaseCluster the cluster to manage
   */
  public void setHBaseCluster(HBaseCluster hbaseCluster) {
    this.hbaseCluster = hbaseCluster;
  }
408
409  /**
410   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}. Give it a random name so can
411   * have many concurrent tests running if we need to. It needs to amend the
412   * {@link #TEST_DIRECTORY_KEY} System property, as it's what minidfscluster bases it data dir on.
413   * Moding a System property is not the way to do concurrent instances -- another instance could
414   * grab the temporary value unintentionally -- but not anything can do about it at moment; single
415   * instance only is how the minidfscluster works. We also create the underlying directory names
416   * for hadoop.log.dir, mapreduce.cluster.local.dir and hadoop.tmp.dir, and set the values in the
417   * conf, and as a system property for hadoop.tmp.dir (We do not create them!).
418   * @return The calculated data test build directory, if newly-created.
419   */
420  @Override
421  protected Path setupDataTestDir() {
422    Path testPath = super.setupDataTestDir();
423    if (null == testPath) {
424      return null;
425    }
426
427    createSubDirAndSystemProperty("hadoop.log.dir", testPath, "hadoop-log-dir");
428
429    // This is defaulted in core-default.xml to /tmp/hadoop-${user.name}, but
430    // we want our own value to ensure uniqueness on the same machine
431    createSubDirAndSystemProperty("hadoop.tmp.dir", testPath, "hadoop-tmp-dir");
432
433    // Read and modified in org.apache.hadoop.mapred.MiniMRCluster
434    createSubDir("mapreduce.cluster.local.dir", testPath, "mapred-local-dir");
435    return testPath;
436  }
437
438  private void createSubDirAndSystemProperty(String propertyName, Path parent, String subDirName) {
439
440    String sysValue = System.getProperty(propertyName);
441
442    if (sysValue != null) {
443      // There is already a value set. So we do nothing but hope
444      // that there will be no conflicts
445      LOG.info("System.getProperty(\"" + propertyName + "\") already set to: " + sysValue
446        + " so I do NOT create it in " + parent);
447      String confValue = conf.get(propertyName);
448      if (confValue != null && !confValue.endsWith(sysValue)) {
449        LOG.warn(propertyName + " property value differs in configuration and system: "
450          + "Configuration=" + confValue + " while System=" + sysValue
451          + " Erasing configuration value by system value.");
452      }
453      conf.set(propertyName, sysValue);
454    } else {
455      // Ok, it's not set, so we create it as a subdirectory
456      createSubDir(propertyName, parent, subDirName);
457      System.setProperty(propertyName, conf.get(propertyName));
458    }
459  }
460
461  /**
462   * @return Where to write test data on the test filesystem; Returns working directory for the test
463   *         filesystem by default
464   * @see #setupDataTestDirOnTestFS()
465   * @see #getTestFileSystem()
466   */
467  private Path getBaseTestDirOnTestFS() throws IOException {
468    FileSystem fs = getTestFileSystem();
469    return new Path(fs.getWorkingDirectory(), "test-data");
470  }
471
472  /**
473   * @return META table descriptor
474   * @deprecated since 2.0 version and will be removed in 3.0 version. Currently for test only. use
475   *             {@link #getMetaTableDescriptorBuilder()}
476   */
477  @Deprecated
478  public HTableDescriptor getMetaTableDescriptor() {
479    return new ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());
480  }
481
482  /**
483   * @return META table descriptor
484   * @deprecated Since 2.3.0. No one should be using this internal. Used in testing only.
485   */
486  @Deprecated
487  @InterfaceAudience.Private
488  public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
489    try {
490      return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
491    } catch (IOException e) {
492      throw new RuntimeException("Unable to create META table descriptor", e);
493    }
494  }
495
496  /**
497   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} to write
498   * temporary test data. Call this method after setting up the mini dfs cluster if the test relies
499   * on it.
500   * @return a unique path in the test filesystem
501   */
502  public Path getDataTestDirOnTestFS() throws IOException {
503    if (dataTestDirOnTestFS == null) {
504      setupDataTestDirOnTestFS();
505    }
506
507    return dataTestDirOnTestFS;
508  }
509
510  /**
511   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()} to write
512   * temporary test data. Call this method after setting up the mini dfs cluster if the test relies
513   * on it.
514   * @return a unique path in the test filesystem
515   * @param subdirName name of the subdir to create under the base test dir
516   */
517  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
518    return new Path(getDataTestDirOnTestFS(), subdirName);
519  }
520
521  /**
522   * Sets up a path in test filesystem to be used by tests. Creates a new directory if not already
523   * setup.
524   */
525  private void setupDataTestDirOnTestFS() throws IOException {
526    if (dataTestDirOnTestFS != null) {
527      LOG.warn("Data test on test fs dir already setup in " + dataTestDirOnTestFS.toString());
528      return;
529    }
530    dataTestDirOnTestFS = getNewDataTestDirOnTestFS();
531  }
532
533  /**
534   * Sets up a new path in test filesystem to be used by tests.
535   */
536  private Path getNewDataTestDirOnTestFS() throws IOException {
537    // The file system can be either local, mini dfs, or if the configuration
538    // is supplied externally, it can be an external cluster FS. If it is a local
539    // file system, the tests should use getBaseTestDir, otherwise, we can use
540    // the working directory, and create a unique sub dir there
541    FileSystem fs = getTestFileSystem();
542    Path newDataTestDir;
543    String randomStr = getRandomUUID().toString();
544    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
545      newDataTestDir = new Path(getDataTestDir(), randomStr);
546      File dataTestDir = new File(newDataTestDir.toString());
547      if (deleteOnExit()) dataTestDir.deleteOnExit();
548    } else {
549      Path base = getBaseTestDirOnTestFS();
550      newDataTestDir = new Path(base, randomStr);
551      if (deleteOnExit()) fs.deleteOnExit(newDataTestDir);
552    }
553    return newDataTestDir;
554  }
555
556  /**
557   * Cleans the test data directory on the test filesystem.
558   * @return True if we removed the test dirs
559   */
560  public boolean cleanupDataTestDirOnTestFS() throws IOException {
561    boolean ret = getTestFileSystem().delete(dataTestDirOnTestFS, true);
562    if (ret) dataTestDirOnTestFS = null;
563    return ret;
564  }
565
566  /**
567   * Cleans a subdirectory under the test data directory on the test filesystem.
568   * @return True if we removed child
569   */
570  public boolean cleanupDataTestDirOnTestFS(String subdirName) throws IOException {
571    Path cpath = getDataTestDirOnTestFS(subdirName);
572    return getTestFileSystem().delete(cpath, true);
573  }
574
575  /**
576   * Start a minidfscluster.
577   * @param servers How many DNs to start.
578   * @see #shutdownMiniDFSCluster()
579   * @return The mini dfs cluster created.
580   */
581  public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
582    return startMiniDFSCluster(servers, null);
583  }
584
585  /**
586   * Start a minidfscluster. This is useful if you want to run datanode on distinct hosts for things
587   * like HDFS block location verification. If you start MiniDFSCluster without host names, all
588   * instances of the datanodes will have the same host name.
589   * @param hosts hostnames DNs to run on.
590   * @see #shutdownMiniDFSCluster()
591   * @return The mini dfs cluster created.
592   */
593  public MiniDFSCluster startMiniDFSCluster(final String hosts[]) throws Exception {
594    if (hosts != null && hosts.length != 0) {
595      return startMiniDFSCluster(hosts.length, hosts);
596    } else {
597      return startMiniDFSCluster(1, null);
598    }
599  }
600
601  /**
602   * Start a minidfscluster. Can only create one.
603   * @param servers How many DNs to start.
604   * @param hosts   hostnames DNs to run on.
605   * @see #shutdownMiniDFSCluster()
606   * @return The mini dfs cluster created.
607   */
608  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[]) throws Exception {
609    return startMiniDFSCluster(servers, null, hosts);
610  }
611
  /**
   * Points the configuration's default filesystem at the running mini DFS cluster. No-op when no
   * DFS cluster has been started.
   */
  private void setFs() throws IOException {
    if (this.dfsCluster == null) {
      LOG.info("Skipping setting fs because dfsCluster is null");
      return;
    }
    FileSystem fs = this.dfsCluster.getFileSystem();
    CommonFSUtils.setFsDefault(this.conf, new Path(fs.getUri()));

    // re-enable this check with dfs
    conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
  }
623
  /**
   * Start a mini DFS cluster with the given number of datanodes, rack assignments and hostnames,
   * set it as the default filesystem, and block until the cluster reports itself up.
   * @param servers how many datanodes to start
   * @param racks   rack assignment per datanode, or null
   * @param hosts   hostname per datanode, or null
   * @return the started mini dfs cluster
   */
  public MiniDFSCluster startMiniDFSCluster(int servers, final String racks[], String hosts[])
    throws Exception {
    // Redirect all the hadoop/mapreduce scratch dirs away from /tmp first.
    createDirsAndSetProperties();
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);

    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
      "ERROR");

    TraceUtil.initTracer(conf);

    this.dfsCluster =
      new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null);

    // Set this just-started cluster as our filesystem.
    setFs();

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    // reset the test directory for test file system
    dataTestDirOnTestFS = null;
    String dataTestDir = getDataTestDir().toString();
    conf.set(HConstants.HBASE_DIR, dataTestDir);
    LOG.debug("Setting {} to {}", HConstants.HBASE_DIR, dataTestDir);

    return this.dfsCluster;
  }
653
  /**
   * Start a 5-datanode mini DFS cluster for WAL testing, with the namenode on the given port.
   * Unlike {@link #startMiniDFSCluster(int, String[], String[])}, this does not set the cluster as
   * the default filesystem and does not wait for the cluster to come fully up.
   * @param namenodePort the port the namenode should bind to
   * @return the created mini dfs cluster
   */
  public MiniDFSCluster startMiniDFSClusterForTestWAL(int namenodePort) throws IOException {
    createDirsAndSetProperties();
    // Error level to skip some warnings specific to the minicluster. See HBASE-4709
    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.util.MBeans.class.getName(), "ERROR");
    Log4jUtils.setLogLevel(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class.getName(),
      "ERROR");
    dfsCluster =
      new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
    return dfsCluster;
  }
664
665  /**
666   * This is used before starting HDFS and map-reduce mini-clusters Run something like the below to
667   * check for the likes of '/tmp' references -- i.e. references outside of the test data dir -- in
668   * the conf.
669   *
670   * <pre>
671   * Configuration conf = TEST_UTIL.getConfiguration();
672   * for (Iterator&lt;Map.Entry&lt;String, String&gt;&gt; i = conf.iterator(); i.hasNext();) {
673   *   Map.Entry&lt;String, String&gt; e = i.next();
674   *   assertFalse(e.getKey() + " " + e.getValue(), e.getValue().contains("/tmp"));
675   * }
676   * </pre>
677   */
678  private void createDirsAndSetProperties() throws IOException {
679    setupClusterTestDir();
680    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
681    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
682    createDirAndSetProperty("test.cache.data");
683    createDirAndSetProperty("hadoop.tmp.dir");
684    hadoopLogDir = createDirAndSetProperty("hadoop.log.dir");
685    createDirAndSetProperty("mapreduce.cluster.local.dir");
686    createDirAndSetProperty("mapreduce.cluster.temp.dir");
687    enableShortCircuit();
688
689    Path root = getDataTestDirOnTestFS("hadoop");
690    conf.set(MapreduceTestingShim.getMROutputDirProp(),
691      new Path(root, "mapred-output-dir").toString());
692    conf.set("mapreduce.jobtracker.system.dir", new Path(root, "mapred-system-dir").toString());
693    conf.set("mapreduce.jobtracker.staging.root.dir",
694      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
695    conf.set("mapreduce.job.working.dir", new Path(root, "mapred-working-dir").toString());
696    conf.set("yarn.app.mapreduce.am.staging-dir",
697      new Path(root, "mapreduce-am-staging-root-dir").toString());
698
699    // Frustrate yarn's and hdfs's attempts at writing /tmp.
700    // Below is fragile. Make it so we just interpolate any 'tmp' reference.
701    createDirAndSetProperty("yarn.node-labels.fs-store.root-dir");
702    createDirAndSetProperty("yarn.node-attribute.fs-store.root-dir");
703    createDirAndSetProperty("yarn.nodemanager.log-dirs");
704    createDirAndSetProperty("yarn.nodemanager.remote-app-log-dir");
705    createDirAndSetProperty("yarn.timeline-service.entity-group-fs-store.active-dir");
706    createDirAndSetProperty("yarn.timeline-service.entity-group-fs-store.done-dir");
707    createDirAndSetProperty("yarn.nodemanager.remote-app-log-dir");
708    createDirAndSetProperty("dfs.journalnode.edits.dir");
709    createDirAndSetProperty("dfs.datanode.shared.file.descriptor.paths");
710    createDirAndSetProperty("nfs.dump.dir");
711    createDirAndSetProperty("java.io.tmpdir");
712    createDirAndSetProperty("dfs.journalnode.edits.dir");
713    createDirAndSetProperty("dfs.provided.aliasmap.inmemory.leveldb.dir");
714    createDirAndSetProperty("fs.s3a.committer.staging.tmp.path");
715  }
716
717  /**
718   * Check whether the tests should assume NEW_VERSION_BEHAVIOR when creating new column families.
719   * Default to false.
720   */
721  public boolean isNewVersionBehaviorEnabled() {
722    final String propName = "hbase.tests.new.version.behavior";
723    String v = System.getProperty(propName);
724    if (v != null) {
725      return Boolean.parseBoolean(v);
726    }
727    return false;
728  }
729
730  /**
731   * Get the HBase setting for dfs.client.read.shortcircuit from the conf or a system property. This
732   * allows to specify this parameter on the command line. If not set, default is true.
733   */
734  public boolean isReadShortCircuitOn() {
735    final String propName = "hbase.tests.use.shortcircuit.reads";
736    String readOnProp = System.getProperty(propName);
737    if (readOnProp != null) {
738      return Boolean.parseBoolean(readOnProp);
739    } else {
740      return conf.getBoolean(propName, false);
741    }
742  }
743
744  /**
745   * Enable the short circuit read, unless configured differently. Set both HBase and HDFS settings,
746   * including skipping the hdfs checksum checks.
747   */
748  private void enableShortCircuit() {
749    if (isReadShortCircuitOn()) {
750      String curUser = System.getProperty("user.name");
751      LOG.info("read short circuit is ON for user " + curUser);
752      // read short circuit, for hdfs
753      conf.set("dfs.block.local-path-access.user", curUser);
754      // read short circuit, for hbase
755      conf.setBoolean("dfs.client.read.shortcircuit", true);
756      // Skip checking checksum, for the hdfs client and the datanode
757      conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
758    } else {
759      LOG.info("read short circuit is OFF");
760    }
761  }
762
  /**
   * Creates a directory named after the given property under the data test dir, and sets both the
   * system property and the HBase conf entry of that same name to the new directory's path.
   * @param property name used as the relative directory path, the system property and the conf key
   * @return absolute path of the created directory
   */
  private String createDirAndSetProperty(String property) {
    return createDirAndSetProperty(property, property);
  }
766
767  private String createDirAndSetProperty(final String relPath, String property) {
768    String path = getDataTestDir(relPath).toString();
769    System.setProperty(property, path);
770    conf.set(property, path);
771    new File(path).mkdirs();
772    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
773    return path;
774  }
775
776  /**
777   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)} or does nothing.
778   */
779  public void shutdownMiniDFSCluster() throws IOException {
780    if (this.dfsCluster != null) {
781      // The below throws an exception per dn, AsynchronousCloseException.
782      this.dfsCluster.shutdown();
783      dfsCluster = null;
784      dataTestDirOnTestFS = null;
785      CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
786    }
787  }
788
789  /**
790   * Start up a minicluster of hbase, dfs, and zookeeper where WAL's walDir is created separately.
791   * All other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
792   * @param createWALDir Whether to create a new WAL directory.
793   * @return The mini HBase cluster created.
794   * @see #shutdownMiniCluster()
795   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
796   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
797   * @see #startMiniCluster(StartMiniClusterOption)
798   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
799   */
800  @Deprecated
801  public MiniHBaseCluster startMiniCluster(boolean createWALDir) throws Exception {
802    StartMiniClusterOption option =
803      StartMiniClusterOption.builder().createWALDir(createWALDir).build();
804    return startMiniCluster(option);
805  }
806
807  /**
808   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
809   * defined in {@link StartMiniClusterOption.Builder}.
810   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
811   * @param createRootDir Whether to create a new root or data directory path.
812   * @return The mini HBase cluster created.
813   * @see #shutdownMiniCluster()
814   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
815   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
816   * @see #startMiniCluster(StartMiniClusterOption)
817   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
818   */
819  @Deprecated
820  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir) throws Exception {
821    StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(numSlaves)
822      .numDataNodes(numSlaves).createRootDir(createRootDir).build();
823    return startMiniCluster(option);
824  }
825
826  /**
827   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
828   * defined in {@link StartMiniClusterOption.Builder}.
829   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
830   * @param createRootDir Whether to create a new root or data directory path.
831   * @param createWALDir  Whether to create a new WAL directory.
832   * @return The mini HBase cluster created.
833   * @see #shutdownMiniCluster()
834   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
835   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
836   * @see #startMiniCluster(StartMiniClusterOption)
837   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
838   */
839  @Deprecated
840  public MiniHBaseCluster startMiniCluster(int numSlaves, boolean createRootDir,
841    boolean createWALDir) throws Exception {
842    StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(numSlaves)
843      .numDataNodes(numSlaves).createRootDir(createRootDir).createWALDir(createWALDir).build();
844    return startMiniCluster(option);
845  }
846
847  /**
848   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
849   * defined in {@link StartMiniClusterOption.Builder}.
850   * @param numMasters    Master node number.
851   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
852   * @param createRootDir Whether to create a new root or data directory path.
853   * @return The mini HBase cluster created.
854   * @see #shutdownMiniCluster()
855   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
856   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
857   * @see #startMiniCluster(StartMiniClusterOption)
858   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
859   */
860  @Deprecated
861  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, boolean createRootDir)
862    throws Exception {
863    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
864      .numRegionServers(numSlaves).createRootDir(createRootDir).numDataNodes(numSlaves).build();
865    return startMiniCluster(option);
866  }
867
868  /**
869   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
870   * defined in {@link StartMiniClusterOption.Builder}.
871   * @param numMasters Master node number.
872   * @param numSlaves  Slave node number, for both HBase region server and HDFS data node.
873   * @return The mini HBase cluster created.
874   * @see #shutdownMiniCluster()
875   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
876   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
877   * @see #startMiniCluster(StartMiniClusterOption)
878   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
879   */
880  @Deprecated
881  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves) throws Exception {
882    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
883      .numRegionServers(numSlaves).numDataNodes(numSlaves).build();
884    return startMiniCluster(option);
885  }
886
887  /**
888   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
889   * defined in {@link StartMiniClusterOption.Builder}.
890   * @param numMasters    Master node number.
891   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
892   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
893   *                      HDFS data node number.
894   * @param createRootDir Whether to create a new root or data directory path.
895   * @return The mini HBase cluster created.
896   * @see #shutdownMiniCluster()
897   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
898   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
899   * @see #startMiniCluster(StartMiniClusterOption)
900   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
901   */
902  @Deprecated
903  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
904    boolean createRootDir) throws Exception {
905    StartMiniClusterOption option =
906      StartMiniClusterOption.builder().numMasters(numMasters).numRegionServers(numSlaves)
907        .createRootDir(createRootDir).numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
908    return startMiniCluster(option);
909  }
910
911  /**
912   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
913   * defined in {@link StartMiniClusterOption.Builder}.
914   * @param numMasters    Master node number.
915   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
916   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
917   *                      HDFS data node number.
918   * @return The mini HBase cluster created.
919   * @see #shutdownMiniCluster()
920   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
921   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
922   * @see #startMiniCluster(StartMiniClusterOption)
923   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
924   */
925  @Deprecated
926  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts)
927    throws Exception {
928    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
929      .numRegionServers(numSlaves).numDataNodes(numSlaves).dataNodeHosts(dataNodeHosts).build();
930    return startMiniCluster(option);
931  }
932
933  /**
934   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
935   * defined in {@link StartMiniClusterOption.Builder}.
936   * @param numMasters       Master node number.
937   * @param numRegionServers Number of region servers.
938   * @param numDataNodes     Number of datanodes.
939   * @return The mini HBase cluster created.
940   * @see #shutdownMiniCluster()
941   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
942   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
943   * @see #startMiniCluster(StartMiniClusterOption)
944   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
945   */
946  @Deprecated
947  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes)
948    throws Exception {
949    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
950      .numRegionServers(numRegionServers).numDataNodes(numDataNodes).build();
951    return startMiniCluster(option);
952  }
953
954  /**
955   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
956   * defined in {@link StartMiniClusterOption.Builder}.
957   * @param numMasters    Master node number.
958   * @param numSlaves     Slave node number, for both HBase region server and HDFS data node.
959   * @param dataNodeHosts The hostnames of DataNodes to run on. If not null, its size will overwrite
960   *                      HDFS data node number.
961   * @param masterClass   The class to use as HMaster, or null for default.
962   * @param rsClass       The class to use as HRegionServer, or null for default.
963   * @return The mini HBase cluster created.
964   * @see #shutdownMiniCluster()
965   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
966   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
967   * @see #startMiniCluster(StartMiniClusterOption)
968   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
969   */
970  @Deprecated
971  public MiniHBaseCluster startMiniCluster(int numMasters, int numSlaves, String[] dataNodeHosts,
972    Class<? extends HMaster> masterClass,
973    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass) throws Exception {
974    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
975      .masterClass(masterClass).numRegionServers(numSlaves).rsClass(rsClass).numDataNodes(numSlaves)
976      .dataNodeHosts(dataNodeHosts).build();
977    return startMiniCluster(option);
978  }
979
980  /**
981   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
982   * defined in {@link StartMiniClusterOption.Builder}.
983   * @param numMasters       Master node number.
984   * @param numRegionServers Number of region servers.
985   * @param numDataNodes     Number of datanodes.
986   * @param dataNodeHosts    The hostnames of DataNodes to run on. If not null, its size will
987   *                         overwrite HDFS data node number.
988   * @param masterClass      The class to use as HMaster, or null for default.
989   * @param rsClass          The class to use as HRegionServer, or null for default.
990   * @return The mini HBase cluster created.
991   * @see #shutdownMiniCluster()
992   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
993   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
994   * @see #startMiniCluster(StartMiniClusterOption)
995   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
996   */
997  @Deprecated
998  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
999    String[] dataNodeHosts, Class<? extends HMaster> masterClass,
1000    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass) throws Exception {
1001    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
1002      .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass)
1003      .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts).build();
1004    return startMiniCluster(option);
1005  }
1006
1007  /**
1008   * Start up a minicluster of hbase, dfs, and zookeeper. All other options will use default values,
1009   * defined in {@link StartMiniClusterOption.Builder}.
1010   * @param numMasters       Master node number.
1011   * @param numRegionServers Number of region servers.
1012   * @param numDataNodes     Number of datanodes.
1013   * @param dataNodeHosts    The hostnames of DataNodes to run on. If not null, its size will
1014   *                         overwrite HDFS data node number.
1015   * @param masterClass      The class to use as HMaster, or null for default.
1016   * @param rsClass          The class to use as HRegionServer, or null for default.
1017   * @param createRootDir    Whether to create a new root or data directory path.
1018   * @param createWALDir     Whether to create a new WAL directory.
1019   * @return The mini HBase cluster created.
1020   * @see #shutdownMiniCluster()
1021   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1022   *             {@link #startMiniCluster(StartMiniClusterOption)} instead.
1023   * @see #startMiniCluster(StartMiniClusterOption)
1024   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1025   */
1026  @Deprecated
1027  public MiniHBaseCluster startMiniCluster(int numMasters, int numRegionServers, int numDataNodes,
1028    String[] dataNodeHosts, Class<? extends HMaster> masterClass,
1029    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir,
1030    boolean createWALDir) throws Exception {
1031    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
1032      .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass)
1033      .numDataNodes(numDataNodes).dataNodeHosts(dataNodeHosts).createRootDir(createRootDir)
1034      .createWALDir(createWALDir).build();
1035    return startMiniCluster(option);
1036  }
1037
1038  /**
1039   * Start up a minicluster of hbase, dfs and zookeeper clusters with given slave node number. All
1040   * other options will use default values, defined in {@link StartMiniClusterOption.Builder}.
1041   * @param numSlaves slave node number, for both HBase region server and HDFS data node.
1042   * @see #startMiniCluster(StartMiniClusterOption option)
1043   * @see #shutdownMiniDFSCluster()
1044   */
1045  public MiniHBaseCluster startMiniCluster(int numSlaves) throws Exception {
1046    StartMiniClusterOption option =
1047      StartMiniClusterOption.builder().numRegionServers(numSlaves).numDataNodes(numSlaves).build();
1048    return startMiniCluster(option);
1049  }
1050
1051  /**
1052   * Start up a minicluster of hbase, dfs and zookeeper all using default options. Option default
1053   * value can be found in {@link StartMiniClusterOption.Builder}.
1054   * @see #startMiniCluster(StartMiniClusterOption option)
1055   * @see #shutdownMiniDFSCluster()
1056   */
1057  public MiniHBaseCluster startMiniCluster() throws Exception {
1058    return startMiniCluster(StartMiniClusterOption.builder().build());
1059  }
1060
1061  /**
1062   * Start up a mini cluster of hbase, optionally dfs and zookeeper if needed. It modifies
1063   * Configuration. It homes the cluster data directory under a random subdirectory in a directory
1064   * under System property test.build.data, to be cleaned up on exit.
1065   * @see #shutdownMiniDFSCluster()
1066   */
1067  public MiniHBaseCluster startMiniCluster(StartMiniClusterOption option) throws Exception {
1068    LOG.info("Starting up minicluster with option: {}", option);
1069
1070    // If we already put up a cluster, fail.
1071    if (miniClusterRunning) {
1072      throw new IllegalStateException("A mini-cluster is already running");
1073    }
1074    miniClusterRunning = true;
1075
1076    setupClusterTestDir();
1077    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());
1078
1079    // Bring up mini dfs cluster. This spews a bunch of warnings about missing
1080    // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
1081    if (dfsCluster == null) {
1082      LOG.info("STARTING DFS");
1083      dfsCluster = startMiniDFSCluster(option.getNumDataNodes(), option.getDataNodeHosts());
1084    } else {
1085      LOG.info("NOT STARTING DFS");
1086    }
1087
1088    // Start up a zk cluster.
1089    if (getZkCluster() == null) {
1090      startMiniZKCluster(option.getNumZkServers());
1091    }
1092
1093    // Start the MiniHBaseCluster
1094    return startMiniHBaseCluster(option);
1095  }
1096
1097  /**
1098   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
1099   * {@link #startMiniCluster()}. This is useful when doing stepped startup of clusters.
1100   * @return Reference to the hbase mini hbase cluster.
1101   * @see #startMiniCluster(StartMiniClusterOption)
1102   * @see #shutdownMiniHBaseCluster()
1103   */
1104  public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option)
1105    throws IOException, InterruptedException {
1106    // Now do the mini hbase cluster. Set the hbase.rootdir in config.
1107    createRootDir(option.isCreateRootDir());
1108    if (option.isCreateWALDir()) {
1109      createWALRootDir();
1110    }
1111    // Set the hbase.fs.tmp.dir config to make sure that we have some default value. This is
1112    // for tests that do not read hbase-defaults.xml
1113    setHBaseFsTmpDir();
1114
1115    // These settings will make the server waits until this exact number of
1116    // regions servers are connected.
1117    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1) == -1) {
1118      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, option.getNumRegionServers());
1119    }
1120    if (conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1) == -1) {
1121      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, option.getNumRegionServers());
1122    }
1123
1124    // Avoid log flooded with chore execution time, see HBASE-24646 for more details.
1125    Log4jUtils.setLogLevel(org.apache.hadoop.hbase.ScheduledChore.class.getName(), "INFO");
1126
1127    Configuration c = new Configuration(this.conf);
1128    TraceUtil.initTracer(c);
1129    this.hbaseCluster = new MiniHBaseCluster(c, option.getNumMasters(),
1130      option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(),
1131      option.getMasterClass(), option.getRsClass());
1132    // Populate the master address configuration from mini cluster configuration.
1133    conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c));
1134    // Don't leave here till we've done a successful scan of the hbase:meta
1135    try (Table t = getConnection().getTable(TableName.META_TABLE_NAME);
1136      ResultScanner s = t.getScanner(new Scan())) {
1137      for (;;) {
1138        if (s.next() == null) {
1139          break;
1140        }
1141      }
1142    }
1143
1144    getAdmin(); // create immediately the hbaseAdmin
1145    LOG.info("Minicluster is up; activeMaster={}", getHBaseCluster().getMaster());
1146
1147    return (MiniHBaseCluster) hbaseCluster;
1148  }
1149
1150  /**
1151   * Starts up mini hbase cluster using default options. Default options can be found in
1152   * {@link StartMiniClusterOption.Builder}.
1153   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1154   * @see #shutdownMiniHBaseCluster()
1155   */
1156  public MiniHBaseCluster startMiniHBaseCluster() throws IOException, InterruptedException {
1157    return startMiniHBaseCluster(StartMiniClusterOption.builder().build());
1158  }
1159
1160  /**
1161   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
1162   * {@link #startMiniCluster()}. All other options will use default values, defined in
1163   * {@link StartMiniClusterOption.Builder}.
1164   * @param numMasters       Master node number.
1165   * @param numRegionServers Number of region servers.
1166   * @return The mini HBase cluster created.
1167   * @see #shutdownMiniHBaseCluster()
1168   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1169   *             {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
1170   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1171   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1172   */
1173  @Deprecated
1174  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers)
1175    throws IOException, InterruptedException {
1176    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
1177      .numRegionServers(numRegionServers).build();
1178    return startMiniHBaseCluster(option);
1179  }
1180
1181  /**
1182   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
1183   * {@link #startMiniCluster()}. All other options will use default values, defined in
1184   * {@link StartMiniClusterOption.Builder}.
1185   * @param numMasters       Master node number.
1186   * @param numRegionServers Number of region servers.
1187   * @param rsPorts          Ports that RegionServer should use.
1188   * @return The mini HBase cluster created.
1189   * @see #shutdownMiniHBaseCluster()
1190   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1191   *             {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
1192   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1193   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1194   */
1195  @Deprecated
1196  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
1197    List<Integer> rsPorts) throws IOException, InterruptedException {
1198    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
1199      .numRegionServers(numRegionServers).rsPorts(rsPorts).build();
1200    return startMiniHBaseCluster(option);
1201  }
1202
1203  /**
1204   * Starts up mini hbase cluster. Usually you won't want this. You'll usually want
1205   * {@link #startMiniCluster()}. All other options will use default values, defined in
1206   * {@link StartMiniClusterOption.Builder}.
1207   * @param numMasters       Master node number.
1208   * @param numRegionServers Number of region servers.
1209   * @param rsPorts          Ports that RegionServer should use.
1210   * @param masterClass      The class to use as HMaster, or null for default.
1211   * @param rsClass          The class to use as HRegionServer, or null for default.
1212   * @param createRootDir    Whether to create a new root or data directory path.
1213   * @param createWALDir     Whether to create a new WAL directory.
1214   * @return The mini HBase cluster created.
1215   * @see #shutdownMiniHBaseCluster()
1216   * @deprecated since 2.2.0 and will be removed in 4.0.0. Use
1217   *             {@link #startMiniHBaseCluster(StartMiniClusterOption)} instead.
1218   * @see #startMiniHBaseCluster(StartMiniClusterOption)
1219   * @see <a href="https://issues.apache.org/jira/browse/HBASE-21071">HBASE-21071</a>
1220   */
1221  @Deprecated
1222  public MiniHBaseCluster startMiniHBaseCluster(int numMasters, int numRegionServers,
1223    List<Integer> rsPorts, Class<? extends HMaster> masterClass,
1224    Class<? extends MiniHBaseCluster.MiniHBaseClusterRegionServer> rsClass, boolean createRootDir,
1225    boolean createWALDir) throws IOException, InterruptedException {
1226    StartMiniClusterOption option = StartMiniClusterOption.builder().numMasters(numMasters)
1227      .masterClass(masterClass).numRegionServers(numRegionServers).rsClass(rsClass).rsPorts(rsPorts)
1228      .createRootDir(createRootDir).createWALDir(createWALDir).build();
1229    return startMiniHBaseCluster(option);
1230  }
1231
1232  /**
1233   * Starts the hbase cluster up again after shutting it down previously in a test. Use this if you
1234   * want to keep dfs/zk up and just stop/start hbase.
1235   * @param servers number of region servers
1236   */
1237  public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
1238    this.restartHBaseCluster(servers, null);
1239  }
1240
1241  public void restartHBaseCluster(int servers, List<Integer> ports)
1242    throws IOException, InterruptedException {
1243    StartMiniClusterOption option =
1244      StartMiniClusterOption.builder().numRegionServers(servers).rsPorts(ports).build();
1245    restartHBaseCluster(option);
1246    invalidateConnection();
1247  }
1248
1249  public void restartHBaseCluster(StartMiniClusterOption option)
1250    throws IOException, InterruptedException {
1251    closeConnection();
1252    this.hbaseCluster = new MiniHBaseCluster(this.conf, option.getNumMasters(),
1253      option.getNumAlwaysStandByMasters(), option.getNumRegionServers(), option.getRsPorts(),
1254      option.getMasterClass(), option.getRsClass());
1255    // Don't leave here till we've done a successful scan of the hbase:meta
1256    Connection conn = ConnectionFactory.createConnection(this.conf);
1257    Table t = conn.getTable(TableName.META_TABLE_NAME);
1258    ResultScanner s = t.getScanner(new Scan());
1259    while (s.next() != null) {
1260      // do nothing
1261    }
1262    LOG.info("HBase has been restarted");
1263    s.close();
1264    t.close();
1265    conn.close();
1266  }
1267
1268  /**
1269   * @return Current mini hbase cluster. Only has something in it after a call to
1270   *         {@link #startMiniCluster()}.
1271   * @see #startMiniCluster()
1272   */
1273  public MiniHBaseCluster getMiniHBaseCluster() {
1274    if (this.hbaseCluster == null || this.hbaseCluster instanceof MiniHBaseCluster) {
1275      return (MiniHBaseCluster) this.hbaseCluster;
1276    }
1277    throw new RuntimeException(
1278      hbaseCluster + " not an instance of " + MiniHBaseCluster.class.getName());
1279  }
1280
1281  /**
1282   * Stops mini hbase, zk, and hdfs clusters.
1283   * @see #startMiniCluster(int)
1284   */
1285  public void shutdownMiniCluster() throws IOException {
1286    LOG.info("Shutting down minicluster");
1287    shutdownMiniHBaseCluster();
1288    shutdownMiniDFSCluster();
1289    shutdownMiniZKCluster();
1290
1291    cleanupTestDir();
1292    miniClusterRunning = false;
1293    LOG.info("Minicluster is down");
1294  }
1295
1296  /**
1297   * Shutdown HBase mini cluster.Does not shutdown zk or dfs if running.
1298   * @throws java.io.IOException in case command is unsuccessful
1299   */
1300  public void shutdownMiniHBaseCluster() throws IOException {
1301    cleanup();
1302    if (this.hbaseCluster != null) {
1303      this.hbaseCluster.shutdown();
1304      // Wait till hbase is down before going on to shutdown zk.
1305      this.hbaseCluster.waitUntilShutDown();
1306      this.hbaseCluster = null;
1307    }
1308    if (zooKeeperWatcher != null) {
1309      zooKeeperWatcher.close();
1310      zooKeeperWatcher = null;
1311    }
1312  }
1313
1314  /**
1315   * Abruptly Shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
1316   * @throws java.io.IOException throws in case command is unsuccessful
1317   */
1318  public void killMiniHBaseCluster() throws IOException {
1319    cleanup();
1320    if (this.hbaseCluster != null) {
1321      getMiniHBaseCluster().killAll();
1322      this.hbaseCluster = null;
1323    }
1324    if (zooKeeperWatcher != null) {
1325      zooKeeperWatcher.close();
1326      zooKeeperWatcher = null;
1327    }
1328  }
1329
  /**
   * Close hbase admin, close current connection and reset MIN/MAX configs for RS so a subsequent
   * cluster start recomputes them from its own options.
   */
  private void cleanup() throws IOException {
    closeConnection();
    // unset the configuration for MIN and MAX RS to start; -1 means "unset" to
    // startMiniHBaseCluster, which only assigns these when they are -1.
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
    conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
  }
1337
1338  /**
1339   * Returns the path to the default root dir the minicluster uses. If <code>create</code> is true,
1340   * a new root directory path is fetched irrespective of whether it has been fetched before or not.
1341   * If false, previous path is used. Note: this does not cause the root dir to be created.
1342   * @return Fully qualified path for the default hbase root dir
1343   */
1344  public Path getDefaultRootDirPath(boolean create) throws IOException {
1345    if (!create) {
1346      return getDataTestDirOnTestFS();
1347    } else {
1348      return getNewDataTestDirOnTestFS();
1349    }
1350  }
1351
1352  /**
1353   * Same as {{@link HBaseTestingUtility#getDefaultRootDirPath(boolean create)} except that
1354   * <code>create</code> flag is false. Note: this does not cause the root dir to be created.
1355   * @return Fully qualified path for the default hbase root dir
1356   */
1357  public Path getDefaultRootDirPath() throws IOException {
1358    return getDefaultRootDirPath(false);
1359  }
1360
1361  /**
1362   * Creates an hbase rootdir in user home directory. Also creates hbase version file. Normally you
1363   * won't make use of this method. Root hbasedir is created for you as part of mini cluster
1364   * startup. You'd only use this method if you were doing manual operation.
1365   * @param create This flag decides whether to get a new root or data directory path or not, if it
1366   *               has been fetched already. Note : Directory will be made irrespective of whether
1367   *               path has been fetched or not. If directory already exists, it will be overwritten
1368   * @return Fully qualified path to hbase root dir
1369   */
1370  public Path createRootDir(boolean create) throws IOException {
1371    FileSystem fs = FileSystem.get(this.conf);
1372    Path hbaseRootdir = getDefaultRootDirPath(create);
1373    CommonFSUtils.setRootDir(this.conf, hbaseRootdir);
1374    fs.mkdirs(hbaseRootdir);
1375    FSUtils.setVersion(fs, hbaseRootdir);
1376    return hbaseRootdir;
1377  }
1378
1379  /**
1380   * Same as {@link HBaseTestingUtility#createRootDir(boolean create)} except that
1381   * <code>create</code> flag is false.
1382   * @return Fully qualified path to hbase root dir
1383   */
1384  public Path createRootDir() throws IOException {
1385    return createRootDir(false);
1386  }
1387
1388  /**
1389   * Creates a hbase walDir in the user's home directory. Normally you won't make use of this
1390   * method. Root hbaseWALDir is created for you as part of mini cluster startup. You'd only use
1391   * this method if you were doing manual operation.
1392   * @return Fully qualified path to hbase root dir
1393   */
1394  public Path createWALRootDir() throws IOException {
1395    FileSystem fs = FileSystem.get(this.conf);
1396    Path walDir = getNewDataTestDirOnTestFS();
1397    CommonFSUtils.setWALRootDir(this.conf, walDir);
1398    fs.mkdirs(walDir);
1399    return walDir;
1400  }
1401
1402  private void setHBaseFsTmpDir() throws IOException {
1403    String hbaseFsTmpDirInString = this.conf.get("hbase.fs.tmp.dir");
1404    if (hbaseFsTmpDirInString == null) {
1405      this.conf.set("hbase.fs.tmp.dir", getDataTestDirOnTestFS("hbase-staging").toString());
1406      LOG.info("Setting hbase.fs.tmp.dir to " + this.conf.get("hbase.fs.tmp.dir"));
1407    } else {
1408      LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString);
1409    }
1410  }
1411
1412  /**
1413   * Flushes all caches in the mini hbase cluster
1414   */
1415  public void flush() throws IOException {
1416    getMiniHBaseCluster().flushcache();
1417  }
1418
1419  /**
1420   * Flushes all caches in the mini hbase cluster
1421   */
1422  public void flush(TableName tableName) throws IOException {
1423    getMiniHBaseCluster().flushcache(tableName);
1424  }
1425
1426  /**
1427   * Compact all regions in the mini hbase cluster
1428   */
1429  public void compact(boolean major) throws IOException {
1430    getMiniHBaseCluster().compact(major);
1431  }
1432
1433  /**
1434   * Compact all of a table's reagion in the mini hbase cluster
1435   */
1436  public void compact(TableName tableName, boolean major) throws IOException {
1437    getMiniHBaseCluster().compact(tableName, major);
1438  }
1439
1440  /**
1441   * Create a table.
1442   * @return A Table instance for the created table.
1443   */
1444  public Table createTable(TableName tableName, String family) throws IOException {
1445    return createTable(tableName, new String[] { family });
1446  }
1447
1448  /**
1449   * Create a table.
1450   * @return A Table instance for the created table.
1451   */
1452  public Table createTable(TableName tableName, String[] families) throws IOException {
1453    List<byte[]> fams = new ArrayList<>(families.length);
1454    for (String family : families) {
1455      fams.add(Bytes.toBytes(family));
1456    }
1457    return createTable(tableName, fams.toArray(new byte[0][]));
1458  }
1459
1460  /**
1461   * Create a table.
1462   * @return A Table instance for the created table.
1463   */
1464  public Table createTable(TableName tableName, byte[] family) throws IOException {
1465    return createTable(tableName, new byte[][] { family });
1466  }
1467
1468  /**
1469   * Create a table with multiple regions.
1470   * @return A Table instance for the created table.
1471   */
1472  public Table createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
1473    throws IOException {
1474    if (numRegions < 3) throw new IOException("Must create at least 3 regions");
1475    byte[] startKey = Bytes.toBytes("aaaaa");
1476    byte[] endKey = Bytes.toBytes("zzzzz");
1477    byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
1478
1479    return createTable(tableName, new byte[][] { family }, splitKeys);
1480  }
1481
1482  /**
1483   * Create a table.
1484   * @return A Table instance for the created table.
1485   */
1486  public Table createTable(TableName tableName, byte[][] families) throws IOException {
1487    return createTable(tableName, families, (byte[][]) null);
1488  }
1489
1490  /**
1491   * Create a table with multiple regions.
1492   * @return A Table instance for the created table.
1493   */
1494  public Table createMultiRegionTable(TableName tableName, byte[][] families) throws IOException {
1495    return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE);
1496  }
1497
1498  /**
1499   * Create a table with multiple regions.
1500   * @param replicaCount replica count.
1501   * @return A Table instance for the created table.
1502   */
1503  public Table createMultiRegionTable(TableName tableName, int replicaCount, byte[][] families)
1504    throws IOException {
1505    return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE, replicaCount);
1506  }
1507
1508  /**
1509   * Create a table.
1510   * @return A Table instance for the created table.
1511   */
1512  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
1513    throws IOException {
1514    return createTable(tableName, families, splitKeys, 1, new Configuration(getConfiguration()));
1515  }
1516
1517  /**
1518   * Create a table.
1519   * @param tableName    the table name
1520   * @param families     the families
1521   * @param splitKeys    the splitkeys
1522   * @param replicaCount the region replica count
1523   * @return A Table instance for the created table.
1524   * @throws IOException throws IOException
1525   */
1526  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
1527    int replicaCount) throws IOException {
1528    return createTable(tableName, families, splitKeys, replicaCount,
1529      new Configuration(getConfiguration()));
1530  }
1531
1532  public Table createTable(TableName tableName, byte[][] families, int numVersions, byte[] startKey,
1533    byte[] endKey, int numRegions) throws IOException {
1534    HTableDescriptor desc = createTableDescriptor(tableName, families, numVersions);
1535
1536    getAdmin().createTable(desc, startKey, endKey, numRegions);
1537    // HBaseAdmin only waits for regions to appear in hbase:meta we
1538    // should wait until they are assigned
1539    waitUntilAllRegionsAssigned(tableName);
1540    return getConnection().getTable(tableName);
1541  }
1542
1543  /**
1544   * Create a table.
1545   * @param c Configuration to use
1546   * @return A Table instance for the created table.
1547   */
1548  public Table createTable(TableDescriptor htd, byte[][] families, Configuration c)
1549    throws IOException {
1550    return createTable(htd, families, null, c);
1551  }
1552
1553  /**
1554   * Create a table.
1555   * @param htd       table descriptor
1556   * @param families  array of column families
1557   * @param splitKeys array of split keys
1558   * @param c         Configuration to use
1559   * @return A Table instance for the created table.
1560   * @throws IOException if getAdmin or createTable fails
1561   */
1562  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
1563    Configuration c) throws IOException {
1564    // Disable blooms (they are on by default as of 0.95) but we disable them here because
1565    // tests have hard coded counts of what to expect in block cache, etc., and blooms being
1566    // on is interfering.
1567    return createTable(htd, families, splitKeys, BloomType.NONE, HConstants.DEFAULT_BLOCKSIZE, c);
1568  }
1569
1570  /**
1571   * Create a table.
1572   * @param htd       table descriptor
1573   * @param families  array of column families
1574   * @param splitKeys array of split keys
1575   * @param type      Bloom type
1576   * @param blockSize block size
1577   * @param c         Configuration to use
1578   * @return A Table instance for the created table.
1579   * @throws IOException if getAdmin or createTable fails
1580   */
1581
1582  public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
1583    BloomType type, int blockSize, Configuration c) throws IOException {
1584    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
1585    for (byte[] family : families) {
1586      ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(family)
1587        .setBloomFilterType(type).setBlocksize(blockSize);
1588      if (isNewVersionBehaviorEnabled()) {
1589        cfdb.setNewVersionBehavior(true);
1590      }
1591      builder.setColumnFamily(cfdb.build());
1592    }
1593    TableDescriptor td = builder.build();
1594    getAdmin().createTable(td, splitKeys);
1595    // HBaseAdmin only waits for regions to appear in hbase:meta
1596    // we should wait until they are assigned
1597    waitUntilAllRegionsAssigned(td.getTableName());
1598    return getConnection().getTable(td.getTableName());
1599  }
1600
1601  /**
1602   * Create a table.
1603   * @param htd       table descriptor
1604   * @param splitRows array of split keys
1605   * @return A Table instance for the created table.
1606   */
1607  public Table createTable(TableDescriptor htd, byte[][] splitRows) throws IOException {
1608    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
1609    if (isNewVersionBehaviorEnabled()) {
1610      for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
1611        builder.setColumnFamily(
1612          ColumnFamilyDescriptorBuilder.newBuilder(family).setNewVersionBehavior(true).build());
1613      }
1614    }
1615    getAdmin().createTable(builder.build(), splitRows);
1616    // HBaseAdmin only waits for regions to appear in hbase:meta
1617    // we should wait until they are assigned
1618    waitUntilAllRegionsAssigned(htd.getTableName());
1619    return getConnection().getTable(htd.getTableName());
1620  }
1621
1622  /**
1623   * Create a table.
1624   * @param tableName    the table name
1625   * @param families     the families
1626   * @param splitKeys    the split keys
1627   * @param replicaCount the replica count
1628   * @param c            Configuration to use
1629   * @return A Table instance for the created table.
1630   */
1631  public Table createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
1632    int replicaCount, final Configuration c) throws IOException {
1633    HTableDescriptor htd = new HTableDescriptor(tableName);
1634    htd.setRegionReplication(replicaCount);
1635    return createTable(htd, families, splitKeys, c);
1636  }
1637
1638  /**
1639   * Create a table.
1640   * @return A Table instance for the created table.
1641   */
1642  public Table createTable(TableName tableName, byte[] family, int numVersions) throws IOException {
1643    return createTable(tableName, new byte[][] { family }, numVersions);
1644  }
1645
1646  /**
1647   * Create a table.
1648   * @return A Table instance for the created table.
1649   */
1650  public Table createTable(TableName tableName, byte[][] families, int numVersions)
1651    throws IOException {
1652    return createTable(tableName, families, numVersions, (byte[][]) null);
1653  }
1654
1655  /**
1656   * Create a table.
1657   * @return A Table instance for the created table.
1658   */
1659  public Table createTable(TableName tableName, byte[][] families, int numVersions,
1660    byte[][] splitKeys) throws IOException {
1661    HTableDescriptor desc = new HTableDescriptor(tableName);
1662    for (byte[] family : families) {
1663      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
1664      if (isNewVersionBehaviorEnabled()) {
1665        hcd.setNewVersionBehavior(true);
1666      }
1667      desc.addFamily(hcd);
1668    }
1669    getAdmin().createTable(desc, splitKeys);
1670    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1671    // assigned
1672    waitUntilAllRegionsAssigned(tableName);
1673    return getConnection().getTable(tableName);
1674  }
1675
1676  /**
1677   * Create a table with multiple regions.
1678   * @return A Table instance for the created table.
1679   */
1680  public Table createMultiRegionTable(TableName tableName, byte[][] families, int numVersions)
1681    throws IOException {
1682    return createTable(tableName, families, numVersions, KEYS_FOR_HBA_CREATE_TABLE);
1683  }
1684
1685  /**
1686   * Create a table.
1687   * @return A Table instance for the created table.
1688   */
1689  public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize)
1690    throws IOException {
1691    HTableDescriptor desc = new HTableDescriptor(tableName);
1692    for (byte[] family : families) {
1693      HColumnDescriptor hcd =
1694        new HColumnDescriptor(family).setMaxVersions(numVersions).setBlocksize(blockSize);
1695      if (isNewVersionBehaviorEnabled()) {
1696        hcd.setNewVersionBehavior(true);
1697      }
1698      desc.addFamily(hcd);
1699    }
1700    getAdmin().createTable(desc);
1701    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1702    // assigned
1703    waitUntilAllRegionsAssigned(tableName);
1704    return getConnection().getTable(tableName);
1705  }
1706
1707  public Table createTable(TableName tableName, byte[][] families, int numVersions, int blockSize,
1708    String cpName) throws IOException {
1709    HTableDescriptor desc = new HTableDescriptor(tableName);
1710    for (byte[] family : families) {
1711      HColumnDescriptor hcd =
1712        new HColumnDescriptor(family).setMaxVersions(numVersions).setBlocksize(blockSize);
1713      if (isNewVersionBehaviorEnabled()) {
1714        hcd.setNewVersionBehavior(true);
1715      }
1716      desc.addFamily(hcd);
1717    }
1718    if (cpName != null) {
1719      desc.addCoprocessor(cpName);
1720    }
1721    getAdmin().createTable(desc);
1722    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1723    // assigned
1724    waitUntilAllRegionsAssigned(tableName);
1725    return getConnection().getTable(tableName);
1726  }
1727
1728  /**
1729   * Create a table.
1730   * @return A Table instance for the created table.
1731   */
1732  public Table createTable(TableName tableName, byte[][] families, int[] numVersions)
1733    throws IOException {
1734    HTableDescriptor desc = new HTableDescriptor(tableName);
1735    int i = 0;
1736    for (byte[] family : families) {
1737      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions[i]);
1738      if (isNewVersionBehaviorEnabled()) {
1739        hcd.setNewVersionBehavior(true);
1740      }
1741      desc.addFamily(hcd);
1742      i++;
1743    }
1744    getAdmin().createTable(desc);
1745    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1746    // assigned
1747    waitUntilAllRegionsAssigned(tableName);
1748    return getConnection().getTable(tableName);
1749  }
1750
1751  /**
1752   * Create a table.
1753   * @return A Table instance for the created table.
1754   */
1755  public Table createTable(TableName tableName, byte[] family, byte[][] splitRows)
1756    throws IOException {
1757    HTableDescriptor desc = new HTableDescriptor(tableName);
1758    HColumnDescriptor hcd = new HColumnDescriptor(family);
1759    if (isNewVersionBehaviorEnabled()) {
1760      hcd.setNewVersionBehavior(true);
1761    }
1762    desc.addFamily(hcd);
1763    getAdmin().createTable(desc, splitRows);
1764    // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
1765    // assigned
1766    waitUntilAllRegionsAssigned(tableName);
1767    return getConnection().getTable(tableName);
1768  }
1769
1770  /**
1771   * Create a table with multiple regions.
1772   * @return A Table instance for the created table.
1773   */
1774  public Table createMultiRegionTable(TableName tableName, byte[] family) throws IOException {
1775    return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE);
1776  }
1777
1778  /**
1779   * Modify a table, synchronous. Waiting logic similar to that of {@code admin.rb#alter_status}.
1780   */
1781  @SuppressWarnings("serial")
1782  public static void modifyTableSync(Admin admin, TableDescriptor desc)
1783    throws IOException, InterruptedException {
1784    admin.modifyTable(desc);
1785    Pair<Integer, Integer> status = new Pair<Integer, Integer>() {
1786      {
1787        setFirst(0);
1788        setSecond(0);
1789      }
1790    };
1791    int i = 0;
1792    do {
1793      status = admin.getAlterStatus(desc.getTableName());
1794      if (status.getSecond() != 0) {
1795        LOG.debug(
1796          status.getSecond() - status.getFirst() + "/" + status.getSecond() + " regions updated.");
1797        Thread.sleep(1 * 1000L);
1798      } else {
1799        LOG.debug("All regions updated.");
1800        break;
1801      }
1802    } while (status.getFirst() != 0 && i++ < 500);
1803    if (status.getFirst() != 0) {
1804      throw new IOException("Failed to update all regions even after 500 seconds.");
1805    }
1806  }
1807
1808  /**
1809   * Set the number of Region replicas.
1810   */
1811  public static void setReplicas(Admin admin, TableName table, int replicaCount)
1812    throws IOException, InterruptedException {
1813    TableDescriptor desc = TableDescriptorBuilder.newBuilder(admin.getDescriptor(table))
1814      .setRegionReplication(replicaCount).build();
1815    admin.modifyTable(desc);
1816  }
1817
1818  /**
1819   * Drop an existing table
1820   * @param tableName existing table
1821   */
1822  public void deleteTable(TableName tableName) throws IOException {
1823    try {
1824      getAdmin().disableTable(tableName);
1825    } catch (TableNotEnabledException e) {
1826      LOG.debug("Table: " + tableName + " already disabled, so just deleting it.");
1827    }
1828    getAdmin().deleteTable(tableName);
1829  }
1830
1831  /**
1832   * Drop an existing table
1833   * @param tableName existing table
1834   */
1835  public void deleteTableIfAny(TableName tableName) throws IOException {
1836    try {
1837      deleteTable(tableName);
1838    } catch (TableNotFoundException e) {
1839      // ignore
1840    }
1841  }
1842
1843  // ==========================================================================
1844  // Canned table and table descriptor creation
1845  // TODO replace HBaseTestCase
1846
1847  public final static byte[] fam1 = Bytes.toBytes("colfamily11");
1848  public final static byte[] fam2 = Bytes.toBytes("colfamily21");
1849  public final static byte[] fam3 = Bytes.toBytes("colfamily31");
1850  public static final byte[][] COLUMNS = { fam1, fam2, fam3 };
1851  private static final int MAXVERSIONS = 3;
1852
1853  public static final char FIRST_CHAR = 'a';
1854  public static final char LAST_CHAR = 'z';
1855  public static final byte[] START_KEY_BYTES = { FIRST_CHAR, FIRST_CHAR, FIRST_CHAR };
1856  public static final String START_KEY = new String(START_KEY_BYTES, HConstants.UTF8_CHARSET);
1857
1858  /**
1859   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
1860   *             {@link #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)} instead.
1861   * @see #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)
1862   * @see <a href="https://issues.apache.org/jira/browse/HBASE-13893">HBASE-13893</a>
1863   */
1864  @Deprecated
1865  public HTableDescriptor createTableDescriptor(final String name, final int minVersions,
1866    final int versions, final int ttl, KeepDeletedCells keepDeleted) {
1867    return this.createTableDescriptor(TableName.valueOf(name), minVersions, versions, ttl,
1868      keepDeleted);
1869  }
1870
1871  /**
1872   * Create a table of name <code>name</code>.
1873   * @param name Name to give table.
1874   * @return Column descriptor.
1875   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
1876   *             {@link #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)} instead.
1877   * @see #createTableDescriptor(TableName, int, int, int, KeepDeletedCells)
1878   * @see <a href="https://issues.apache.org/jira/browse/HBASE-13893">HBASE-13893</a>
1879   */
1880  @Deprecated
1881  public HTableDescriptor createTableDescriptor(final String name) {
1882    return createTableDescriptor(TableName.valueOf(name), HColumnDescriptor.DEFAULT_MIN_VERSIONS,
1883      MAXVERSIONS, HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1884  }
1885
1886  public HTableDescriptor createTableDescriptor(final TableName name, final int minVersions,
1887    final int versions, final int ttl, KeepDeletedCells keepDeleted) {
1888    HTableDescriptor htd = new HTableDescriptor(name);
1889    for (byte[] cfName : new byte[][] { fam1, fam2, fam3 }) {
1890      HColumnDescriptor hcd =
1891        new HColumnDescriptor(cfName).setMinVersions(minVersions).setMaxVersions(versions)
1892          .setKeepDeletedCells(keepDeleted).setBlockCacheEnabled(false).setTimeToLive(ttl);
1893      if (isNewVersionBehaviorEnabled()) {
1894        hcd.setNewVersionBehavior(true);
1895      }
1896      htd.addFamily(hcd);
1897    }
1898    return htd;
1899  }
1900
1901  /**
1902   * Create a table of name <code>name</code>.
1903   * @param name Name to give table.
1904   * @return Column descriptor.
1905   */
1906  public HTableDescriptor createTableDescriptor(final TableName name) {
1907    return createTableDescriptor(name, HColumnDescriptor.DEFAULT_MIN_VERSIONS, MAXVERSIONS,
1908      HConstants.FOREVER, HColumnDescriptor.DEFAULT_KEEP_DELETED);
1909  }
1910
1911  public HTableDescriptor createTableDescriptor(final TableName tableName, byte[] family) {
1912    return createTableDescriptor(tableName, new byte[][] { family }, 1);
1913  }
1914
1915  public HTableDescriptor createTableDescriptor(final TableName tableName, byte[][] families,
1916    int maxVersions) {
1917    HTableDescriptor desc = new HTableDescriptor(tableName);
1918    for (byte[] family : families) {
1919      HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(maxVersions);
1920      if (isNewVersionBehaviorEnabled()) {
1921        hcd.setNewVersionBehavior(true);
1922      }
1923      desc.addFamily(hcd);
1924    }
1925    return desc;
1926  }
1927
1928  /**
1929   * Create an HRegion that writes to the local tmp dirs
1930   * @param desc     a table descriptor indicating which table the region belongs to
1931   * @param startKey the start boundary of the region
1932   * @param endKey   the end boundary of the region
1933   * @return a region that writes to local dir for testing
1934   */
1935  public HRegion createLocalHRegion(TableDescriptor desc, byte[] startKey, byte[] endKey)
1936    throws IOException {
1937    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
1938    return createLocalHRegion(hri, desc);
1939  }
1940
1941  /**
1942   * Create an HRegion that writes to the local tmp dirs. Creates the WAL for you. Be sure to call
1943   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when you're finished with it.
1944   */
1945  public HRegion createLocalHRegion(RegionInfo info, TableDescriptor desc) throws IOException {
1946    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), desc);
1947  }
1948
1949  /**
1950   * Create an HRegion that writes to the local tmp dirs with specified wal
1951   * @param info regioninfo
1952   * @param conf configuration
1953   * @param desc table descriptor
1954   * @param wal  wal for this region.
1955   * @return created hregion
1956   */
1957  public HRegion createLocalHRegion(RegionInfo info, Configuration conf, TableDescriptor desc,
1958    WAL wal) throws IOException {
1959    return HRegion.createHRegion(info, getDataTestDir(), conf, desc, wal);
1960  }
1961
1962  /**
1963   * Create an HRegion that writes to the local tmp dirs with specified wal
1964   * @param info regioninfo
1965   * @param info configuration
1966   * @param desc table descriptor
1967   * @param wal  wal for this region.
1968   * @return created hregion
1969   */
1970  public HRegion createLocalHRegion(HRegionInfo info, Configuration conf, HTableDescriptor desc,
1971    WAL wal) throws IOException {
1972    return HRegion.createHRegion(info, getDataTestDir(), conf, desc, wal);
1973  }
1974
1975  /**
1976   * @param tableName     the name of the table
1977   * @param startKey      the start key of the region
1978   * @param stopKey       the stop key of the region
1979   * @param callingMethod the name of the calling method probably a test method
1980   * @param conf          the configuration to use
1981   * @param isReadOnly    {@code true} if the table is read only, {@code false} otherwise
1982   * @param families      the column families to use
1983   * @throws IOException if an IO problem is encountered
1984   * @return A region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)}
1985   *         when done.
1986   * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #createLocalHRegion(TableName,
1987   *             byte[], byte[], boolean, Durability, WAL, byte[]...)} instead.
1988   * @see #createLocalHRegion(TableName, byte[], byte[], boolean, Durability, WAL, byte[]...)
1989   * @see <a href="https://issues.apache.org/jira/browse/HBASE-13893">HBASE-13893</a>
1990   */
1991  @Deprecated
1992  public HRegion createLocalHRegion(byte[] tableName, byte[] startKey, byte[] stopKey,
1993    String callingMethod, Configuration conf, boolean isReadOnly, Durability durability, WAL wal,
1994    byte[]... families) throws IOException {
1995    return createLocalHRegion(TableName.valueOf(tableName), startKey, stopKey, conf, isReadOnly,
1996      durability, wal, families);
1997  }
1998
1999  /**
2000   * Return a region on which you must call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)}
2001   * when done.
2002   */
2003  public HRegion createLocalHRegion(TableName tableName, byte[] startKey, byte[] stopKey,
2004    Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... families)
2005    throws IOException {
2006    return createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey, conf, isReadOnly,
2007      durability, wal, null, families);
2008  }
2009
  /**
   * Create an HRegion writing to the local tmp dirs, with per-family in-memory compaction
   * controlled by the {@code compactedMemStore} array.
   */
  public HRegion createLocalHRegionWithInMemoryFlags(TableName tableName, byte[] startKey,
    byte[] stopKey, Configuration conf, boolean isReadOnly, Durability durability, WAL wal,
    boolean[] compactedMemStore, byte[]... families) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.setReadOnly(isReadOnly);
    int i = 0;
    for (byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      // NOTE(review): only the array's LENGTH is consulted — the boolean values inside
      // compactedMemStore are never read, so every family whose index is in range gets
      // BASIC in-memory compaction regardless of the flag value. Confirm this is intended.
      if (compactedMemStore != null && i < compactedMemStore.length) {
        hcd.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
      } else {
        hcd.setInMemoryCompaction(MemoryCompactionPolicy.NONE);

      }
      i++;
      // Retain every cell version (MAX_VALUE); an older comment here claimed "three versions".
      hcd.setMaxVersions(Integer.MAX_VALUE);
      htd.addFamily(hcd);
    }
    htd.setDurability(durability);
    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false);
    return createLocalHRegion(info, conf, htd, wal);
  }
2033
2034  //
2035  // ==========================================================================
2036
2037  /**
2038   * Provide an existing table name to truncate. Scans the table and issues a delete for each row
2039   * read.
2040   * @param tableName existing table
2041   * @return HTable to that new table
2042   */
2043  public Table deleteTableData(TableName tableName) throws IOException {
2044    Table table = getConnection().getTable(tableName);
2045    Scan scan = new Scan();
2046    ResultScanner resScan = table.getScanner(scan);
2047    for (Result res : resScan) {
2048      Delete del = new Delete(res.getRow());
2049      table.delete(del);
2050    }
2051    resScan = table.getScanner(scan);
2052    resScan.close();
2053    return table;
2054  }
2055
2056  /**
2057   * Truncate a table using the admin command. Effectively disables, deletes, and recreates the
2058   * table.
2059   * @param tableName       table which must exist.
2060   * @param preserveRegions keep the existing split points
2061   * @return HTable for the new table
2062   */
2063  public Table truncateTable(final TableName tableName, final boolean preserveRegions)
2064    throws IOException {
2065    Admin admin = getAdmin();
2066    if (!admin.isTableDisabled(tableName)) {
2067      admin.disableTable(tableName);
2068    }
2069    admin.truncateTable(tableName, preserveRegions);
2070    return getConnection().getTable(tableName);
2071  }
2072
2073  /**
2074   * Truncate a table using the admin command. Effectively disables, deletes, and recreates the
2075   * table. For previous behavior of issuing row deletes, see deleteTableData. Expressly does not
2076   * preserve regions of existing table.
2077   * @param tableName table which must exist.
2078   * @return HTable for the new table
2079   */
2080  public Table truncateTable(final TableName tableName) throws IOException {
2081    return truncateTable(tableName, false);
2082  }
2083
2084  /**
2085   * Load table with rows from 'aaa' to 'zzz'.
2086   * @param t Table
2087   * @param f Family
2088   * @return Count of rows loaded.
2089   */
2090  public int loadTable(final Table t, final byte[] f) throws IOException {
2091    return loadTable(t, new byte[][] { f });
2092  }
2093
2094  /**
2095   * Load table with rows from 'aaa' to 'zzz'.
2096   * @param t Table
2097   * @param f Family
2098   * @return Count of rows loaded.
2099   */
2100  public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
2101    return loadTable(t, new byte[][] { f }, null, writeToWAL);
2102  }
2103
2104  /**
2105   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2106   * @param t Table
2107   * @param f Array of Families to load
2108   * @return Count of rows loaded.
2109   */
2110  public int loadTable(final Table t, final byte[][] f) throws IOException {
2111    return loadTable(t, f, null);
2112  }
2113
2114  /**
2115   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2116   * @param t     Table
2117   * @param f     Array of Families to load
2118   * @param value the values of the cells. If null is passed, the row key is used as value
2119   * @return Count of rows loaded.
2120   */
2121  public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
2122    return loadTable(t, f, value, true);
2123  }
2124
2125  /**
2126   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
2127   * @param t     Table
2128   * @param f     Array of Families to load
2129   * @param value the values of the cells. If null is passed, the row key is used as value
2130   * @return Count of rows loaded.
2131   */
2132  public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL)
2133    throws IOException {
2134    List<Put> puts = new ArrayList<>();
2135    for (byte[] row : HBaseTestingUtility.ROWS) {
2136      Put put = new Put(row);
2137      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
2138      for (int i = 0; i < f.length; i++) {
2139        byte[] value1 = value != null ? value : row;
2140        put.addColumn(f[i], f[i], value1);
2141      }
2142      puts.add(put);
2143    }
2144    t.put(puts);
2145    return puts.size();
2146  }
2147
2148  /**
2149   * A tracker for tracking and validating table rows generated with
2150   * {@link HBaseTestingUtility#loadTable(Table, byte[])}
2151   */
2152  public static class SeenRowTracker {
2153    int dim = 'z' - 'a' + 1;
2154    int[][][] seenRows = new int[dim][dim][dim]; // count of how many times the row is seen
2155    byte[] startRow;
2156    byte[] stopRow;
2157
2158    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
2159      this.startRow = startRow;
2160      this.stopRow = stopRow;
2161    }
2162
2163    void reset() {
2164      for (byte[] row : ROWS) {
2165        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
2166      }
2167    }
2168
2169    int i(byte b) {
2170      return b - 'a';
2171    }
2172
2173    public void addRow(byte[] row) {
2174      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
2175    }
2176
2177    /**
2178     * Validate that all the rows between startRow and stopRow are seen exactly once, and all other
2179     * rows none
2180     */
2181    public void validate() {
2182      for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2183        for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2184          for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2185            int count = seenRows[i(b1)][i(b2)][i(b3)];
2186            int expectedCount = 0;
2187            if (
2188              Bytes.compareTo(new byte[] { b1, b2, b3 }, startRow) >= 0
2189                && Bytes.compareTo(new byte[] { b1, b2, b3 }, stopRow) < 0
2190            ) {
2191              expectedCount = 1;
2192            }
2193            if (count != expectedCount) {
2194              String row = new String(new byte[] { b1, b2, b3 }, StandardCharsets.UTF_8);
2195              throw new RuntimeException("Row:" + row + " has a seen count of " + count + " "
2196                + "instead of " + expectedCount);
2197            }
2198          }
2199        }
2200      }
2201    }
2202  }
2203
2204  public int loadRegion(final HRegion r, final byte[] f) throws IOException {
2205    return loadRegion(r, f, false);
2206  }
2207
2208  public int loadRegion(final Region r, final byte[] f) throws IOException {
2209    return loadRegion((HRegion) r, f);
2210  }
2211
2212  /**
2213   * Load region with rows from 'aaa' to 'zzz'.
2214   * @param r     Region
2215   * @param f     Family
2216   * @param flush flush the cache if true
2217   * @return Count of rows loaded.
2218   */
2219  public int loadRegion(final HRegion r, final byte[] f, final boolean flush) throws IOException {
2220    byte[] k = new byte[3];
2221    int rowCount = 0;
2222    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2223      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2224        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2225          k[0] = b1;
2226          k[1] = b2;
2227          k[2] = b3;
2228          Put put = new Put(k);
2229          put.setDurability(Durability.SKIP_WAL);
2230          put.addColumn(f, null, k);
2231          if (r.getWAL() == null) {
2232            put.setDurability(Durability.SKIP_WAL);
2233          }
2234          int preRowCount = rowCount;
2235          int pause = 10;
2236          int maxPause = 1000;
2237          while (rowCount == preRowCount) {
2238            try {
2239              r.put(put);
2240              rowCount++;
2241            } catch (RegionTooBusyException e) {
2242              pause = (pause * 2 >= maxPause) ? maxPause : pause * 2;
2243              Threads.sleep(pause);
2244            }
2245          }
2246        }
2247      }
2248      if (flush) {
2249        r.flush(true);
2250      }
2251    }
2252    return rowCount;
2253  }
2254
2255  public void loadNumericRows(final Table t, final byte[] f, int startRow, int endRow)
2256    throws IOException {
2257    for (int i = startRow; i < endRow; i++) {
2258      byte[] data = Bytes.toBytes(String.valueOf(i));
2259      Put put = new Put(data);
2260      put.addColumn(f, null, data);
2261      t.put(put);
2262    }
2263  }
2264
2265  public void loadRandomRows(final Table t, final byte[] f, int rowSize, int totalRows)
2266    throws IOException {
2267    byte[] row = new byte[rowSize];
2268    for (int i = 0; i < totalRows; i++) {
2269      Bytes.random(row);
2270      Put put = new Put(row);
2271      put.addColumn(f, new byte[] { 0 }, new byte[] { 0 });
2272      t.put(put);
2273    }
2274  }
2275
2276  public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow,
2277    int replicaId) throws IOException {
2278    for (int i = startRow; i < endRow; i++) {
2279      String failMsg = "Failed verification of row :" + i;
2280      byte[] data = Bytes.toBytes(String.valueOf(i));
2281      Get get = new Get(data);
2282      get.setReplicaId(replicaId);
2283      get.setConsistency(Consistency.TIMELINE);
2284      Result result = table.get(get);
2285      assertTrue(failMsg, result.containsColumn(f, null));
2286      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
2287      Cell cell = result.getColumnLatestCell(f, null);
2288      assertTrue(failMsg, Bytes.equals(data, 0, data.length, cell.getValueArray(),
2289        cell.getValueOffset(), cell.getValueLength()));
2290    }
2291  }
2292
2293  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow)
2294    throws IOException {
2295    verifyNumericRows((HRegion) region, f, startRow, endRow);
2296  }
2297
2298  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
2299    throws IOException {
2300    verifyNumericRows(region, f, startRow, endRow, true);
2301  }
2302
2303  public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow,
2304    final boolean present) throws IOException {
2305    verifyNumericRows((HRegion) region, f, startRow, endRow, present);
2306  }
2307
2308  public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
2309    final boolean present) throws IOException {
2310    for (int i = startRow; i < endRow; i++) {
2311      String failMsg = "Failed verification of row :" + i;
2312      byte[] data = Bytes.toBytes(String.valueOf(i));
2313      Result result = region.get(new Get(data));
2314
2315      boolean hasResult = result != null && !result.isEmpty();
2316      assertEquals(failMsg + result, present, hasResult);
2317      if (!present) continue;
2318
2319      assertTrue(failMsg, result.containsColumn(f, null));
2320      assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
2321      Cell cell = result.getColumnLatestCell(f, null);
2322      assertTrue(failMsg, Bytes.equals(data, 0, data.length, cell.getValueArray(),
2323        cell.getValueOffset(), cell.getValueLength()));
2324    }
2325  }
2326
2327  public void deleteNumericRows(final Table t, final byte[] f, int startRow, int endRow)
2328    throws IOException {
2329    for (int i = startRow; i < endRow; i++) {
2330      byte[] data = Bytes.toBytes(String.valueOf(i));
2331      Delete delete = new Delete(data);
2332      delete.addFamily(f);
2333      t.delete(delete);
2334    }
2335  }
2336
2337  /**
2338   * Return the number of rows in the given table.
2339   * @param table to count rows
2340   * @return count of rows
2341   */
2342  public int countRows(final Table table) throws IOException {
2343    return countRows(table, new Scan());
2344  }
2345
2346  public int countRows(final Table table, final Scan scan) throws IOException {
2347    try (ResultScanner results = table.getScanner(scan)) {
2348      int count = 0;
2349      while (results.next() != null) {
2350        count++;
2351      }
2352      return count;
2353    }
2354  }
2355
2356  public int countRows(final Table table, final byte[]... families) throws IOException {
2357    Scan scan = new Scan();
2358    for (byte[] family : families) {
2359      scan.addFamily(family);
2360    }
2361    return countRows(table, scan);
2362  }
2363
2364  /**
2365   * Return the number of rows in the given table.
2366   */
2367  public int countRows(final TableName tableName) throws IOException {
2368    Table table = getConnection().getTable(tableName);
2369    try {
2370      return countRows(table);
2371    } finally {
2372      table.close();
2373    }
2374  }
2375
2376  public int countRows(final Region region) throws IOException {
2377    return countRows(region, new Scan());
2378  }
2379
2380  public int countRows(final Region region, final Scan scan) throws IOException {
2381    InternalScanner scanner = region.getScanner(scan);
2382    try {
2383      return countRows(scanner);
2384    } finally {
2385      scanner.close();
2386    }
2387  }
2388
2389  public int countRows(final InternalScanner scanner) throws IOException {
2390    int scannedCount = 0;
2391    List<Cell> results = new ArrayList<>();
2392    boolean hasMore = true;
2393    while (hasMore) {
2394      hasMore = scanner.next(results);
2395      scannedCount += results.size();
2396      results.clear();
2397    }
2398    return scannedCount;
2399  }
2400
2401  /**
2402   * Return an md5 digest of the entire contents of a table.
2403   */
2404  public String checksumRows(final Table table) throws Exception {
2405
2406    Scan scan = new Scan();
2407    ResultScanner results = table.getScanner(scan);
2408    MessageDigest digest = MessageDigest.getInstance("MD5");
2409    for (Result res : results) {
2410      digest.update(res.getRow());
2411    }
2412    results.close();
2413    return digest.toString();
2414  }
2415
2416  /** All the row values for the data loaded by {@link #loadTable(Table, byte[])} */
2417  public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
2418  static {
2419    int i = 0;
2420    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
2421      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
2422        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
2423          ROWS[i][0] = b1;
2424          ROWS[i][1] = b2;
2425          ROWS[i][2] = b3;
2426          i++;
2427        }
2428      }
2429    }
2430  }
2431
  /**
   * Pre-defined region split keys: the empty byte array (start of table) followed by "bbb" through
   * "yyy".
   */
  public static final byte[][] KEYS = { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"),
    Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"),
    Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"),
    Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"),
    Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy") };

  /**
   * Split keys for table creation via the Admin API: interior split points only (no leading empty
   * key), "bbb" through "zzz".
   */
  public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE = { Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"),
    Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("jjj"),
    Bytes.toBytes("kkk"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"),
    Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"),
    Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"),
    Bytes.toBytes("www"), Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz") };
2447
2448  /**
2449   * Create rows in hbase:meta for regions of the specified table with the specified start keys. The
2450   * first startKey should be a 0 length byte array if you want to form a proper range of regions.
2451   * @return list of region info for regions added to meta
2452   * @deprecated since 2.0 version and will be removed in 3.0 version. use
2453   *             {@link #createMultiRegionsInMeta(Configuration, TableDescriptor, byte[][])}
2454   */
2455  @Deprecated
2456  public List<HRegionInfo> createMultiRegionsInMeta(final Configuration conf,
2457    final HTableDescriptor htd, byte[][] startKeys) throws IOException {
2458    return createMultiRegionsInMeta(conf, (TableDescriptor) htd, startKeys).stream()
2459      .map(ImmutableHRegionInfo::new).collect(Collectors.toList());
2460  }
2461
2462  /**
2463   * Create rows in hbase:meta for regions of the specified table with the specified start keys. The
2464   * first startKey should be a 0 length byte array if you want to form a proper range of regions.
2465   * @return list of region info for regions added to meta
2466   */
2467  public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf,
2468    final TableDescriptor htd, byte[][] startKeys) throws IOException {
2469    Table meta = getConnection().getTable(TableName.META_TABLE_NAME);
2470    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
2471    List<RegionInfo> newRegions = new ArrayList<>(startKeys.length);
2472    MetaTableAccessor.updateTableState(getConnection(), htd.getTableName(),
2473      TableState.State.ENABLED);
2474    // add custom ones
2475    for (int i = 0; i < startKeys.length; i++) {
2476      int j = (i + 1) % startKeys.length;
2477      RegionInfo hri = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKeys[i])
2478        .setEndKey(startKeys[j]).build();
2479      MetaTableAccessor.addRegionsToMeta(getConnection(), Collections.singletonList(hri), 1);
2480      newRegions.add(hri);
2481    }
2482
2483    meta.close();
2484    return newRegions;
2485  }
2486
2487  /**
2488   * Create an unmanaged WAL. Be sure to close it when you're through.
2489   */
2490  public static WAL createWal(final Configuration conf, final Path rootDir, final RegionInfo hri)
2491    throws IOException {
2492    // The WAL subsystem will use the default rootDir rather than the passed in rootDir
2493    // unless I pass along via the conf.
2494    Configuration confForWAL = new Configuration(conf);
2495    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
2496    return new WALFactory(confForWAL, "hregion-" + RandomStringUtils.randomNumeric(8)).getWAL(hri);
2497  }
2498
2499  /**
2500   * Create a region with it's own WAL. Be sure to call
2501   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2502   */
2503  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2504    final Configuration conf, final TableDescriptor htd) throws IOException {
2505    return createRegionAndWAL(info, rootDir, conf, htd, true);
2506  }
2507
2508  /**
2509   * Create a region with it's own WAL. Be sure to call
2510   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2511   */
2512  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2513    final Configuration conf, final TableDescriptor htd, BlockCache blockCache) throws IOException {
2514    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
2515    region.setBlockCache(blockCache);
2516    region.initialize();
2517    return region;
2518  }
2519
2520  /**
2521   * Create a region with it's own WAL. Be sure to call
2522   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2523   */
2524  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2525    final Configuration conf, final TableDescriptor htd, MobFileCache mobFileCache)
2526    throws IOException {
2527    HRegion region = createRegionAndWAL(info, rootDir, conf, htd, false);
2528    region.setMobFileCache(mobFileCache);
2529    region.initialize();
2530    return region;
2531  }
2532
2533  /**
2534   * Create a region with it's own WAL. Be sure to call
2535   * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources.
2536   */
2537  public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootDir,
2538    final Configuration conf, final TableDescriptor htd, boolean initialize) throws IOException {
2539    ChunkCreator.initialize(MemStoreLAB.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null,
2540      MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT);
2541    WAL wal = createWal(conf, rootDir, info);
2542    return HRegion.createHRegion(info, rootDir, conf, htd, wal, initialize);
2543  }
2544
2545  /**
2546   * Returns all rows from the hbase:meta table.
2547   * @throws IOException When reading the rows fails.
2548   */
2549  public List<byte[]> getMetaTableRows() throws IOException {
2550    // TODO: Redo using MetaTableAccessor class
2551    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
2552    List<byte[]> rows = new ArrayList<>();
2553    ResultScanner s = t.getScanner(new Scan());
2554    for (Result result : s) {
2555      LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow()));
2556      rows.add(result.getRow());
2557    }
2558    s.close();
2559    t.close();
2560    return rows;
2561  }
2562
2563  /**
2564   * Returns all rows from the hbase:meta table for a given user table
2565   * @throws IOException When reading the rows fails.
2566   */
2567  public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
2568    // TODO: Redo using MetaTableAccessor.
2569    Table t = getConnection().getTable(TableName.META_TABLE_NAME);
2570    List<byte[]> rows = new ArrayList<>();
2571    ResultScanner s = t.getScanner(new Scan());
2572    for (Result result : s) {
2573      RegionInfo info = MetaTableAccessor.getRegionInfo(result);
2574      if (info == null) {
2575        LOG.error("No region info for row " + Bytes.toString(result.getRow()));
2576        // TODO figure out what to do for this new hosed case.
2577        continue;
2578      }
2579
2580      if (info.getTable().equals(tableName)) {
2581        LOG.info("getMetaTableRows: row -> " + Bytes.toStringBinary(result.getRow()) + info);
2582        rows.add(result.getRow());
2583      }
2584    }
2585    s.close();
2586    t.close();
2587    return rows;
2588  }
2589
2590  /**
2591   * Returns all regions of the specified table
2592   * @param tableName the table name
2593   * @return all regions of the specified table
2594   * @throws IOException when getting the regions fails.
2595   */
2596  private List<RegionInfo> getRegions(TableName tableName) throws IOException {
2597    try (Admin admin = getConnection().getAdmin()) {
2598      return admin.getRegions(tableName);
2599    }
2600  }
2601
2602  /**
2603   * Find any other region server which is different from the one identified by parameter
2604   * @return another region server
2605   */
2606  public HRegionServer getOtherRegionServer(HRegionServer rs) {
2607    for (JVMClusterUtil.RegionServerThread rst : getMiniHBaseCluster().getRegionServerThreads()) {
2608      if (!(rst.getRegionServer() == rs)) {
2609        return rst.getRegionServer();
2610      }
2611    }
2612    return null;
2613  }
2614
2615  /**
2616   * Tool to get the reference to the region server object that holds the region of the specified
2617   * user table.
2618   * @param tableName user table to lookup in hbase:meta
2619   * @return region server that holds it, null if the row doesn't exist
2620   */
2621  public HRegionServer getRSForFirstRegionInTable(TableName tableName)
2622    throws IOException, InterruptedException {
2623    List<RegionInfo> regions = getRegions(tableName);
2624    if (regions == null || regions.isEmpty()) {
2625      return null;
2626    }
2627    LOG.debug("Found " + regions.size() + " regions for table " + tableName);
2628
2629    byte[] firstRegionName =
2630      regions.stream().filter(r -> !r.isOffline()).map(RegionInfo::getRegionName).findFirst()
2631        .orElseThrow(() -> new IOException("online regions not found in table " + tableName));
2632
2633    LOG.debug("firstRegionName=" + Bytes.toString(firstRegionName));
2634    long pause = getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE,
2635      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
2636    int numRetries = getConfiguration().getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
2637      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
2638    RetryCounter retrier = new RetryCounter(numRetries + 1, (int) pause, TimeUnit.MICROSECONDS);
2639    while (retrier.shouldRetry()) {
2640      int index = getMiniHBaseCluster().getServerWith(firstRegionName);
2641      if (index != -1) {
2642        return getMiniHBaseCluster().getRegionServerThreads().get(index).getRegionServer();
2643      }
2644      // Came back -1. Region may not be online yet. Sleep a while.
2645      retrier.sleepUntilNextRetry();
2646    }
2647    return null;
2648  }
2649
2650  /**
2651   * Starts a <code>MiniMRCluster</code> with a default number of <code>TaskTracker</code>'s.
2652   * @throws IOException When starting the cluster fails.
2653   */
2654  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
2655    // Set a very high max-disk-utilization percentage to avoid the NodeManagers from failing.
2656    conf.setIfUnset("yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage",
2657      "99.0");
2658    startMiniMapReduceCluster(2);
2659    return mrCluster;
2660  }
2661
2662  /**
2663   * Tasktracker has a bug where changing the hadoop.log.dir system property will not change its
2664   * internal static LOG_DIR variable.
2665   */
2666  private void forceChangeTaskLogDir() {
2667    Field logDirField;
2668    try {
2669      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
2670      logDirField.setAccessible(true);
2671
2672      Field modifiersField = ReflectionUtils.getModifiersField();
2673      modifiersField.setAccessible(true);
2674      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
2675
2676      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
2677    } catch (SecurityException e) {
2678      throw new RuntimeException(e);
2679    } catch (NoSuchFieldException e) {
2680      // TODO Auto-generated catch block
2681      throw new RuntimeException(e);
2682    } catch (IllegalArgumentException e) {
2683      throw new RuntimeException(e);
2684    } catch (IllegalAccessException e) {
2685      throw new RuntimeException(e);
2686    }
2687  }
2688
2689  /**
2690   * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
2691   * filesystem.
2692   * @param servers The number of <code>TaskTracker</code>'s to start.
2693   * @throws IOException When starting the cluster fails.
2694   */
2695  private void startMiniMapReduceCluster(final int servers) throws IOException {
2696    if (mrCluster != null) {
2697      throw new IllegalStateException("MiniMRCluster is already running");
2698    }
2699    LOG.info("Starting mini mapreduce cluster...");
2700    setupClusterTestDir();
2701    createDirsAndSetProperties();
2702
2703    forceChangeTaskLogDir();
2704
2705    //// hadoop2 specific settings
2706    // Tests were failing because this process used 6GB of virtual memory and was getting killed.
2707    // we up the VM usable so that processes don't get killed.
2708    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);
2709
2710    // Tests were failing due to MAPREDUCE-4880 / MAPREDUCE-4607 against hadoop 2.0.2-alpha and
2711    // this avoids the problem by disabling speculative task execution in tests.
2712    conf.setBoolean("mapreduce.map.speculative", false);
2713    conf.setBoolean("mapreduce.reduce.speculative", false);
2714    ////
2715
2716    // Allow the user to override FS URI for this map-reduce cluster to use.
2717    mrCluster =
2718      new MiniMRCluster(servers, FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(),
2719        1, null, null, new JobConf(this.conf));
2720    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
2721    if (jobConf == null) {
2722      jobConf = mrCluster.createJobConf();
2723    }
2724    // Hadoop MiniMR overwrites this while it should not
2725    jobConf.set("mapreduce.cluster.local.dir", conf.get("mapreduce.cluster.local.dir"));
2726    LOG.info("Mini mapreduce cluster started");
2727
2728    // In hadoop2, YARN/MR2 starts a mini cluster with its own conf instance and updates settings.
2729    // Our HBase MR jobs need several of these settings in order to properly run. So we copy the
2730    // necessary config properties here. YARN-129 required adding a few properties.
2731    conf.set("mapreduce.jobtracker.address", jobConf.get("mapreduce.jobtracker.address"));
2732    // this for mrv2 support; mr1 ignores this
2733    conf.set("mapreduce.framework.name", "yarn");
2734    conf.setBoolean("yarn.is.minicluster", true);
2735    String rmAddress = jobConf.get("yarn.resourcemanager.address");
2736    if (rmAddress != null) {
2737      conf.set("yarn.resourcemanager.address", rmAddress);
2738    }
2739    String historyAddress = jobConf.get("mapreduce.jobhistory.address");
2740    if (historyAddress != null) {
2741      conf.set("mapreduce.jobhistory.address", historyAddress);
2742    }
2743    String schedulerAddress = jobConf.get("yarn.resourcemanager.scheduler.address");
2744    if (schedulerAddress != null) {
2745      conf.set("yarn.resourcemanager.scheduler.address", schedulerAddress);
2746    }
2747    String mrJobHistoryWebappAddress = jobConf.get("mapreduce.jobhistory.webapp.address");
2748    if (mrJobHistoryWebappAddress != null) {
2749      conf.set("mapreduce.jobhistory.webapp.address", mrJobHistoryWebappAddress);
2750    }
2751    String yarnRMWebappAddress = jobConf.get("yarn.resourcemanager.webapp.address");
2752    if (yarnRMWebappAddress != null) {
2753      conf.set("yarn.resourcemanager.webapp.address", yarnRMWebappAddress);
2754    }
2755  }
2756
2757  /**
2758   * Stops the previously started <code>MiniMRCluster</code>.
2759   */
2760  public void shutdownMiniMapReduceCluster() {
2761    if (mrCluster != null) {
2762      LOG.info("Stopping mini mapreduce cluster...");
2763      mrCluster.shutdown();
2764      mrCluster = null;
2765      LOG.info("Mini mapreduce cluster stopped");
2766    }
2767    // Restore configuration to point to local jobtracker
2768    conf.set("mapreduce.jobtracker.address", "local");
2769  }
2770
2771  /**
2772   * Create a stubbed out RegionServerService, mainly for getting FS.
2773   */
2774  public RegionServerServices createMockRegionServerService() throws IOException {
2775    return createMockRegionServerService((ServerName) null);
2776  }
2777
2778  /**
2779   * Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
2780   * TestTokenAuthentication
2781   */
2782  public RegionServerServices createMockRegionServerService(RpcServerInterface rpc)
2783    throws IOException {
2784    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher());
2785    rss.setFileSystem(getTestFileSystem());
2786    rss.setRpcServer(rpc);
2787    return rss;
2788  }
2789
2790  /**
2791   * Create a stubbed out RegionServerService, mainly for getting FS. This version is used by
2792   * TestOpenRegionHandler
2793   */
2794  public RegionServerServices createMockRegionServerService(ServerName name) throws IOException {
2795    final MockRegionServerServices rss = new MockRegionServerServices(getZooKeeperWatcher(), name);
2796    rss.setFileSystem(getTestFileSystem());
2797    return rss;
2798  }
2799
2800  /**
2801   * Switches the logger for the given class to DEBUG level.
2802   * @param clazz The class for which to switch to debug logging.
2803   * @deprecated In 2.3.0, will be removed in 4.0.0. Only support changing log level on log4j now as
2804   *             HBase only uses log4j. You should do this by your own as it you know which log
2805   *             framework you are using then set the log level to debug is very easy.
2806   */
2807  @Deprecated
2808  public void enableDebug(Class<?> clazz) {
2809    Log4jUtils.enableDebug(clazz);
2810  }
2811
2812  /**
2813   * Expire the Master's session
2814   */
2815  public void expireMasterSession() throws Exception {
2816    HMaster master = getMiniHBaseCluster().getMaster();
2817    expireSession(master.getZooKeeper(), false);
2818  }
2819
2820  /**
2821   * Expire a region server's session
2822   * @param index which RS
2823   */
2824  public void expireRegionServerSession(int index) throws Exception {
2825    HRegionServer rs = getMiniHBaseCluster().getRegionServer(index);
2826    expireSession(rs.getZooKeeper(), false);
2827    decrementMinRegionServerCount();
2828  }
2829
2830  private void decrementMinRegionServerCount() {
2831    // decrement the count for this.conf, for newly spwaned master
2832    // this.hbaseCluster shares this configuration too
2833    decrementMinRegionServerCount(getConfiguration());
2834
2835    // each master thread keeps a copy of configuration
2836    for (MasterThread master : getHBaseCluster().getMasterThreads()) {
2837      decrementMinRegionServerCount(master.getMaster().getConfiguration());
2838    }
2839  }
2840
2841  private void decrementMinRegionServerCount(Configuration conf) {
2842    int currentCount = conf.getInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
2843    if (currentCount != -1) {
2844      conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, Math.max(currentCount - 1, 1));
2845    }
2846  }
2847
  /** Expire the given watcher's ZooKeeper session without verifying cluster health afterwards. */
  public void expireSession(ZKWatcher nodeZK) throws Exception {
    expireSession(nodeZK, false);
  }
2851
2852  /**
2853   * Expire a ZooKeeper session as recommended in ZooKeeper documentation
2854   * http://hbase.apache.org/book.html#trouble.zookeeper There are issues when doing this: [1]
2855   * http://www.mail-archive.com/dev@zookeeper.apache.org/msg01942.html [2]
2856   * https://issues.apache.org/jira/browse/ZOOKEEPER-1105
2857   * @param nodeZK      - the ZK watcher to expire
2858   * @param checkStatus - true to check if we can create a Table with the current configuration.
2859   */
2860  public void expireSession(ZKWatcher nodeZK, boolean checkStatus) throws Exception {
2861    Configuration c = new Configuration(this.conf);
2862    String quorumServers = ZKConfig.getZKQuorumServersString(c);
2863    ZooKeeper zk = nodeZK.getRecoverableZooKeeper().getZooKeeper();
2864    byte[] password = zk.getSessionPasswd();
2865    long sessionID = zk.getSessionId();
2866
2867    // Expiry seems to be asynchronous (see comment from P. Hunt in [1]),
2868    // so we create a first watcher to be sure that the
2869    // event was sent. We expect that if our watcher receives the event
2870    // other watchers on the same machine will get is as well.
2871    // When we ask to close the connection, ZK does not close it before
2872    // we receive all the events, so don't have to capture the event, just
2873    // closing the connection should be enough.
2874    ZooKeeper monitor = new ZooKeeper(quorumServers, 1000, new org.apache.zookeeper.Watcher() {
2875      @Override
2876      public void process(WatchedEvent watchedEvent) {
2877        LOG.info("Monitor ZKW received event=" + watchedEvent);
2878      }
2879    }, sessionID, password);
2880
2881    // Making it expire
2882    ZooKeeper newZK =
2883      new ZooKeeper(quorumServers, 1000, EmptyWatcher.instance, sessionID, password);
2884
2885    // ensure that we have connection to the server before closing down, otherwise
2886    // the close session event will be eaten out before we start CONNECTING state
2887    long start = System.currentTimeMillis();
2888    while (newZK.getState() != States.CONNECTED && System.currentTimeMillis() - start < 1000) {
2889      Thread.sleep(1);
2890    }
2891    newZK.close();
2892    LOG.info("ZK Closed Session 0x" + Long.toHexString(sessionID));
2893
2894    // Now closing & waiting to be sure that the clients get it.
2895    monitor.close();
2896
2897    if (checkStatus) {
2898      getConnection().getTable(TableName.META_TABLE_NAME).close();
2899    }
2900  }
2901
2902  /**
2903   * Get the Mini HBase cluster.
2904   * @return hbase cluster
2905   * @see #getHBaseClusterInterface()
2906   */
2907  public MiniHBaseCluster getHBaseCluster() {
2908    return getMiniHBaseCluster();
2909  }
2910
2911  /**
2912   * Returns the HBaseCluster instance.
2913   * <p>
2914   * Returned object can be any of the subclasses of HBaseCluster, and the tests referring this
2915   * should not assume that the cluster is a mini cluster or a distributed one. If the test only
2916   * works on a mini cluster, then specific method {@link #getMiniHBaseCluster()} can be used
2917   * instead w/o the need to type-cast.
2918   */
2919  public HBaseCluster getHBaseClusterInterface() {
2920    // implementation note: we should rename this method as #getHBaseCluster(),
2921    // but this would require refactoring 90+ calls.
2922    return hbaseCluster;
2923  }
2924
2925  /**
2926   * Resets the connections so that the next time getConnection() is called, a new connection is
2927   * created. This is needed in cases where the entire cluster / all the masters are shutdown and
2928   * the connection is not valid anymore. TODO: There should be a more coherent way of doing this.
2929   * Unfortunately the way tests are written, not all start() stop() calls go through this class.
2930   * Most tests directly operate on the underlying mini/local hbase cluster. That makes it difficult
2931   * for this wrapper class to maintain the connection state automatically. Cleaning this is a much
2932   * bigger refactor.
2933   */
2934  public void invalidateConnection() throws IOException {
2935    closeConnection();
2936    // Update the master addresses if they changed.
2937    final String masterConfigBefore = conf.get(HConstants.MASTER_ADDRS_KEY);
2938    final String masterConfAfter = getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY);
2939    LOG.info("Invalidated connection. Updating master addresses before: {} after: {}",
2940      masterConfigBefore, masterConfAfter);
2941    conf.set(HConstants.MASTER_ADDRS_KEY,
2942      getMiniHBaseCluster().conf.get(HConstants.MASTER_ADDRS_KEY));
2943  }
2944
2945  /**
2946   * Get a shared Connection to the cluster. this method is threadsafe.
2947   * @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
2948   */
2949  public Connection getConnection() throws IOException {
2950    try {
2951      return this.connection.updateAndGet(connection -> {
2952        if (connection == null) {
2953          try {
2954            connection = ConnectionFactory.createConnection(this.conf);
2955          } catch (IOException ioe) {
2956            throw new UncheckedIOException("Failed to create connection", ioe);
2957          }
2958        }
2959        return connection;
2960      });
2961    } catch (UncheckedIOException exception) {
2962      throw exception.getCause();
2963    }
2964  }
2965
2966  /**
2967   * Returns a Admin instance. This instance is shared between HBaseTestingUtility instance users.
2968   * Closing it has no effect, it will be closed automatically when the cluster shutdowns
2969   * @return HBaseAdmin instance which is guaranteed to support only {@link Admin} interface.
2970   *         Functions in HBaseAdmin not provided by {@link Admin} interface can be changed/deleted
2971   *         anytime.
2972   * @deprecated Since 2.0. Will be removed in 3.0. Use {@link #getAdmin()} instead.
2973   */
2974  @Deprecated
2975  public synchronized HBaseAdmin getHBaseAdmin() throws IOException {
2976    if (hbaseAdmin == null) {
2977      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
2978    }
2979    return hbaseAdmin;
2980  }
2981
2982  public void closeConnection() throws IOException {
2983    if (hbaseAdmin != null) {
2984      Closeables.close(hbaseAdmin, true);
2985      hbaseAdmin = null;
2986    }
2987    Connection connection = this.connection.getAndSet(null);
2988    if (connection != null) {
2989      Closeables.close(connection, true);
2990    }
2991  }
2992
2993  /**
2994   * Returns an Admin instance which is shared between HBaseTestingUtility instance users. Closing
2995   * it has no effect, it will be closed automatically when the cluster shutdowns
2996   */
2997  public synchronized Admin getAdmin() throws IOException {
2998    if (hbaseAdmin == null) {
2999      this.hbaseAdmin = (HBaseAdmin) getConnection().getAdmin();
3000    }
3001    return hbaseAdmin;
3002  }
3003
  // Shared Admin, lazily created by getAdmin()/getHBaseAdmin() and closed by closeConnection().
  private HBaseAdmin hbaseAdmin = null;
3005
3006  /**
3007   * Returns an {@link Hbck} instance. Needs be closed when done.
3008   */
3009  public Hbck getHbck() throws IOException {
3010    return getConnection().getHbck();
3011  }
3012
3013  /**
3014   * Unassign the named region.
3015   * @param regionName The region to unassign.
3016   */
3017  public void unassignRegion(String regionName) throws IOException {
3018    unassignRegion(Bytes.toBytes(regionName));
3019  }
3020
3021  /**
3022   * Unassign the named region.
3023   * @param regionName The region to unassign.
3024   */
3025  public void unassignRegion(byte[] regionName) throws IOException {
3026    getAdmin().unassign(regionName, true);
3027  }
3028
3029  /**
3030   * Closes the region containing the given row.
3031   * @param row   The row to find the containing region.
3032   * @param table The table to find the region.
3033   */
3034  public void unassignRegionByRow(String row, RegionLocator table) throws IOException {
3035    unassignRegionByRow(Bytes.toBytes(row), table);
3036  }
3037
3038  /**
3039   * Closes the region containing the given row.
3040   * @param row   The row to find the containing region.
3041   * @param table The table to find the region.
3042   */
3043  public void unassignRegionByRow(byte[] row, RegionLocator table) throws IOException {
3044    HRegionLocation hrl = table.getRegionLocation(row);
3045    unassignRegion(hrl.getRegionInfo().getRegionName());
3046  }
3047
3048  /**
3049   * Retrieves a splittable region randomly from tableName
3050   * @param tableName   name of table
3051   * @param maxAttempts maximum number of attempts, unlimited for value of -1
3052   * @return the HRegion chosen, null if none was found within limit of maxAttempts
3053   */
3054  public HRegion getSplittableRegion(TableName tableName, int maxAttempts) {
3055    List<HRegion> regions = getHBaseCluster().getRegions(tableName);
3056    int regCount = regions.size();
3057    Set<Integer> attempted = new HashSet<>();
3058    int idx;
3059    int attempts = 0;
3060    do {
3061      regions = getHBaseCluster().getRegions(tableName);
3062      if (regCount != regions.size()) {
3063        // if there was region movement, clear attempted Set
3064        attempted.clear();
3065      }
3066      regCount = regions.size();
3067      // There are chances that before we get the region for the table from an RS the region may
3068      // be going for CLOSE. This may be because online schema change is enabled
3069      if (regCount > 0) {
3070        idx = ThreadLocalRandom.current().nextInt(regCount);
3071        // if we have just tried this region, there is no need to try again
3072        if (attempted.contains(idx)) {
3073          continue;
3074        }
3075        HRegion region = regions.get(idx);
3076        if (region.checkSplit().isPresent()) {
3077          return region;
3078        }
3079        attempted.add(idx);
3080      }
3081      attempts++;
3082    } while (maxAttempts == -1 || attempts < maxAttempts);
3083    return null;
3084  }
3085
  /** Returns the MiniDFSCluster backing this utility, or null if none was started. */
  public MiniDFSCluster getDFSCluster() {
    return dfsCluster;
  }
3089
  /**
   * Set the MiniDFSCluster, requiring that any current cluster be down first.
   * @see #setDFSCluster(MiniDFSCluster, boolean)
   */
  public void setDFSCluster(MiniDFSCluster cluster) throws IllegalStateException, IOException {
    setDFSCluster(cluster, true);
  }
3093
3094  /**
3095   * Set the MiniDFSCluster
3096   * @param cluster     cluster to use
3097   * @param requireDown require the that cluster not be "up" (MiniDFSCluster#isClusterUp) before it
3098   *                    is set.
3099   * @throws IllegalStateException if the passed cluster is up when it is required to be down
3100   * @throws IOException           if the FileSystem could not be set from the passed dfs cluster
3101   */
3102  public void setDFSCluster(MiniDFSCluster cluster, boolean requireDown)
3103    throws IllegalStateException, IOException {
3104    if (dfsCluster != null && requireDown && dfsCluster.isClusterUp()) {
3105      throw new IllegalStateException("DFSCluster is already running! Shut it down first.");
3106    }
3107    this.dfsCluster = cluster;
3108    this.setFs();
3109  }
3110
  /** Returns a FileSystem instance (HFileSystem) built from this utility's configuration. */
  public FileSystem getTestFileSystem() throws IOException {
    return HFileSystem.get(conf);
  }
3114
3115  /**
3116   * Wait until all regions in a table have been assigned. Waits default timeout before giving up
3117   * (30 seconds).
3118   * @param table Table to wait on.
3119   */
3120  public void waitTableAvailable(TableName table) throws InterruptedException, IOException {
3121    waitTableAvailable(table.getName(), 30000);
3122  }
3123
  /**
   * Wait until all regions in a table have been assigned, up to the given timeout.
   * @param table         Table to wait on.
   * @param timeoutMillis Timeout in milliseconds.
   */
  public void waitTableAvailable(TableName table, long timeoutMillis)
    throws InterruptedException, IOException {
    waitFor(timeoutMillis, predicateTableAvailable(table));
  }
3128
3129  /**
3130   * Wait until all regions in a table have been assigned
3131   * @param table         Table to wait on.
3132   * @param timeoutMillis Timeout.
3133   */
3134  public void waitTableAvailable(byte[] table, long timeoutMillis)
3135    throws InterruptedException, IOException {
3136    waitFor(timeoutMillis, predicateTableAvailable(TableName.valueOf(table)));
3137  }
3138
3139  public String explainTableAvailability(TableName tableName) throws IOException {
3140    String msg = explainTableState(tableName, TableState.State.ENABLED) + ", ";
3141    if (getHBaseCluster().getMaster().isAlive()) {
3142      Map<RegionInfo, ServerName> assignments = getHBaseCluster().getMaster().getAssignmentManager()
3143        .getRegionStates().getRegionAssignments();
3144      final List<Pair<RegionInfo, ServerName>> metaLocations =
3145        MetaTableAccessor.getTableRegionsAndLocations(getConnection(), tableName);
3146      for (Pair<RegionInfo, ServerName> metaLocation : metaLocations) {
3147        RegionInfo hri = metaLocation.getFirst();
3148        ServerName sn = metaLocation.getSecond();
3149        if (!assignments.containsKey(hri)) {
3150          msg += ", region " + hri + " not assigned, but found in meta, it expected to be on " + sn;
3151
3152        } else if (sn == null) {
3153          msg += ",  region " + hri + " assigned,  but has no server in meta";
3154        } else if (!sn.equals(assignments.get(hri))) {
3155          msg += ",  region " + hri + " assigned,  but has different servers in meta and AM ( " + sn
3156            + " <> " + assignments.get(hri);
3157        }
3158      }
3159    }
3160    return msg;
3161  }
3162
  /**
   * Describe how the table's state in hbase:meta compares to the expected state.
   * @param table table to inspect
   * @param state expected state
   * @return "OK" message when the state matches, otherwise a description of the mismatch
   */
  public String explainTableState(final TableName table, TableState.State state)
    throws IOException {
    TableState tableState = MetaTableAccessor.getTableState(getConnection(), table);
    if (tableState == null) {
      return "TableState in META: No table state in META for table " + table
        + " last state in meta (including deleted is " + findLastTableState(table) + ")";
    } else if (!tableState.inStates(state)) {
      return "TableState in META: Not " + state + " state, but " + tableState;
    } else {
      return "TableState in META: OK";
    }
  }
3175
3176  @Nullable
3177  public TableState findLastTableState(final TableName table) throws IOException {
3178    final AtomicReference<TableState> lastTableState = new AtomicReference<>(null);
3179    MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
3180      @Override
3181      public boolean visit(Result r) throws IOException {
3182        if (!Arrays.equals(r.getRow(), table.getName())) return false;
3183        TableState state = MetaTableAccessor.getTableState(r);
3184        if (state != null) lastTableState.set(state);
3185        return true;
3186      }
3187    };
3188    MetaTableAccessor.scanMeta(getConnection(), null, null, MetaTableAccessor.QueryType.TABLE,
3189      Integer.MAX_VALUE, visitor);
3190    return lastTableState.get();
3191  }
3192
3193  /**
3194   * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the regions
3195   * have been all assigned. Will timeout after default period (30 seconds) Tolerates nonexistent
3196   * table.
3197   * @param table the table to wait on.
3198   * @throws InterruptedException if interrupted while waiting
3199   * @throws IOException          if an IO problem is encountered
3200   */
3201  public void waitTableEnabled(TableName table) throws InterruptedException, IOException {
3202    waitTableEnabled(table, 30000);
3203  }
3204
3205  /**
3206   * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the regions
3207   * have been all assigned.
3208   * @see #waitTableEnabled(TableName, long)
3209   * @param table         Table to wait on.
3210   * @param timeoutMillis Time to wait on it being marked enabled.
3211   */
3212  public void waitTableEnabled(byte[] table, long timeoutMillis)
3213    throws InterruptedException, IOException {
3214    waitTableEnabled(TableName.valueOf(table), timeoutMillis);
3215  }
3216
  /**
   * Waits up to {@code timeoutMillis} for the table to be marked enabled with regions assigned.
   * @param table         Table to wait on.
   * @param timeoutMillis Time to wait on it being marked enabled.
   */
  public void waitTableEnabled(TableName table, long timeoutMillis) throws IOException {
    waitFor(timeoutMillis, predicateTableEnabled(table));
  }
3220
3221  /**
3222   * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' Will timeout
3223   * after default period (30 seconds)
3224   * @param table Table to wait on.
3225   */
3226  public void waitTableDisabled(byte[] table) throws InterruptedException, IOException {
3227    waitTableDisabled(table, 30000);
3228  }
3229
  /**
   * Waits up to {@code millisTimeout} for the table to be marked 'disabled'.
   * @param table         Table to wait on.
   * @param millisTimeout Time to wait on it being marked disabled.
   */
  public void waitTableDisabled(TableName table, long millisTimeout)
    throws InterruptedException, IOException {
    waitFor(millisTimeout, predicateTableDisabled(table));
  }
3234
3235  /**
3236   * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled'
3237   * @param table         Table to wait on.
3238   * @param timeoutMillis Time to wait on it being marked disabled.
3239   */
3240  public void waitTableDisabled(byte[] table, long timeoutMillis)
3241    throws InterruptedException, IOException {
3242    waitTableDisabled(TableName.valueOf(table), timeoutMillis);
3243  }
3244
3245  /**
3246   * Make sure that at least the specified number of region servers are running
3247   * @param num minimum number of region servers that should be running
3248   * @return true if we started some servers
3249   */
3250  public boolean ensureSomeRegionServersAvailable(final int num) throws IOException {
3251    boolean startedServer = false;
3252    MiniHBaseCluster hbaseCluster = getMiniHBaseCluster();
3253    for (int i = hbaseCluster.getLiveRegionServerThreads().size(); i < num; ++i) {
3254      LOG.info("Started new server=" + hbaseCluster.startRegionServer());
3255      startedServer = true;
3256    }
3257
3258    return startedServer;
3259  }
3260
3261  /**
3262   * Make sure that at least the specified number of region servers are running. We don't count the
3263   * ones that are currently stopping or are stopped.
3264   * @param num minimum number of region servers that should be running
3265   * @return true if we started some servers
3266   */
3267  public boolean ensureSomeNonStoppedRegionServersAvailable(final int num) throws IOException {
3268    boolean startedServer = ensureSomeRegionServersAvailable(num);
3269
3270    int nonStoppedServers = 0;
3271    for (JVMClusterUtil.RegionServerThread rst : getMiniHBaseCluster().getRegionServerThreads()) {
3272
3273      HRegionServer hrs = rst.getRegionServer();
3274      if (hrs.isStopping() || hrs.isStopped()) {
3275        LOG.info("A region server is stopped or stopping:" + hrs);
3276      } else {
3277        nonStoppedServers++;
3278      }
3279    }
3280    for (int i = nonStoppedServers; i < num; ++i) {
3281      LOG.info("Started new server=" + getMiniHBaseCluster().startRegionServer());
3282      startedServer = true;
3283    }
3284    return startedServer;
3285  }
3286
3287  /**
3288   * This method clones the passed <code>c</code> configuration setting a new user into the clone.
3289   * Use it getting new instances of FileSystem. Only works for DistributedFileSystem w/o Kerberos.
3290   * @param c                     Initial configuration
3291   * @param differentiatingSuffix Suffix to differentiate this user from others.
3292   * @return A new configuration instance with a different user set into it.
3293   */
3294  public static User getDifferentUser(final Configuration c, final String differentiatingSuffix)
3295    throws IOException {
3296    FileSystem currentfs = FileSystem.get(c);
3297    if (!(currentfs instanceof DistributedFileSystem) || User.isHBaseSecurityEnabled(c)) {
3298      return User.getCurrent();
3299    }
3300    // Else distributed filesystem. Make a new instance per daemon. Below
3301    // code is taken from the AppendTestUtil over in hdfs.
3302    String username = User.getCurrent().getName() + differentiatingSuffix;
3303    User user = User.createUserForTesting(c, username, new String[] { "supergroup" });
3304    return user;
3305  }
3306
3307  public static NavigableSet<String> getAllOnlineRegions(MiniHBaseCluster cluster)
3308    throws IOException {
3309    NavigableSet<String> online = new TreeSet<>();
3310    for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
3311      try {
3312        for (RegionInfo region : ProtobufUtil
3313          .getOnlineRegions(rst.getRegionServer().getRSRpcServices())) {
3314          online.add(region.getRegionNameAsString());
3315        }
3316      } catch (RegionServerStoppedException e) {
3317        // That's fine.
3318      }
3319    }
3320    for (MasterThread mt : cluster.getLiveMasterThreads()) {
3321      try {
3322        for (RegionInfo region : ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) {
3323          online.add(region.getRegionNameAsString());
3324        }
3325      } catch (RegionServerStoppedException e) {
3326        // That's fine.
3327      } catch (ServerNotRunningYetException e) {
3328        // That's fine.
3329      }
3330    }
3331    return online;
3332  }
3333
3334  /**
3335   * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append its hard-coded to 5 and makes tests
3336   * linger. Here is the exception you'll see:
3337   *
3338   * <pre>
3339   * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/wal.1276627923013 block
3340   * blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block
3341   * blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683
3342   * failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
3343   * </pre>
3344   *
3345   * @param stream A DFSClient.DFSOutputStream.
3346   */
3347  public static void setMaxRecoveryErrorCount(final OutputStream stream, final int max) {
3348    try {
3349      Class<?>[] clazzes = DFSClient.class.getDeclaredClasses();
3350      for (Class<?> clazz : clazzes) {
3351        String className = clazz.getSimpleName();
3352        if (className.equals("DFSOutputStream")) {
3353          if (clazz.isInstance(stream)) {
3354            Field maxRecoveryErrorCountField =
3355              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
3356            maxRecoveryErrorCountField.setAccessible(true);
3357            maxRecoveryErrorCountField.setInt(stream, max);
3358            break;
3359          }
3360        }
3361      }
3362    } catch (Exception e) {
3363      LOG.info("Could not set max recovery field", e);
3364    }
3365  }
3366
3367  /**
3368   * Uses directly the assignment manager to assign the region. and waits until the specified region
3369   * has completed assignment.
3370   * @return true if the region is assigned false otherwise.
3371   */
3372  public boolean assignRegion(final RegionInfo regionInfo)
3373    throws IOException, InterruptedException {
3374    final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager();
3375    am.assign(regionInfo);
3376    return AssignmentTestingUtil.waitForAssignment(am, regionInfo);
3377  }
3378
3379  /**
3380   * Move region to destination server and wait till region is completely moved and online
3381   * @param destRegion region to move
3382   * @param destServer destination server of the region
3383   */
3384  public void moveRegionAndWait(RegionInfo destRegion, ServerName destServer)
3385    throws InterruptedException, IOException {
3386    HMaster master = getMiniHBaseCluster().getMaster();
3387    // TODO: Here we start the move. The move can take a while.
3388    getAdmin().move(destRegion.getEncodedNameAsBytes(), destServer);
3389    while (true) {
3390      ServerName serverName =
3391        master.getAssignmentManager().getRegionStates().getRegionServerOfRegion(destRegion);
3392      if (serverName != null && serverName.equals(destServer)) {
3393        assertRegionOnServer(destRegion, serverName, 2000);
3394        break;
3395      }
3396      Thread.sleep(10);
3397    }
3398  }
3399
3400  /**
3401   * Wait until all regions for a table in hbase:meta have a non-empty info:server, up to a
3402   * configuable timeout value (default is 60 seconds) This means all regions have been deployed,
3403   * master has been informed and updated hbase:meta with the regions deployed server.
3404   * @param tableName the table name
3405   */
3406  public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOException {
3407    waitUntilAllRegionsAssigned(tableName,
3408      this.conf.getLong("hbase.client.sync.wait.timeout.msec", 60000));
3409  }
3410
3411  /**
3412   * Waith until all system table's regions get assigned
3413   */
3414  public void waitUntilAllSystemRegionsAssigned() throws IOException {
3415    waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
3416    waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME);
3417  }
3418
3419  /**
3420   * Wait until all regions for a table in hbase:meta have a non-empty info:server, or until
3421   * timeout. This means all regions have been deployed, master has been informed and updated
3422   * hbase:meta with the regions deployed server.
3423   * @param tableName the table name
3424   * @param timeout   timeout, in milliseconds
3425   */
3426  public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
3427    throws IOException {
3428    if (!TableName.isMetaTableName(tableName)) {
3429      try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
3430        LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = "
3431          + timeout + "ms");
3432        waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {
3433          @Override
3434          public String explainFailure() throws IOException {
3435            return explainTableAvailability(tableName);
3436          }
3437
3438          @Override
3439          public boolean evaluate() throws IOException {
3440            Scan scan = new Scan();
3441            scan.addFamily(HConstants.CATALOG_FAMILY);
3442            boolean tableFound = false;
3443            try (ResultScanner s = meta.getScanner(scan)) {
3444              for (Result r; (r = s.next()) != null;) {
3445                byte[] b = r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
3446                HRegionInfo info = HRegionInfo.parseFromOrNull(b);
3447                if (info != null && info.getTable().equals(tableName)) {
3448                  // Get server hosting this region from catalog family. Return false if no server
3449                  // hosting this region, or if the server hosting this region was recently killed
3450                  // (for fault tolerance testing).
3451                  tableFound = true;
3452                  byte[] server =
3453                    r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
3454                  if (server == null) {
3455                    return false;
3456                  } else {
3457                    byte[] startCode =
3458                      r.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
3459                    ServerName serverName =
3460                      ServerName.valueOf(Bytes.toString(server).replaceFirst(":", ",") + ","
3461                        + Bytes.toLong(startCode));
3462                    if (
3463                      !getHBaseClusterInterface().isDistributedCluster()
3464                        && getHBaseCluster().isKilledRS(serverName)
3465                    ) {
3466                      return false;
3467                    }
3468                  }
3469                  if (RegionStateStore.getRegionState(r, info) != RegionState.State.OPEN) {
3470                    return false;
3471                  }
3472                }
3473              }
3474            }
3475            if (!tableFound) {
3476              LOG.warn(
3477                "Didn't find the entries for table " + tableName + " in meta, already deleted?");
3478            }
3479            return tableFound;
3480          }
3481        });
3482      }
3483    }
3484    LOG.info("All regions for table " + tableName + " assigned to meta. Checking AM states.");
3485    // check from the master state if we are using a mini cluster
3486    if (!getHBaseClusterInterface().isDistributedCluster()) {
3487      // So, all regions are in the meta table but make sure master knows of the assignments before
3488      // returning -- sometimes this can lag.
3489      HMaster master = getHBaseCluster().getMaster();
3490      final RegionStates states = master.getAssignmentManager().getRegionStates();
3491      waitFor(timeout, 200, new ExplainingPredicate<IOException>() {
3492        @Override
3493        public String explainFailure() throws IOException {
3494          return explainTableAvailability(tableName);
3495        }
3496
3497        @Override
3498        public boolean evaluate() throws IOException {
3499          List<RegionInfo> hris = states.getRegionsOfTable(tableName);
3500          return hris != null && !hris.isEmpty();
3501        }
3502      });
3503    }
3504    LOG.info("All regions for table " + tableName + " assigned.");
3505  }
3506
3507  /**
3508   * Do a small get/scan against one store. This is required because store has no actual methods of
3509   * querying itself, and relies on StoreScanner.
3510   */
3511  public static List<Cell> getFromStoreFile(HStore store, Get get) throws IOException {
3512    Scan scan = new Scan(get);
3513    InternalScanner scanner = (InternalScanner) store.getScanner(scan,
3514      scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()),
3515      // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
3516      // readpoint 0.
3517      0);
3518
3519    List<Cell> result = new ArrayList<>();
3520    scanner.next(result);
3521    if (!result.isEmpty()) {
3522      // verify that we are on the row we want:
3523      Cell kv = result.get(0);
3524      if (!CellUtil.matchingRows(kv, get.getRow())) {
3525        result.clear();
3526      }
3527    }
3528    scanner.close();
3529    return result;
3530  }
3531
3532  /**
3533   * Create region split keys between startkey and endKey
3534   * @param numRegions the number of regions to be created. it has to be greater than 3.
3535   * @return resulting split keys
3536   */
3537  public byte[][] getRegionSplitStartKeys(byte[] startKey, byte[] endKey, int numRegions) {
3538    assertTrue(numRegions > 3);
3539    byte[][] tmpSplitKeys = Bytes.split(startKey, endKey, numRegions - 3);
3540    byte[][] result = new byte[tmpSplitKeys.length + 1][];
3541    System.arraycopy(tmpSplitKeys, 0, result, 1, tmpSplitKeys.length);
3542    result[0] = HConstants.EMPTY_BYTE_ARRAY;
3543    return result;
3544  }
3545
3546  /**
3547   * Do a small get/scan against one store. This is required because store has no actual methods of
3548   * querying itself, and relies on StoreScanner.
3549   */
3550  public static List<Cell> getFromStoreFile(HStore store, byte[] row, NavigableSet<byte[]> columns)
3551    throws IOException {
3552    Get get = new Get(row);
3553    Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
3554    s.put(store.getColumnFamilyDescriptor().getName(), columns);
3555
3556    return getFromStoreFile(store, get);
3557  }
3558
  /**
   * Assert two cell lists are equal under CellComparator, failing with a message that pinpoints
   * the first differing position.
   * @param additionalMsg extra context appended to the failure message (may be null)
   * @param expected      expected cells
   * @param actual        actual cells
   * @throws AssertionError if lengths differ or any pair of cells compares unequal
   */
  public static void assertKVListsEqual(String additionalMsg, final List<? extends Cell> expected,
    final List<? extends Cell> actual) {
    final int eLen = expected.size();
    final int aLen = actual.size();
    final int minLen = Math.min(eLen, aLen);

    // Advance i to the first position where the two lists differ (or to minLen if none).
    int i;
    for (i = 0; i < minLen
      && CellComparator.getInstance().compare(expected.get(i), actual.get(i)) == 0; ++i) {
    }

    if (additionalMsg == null) {
      additionalMsg = "";
    }
    if (!additionalMsg.isEmpty()) {
      additionalMsg = ". " + additionalMsg;
    }

    // Differ if the common prefix ended early, or one list is longer than the other.
    if (eLen != aLen || i != minLen) {
      throw new AssertionError("Expected and actual KV arrays differ at position " + i + ": "
        + safeGetAsStr(expected, i) + " (length " + eLen + ") vs. " + safeGetAsStr(actual, i)
        + " (length " + aLen + ")" + additionalMsg);
    }
  }
3583
3584  public static <T> String safeGetAsStr(List<T> lst, int i) {
3585    if (0 <= i && i < lst.size()) {
3586      return lst.get(i).toString();
3587    } else {
3588      return "<out_of_range>";
3589    }
3590  }
3591
  /**
   * Returns the cluster key, i.e. "quorum:clientPort:znodeParent", built from this utility's
   * configuration (znode parent falls back to the default when unset).
   */
  public String getClusterKey() {
    return conf.get(HConstants.ZOOKEEPER_QUORUM) + ":" + conf.get(HConstants.ZOOKEEPER_CLIENT_PORT)
      + ":"
      + conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
  }
3597
3598  /** Creates a random table with the given parameters */
3599  public Table createRandomTable(TableName tableName, final Collection<String> families,
3600    final int maxVersions, final int numColsPerRow, final int numFlushes, final int numRegions,
3601    final int numRowsPerFlush) throws IOException, InterruptedException {
3602
3603    LOG.info("\n\nCreating random table " + tableName + " with " + numRegions + " regions, "
3604      + numFlushes + " storefiles per region, " + numRowsPerFlush + " rows per flush, maxVersions="
3605      + maxVersions + "\n");
3606
3607    final int numCF = families.size();
3608    final byte[][] cfBytes = new byte[numCF][];
3609    {
3610      int cfIndex = 0;
3611      for (String cf : families) {
3612        cfBytes[cfIndex++] = Bytes.toBytes(cf);
3613      }
3614    }
3615
3616    final int actualStartKey = 0;
3617    final int actualEndKey = Integer.MAX_VALUE;
3618    final int keysPerRegion = (actualEndKey - actualStartKey) / numRegions;
3619    final int splitStartKey = actualStartKey + keysPerRegion;
3620    final int splitEndKey = actualEndKey - keysPerRegion;
3621    final String keyFormat = "%08x";
3622    final Table table = createTable(tableName, cfBytes, maxVersions,
3623      Bytes.toBytes(String.format(keyFormat, splitStartKey)),
3624      Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions);
3625
3626    if (hbaseCluster != null) {
3627      getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
3628    }
3629
3630    BufferedMutator mutator = getConnection().getBufferedMutator(tableName);
3631
3632    final Random rand = ThreadLocalRandom.current();
3633    for (int iFlush = 0; iFlush < numFlushes; ++iFlush) {
3634      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
3635        final byte[] row = Bytes.toBytes(
3636          String.format(keyFormat, actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));
3637
3638        Put put = new Put(row);
3639        Delete del = new Delete(row);
3640        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
3641          final byte[] cf = cfBytes[rand.nextInt(numCF)];
3642          final long ts = rand.nextInt();
3643          final byte[] qual = Bytes.toBytes("col" + iCol);
3644          if (rand.nextBoolean()) {
3645            final byte[] value =
3646              Bytes.toBytes("value_for_row_" + iRow + "_cf_" + Bytes.toStringBinary(cf) + "_col_"
3647                + iCol + "_ts_" + ts + "_random_" + rand.nextLong());
3648            put.addColumn(cf, qual, ts, value);
3649          } else if (rand.nextDouble() < 0.8) {
3650            del.addColumn(cf, qual, ts);
3651          } else {
3652            del.addColumns(cf, qual, ts);
3653          }
3654        }
3655
3656        if (!put.isEmpty()) {
3657          mutator.mutate(put);
3658        }
3659
3660        if (!del.isEmpty()) {
3661          mutator.mutate(del);
3662        }
3663      }
3664      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
3665      mutator.flush();
3666      if (hbaseCluster != null) {
3667        getMiniHBaseCluster().flushcache(table.getName());
3668      }
3669    }
3670    mutator.close();
3671
3672    return table;
3673  }
3674
  /**
   * Returns a random free port, delegating to
   * {@link HBaseCommonTestingUtility#randomFreePort()}.
   */
  public static int randomFreePort() {
    return HBaseCommonTestingUtility.randomFreePort();
  }
3678
3679  public static String randomMultiCastAddress() {
3680    return "226.1.1." + ThreadLocalRandom.current().nextInt(254);
3681  }
3682
3683  public static void waitForHostPort(String host, int port) throws IOException {
3684    final int maxTimeMs = 10000;
3685    final int maxNumAttempts = maxTimeMs / HConstants.SOCKET_RETRY_WAIT_MS;
3686    IOException savedException = null;
3687    LOG.info("Waiting for server at " + host + ":" + port);
3688    for (int attempt = 0; attempt < maxNumAttempts; ++attempt) {
3689      try {
3690        Socket sock = new Socket(InetAddress.getByName(host), port);
3691        sock.close();
3692        savedException = null;
3693        LOG.info("Server at " + host + ":" + port + " is available");
3694        break;
3695      } catch (UnknownHostException e) {
3696        throw new IOException("Failed to look up " + host, e);
3697      } catch (IOException e) {
3698        savedException = e;
3699      }
3700      Threads.sleepWithoutInterrupt(HConstants.SOCKET_RETRY_WAIT_MS);
3701    }
3702
3703    if (savedException != null) {
3704      throw savedException;
3705    }
3706  }
3707
3708  /**
3709   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
3710   * continues.
3711   * @return the number of regions the table was split into
3712   */
3713  public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName,
3714    byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding)
3715    throws IOException {
3716    return createPreSplitLoadTestTable(conf, tableName, columnFamily, compression,
3717      dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1, Durability.USE_DEFAULT);
3718  }
3719
3720  /**
3721   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
3722   * continues.
3723   * @return the number of regions the table was split into
3724   */
3725  public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName,
3726    byte[] columnFamily, Algorithm compression, DataBlockEncoding dataBlockEncoding,
3727    int numRegionsPerServer, int regionReplication, Durability durability) throws IOException {
3728    HTableDescriptor desc = new HTableDescriptor(tableName);
3729    desc.setDurability(durability);
3730    desc.setRegionReplication(regionReplication);
3731    HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
3732    hcd.setDataBlockEncoding(dataBlockEncoding);
3733    hcd.setCompressionType(compression);
3734    return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer);
3735  }
3736
3737  /**
3738   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
3739   * continues.
3740   * @return the number of regions the table was split into
3741   */
3742  public static int createPreSplitLoadTestTable(Configuration conf, TableName tableName,
3743    byte[][] columnFamilies, Algorithm compression, DataBlockEncoding dataBlockEncoding,
3744    int numRegionsPerServer, int regionReplication, Durability durability) throws IOException {
3745    HTableDescriptor desc = new HTableDescriptor(tableName);
3746    desc.setDurability(durability);
3747    desc.setRegionReplication(regionReplication);
3748    HColumnDescriptor[] hcds = new HColumnDescriptor[columnFamilies.length];
3749    for (int i = 0; i < columnFamilies.length; i++) {
3750      HColumnDescriptor hcd = new HColumnDescriptor(columnFamilies[i]);
3751      hcd.setDataBlockEncoding(dataBlockEncoding);
3752      hcd.setCompressionType(compression);
3753      hcds[i] = hcd;
3754    }
3755    return createPreSplitLoadTestTable(conf, desc, hcds, numRegionsPerServer);
3756  }
3757
3758  /**
3759   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
3760   * continues.
3761   * @return the number of regions the table was split into
3762   */
3763  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc,
3764    ColumnFamilyDescriptor hcd) throws IOException {
3765    return createPreSplitLoadTestTable(conf, desc, hcd, DEFAULT_REGIONS_PER_SERVER);
3766  }
3767
3768  /**
3769   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
3770   * continues.
3771   * @return the number of regions the table was split into
3772   */
3773  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc,
3774    ColumnFamilyDescriptor hcd, int numRegionsPerServer) throws IOException {
3775    return createPreSplitLoadTestTable(conf, desc, new ColumnFamilyDescriptor[] { hcd },
3776      numRegionsPerServer);
3777  }
3778
3779  /**
3780   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
3781   * continues.
3782   * @return the number of regions the table was split into
3783   */
3784  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor desc,
3785    ColumnFamilyDescriptor[] hcds, int numRegionsPerServer) throws IOException {
3786    return createPreSplitLoadTestTable(conf, desc, hcds, new RegionSplitter.HexStringSplit(),
3787      numRegionsPerServer);
3788  }
3789
3790  /**
3791   * Creates a pre-split table for load testing. If the table already exists, logs a warning and
3792   * continues.
3793   * @return the number of regions the table was split into
3794   */
3795  public static int createPreSplitLoadTestTable(Configuration conf, TableDescriptor td,
3796    ColumnFamilyDescriptor[] cds, SplitAlgorithm splitter, int numRegionsPerServer)
3797    throws IOException {
3798    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(td);
3799    for (ColumnFamilyDescriptor cd : cds) {
3800      if (!td.hasColumnFamily(cd.getName())) {
3801        builder.setColumnFamily(cd);
3802      }
3803    }
3804    td = builder.build();
3805    int totalNumberOfRegions = 0;
3806    Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
3807    Admin admin = unmanagedConnection.getAdmin();
3808
3809    try {
3810      // create a table a pre-splits regions.
3811      // The number of splits is set as:
3812      // region servers * regions per region server).
3813      int numberOfServers = admin.getRegionServers().size();
3814      if (numberOfServers == 0) {
3815        throw new IllegalStateException("No live regionservers");
3816      }
3817
3818      totalNumberOfRegions = numberOfServers * numRegionsPerServer;
3819      LOG.info("Number of live regionservers: " + numberOfServers + ", "
3820        + "pre-splitting table into " + totalNumberOfRegions + " regions " + "(regions per server: "
3821        + numRegionsPerServer + ")");
3822
3823      byte[][] splits = splitter.split(totalNumberOfRegions);
3824
3825      admin.createTable(td, splits);
3826    } catch (MasterNotRunningException e) {
3827      LOG.error("Master not running", e);
3828      throw new IOException(e);
3829    } catch (TableExistsException e) {
3830      LOG.warn("Table " + td.getTableName() + " already exists, continuing");
3831    } finally {
3832      admin.close();
3833      unmanagedConnection.close();
3834    }
3835    return totalNumberOfRegions;
3836  }
3837
  /**
   * Returns the port of the region server hosting the first hbase:meta region.
   * @param connection connection used to locate the meta region
   */
  public static int getMetaRSPort(Connection connection) throws IOException {
    try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
      // The empty row key locates the first meta region.
      return locator.getRegionLocation(Bytes.toBytes("")).getPort();
    }
  }
3843
3844  /**
3845   * Due to async racing issue, a region may not be in the online region list of a region server
3846   * yet, after the assignment znode is deleted and the new assignment is recorded in master.
3847   */
3848  public void assertRegionOnServer(final RegionInfo hri, final ServerName server,
3849    final long timeout) throws IOException, InterruptedException {
3850    long timeoutTime = System.currentTimeMillis() + timeout;
3851    while (true) {
3852      List<RegionInfo> regions = getAdmin().getRegions(server);
3853      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) return;
3854      long now = System.currentTimeMillis();
3855      if (now > timeoutTime) break;
3856      Thread.sleep(10);
3857    }
3858    fail("Could not find region " + hri.getRegionNameAsString() + " on server " + server);
3859  }
3860
3861  /**
3862   * Check to make sure the region is open on the specified region server, but not on any other one.
3863   */
3864  public void assertRegionOnlyOnServer(final RegionInfo hri, final ServerName server,
3865    final long timeout) throws IOException, InterruptedException {
3866    long timeoutTime = System.currentTimeMillis() + timeout;
3867    while (true) {
3868      List<RegionInfo> regions = getAdmin().getRegions(server);
3869      if (regions.stream().anyMatch(r -> RegionInfo.COMPARATOR.compare(r, hri) == 0)) {
3870        List<JVMClusterUtil.RegionServerThread> rsThreads =
3871          getHBaseCluster().getLiveRegionServerThreads();
3872        for (JVMClusterUtil.RegionServerThread rsThread : rsThreads) {
3873          HRegionServer rs = rsThread.getRegionServer();
3874          if (server.equals(rs.getServerName())) {
3875            continue;
3876          }
3877          Collection<HRegion> hrs = rs.getOnlineRegionsLocalContext();
3878          for (HRegion r : hrs) {
3879            assertTrue("Region should not be double assigned",
3880              r.getRegionInfo().getRegionId() != hri.getRegionId());
3881          }
3882        }
3883        return; // good, we are happy
3884      }
3885      long now = System.currentTimeMillis();
3886      if (now > timeoutTime) break;
3887      Thread.sleep(10);
3888    }
3889    fail("Could not find region " + hri.getRegionNameAsString() + " on server " + server);
3890  }
3891
3892  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd) throws IOException {
3893    TableDescriptor td =
3894      TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
3895    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
3896    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td);
3897  }
3898
3899  public HRegion createTestRegion(String tableName, ColumnFamilyDescriptor cd,
3900    BlockCache blockCache) throws IOException {
3901    TableDescriptor td =
3902      TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)).setColumnFamily(cd).build();
3903    RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf(tableName)).build();
3904    return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), td, blockCache);
3905  }
3906
  /** Sets the {@code FS_URI} field used by this testing utility. */
  public void setFileSystemURI(String fsURI) {
    FS_URI = fsURI;
  }
3910
3911  /**
3912   * Returns a {@link Predicate} for checking that there are no regions in transition in master
3913   */
3914  public ExplainingPredicate<IOException> predicateNoRegionsInTransition() {
3915    return new ExplainingPredicate<IOException>() {
3916      @Override
3917      public String explainFailure() throws IOException {
3918        final RegionStates regionStates =
3919          getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
3920        return "found in transition: " + regionStates.getRegionsInTransition().toString();
3921      }
3922
3923      @Override
3924      public boolean evaluate() throws IOException {
3925        HMaster master = getMiniHBaseCluster().getMaster();
3926        if (master == null) return false;
3927        AssignmentManager am = master.getAssignmentManager();
3928        if (am == null) return false;
3929        return !am.hasRegionsInTransition();
3930      }
3931    };
3932  }
3933
3934  /**
3935   * Returns a {@link Predicate} for checking that table is enabled
3936   */
3937  public Waiter.Predicate<IOException> predicateTableEnabled(final TableName tableName) {
3938    return new ExplainingPredicate<IOException>() {
3939      @Override
3940      public String explainFailure() throws IOException {
3941        return explainTableState(tableName, TableState.State.ENABLED);
3942      }
3943
3944      @Override
3945      public boolean evaluate() throws IOException {
3946        return getAdmin().tableExists(tableName) && getAdmin().isTableEnabled(tableName);
3947      }
3948    };
3949  }
3950
3951  /**
3952   * Returns a {@link Predicate} for checking that table is enabled
3953   */
3954  public Waiter.Predicate<IOException> predicateTableDisabled(final TableName tableName) {
3955    return new ExplainingPredicate<IOException>() {
3956      @Override
3957      public String explainFailure() throws IOException {
3958        return explainTableState(tableName, TableState.State.DISABLED);
3959      }
3960
3961      @Override
3962      public boolean evaluate() throws IOException {
3963        return getAdmin().isTableDisabled(tableName);
3964      }
3965    };
3966  }
3967
3968  /**
3969   * Returns a {@link Predicate} for checking that table is enabled
3970   */
3971  public Waiter.Predicate<IOException> predicateTableAvailable(final TableName tableName) {
3972    return new ExplainingPredicate<IOException>() {
3973      @Override
3974      public String explainFailure() throws IOException {
3975        return explainTableAvailability(tableName);
3976      }
3977
3978      @Override
3979      public boolean evaluate() throws IOException {
3980        boolean tableAvailable = getAdmin().isTableAvailable(tableName);
3981        if (tableAvailable) {
3982          try (Table table = getConnection().getTable(tableName)) {
3983            TableDescriptor htd = table.getDescriptor();
3984            for (HRegionLocation loc : getConnection().getRegionLocator(tableName)
3985              .getAllRegionLocations()) {
3986              Scan scan = new Scan().withStartRow(loc.getRegionInfo().getStartKey())
3987                .withStopRow(loc.getRegionInfo().getEndKey()).setOneRowLimit()
3988                .setMaxResultsPerColumnFamily(1).setCacheBlocks(false);
3989              for (byte[] family : htd.getColumnFamilyNames()) {
3990                scan.addFamily(family);
3991              }
3992              try (ResultScanner scanner = table.getScanner(scan)) {
3993                scanner.next();
3994              }
3995            }
3996          }
3997        }
3998        return tableAvailable;
3999      }
4000    };
4001  }
4002
4003  /**
4004   * Wait until no regions in transition.
4005   * @param timeout How long to wait.
4006   */
4007  public void waitUntilNoRegionsInTransition(final long timeout) throws IOException {
4008    waitFor(timeout, predicateNoRegionsInTransition());
4009  }
4010
4011  /**
4012   * Wait until no regions in transition. (time limit 15min)
4013   */
4014  public void waitUntilNoRegionsInTransition() throws IOException {
4015    waitUntilNoRegionsInTransition(15 * 60000);
4016  }
4017
4018  /**
4019   * Wait until labels is ready in VisibilityLabelsCache.
4020   */
4021  public void waitLabelAvailable(long timeoutMillis, final String... labels) {
4022    final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get();
4023    waitFor(timeoutMillis, new Waiter.ExplainingPredicate<RuntimeException>() {
4024
4025      @Override
4026      public boolean evaluate() {
4027        for (String label : labels) {
4028          if (labelsCache.getLabelOrdinal(label) == 0) {
4029            return false;
4030          }
4031        }
4032        return true;
4033      }
4034
4035      @Override
4036      public String explainFailure() {
4037        for (String label : labels) {
4038          if (labelsCache.getLabelOrdinal(label) == 0) {
4039            return label + " is not available yet";
4040          }
4041        }
4042        return "";
4043      }
4044    });
4045  }
4046
4047  /**
4048   * Create a set of column descriptors with the combination of compression, encoding, bloom codecs
4049   * available.
4050   * @return the list of column descriptors
4051   */
4052  public static List<HColumnDescriptor> generateColumnDescriptors() {
4053    return generateColumnDescriptors("");
4054  }
4055
4056  /**
4057   * Create a set of column descriptors with the combination of compression, encoding, bloom codecs
4058   * available.
4059   * @param prefix family names prefix
4060   * @return the list of column descriptors
4061   */
4062  public static List<HColumnDescriptor> generateColumnDescriptors(final String prefix) {
4063    List<HColumnDescriptor> htds = new ArrayList<>();
4064    long familyId = 0;
4065    for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
4066      for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
4067        for (BloomType bloomType : BloomType.values()) {
4068          String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
4069          HColumnDescriptor htd = new HColumnDescriptor(name);
4070          htd.setCompressionType(compressionType);
4071          htd.setDataBlockEncoding(encodingType);
4072          htd.setBloomFilterType(bloomType);
4073          htds.add(htd);
4074          familyId++;
4075        }
4076      }
4077    }
4078    return htds;
4079  }
4080
4081  /**
4082   * Get supported compression algorithms.
4083   * @return supported compression algorithms.
4084   */
4085  public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
4086    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
4087    List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
4088    for (String algoName : allAlgos) {
4089      try {
4090        Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
4091        algo.getCompressor();
4092        supportedAlgos.add(algo);
4093      } catch (Throwable t) {
4094        // this algo is not available
4095      }
4096    }
4097    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
4098  }
4099
  /**
   * Finds the closest row at or before {@code row} in the given family via a small reversed
   * scan on the region.
   * @return the scan result, or {@code null} when scanning a meta region and the first cell
   *         returned belongs to a different table than {@code row}
   */
  public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException {
    Scan scan = new Scan(row);
    scan.setSmall(true);
    scan.setCaching(1);
    scan.setReversed(true);
    scan.addFamily(family);
    try (RegionScanner scanner = r.getScanner(scan)) {
      List<Cell> cells = new ArrayList<>(1);
      scanner.next(cells);
      // NOTE(review): cells.get(0) throws IndexOutOfBoundsException when the scan returned no
      // cells; presumably callers only hit the meta branch on non-empty results — confirm.
      if (r.getRegionInfo().isMetaRegion() && !isTargetTable(row, cells.get(0))) {
        return null;
      }
      return Result.create(cells);
    }
  }
4115
4116  private boolean isTargetTable(final byte[] inRow, Cell c) {
4117    String inputRowString = Bytes.toString(inRow);
4118    int i = inputRowString.indexOf(HConstants.DELIMITER);
4119    String outputRowString = Bytes.toString(c.getRowArray(), c.getRowOffset(), c.getRowLength());
4120    int o = outputRowString.indexOf(HConstants.DELIMITER);
4121    return inputRowString.substring(0, i).equals(outputRowString.substring(0, o));
4122  }
4123
4124  /**
4125   * Sets up {@link MiniKdc} for testing security. Uses {@link HBaseKerberosUtils} to set the given
4126   * keytab file as {@link HBaseKerberosUtils#KRB_KEYTAB_FILE}. FYI, there is also the easier-to-use
4127   * kerby KDC server and utility for using it,
4128   * {@link org.apache.hadoop.hbase.util.SimpleKdcServerUtil}. The kerby KDC server is preferred;
4129   * less baggage. It came in in HBASE-5291.
4130   */
4131  public MiniKdc setupMiniKdc(File keytabFile) throws Exception {
4132    Properties conf = MiniKdc.createConf();
4133    conf.put(MiniKdc.DEBUG, true);
4134    MiniKdc kdc = null;
4135    File dir = null;
4136    // There is time lag between selecting a port and trying to bind with it. It's possible that
4137    // another service captures the port in between which'll result in BindException.
4138    boolean bindException;
4139    int numTries = 0;
4140    do {
4141      try {
4142        bindException = false;
4143        dir = new File(getDataTestDir("kdc").toUri().getPath());
4144        kdc = new MiniKdc(conf, dir);
4145        kdc.start();
4146      } catch (BindException e) {
4147        FileUtils.deleteDirectory(dir); // clean directory
4148        numTries++;
4149        if (numTries == 3) {
4150          LOG.error("Failed setting up MiniKDC. Tried " + numTries + " times.");
4151          throw e;
4152        }
4153        LOG.error("BindException encountered when setting up MiniKdc. Trying again.");
4154        bindException = true;
4155      }
4156    } while (bindException);
4157    HBaseKerberosUtils.setKeytabFileForTesting(keytabFile.getAbsolutePath());
4158    return kdc;
4159  }
4160
4161  public int getNumHFiles(final TableName tableName, final byte[] family) {
4162    int numHFiles = 0;
4163    for (RegionServerThread regionServerThread : getMiniHBaseCluster().getRegionServerThreads()) {
4164      numHFiles += getNumHFilesForRS(regionServerThread.getRegionServer(), tableName, family);
4165    }
4166    return numHFiles;
4167  }
4168
4169  public int getNumHFilesForRS(final HRegionServer rs, final TableName tableName,
4170    final byte[] family) {
4171    int numHFiles = 0;
4172    for (Region region : rs.getRegions(tableName)) {
4173      numHFiles += region.getStore(family).getStorefilesCount();
4174    }
4175    return numHFiles;
4176  }
4177
4178  public void verifyTableDescriptorIgnoreTableName(TableDescriptor ltd, TableDescriptor rtd) {
4179    assertEquals(ltd.getValues().hashCode(), rtd.getValues().hashCode());
4180    Collection<ColumnFamilyDescriptor> ltdFamilies = Arrays.asList(ltd.getColumnFamilies());
4181    Collection<ColumnFamilyDescriptor> rtdFamilies = Arrays.asList(rtd.getColumnFamilies());
4182    assertEquals(ltdFamilies.size(), rtdFamilies.size());
4183    for (Iterator<ColumnFamilyDescriptor> it = ltdFamilies.iterator(),
4184        it2 = rtdFamilies.iterator(); it.hasNext();) {
4185      assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(it.next(), it2.next()));
4186    }
4187  }
4188
4189  /**
4190   * Await the successful return of {@code condition}, sleeping {@code sleepMillis} between
4191   * invocations.
4192   */
4193  public static void await(final long sleepMillis, final BooleanSupplier condition)
4194    throws InterruptedException {
4195    try {
4196      while (!condition.getAsBoolean()) {
4197        Thread.sleep(sleepMillis);
4198      }
4199    } catch (RuntimeException e) {
4200      if (e.getCause() instanceof AssertionError) {
4201        throw (AssertionError) e.getCause();
4202      }
4203      throw e;
4204    }
4205  }
4206}