001/**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018
019package org.apache.hadoop.hbase.snapshot;
020
021import java.io.BufferedInputStream;
022import java.io.DataInput;
023import java.io.DataOutput;
024import java.io.FileNotFoundException;
025import java.io.IOException;
026import java.io.InputStream;
027import java.util.ArrayList;
028import java.util.Collections;
029import java.util.Comparator;
030import java.util.LinkedList;
031import java.util.List;
032import java.util.concurrent.ExecutionException;
033import java.util.concurrent.ExecutorService;
034import java.util.concurrent.Executors;
035import java.util.concurrent.Future;
036import java.util.function.BiConsumer;
037import org.apache.hadoop.conf.Configuration;
038import org.apache.hadoop.fs.FSDataInputStream;
039import org.apache.hadoop.fs.FSDataOutputStream;
040import org.apache.hadoop.fs.FileChecksum;
041import org.apache.hadoop.fs.FileStatus;
042import org.apache.hadoop.fs.FileSystem;
043import org.apache.hadoop.fs.Path;
044import org.apache.hadoop.fs.permission.FsPermission;
045import org.apache.hadoop.hbase.HBaseConfiguration;
046import org.apache.hadoop.hbase.HConstants;
047import org.apache.hadoop.hbase.TableName;
048import org.apache.hadoop.hbase.client.RegionInfo;
049import org.apache.hadoop.hbase.io.FileLink;
050import org.apache.hadoop.hbase.io.HFileLink;
051import org.apache.hadoop.hbase.io.WALLink;
052import org.apache.hadoop.hbase.io.hadoopbackport.ThrottledInputStream;
053import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
054import org.apache.hadoop.hbase.mob.MobUtils;
055import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
056import org.apache.hadoop.hbase.util.AbstractHBaseTool;
057import org.apache.hadoop.hbase.util.CommonFSUtils;
058import org.apache.hadoop.hbase.util.FSUtils;
059import org.apache.hadoop.hbase.util.HFileArchiveUtil;
060import org.apache.hadoop.hbase.util.Pair;
061import org.apache.hadoop.io.BytesWritable;
062import org.apache.hadoop.io.IOUtils;
063import org.apache.hadoop.io.NullWritable;
064import org.apache.hadoop.io.Writable;
065import org.apache.hadoop.mapreduce.InputFormat;
066import org.apache.hadoop.mapreduce.InputSplit;
067import org.apache.hadoop.mapreduce.Job;
068import org.apache.hadoop.mapreduce.JobContext;
069import org.apache.hadoop.mapreduce.Mapper;
070import org.apache.hadoop.mapreduce.RecordReader;
071import org.apache.hadoop.mapreduce.TaskAttemptContext;
072import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
073import org.apache.hadoop.mapreduce.security.TokenCache;
074import org.apache.hadoop.util.StringUtils;
075import org.apache.hadoop.util.Tool;
076import org.apache.yetus.audience.InterfaceAudience;
077import org.slf4j.Logger;
078import org.slf4j.LoggerFactory;
079
080import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine;
081import org.apache.hbase.thirdparty.org.apache.commons.cli.Option;
082
083import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
084import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
085import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
086
087/**
088 * Export the specified snapshot to a given FileSystem.
089 *
090 * The .snapshot/name folder is copied to the destination cluster
091 * and then all the hfiles/wals are copied using a Map-Reduce Job in the .archive/ location.
092 * When everything is done, the second cluster can restore the snapshot.
093 */
094@InterfaceAudience.Public
095public class ExportSnapshot extends AbstractHBaseTool implements Tool {
  public static final String NAME = "exportsnapshot";
  /** Configuration prefix for overrides for the source filesystem */
  public static final String CONF_SOURCE_PREFIX = NAME + ".from.";
  /** Configuration prefix for overrides for the destination filesystem */
  public static final String CONF_DEST_PREFIX = NAME + ".to.";

  private static final Logger LOG = LoggerFactory.getLogger(ExportSnapshot.class);

  // Standard MapReduce key controlling the number of map tasks for the copy job.
  private static final String MR_NUM_MAPS = "mapreduce.job.maps";
  // Number of input splits to generate (set automatically in getSplits() when unset).
  private static final String CONF_NUM_SPLITS = "snapshot.export.format.splits";
  // Name of the snapshot being exported.
  private static final String CONF_SNAPSHOT_NAME = "snapshot.export.format.snapshot.name";
  // Directory holding the snapshot manifest on the source filesystem.
  private static final String CONF_SNAPSHOT_DIR = "snapshot.export.format.snapshot.dir";
  // Optional owner/group/permission mode applied to the copied files by the mapper.
  private static final String CONF_FILES_USER = "snapshot.export.files.attributes.user";
  private static final String CONF_FILES_GROUP = "snapshot.export.files.attributes.group";
  private static final String CONF_FILES_MODE = "snapshot.export.files.attributes.mode";
  // When true (default), compare source/destination checksums before skipping a copy.
  private static final String CONF_CHECKSUM_VERIFY = "snapshot.export.checksum.verify";
  // Root directories of the destination and source clusters.
  private static final String CONF_OUTPUT_ROOT = "snapshot.export.output.root";
  private static final String CONF_INPUT_ROOT = "snapshot.export.input.root";
  // Copy buffer size; defaults to max(dest fs block size, 64KB) — see ExportMapper.setup().
  private static final String CONF_BUFFER_SIZE = "snapshot.export.buffer.size";
  // Approximate number of files per mapper when the split count is not specified (default 10).
  private static final String CONF_MAP_GROUP = "snapshot.export.default.map.group";
  // Per-mapper bandwidth cap in MB/s; Integer.MAX_VALUE disables throttling.
  private static final String CONF_BANDWIDTH_MB = "snapshot.export.map.bandwidth.mb";
  private static final String CONF_MR_JOB_NAME = "mapreduce.job.name";
  protected static final String CONF_SKIP_TMP = "snapshot.export.skip.tmp";
  // Parallelism for copying the snapshot reference/manifest files (usage not in this chunk).
  private static final String CONF_COPY_MANIFEST_THREADS =
      "snapshot.export.copy.references.threads";
  private static final int DEFAULT_COPY_MANIFEST_THREADS =
      Runtime.getRuntime().availableProcessors();
123
  /** Hooks used by TestExportSnapshot to inject and count artificial copy failures. */
  static class Testing {
    // Config key: when true, enable failure injection in ExportMapper.
    static final String CONF_TEST_FAILURE = "test.snapshot.export.failure";
    // Config key: maximum number of failures to inject across task attempts.
    static final String CONF_TEST_FAILURE_COUNT = "test.snapshot.export.failure.count";
    // Total failures to inject (read from CONF_TEST_FAILURE_COUNT in setup()).
    int failuresCountToInject = 0;
    // Failures injected so far; seeded from the task attempt id in setup().
    int injectedFailureCount = 0;
  }
130
  /**
   * Command line options and defaults. Each Option's description doubles as the
   * user-facing help text for the tool.
   */
  static final class Options {
    static final Option SNAPSHOT = new Option(null, "snapshot", true, "Snapshot to restore.");
    static final Option TARGET_NAME = new Option(null, "target", true,
        "Target name for the snapshot.");
    static final Option COPY_TO = new Option(null, "copy-to", true, "Remote "
        + "destination hdfs://");
    static final Option COPY_FROM = new Option(null, "copy-from", true,
        "Input folder hdfs:// (default hbase.rootdir)");
    static final Option NO_CHECKSUM_VERIFY = new Option(null, "no-checksum-verify", false,
        "Do not verify checksum, use name+length only.");
    static final Option NO_TARGET_VERIFY = new Option(null, "no-target-verify", false,
        "Do not verify the integrity of the exported snapshot.");
    static final Option OVERWRITE = new Option(null, "overwrite", false,
        "Rewrite the snapshot manifest if already exists.");
    static final Option CHUSER = new Option(null, "chuser", true,
        "Change the owner of the files to the specified one.");
    static final Option CHGROUP = new Option(null, "chgroup", true,
        "Change the group of the files to the specified one.");
    static final Option CHMOD = new Option(null, "chmod", true,
        "Change the permission of the files to the specified one.");
    static final Option MAPPERS = new Option(null, "mappers", true,
        "Number of mappers to use during the copy (mapreduce.job.maps).");
    static final Option BANDWIDTH = new Option(null, "bandwidth", true,
        "Limit bandwidth to this value in MB/second.");
  }
157
  /**
   * Export Map-Reduce Counters, to keep track of the progress.
   * MISSING_FILES/COPY_FAILED count errors; FILES_* and BYTES_* track work done or skipped.
   * All counters are pre-registered with increment(0) in ExportMapper.setup().
   */
  public enum Counter {
    MISSING_FILES, FILES_COPIED, FILES_SKIPPED, COPY_FAILED,
    BYTES_EXPECTED, BYTES_SKIPPED, BYTES_COPIED
  }
163
164  private static class ExportMapper extends Mapper<BytesWritable, NullWritable,
165                                                   NullWritable, NullWritable> {
    private static final Logger LOG = LoggerFactory.getLogger(ExportMapper.class);
    // Update the task status string every REPORT_SIZE bytes copied.
    final static int REPORT_SIZE = 1 * 1024 * 1024;
    // Minimum copy buffer size; the effective size is computed in setup().
    final static int BUFFER_SIZE = 64 * 1024;

    // Attribute/verification settings parsed from the job configuration in setup().
    private boolean verifyChecksum;
    private String filesGroup;
    private String filesUser;
    private short filesMode;
    private int bufferSize;

    // Destination cluster: filesystem, hfile archive dir, and root dir.
    private FileSystem outputFs;
    private Path outputArchive;
    private Path outputRoot;

    // Source cluster: filesystem, hfile archive dir, and root dir.
    private FileSystem inputFs;
    private Path inputArchive;
    private Path inputRoot;

    // Failure-injection bookkeeping, shared across map() calls (test use only).
    private static Testing testing = new Testing();
185
    /**
     * Reads the job configuration and opens the source and destination filesystems.
     * Source- and destination-specific overrides are layered on via the
     * CONF_SOURCE_PREFIX / CONF_DEST_PREFIX prefixes.
     */
    @Override
    public void setup(Context context) throws IOException {
      Configuration conf = context.getConfiguration();

      // Build per-cluster configurations so each side can carry its own overrides.
      Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX);
      Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX);

      verifyChecksum = conf.getBoolean(CONF_CHECKSUM_VERIFY, true);

      filesGroup = conf.get(CONF_FILES_GROUP);
      filesUser = conf.get(CONF_FILES_USER);
      filesMode = (short)conf.getInt(CONF_FILES_MODE, 0);
      outputRoot = new Path(conf.get(CONF_OUTPUT_ROOT));
      inputRoot = new Path(conf.get(CONF_INPUT_ROOT));

      inputArchive = new Path(inputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY);
      outputArchive = new Path(outputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY);

      try {
        // Disable the FS cache so this task gets an instance built from srcConf,
        // not a cached one created with a different configuration.
        srcConf.setBoolean("fs." + inputRoot.toUri().getScheme() + ".impl.disable.cache", true);
        inputFs = FileSystem.get(inputRoot.toUri(), srcConf);
      } catch (IOException e) {
        throw new IOException("Could not get the input FileSystem with root=" + inputRoot, e);
      }

      try {
        // Same reasoning as above, for the destination side.
        destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true);
        outputFs = FileSystem.get(outputRoot.toUri(), destConf);
      } catch (IOException e) {
        throw new IOException("Could not get the output FileSystem with root="+ outputRoot, e);
      }

      // Use the default block size of the outputFs if bigger
      int defaultBlockSize = Math.max((int) outputFs.getDefaultBlockSize(outputRoot), BUFFER_SIZE);
      bufferSize = conf.getInt(CONF_BUFFER_SIZE, defaultBlockSize);
      LOG.info("Using bufferSize=" + StringUtils.humanReadableInt(bufferSize));

      // Pre-register every counter (increment by 0) so they all appear in the job output.
      for (Counter c : Counter.values()) {
        context.getCounter(c).increment(0);
      }
      if (context.getConfiguration().getBoolean(Testing.CONF_TEST_FAILURE, false)) {
        testing.failuresCountToInject = conf.getInt(Testing.CONF_TEST_FAILURE_COUNT, 0);
        // Get number of times we have already injected failure based on attempt number of this
        // task.
        testing.injectedFailureCount = context.getTaskAttemptID().getId();
      }
    }
233
    /**
     * Closes both filesystem instances. Safe because setup() disabled the FS cache,
     * so these instances are private to this task.
     */
    @Override
    protected void cleanup(Context context) {
      IOUtils.closeStream(inputFs);
      IOUtils.closeStream(outputFs);
    }
239
    /**
     * Copies one snapshot file. The key is a serialized SnapshotFileInfo protobuf
     * (produced by ExportSnapshotInputSplit); the value is unused.
     */
    @Override
    public void map(BytesWritable key, NullWritable value, Context context)
        throws InterruptedException, IOException {
      SnapshotFileInfo inputInfo = SnapshotFileInfo.parseFrom(key.copyBytes());
      Path outputPath = getOutputPath(inputInfo);

      copyFile(context, inputInfo, outputPath);
    }
248
249    /**
250     * Returns the location where the inputPath will be copied.
251     */
252    private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException {
253      Path path = null;
254      switch (inputInfo.getType()) {
255        case HFILE:
256          Path inputPath = new Path(inputInfo.getHfile());
257          String family = inputPath.getParent().getName();
258          TableName table =HFileLink.getReferencedTableName(inputPath.getName());
259          String region = HFileLink.getReferencedRegionName(inputPath.getName());
260          String hfile = HFileLink.getReferencedHFileName(inputPath.getName());
261          path = new Path(CommonFSUtils.getTableDir(new Path("./"), table),
262              new Path(region, new Path(family, hfile)));
263          break;
264        case WAL:
265          LOG.warn("snapshot does not keeps WALs: " + inputInfo);
266          break;
267        default:
268          throw new IOException("Invalid File Type: " + inputInfo.getType().toString());
269      }
270      return new Path(outputArchive, path);
271    }
272
273    @SuppressWarnings("checkstyle:linelength")
274    /**
275     * Used by TestExportSnapshot to test for retries when failures happen.
276     * Failure is injected in {@link #copyFile(Mapper.Context, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo, Path)}.
277     */
278    private void injectTestFailure(final Context context, final SnapshotFileInfo inputInfo)
279        throws IOException {
280      if (!context.getConfiguration().getBoolean(Testing.CONF_TEST_FAILURE, false)) return;
281      if (testing.injectedFailureCount >= testing.failuresCountToInject) return;
282      testing.injectedFailureCount++;
283      context.getCounter(Counter.COPY_FAILED).increment(1);
284      LOG.debug("Injecting failure. Count: " + testing.injectedFailureCount);
285      throw new IOException(String.format("TEST FAILURE (%d of max %d): Unable to copy input=%s",
286          testing.injectedFailureCount, testing.failuresCountToInject, inputInfo));
287    }
288
    /**
     * Copies one snapshot file to its destination, skipping the copy when an
     * identical file (per sameFile()) already exists. Bandwidth is throttled per
     * CONF_BANDWIDTH_MB, and ownership/permissions are best-effort preserved.
     *
     * @throws IOException on any copy failure, or from the test failure injector
     */
    private void copyFile(final Context context, final SnapshotFileInfo inputInfo,
        final Path outputPath) throws IOException {
      // Get the file information
      FileStatus inputStat = getSourceFileStatus(context, inputInfo);

      // Verify if the output file exists and is the same that we want to copy
      if (outputFs.exists(outputPath)) {
        FileStatus outputStat = outputFs.getFileStatus(outputPath);
        if (outputStat != null && sameFile(inputStat, outputStat)) {
          LOG.info("Skip copy " + inputStat.getPath() + " to " + outputPath + ", same file.");
          context.getCounter(Counter.FILES_SKIPPED).increment(1);
          context.getCounter(Counter.BYTES_SKIPPED).increment(inputStat.getLen());
          return;
        }
      }

      InputStream in = openSourceFile(context, inputInfo);
      // Throttle unless the bandwidth is explicitly set to Integer.MAX_VALUE.
      int bandwidthMB = context.getConfiguration().getInt(CONF_BANDWIDTH_MB, 100);
      if (Integer.MAX_VALUE != bandwidthMB) {
        in = new ThrottledInputStream(new BufferedInputStream(in), bandwidthMB * 1024 * 1024L);
      }

      try {
        context.getCounter(Counter.BYTES_EXPECTED).increment(inputStat.getLen());

        // Ensure that the output folder is there and copy the file
        createOutputPath(outputPath.getParent());
        // Overwrite any partial/different file left by a previous attempt.
        FSDataOutputStream out = outputFs.create(outputPath, true);
        try {
          copyData(context, inputStat.getPath(), in, outputPath, out, inputStat.getLen());
        } finally {
          out.close();
        }

        // Try to Preserve attributes
        if (!preserveAttributes(outputPath, inputStat)) {
          LOG.warn("You may have to run manually chown on: " + outputPath);
        }
      } finally {
        in.close();
        // Test-only: may throw AFTER a successful copy to exercise job retries.
        injectTestFailure(context, inputInfo);
      }
    }
332
333    /**
334     * Create the output folder and optionally set ownership.
335     */
336    private void createOutputPath(final Path path) throws IOException {
337      if (filesUser == null && filesGroup == null) {
338        outputFs.mkdirs(path);
339      } else {
340        Path parent = path.getParent();
341        if (!outputFs.exists(parent) && !parent.isRoot()) {
342          createOutputPath(parent);
343        }
344        outputFs.mkdirs(path);
345        if (filesUser != null || filesGroup != null) {
346          // override the owner when non-null user/group is specified
347          outputFs.setOwner(path, filesUser, filesGroup);
348        }
349        if (filesMode > 0) {
350          outputFs.setPermission(path, new FsPermission(filesMode));
351        }
352      }
353    }
354
355    /**
356     * Try to Preserve the files attribute selected by the user copying them from the source file
357     * This is only required when you are exporting as a different user than "hbase" or on a system
358     * that doesn't have the "hbase" user.
359     *
360     * This is not considered a blocking failure since the user can force a chmod with the user
361     * that knows is available on the system.
362     */
363    private boolean preserveAttributes(final Path path, final FileStatus refStat) {
364      FileStatus stat;
365      try {
366        stat = outputFs.getFileStatus(path);
367      } catch (IOException e) {
368        LOG.warn("Unable to get the status for file=" + path);
369        return false;
370      }
371
372      try {
373        if (filesMode > 0 && stat.getPermission().toShort() != filesMode) {
374          outputFs.setPermission(path, new FsPermission(filesMode));
375        } else if (refStat != null && !stat.getPermission().equals(refStat.getPermission())) {
376          outputFs.setPermission(path, refStat.getPermission());
377        }
378      } catch (IOException e) {
379        LOG.warn("Unable to set the permission for file="+ stat.getPath() +": "+ e.getMessage());
380        return false;
381      }
382
383      boolean hasRefStat = (refStat != null);
384      String user = stringIsNotEmpty(filesUser) || !hasRefStat ? filesUser : refStat.getOwner();
385      String group = stringIsNotEmpty(filesGroup) || !hasRefStat ? filesGroup : refStat.getGroup();
386      if (stringIsNotEmpty(user) || stringIsNotEmpty(group)) {
387        try {
388          if (!(user.equals(stat.getOwner()) && group.equals(stat.getGroup()))) {
389            outputFs.setOwner(path, user, group);
390          }
391        } catch (IOException e) {
392          LOG.warn("Unable to set the owner/group for file="+ stat.getPath() +": "+ e.getMessage());
393          LOG.warn("The user/group may not exist on the destination cluster: user=" +
394                   user + " group=" + group);
395          return false;
396        }
397      }
398
399      return true;
400    }
401
402    private boolean stringIsNotEmpty(final String str) {
403      return str != null && str.length() > 0;
404    }
405
    /**
     * Streams {@code in} to {@code out}, updating the BYTES_COPIED counter and the
     * task status every REPORT_SIZE bytes so the framework sees progress on large
     * files. Verifies the total written matches {@code inputFileSize}.
     *
     * @throws IOException on read/write failure or a size mismatch (after bumping
     *   the COPY_FAILED counter)
     */
    private void copyData(final Context context,
        final Path inputPath, final InputStream in,
        final Path outputPath, final FSDataOutputStream out,
        final long inputFileSize)
        throws IOException {
      final String statusMessage = "copied %s/" + StringUtils.humanReadableInt(inputFileSize) +
                                   " (%.1f%%)";

      try {
        byte[] buffer = new byte[bufferSize];
        long totalBytesWritten = 0;
        int reportBytes = 0;  // bytes copied since the last counter/status update
        int bytesRead;

        long stime = System.currentTimeMillis();
        while ((bytesRead = in.read(buffer)) > 0) {
          out.write(buffer, 0, bytesRead);
          totalBytesWritten += bytesRead;
          reportBytes += bytesRead;

          if (reportBytes >= REPORT_SIZE) {
            context.getCounter(Counter.BYTES_COPIED).increment(reportBytes);
            context.setStatus(String.format(statusMessage,
                              StringUtils.humanReadableInt(totalBytesWritten),
                              (totalBytesWritten/(float)inputFileSize) * 100.0f) +
                              " from " + inputPath + " to " + outputPath);
            reportBytes = 0;
          }
        }
        long etime = System.currentTimeMillis();

        // Flush the remainder that never reached the REPORT_SIZE threshold.
        context.getCounter(Counter.BYTES_COPIED).increment(reportBytes);
        context.setStatus(String.format(statusMessage,
                          StringUtils.humanReadableInt(totalBytesWritten),
                          (totalBytesWritten/(float)inputFileSize) * 100.0f) +
                          " from " + inputPath + " to " + outputPath);

        // Verify that the written size match
        if (totalBytesWritten != inputFileSize) {
          String msg = "number of bytes copied not matching copied=" + totalBytesWritten +
                       " expected=" + inputFileSize + " for file=" + inputPath;
          throw new IOException(msg);
        }

        LOG.info("copy completed for input=" + inputPath + " output=" + outputPath);
        LOG.info("size=" + totalBytesWritten +
            " (" + StringUtils.humanReadableInt(totalBytesWritten) + ")" +
            " time=" + StringUtils.formatTimeDiff(etime, stime) +
            String.format(" %.3fM/sec", (totalBytesWritten / ((etime - stime)/1000.0))/1048576.0));
        context.getCounter(Counter.FILES_COPIED).increment(1);
      } catch (IOException e) {
        LOG.error("Error copying " + inputPath + " to " + outputPath, e);
        context.getCounter(Counter.COPY_FAILED).increment(1);
        throw e;
      }
    }
462
463    /**
464     * Try to open the "source" file.
465     * Throws an IOException if the communication with the inputFs fail or
466     * if the file is not found.
467     */
468    private FSDataInputStream openSourceFile(Context context, final SnapshotFileInfo fileInfo)
469            throws IOException {
470      try {
471        Configuration conf = context.getConfiguration();
472        FileLink link = null;
473        switch (fileInfo.getType()) {
474          case HFILE:
475            Path inputPath = new Path(fileInfo.getHfile());
476            link = getFileLink(inputPath, conf);
477            break;
478          case WAL:
479            String serverName = fileInfo.getWalServer();
480            String logName = fileInfo.getWalName();
481            link = new WALLink(inputRoot, serverName, logName);
482            break;
483          default:
484            throw new IOException("Invalid File Type: " + fileInfo.getType().toString());
485        }
486        return link.open(inputFs);
487      } catch (IOException e) {
488        context.getCounter(Counter.MISSING_FILES).increment(1);
489        LOG.error("Unable to open source file=" + fileInfo.toString(), e);
490        throw e;
491      }
492    }
493
    /**
     * Returns the FileStatus of the source file, resolved through the appropriate
     * HFileLink/WALLink. The two catch blocks are intentionally separate: only a
     * FileNotFoundException counts as a MISSING_FILES; other IOExceptions are
     * logged and rethrown without touching the counter.
     */
    private FileStatus getSourceFileStatus(Context context, final SnapshotFileInfo fileInfo)
        throws IOException {
      try {
        Configuration conf = context.getConfiguration();
        FileLink link = null;
        switch (fileInfo.getType()) {
          case HFILE:
            Path inputPath = new Path(fileInfo.getHfile());
            link = getFileLink(inputPath, conf);
            break;
          case WAL:
            link = new WALLink(inputRoot, fileInfo.getWalServer(), fileInfo.getWalName());
            break;
          default:
            throw new IOException("Invalid File Type: " + fileInfo.getType().toString());
        }
        return link.getFileStatus(inputFs);
      } catch (FileNotFoundException e) {
        context.getCounter(Counter.MISSING_FILES).increment(1);
        LOG.error("Unable to get the status for source file=" + fileInfo.toString(), e);
        throw e;
      } catch (IOException e) {
        LOG.error("Unable to get the status for source file=" + fileInfo.toString(), e);
        throw e;
      }
    }
520
521    private FileLink getFileLink(Path path, Configuration conf) throws IOException{
522      String regionName = HFileLink.getReferencedRegionName(path.getName());
523      TableName tableName = HFileLink.getReferencedTableName(path.getName());
524      if(MobUtils.getMobRegionInfo(tableName).getEncodedName().equals(regionName)) {
525        return HFileLink.buildFromHFileLinkPattern(MobUtils.getQualifiedMobRootDir(conf),
526                HFileArchiveUtil.getArchivePath(conf), path);
527      }
528      return HFileLink.buildFromHFileLinkPattern(inputRoot, inputArchive, path);
529    }
530
    /**
     * Best-effort checksum fetch: returns null (rather than throwing) when the
     * filesystem cannot provide one, so sameFile() can treat it as "not comparable".
     */
    private FileChecksum getFileChecksum(final FileSystem fs, final Path path) {
      try {
        return fs.getFileChecksum(path);
      } catch (IOException e) {
        LOG.warn("Unable to get checksum for file=" + path, e);
        return null;
      }
    }
539
540    /**
541     * Check if the two files are equal by looking at the file length,
542     * and at the checksum (if user has specified the verifyChecksum flag).
543     */
544    private boolean sameFile(final FileStatus inputStat, final FileStatus outputStat) {
545      // Not matching length
546      if (inputStat.getLen() != outputStat.getLen()) return false;
547
548      // Mark files as equals, since user asked for no checksum verification
549      if (!verifyChecksum) return true;
550
551      // If checksums are not available, files are not the same.
552      FileChecksum inChecksum = getFileChecksum(inputFs, inputStat.getPath());
553      if (inChecksum == null) return false;
554
555      FileChecksum outChecksum = getFileChecksum(outputFs, outputStat.getPath());
556      if (outChecksum == null) return false;
557
558      return inChecksum.equals(outChecksum);
559    }
560  }
561
562  // ==========================================================================
563  //  Input Format
564  // ==========================================================================
565
566  /**
567   * Extract the list of files (HFiles/WALs) to copy using Map-Reduce.
568   * @return list of files referenced by the snapshot (pair of path and size)
569   */
570  private static List<Pair<SnapshotFileInfo, Long>> getSnapshotFiles(final Configuration conf,
571      final FileSystem fs, final Path snapshotDir) throws IOException {
572    SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
573
574    final List<Pair<SnapshotFileInfo, Long>> files = new ArrayList<>();
575    final TableName table = TableName.valueOf(snapshotDesc.getTable());
576
577    // Get snapshot files
578    LOG.info("Loading Snapshot '" + snapshotDesc.getName() + "' hfile list");
579    SnapshotReferenceUtil.visitReferencedFiles(conf, fs, snapshotDir, snapshotDesc,
580      new SnapshotReferenceUtil.SnapshotVisitor() {
581        @Override
582        public void storeFile(final RegionInfo regionInfo, final String family,
583            final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
584          Pair<SnapshotFileInfo, Long> snapshotFileAndSize = null;
585          if (!storeFile.hasReference()) {
586            String region = regionInfo.getEncodedName();
587            String hfile = storeFile.getName();
588            snapshotFileAndSize = getSnapshotFileAndSize(fs, conf, table, region, family, hfile,
589              storeFile.hasFileSize() ? storeFile.getFileSize() : -1);
590          } else {
591            Pair<String, String> referredToRegionAndFile =
592                StoreFileInfo.getReferredToRegionAndFile(storeFile.getName());
593            String referencedRegion = referredToRegionAndFile.getFirst();
594            String referencedHFile = referredToRegionAndFile.getSecond();
595            snapshotFileAndSize = getSnapshotFileAndSize(fs, conf, table, referencedRegion, family,
596              referencedHFile, storeFile.hasFileSize() ? storeFile.getFileSize() : -1);
597          }
598          files.add(snapshotFileAndSize);
599        }
600      });
601
602    return files;
603  }
604
605  private static Pair<SnapshotFileInfo, Long> getSnapshotFileAndSize(FileSystem fs,
606      Configuration conf, TableName table, String region, String family, String hfile, long size)
607      throws IOException {
608    Path path = HFileLink.createPath(table, region, family, hfile);
609    SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder().setType(SnapshotFileInfo.Type.HFILE)
610        .setHfile(path.toString()).build();
611    if (size == -1) {
612      size = HFileLink.buildFromHFileLinkPattern(conf, path).getFileStatus(fs).getLen();
613    }
614    return new Pair<>(fileInfo, size);
615  }
616
617  /**
618   * Given a list of file paths and sizes, create around ngroups in as balanced a way as possible.
619   * The groups created will have similar amounts of bytes.
620   * <p>
621   * The algorithm used is pretty straightforward; the file list is sorted by size,
622   * and then each group fetch the bigger file available, iterating through groups
623   * alternating the direction.
624   */
625  static List<List<Pair<SnapshotFileInfo, Long>>> getBalancedSplits(
626      final List<Pair<SnapshotFileInfo, Long>> files, final int ngroups) {
627    // Sort files by size, from small to big
628    Collections.sort(files, new Comparator<Pair<SnapshotFileInfo, Long>>() {
629      public int compare(Pair<SnapshotFileInfo, Long> a, Pair<SnapshotFileInfo, Long> b) {
630        long r = a.getSecond() - b.getSecond();
631        return (r < 0) ? -1 : ((r > 0) ? 1 : 0);
632      }
633    });
634
635    // create balanced groups
636    List<List<Pair<SnapshotFileInfo, Long>>> fileGroups = new LinkedList<>();
637    long[] sizeGroups = new long[ngroups];
638    int hi = files.size() - 1;
639    int lo = 0;
640
641    List<Pair<SnapshotFileInfo, Long>> group;
642    int dir = 1;
643    int g = 0;
644
645    while (hi >= lo) {
646      if (g == fileGroups.size()) {
647        group = new LinkedList<>();
648        fileGroups.add(group);
649      } else {
650        group = fileGroups.get(g);
651      }
652
653      Pair<SnapshotFileInfo, Long> fileInfo = files.get(hi--);
654
655      // add the hi one
656      sizeGroups[g] += fileInfo.getSecond();
657      group.add(fileInfo);
658
659      // change direction when at the end or the beginning
660      g += dir;
661      if (g == ngroups) {
662        dir = -1;
663        g = ngroups - 1;
664      } else if (g < 0) {
665        dir = 1;
666        g = 0;
667      }
668    }
669
670    if (LOG.isDebugEnabled()) {
671      for (int i = 0; i < sizeGroups.length; ++i) {
672        LOG.debug("export split=" + i + " size=" + StringUtils.humanReadableInt(sizeGroups[i]));
673      }
674    }
675
676    return fileGroups;
677  }
678
679  private static class ExportSnapshotInputFormat extends InputFormat<BytesWritable, NullWritable> {
    /** Hands the split's pre-serialized (file, size) list to the record reader. */
    @Override
    public RecordReader<BytesWritable, NullWritable> createRecordReader(InputSplit split,
        TaskAttemptContext tac) throws IOException, InterruptedException {
      return new ExportSnapshotRecordReader(((ExportSnapshotInputSplit)split).getSplitKeys());
    }
685
686    @Override
687    public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {
688      Configuration conf = context.getConfiguration();
689      Path snapshotDir = new Path(conf.get(CONF_SNAPSHOT_DIR));
690      FileSystem fs = FileSystem.get(snapshotDir.toUri(), conf);
691
692      List<Pair<SnapshotFileInfo, Long>> snapshotFiles = getSnapshotFiles(conf, fs, snapshotDir);
693      int mappers = conf.getInt(CONF_NUM_SPLITS, 0);
694      if (mappers == 0 && snapshotFiles.size() > 0) {
695        mappers = 1 + (snapshotFiles.size() / conf.getInt(CONF_MAP_GROUP, 10));
696        mappers = Math.min(mappers, snapshotFiles.size());
697        conf.setInt(CONF_NUM_SPLITS, mappers);
698        conf.setInt(MR_NUM_MAPS, mappers);
699      }
700
701      List<List<Pair<SnapshotFileInfo, Long>>> groups = getBalancedSplits(snapshotFiles, mappers);
702      List<InputSplit> splits = new ArrayList(groups.size());
703      for (List<Pair<SnapshotFileInfo, Long>> files: groups) {
704        splits.add(new ExportSnapshotInputSplit(files));
705      }
706      return splits;
707    }
708
709    private static class ExportSnapshotInputSplit extends InputSplit implements Writable {
710      private List<Pair<BytesWritable, Long>> files;
711      private long length;
712
713      public ExportSnapshotInputSplit() {
714        this.files = null;
715      }
716
717      public ExportSnapshotInputSplit(final List<Pair<SnapshotFileInfo, Long>> snapshotFiles) {
718        this.files = new ArrayList(snapshotFiles.size());
719        for (Pair<SnapshotFileInfo, Long> fileInfo: snapshotFiles) {
720          this.files.add(new Pair<>(
721            new BytesWritable(fileInfo.getFirst().toByteArray()), fileInfo.getSecond()));
722          this.length += fileInfo.getSecond();
723        }
724      }
725
726      private List<Pair<BytesWritable, Long>> getSplitKeys() {
727        return files;
728      }
729
730      @Override
731      public long getLength() throws IOException, InterruptedException {
732        return length;
733      }
734
735      @Override
736      public String[] getLocations() throws IOException, InterruptedException {
737        return new String[] {};
738      }
739
740      @Override
741      public void readFields(DataInput in) throws IOException {
742        int count = in.readInt();
743        files = new ArrayList<>(count);
744        length = 0;
745        for (int i = 0; i < count; ++i) {
746          BytesWritable fileInfo = new BytesWritable();
747          fileInfo.readFields(in);
748          long size = in.readLong();
749          files.add(new Pair<>(fileInfo, size));
750          length += size;
751        }
752      }
753
754      @Override
755      public void write(DataOutput out) throws IOException {
756        out.writeInt(files.size());
757        for (final Pair<BytesWritable, Long> fileInfo: files) {
758          fileInfo.getFirst().write(out);
759          out.writeLong(fileInfo.getSecond());
760        }
761      }
762    }
763
764    private static class ExportSnapshotRecordReader
765        extends RecordReader<BytesWritable, NullWritable> {
766      private final List<Pair<BytesWritable, Long>> files;
767      private long totalSize = 0;
768      private long procSize = 0;
769      private int index = -1;
770
771      ExportSnapshotRecordReader(final List<Pair<BytesWritable, Long>> files) {
772        this.files = files;
773        for (Pair<BytesWritable, Long> fileInfo: files) {
774          totalSize += fileInfo.getSecond();
775        }
776      }
777
778      @Override
779      public void close() { }
780
781      @Override
782      public BytesWritable getCurrentKey() { return files.get(index).getFirst(); }
783
784      @Override
785      public NullWritable getCurrentValue() { return NullWritable.get(); }
786
787      @Override
788      public float getProgress() { return (float)procSize / totalSize; }
789
790      @Override
791      public void initialize(InputSplit split, TaskAttemptContext tac) { }
792
793      @Override
794      public boolean nextKeyValue() {
795        if (index >= 0) {
796          procSize += files.get(index).getSecond();
797        }
798        return(++index < files.size());
799      }
800    }
801  }
802
803  // ==========================================================================
804  //  Tool
805  // ==========================================================================
806
807  /**
808   * Run Map-Reduce Job to perform the files copy.
809   */
810  private void runCopyJob(final Path inputRoot, final Path outputRoot,
811      final String snapshotName, final Path snapshotDir, final boolean verifyChecksum,
812      final String filesUser, final String filesGroup, final int filesMode,
813      final int mappers, final int bandwidthMB)
814          throws IOException, InterruptedException, ClassNotFoundException {
815    Configuration conf = getConf();
816    if (filesGroup != null) conf.set(CONF_FILES_GROUP, filesGroup);
817    if (filesUser != null) conf.set(CONF_FILES_USER, filesUser);
818    if (mappers > 0) {
819      conf.setInt(CONF_NUM_SPLITS, mappers);
820      conf.setInt(MR_NUM_MAPS, mappers);
821    }
822    conf.setInt(CONF_FILES_MODE, filesMode);
823    conf.setBoolean(CONF_CHECKSUM_VERIFY, verifyChecksum);
824    conf.set(CONF_OUTPUT_ROOT, outputRoot.toString());
825    conf.set(CONF_INPUT_ROOT, inputRoot.toString());
826    conf.setInt(CONF_BANDWIDTH_MB, bandwidthMB);
827    conf.set(CONF_SNAPSHOT_NAME, snapshotName);
828    conf.set(CONF_SNAPSHOT_DIR, snapshotDir.toString());
829
830    String jobname = conf.get(CONF_MR_JOB_NAME, "ExportSnapshot-" + snapshotName);
831    Job job = new Job(conf);
832    job.setJobName(jobname);
833    job.setJarByClass(ExportSnapshot.class);
834    TableMapReduceUtil.addDependencyJars(job);
835    job.setMapperClass(ExportMapper.class);
836    job.setInputFormatClass(ExportSnapshotInputFormat.class);
837    job.setOutputFormatClass(NullOutputFormat.class);
838    job.setMapSpeculativeExecution(false);
839    job.setNumReduceTasks(0);
840
841    // Acquire the delegation Tokens
842    Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX);
843    TokenCache.obtainTokensForNamenodes(job.getCredentials(),
844      new Path[] { inputRoot }, srcConf);
845    Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX);
846    TokenCache.obtainTokensForNamenodes(job.getCredentials(),
847        new Path[] { outputRoot }, destConf);
848
849    // Run the MR Job
850    if (!job.waitForCompletion(true)) {
851      throw new ExportSnapshotException(job.getStatus().getFailureInfo());
852    }
853  }
854
855  private void verifySnapshot(final Configuration baseConf,
856      final FileSystem fs, final Path rootDir, final Path snapshotDir) throws IOException {
857    // Update the conf with the current root dir, since may be a different cluster
858    Configuration conf = new Configuration(baseConf);
859    CommonFSUtils.setRootDir(conf, rootDir);
860    CommonFSUtils.setFsDefault(conf, CommonFSUtils.getRootDir(conf));
861    SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
862    SnapshotReferenceUtil.verifySnapshot(conf, fs, snapshotDir, snapshotDesc);
863  }
864
865  private void setConfigParallel(FileSystem outputFs, List<Path> traversedPath,
866      BiConsumer<FileSystem, Path> task, Configuration conf) throws IOException {
867    ExecutorService pool = Executors
868        .newFixedThreadPool(conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS));
869    List<Future<Void>> futures = new ArrayList<>();
870    for (Path dstPath : traversedPath) {
871      Future<Void> future = (Future<Void>) pool.submit(() -> task.accept(outputFs, dstPath));
872      futures.add(future);
873    }
874    try {
875      for (Future<Void> future : futures) {
876        future.get();
877      }
878    } catch (InterruptedException | ExecutionException e) {
879      throw new IOException(e);
880    } finally {
881      pool.shutdownNow();
882    }
883  }
884
885  private void setOwnerParallel(FileSystem outputFs, String filesUser, String filesGroup,
886      Configuration conf, List<Path> traversedPath) throws IOException {
887    setConfigParallel(outputFs, traversedPath, (fs, path) -> {
888      try {
889        fs.setOwner(path, filesUser, filesGroup);
890      } catch (IOException e) {
891        throw new RuntimeException(
892            "set owner for file " + path + " to " + filesUser + ":" + filesGroup + " failed", e);
893      }
894    }, conf);
895  }
896
897  private void setPermissionParallel(final FileSystem outputFs, final short filesMode,
898      final List<Path> traversedPath, final Configuration conf) throws IOException {
899    if (filesMode <= 0) {
900      return;
901    }
902    FsPermission perm = new FsPermission(filesMode);
903    setConfigParallel(outputFs, traversedPath, (fs, path) -> {
904      try {
905        fs.setPermission(path, perm);
906      } catch (IOException e) {
907        throw new RuntimeException(
908            "set permission for file " + path + " to " + filesMode + " failed", e);
909      }
910    }, conf);
911  }
912
  // Tool state, populated from the command line in processOptions().
  private boolean verifyTarget = true;    // cleared by Options.NO_TARGET_VERIFY
  private boolean verifyChecksum = true;  // cleared by Options.NO_CHECKSUM_VERIFY
  private String snapshotName = null;     // Options.SNAPSHOT (required)
  private String targetName = null;       // Options.TARGET_NAME; defaults to snapshotName in doWork()
  private boolean overwrite = false;      // Options.OVERWRITE
  private String filesGroup = null;       // Options.CHGROUP; null = leave group unchanged
  private String filesUser = null;        // Options.CHUSER; null = leave owner unchanged
  private Path outputRoot = null;         // Options.COPY_TO (required)
  private Path inputRoot = null;          // Options.COPY_FROM; defaults to the cluster root dir
  private int bandwidthMB = Integer.MAX_VALUE; // Options.BANDWIDTH; cap in MB (see CONF_BANDWIDTH_MB)
  private int filesMode = 0;              // Options.CHMOD, parsed as octal; 0 = leave unchanged
  private int mappers = 0;                // Options.MAPPERS; 0 = let the input format decide
925
926  @Override
927  protected void processOptions(CommandLine cmd) {
928    snapshotName = cmd.getOptionValue(Options.SNAPSHOT.getLongOpt(), snapshotName);
929    targetName = cmd.getOptionValue(Options.TARGET_NAME.getLongOpt(), targetName);
930    if (cmd.hasOption(Options.COPY_TO.getLongOpt())) {
931      outputRoot = new Path(cmd.getOptionValue(Options.COPY_TO.getLongOpt()));
932    }
933    if (cmd.hasOption(Options.COPY_FROM.getLongOpt())) {
934      inputRoot = new Path(cmd.getOptionValue(Options.COPY_FROM.getLongOpt()));
935    }
936    mappers = getOptionAsInt(cmd, Options.MAPPERS.getLongOpt(), mappers);
937    filesUser = cmd.getOptionValue(Options.CHUSER.getLongOpt(), filesUser);
938    filesGroup = cmd.getOptionValue(Options.CHGROUP.getLongOpt(), filesGroup);
939    filesMode = getOptionAsInt(cmd, Options.CHMOD.getLongOpt(), filesMode, 8);
940    bandwidthMB = getOptionAsInt(cmd, Options.BANDWIDTH.getLongOpt(), bandwidthMB);
941    overwrite = cmd.hasOption(Options.OVERWRITE.getLongOpt());
942    // And verifyChecksum and verifyTarget with values read from old args in processOldArgs(...).
943    verifyChecksum = !cmd.hasOption(Options.NO_CHECKSUM_VERIFY.getLongOpt());
944    verifyTarget = !cmd.hasOption(Options.NO_TARGET_VERIFY.getLongOpt());
945  }
946
947  /**
948   * Execute the export snapshot by copying the snapshot metadata, hfiles and wals.
949   * @return 0 on success, and != 0 upon failure.
950   */
951  @Override
952  public int doWork() throws IOException {
953    Configuration conf = getConf();
954
955    // Check user options
956    if (snapshotName == null) {
957      System.err.println("Snapshot name not provided.");
958      LOG.error("Use -h or --help for usage instructions.");
959      return 0;
960    }
961
962    if (outputRoot == null) {
963      System.err.println("Destination file-system (--" + Options.COPY_TO.getLongOpt()
964              + ") not provided.");
965      LOG.error("Use -h or --help for usage instructions.");
966      return 0;
967    }
968
969    if (targetName == null) {
970      targetName = snapshotName;
971    }
972    if (inputRoot == null) {
973      inputRoot = CommonFSUtils.getRootDir(conf);
974    } else {
975      CommonFSUtils.setRootDir(conf, inputRoot);
976    }
977
978    Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX);
979    srcConf.setBoolean("fs." + inputRoot.toUri().getScheme() + ".impl.disable.cache", true);
980    FileSystem inputFs = FileSystem.get(inputRoot.toUri(), srcConf);
981    Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX);
982    destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true);
983    FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf);
984    boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) ||
985        conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null;
986    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot);
987    Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot,
988        destConf);
989    Path outputSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot);
990    Path initialOutputSnapshotDir = skipTmp ? outputSnapshotDir : snapshotTmpDir;
991    LOG.debug("inputFs={}, inputRoot={}", inputFs.getUri().toString(), inputRoot);
992    LOG.debug("outputFs={}, outputRoot={}, skipTmp={}, initialOutputSnapshotDir={}",
993      outputFs, outputRoot.toString(), skipTmp, initialOutputSnapshotDir);
994
995    // Find the necessary directory which need to change owner and group
996    Path needSetOwnerDir = SnapshotDescriptionUtils.getSnapshotRootDir(outputRoot);
997    if (outputFs.exists(needSetOwnerDir)) {
998      if (skipTmp) {
999        needSetOwnerDir = outputSnapshotDir;
1000      } else {
1001        needSetOwnerDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(outputRoot, destConf);
1002        if (outputFs.exists(needSetOwnerDir)) {
1003          needSetOwnerDir = snapshotTmpDir;
1004        }
1005      }
1006    }
1007
1008    // Check if the snapshot already exists
1009    if (outputFs.exists(outputSnapshotDir)) {
1010      if (overwrite) {
1011        if (!outputFs.delete(outputSnapshotDir, true)) {
1012          System.err.println("Unable to remove existing snapshot directory: " + outputSnapshotDir);
1013          return 1;
1014        }
1015      } else {
1016        System.err.println("The snapshot '" + targetName +
1017          "' already exists in the destination: " + outputSnapshotDir);
1018        return 1;
1019      }
1020    }
1021
1022    if (!skipTmp) {
1023      // Check if the snapshot already in-progress
1024      if (outputFs.exists(snapshotTmpDir)) {
1025        if (overwrite) {
1026          if (!outputFs.delete(snapshotTmpDir, true)) {
1027            System.err.println("Unable to remove existing snapshot tmp directory: "+snapshotTmpDir);
1028            return 1;
1029          }
1030        } else {
1031          System.err.println("A snapshot with the same name '"+ targetName +"' may be in-progress");
1032          System.err.println("Please check "+snapshotTmpDir+". If the snapshot has completed, ");
1033          System.err.println("consider removing "+snapshotTmpDir+" by using the -overwrite option");
1034          return 1;
1035        }
1036      }
1037    }
1038
1039    // Step 1 - Copy fs1:/.snapshot/<snapshot> to  fs2:/.snapshot/.tmp/<snapshot>
1040    // The snapshot references must be copied before the hfiles otherwise the cleaner
1041    // will remove them because they are unreferenced.
1042    List<Path> travesedPaths = new ArrayList<>();
1043    boolean copySucceeded = false;
1044    try {
1045      LOG.info("Copy Snapshot Manifest from " + snapshotDir + " to " + initialOutputSnapshotDir);
1046      travesedPaths =
1047          FSUtils.copyFilesParallel(inputFs, snapshotDir, outputFs, initialOutputSnapshotDir, conf,
1048              conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS));
1049      copySucceeded = true;
1050    } catch (IOException e) {
1051      throw new ExportSnapshotException("Failed to copy the snapshot directory: from=" +
1052        snapshotDir + " to=" + initialOutputSnapshotDir, e);
1053    } finally {
1054      if (copySucceeded) {
1055        if (filesUser != null || filesGroup != null) {
1056          LOG.warn((filesUser == null ? "" : "Change the owner of " + needSetOwnerDir + " to "
1057              + filesUser)
1058              + (filesGroup == null ? "" : ", Change the group of " + needSetOwnerDir + " to "
1059                  + filesGroup));
1060          setOwnerParallel(outputFs, filesUser, filesGroup, conf, travesedPaths);
1061        }
1062        if (filesMode > 0) {
1063          LOG.warn("Change the permission of " + needSetOwnerDir + " to " + filesMode);
1064          setPermissionParallel(outputFs, (short)filesMode, travesedPaths, conf);
1065        }
1066      }
1067    }
1068
1069    // Write a new .snapshotinfo if the target name is different from the source name
1070    if (!targetName.equals(snapshotName)) {
1071      SnapshotDescription snapshotDesc =
1072        SnapshotDescriptionUtils.readSnapshotInfo(inputFs, snapshotDir)
1073          .toBuilder()
1074          .setName(targetName)
1075          .build();
1076      SnapshotDescriptionUtils.writeSnapshotInfo(snapshotDesc, initialOutputSnapshotDir, outputFs);
1077      if (filesUser != null || filesGroup != null) {
1078        outputFs.setOwner(new Path(initialOutputSnapshotDir,
1079          SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), filesUser, filesGroup);
1080      }
1081      if (filesMode > 0) {
1082        outputFs.setPermission(new Path(initialOutputSnapshotDir,
1083          SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), new FsPermission((short)filesMode));
1084      }
1085    }
1086
1087    // Step 2 - Start MR Job to copy files
1088    // The snapshot references must be copied before the files otherwise the files gets removed
1089    // by the HFileArchiver, since they have no references.
1090    try {
1091      runCopyJob(inputRoot, outputRoot, snapshotName, snapshotDir, verifyChecksum,
1092                 filesUser, filesGroup, filesMode, mappers, bandwidthMB);
1093
1094      LOG.info("Finalize the Snapshot Export");
1095      if (!skipTmp) {
1096        // Step 3 - Rename fs2:/.snapshot/.tmp/<snapshot> fs2:/.snapshot/<snapshot>
1097        if (!outputFs.rename(snapshotTmpDir, outputSnapshotDir)) {
1098          throw new ExportSnapshotException("Unable to rename snapshot directory from=" +
1099            snapshotTmpDir + " to=" + outputSnapshotDir);
1100        }
1101      }
1102
1103      // Step 4 - Verify snapshot integrity
1104      if (verifyTarget) {
1105        LOG.info("Verify snapshot integrity");
1106        verifySnapshot(destConf, outputFs, outputRoot, outputSnapshotDir);
1107      }
1108
1109      LOG.info("Export Completed: " + targetName);
1110      return 0;
1111    } catch (Exception e) {
1112      LOG.error("Snapshot export failed", e);
1113      if (!skipTmp) {
1114        outputFs.delete(snapshotTmpDir, true);
1115      }
1116      outputFs.delete(outputSnapshotDir, true);
1117      return 1;
1118    } finally {
1119      IOUtils.closeStream(inputFs);
1120      IOUtils.closeStream(outputFs);
1121    }
1122  }
1123
1124  @Override
1125  protected void printUsage() {
1126    super.printUsage();
1127    System.out.println("\n"
1128        + "Examples:\n"
1129        + "  hbase snapshot export \\\n"
1130        + "    --snapshot MySnapshot --copy-to hdfs://srv2:8082/hbase \\\n"
1131        + "    --chuser MyUser --chgroup MyGroup --chmod 700 --mappers 16\n"
1132        + "\n"
1133        + "  hbase snapshot export \\\n"
1134        + "    --snapshot MySnapshot --copy-from hdfs://srv2:8082/hbase \\\n"
1135        + "    --copy-to hdfs://srv1:50070/hbase");
1136  }
1137
  @Override protected void addOptions() {
    // Register the command-line options understood by the export tool;
    // --snapshot is the only required argument.
    addRequiredOption(Options.SNAPSHOT);
    // Source/destination roots and the name to export under.
    addOption(Options.COPY_TO);
    addOption(Options.COPY_FROM);
    addOption(Options.TARGET_NAME);
    // Verification and overwrite behavior.
    addOption(Options.NO_CHECKSUM_VERIFY);
    addOption(Options.NO_TARGET_VERIFY);
    addOption(Options.OVERWRITE);
    // Ownership/permissions applied to the copied files.
    addOption(Options.CHUSER);
    addOption(Options.CHGROUP);
    addOption(Options.CHMOD);
    // Copy-job tuning.
    addOption(Options.MAPPERS);
    addOption(Options.BANDWIDTH);
  }
1152
  public static void main(String[] args) {
    // Tool entry point. doStaticMain is inherited from the tool base class
    // (not visible in this file); presumably it parses the arguments, runs
    // doWork() and exits with its return code — confirm against the base class.
    new ExportSnapshot().doStaticMain(args);
  }
1156}