View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  
20  package org.apache.hadoop.hbase.regionserver;
21  
22  import java.io.FileNotFoundException;
23  import java.io.IOException;
24  import java.io.InterruptedIOException;
25  import java.util.ArrayList;
26  import java.util.Collection;
27  import java.util.List;
28  import java.util.Map;
29  import java.util.UUID;
30  
31  import org.apache.commons.logging.Log;
32  import org.apache.commons.logging.LogFactory;
33  import org.apache.hadoop.hbase.classification.InterfaceAudience;
34  import org.apache.hadoop.conf.Configuration;
35  import org.apache.hadoop.fs.FSDataInputStream;
36  import org.apache.hadoop.fs.FSDataOutputStream;
37  import org.apache.hadoop.fs.FileStatus;
38  import org.apache.hadoop.fs.FileSystem;
39  import org.apache.hadoop.fs.FileUtil;
40  import org.apache.hadoop.fs.LocatedFileStatus;
41  import org.apache.hadoop.fs.Path;
42  import org.apache.hadoop.fs.permission.FsPermission;
43  import org.apache.hadoop.hbase.HColumnDescriptor;
44  import org.apache.hadoop.hbase.HConstants;
45  import org.apache.hadoop.hbase.HRegionInfo;
46  import org.apache.hadoop.hbase.HTableDescriptor;
47  import org.apache.hadoop.hbase.KeyValue;
48  import org.apache.hadoop.hbase.KeyValueUtil;
49  import org.apache.hadoop.hbase.backup.HFileArchiver;
50  import org.apache.hadoop.hbase.fs.HFileSystem;
51  import org.apache.hadoop.hbase.io.Reference;
52  import org.apache.hadoop.hbase.util.Bytes;
53  import org.apache.hadoop.hbase.util.FSHDFSUtils;
54  import org.apache.hadoop.hbase.util.FSUtils;
55  import org.apache.hadoop.hbase.util.Pair;
56  import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
57  
58  import com.google.common.collect.Lists;
59  
60  /**
61   * View to an on-disk Region.
62   * Provides the set of methods necessary to interact with the on-disk region data.
63   */
64  @InterfaceAudience.Private
65  public class HRegionFileSystem {
66    private static final Log LOG = LogFactory.getLog(HRegionFileSystem.class);
67  
68    /** Name of the region info file that resides just under the region directory. */
69    public final static String REGION_INFO_FILE = ".regioninfo";
70  
71    /** Temporary subdirectory of the region directory used for merges. */
72    public static final String REGION_MERGES_DIR = ".merges";
73  
74    /** Temporary subdirectory of the region directory used for splits. */
75    public static final String REGION_SPLITS_DIR = ".splits";
76  
77    /** Temporary subdirectory of the region directory used for compaction output. */
78    static final String REGION_TEMP_DIR = ".tmp";
79  
80    private final HRegionInfo regionInfo;
81    //regionInfo for interacting with FS (getting encodedName, etc)
82    private final HRegionInfo regionInfoForFs;
83    private final Configuration conf;
84    private final Path tableDir;
85    private final FileSystem fs;
86  
87    /**
88     * In order to handle NN connectivity hiccups, one need to retry non-idempotent operation at the
89     * client level.
90     */
91    private final int hdfsClientRetriesNumber;
92    private final int baseSleepBeforeRetries;
93    private static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10;
94    private static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000;
95  
96    /**
97     * Create a view to the on-disk region
98     * @param conf the {@link Configuration} to use
99     * @param fs {@link FileSystem} that contains the region
100    * @param tableDir {@link Path} to where the table is being stored
101    * @param regionInfo {@link HRegionInfo} for region
102    */
103   HRegionFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir,
104       final HRegionInfo regionInfo) {
105     this.fs = fs;
106     this.conf = conf;
107     this.tableDir = tableDir;
108     this.regionInfo = regionInfo;
109     this.regionInfoForFs = ServerRegionReplicaUtil.getRegionInfoForFs(regionInfo);
110     this.hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
111       DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
112     this.baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
113       DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
114  }
115 
116   /** @return the underlying {@link FileSystem} */
117   public FileSystem getFileSystem() {
118     return this.fs;
119   }
120 
121   /** @return the {@link HRegionInfo} that describe this on-disk region view */
122   public HRegionInfo getRegionInfo() {
123     return this.regionInfo;
124   }
125 
126   public HRegionInfo getRegionInfoForFS() {
127     return this.regionInfoForFs;
128   }
129 
130   /** @return {@link Path} to the region's root directory. */
131   public Path getTableDir() {
132     return this.tableDir;
133   }
134 
135   /** @return {@link Path} to the region directory. */
136   public Path getRegionDir() {
137     return new Path(this.tableDir, this.regionInfoForFs.getEncodedName());
138   }
139 
140   // ===========================================================================
141   //  Temp Helpers
142   // ===========================================================================
143   /** @return {@link Path} to the region's temp directory, used for file creations */
144   Path getTempDir() {
145     return new Path(getRegionDir(), REGION_TEMP_DIR);
146   }
147 
  /**
   * Clean up any temp detritus that may have been left around from previous operation attempts.
   * Removes the region's {@link #REGION_TEMP_DIR} directory and everything under it.
   */
  void cleanupTempDir() throws IOException {
    deleteDir(getTempDir());
  }
154 
155   // ===========================================================================
156   //  Store/StoreFile Helpers
157   // ===========================================================================
158   /**
159    * Returns the directory path of the specified family
160    * @param familyName Column Family Name
161    * @return {@link Path} to the directory of the specified family
162    */
163   public Path getStoreDir(final String familyName) {
164     return new Path(this.getRegionDir(), familyName);
165   }
166 
167   /**
168    * Create the store directory for the specified family name
169    * @param familyName Column Family Name
170    * @return {@link Path} to the directory of the specified family
171    * @throws IOException if the directory creation fails.
172    */
173   Path createStoreDir(final String familyName) throws IOException {
174     Path storeDir = getStoreDir(familyName);
175     if(!fs.exists(storeDir) && !createDir(storeDir))
176       throw new IOException("Failed creating "+storeDir);
177     return storeDir;
178   }
179 
180   /**
181    * Set storage policy for a given column family.
182    * <p>
183    * If we're running on a version of HDFS that doesn't support the given storage policy
184    * (or storage policies at all), then we'll issue a log message and continue.
185    * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html
186    * for possible list e.g 'COLD', 'WARM', 'HOT', 'ONE_SSD', 'ALL_SSD', 'LAZY_PERSIST'.
187    *
188    * @param familyName The name of column family.
189    * @param policyName The name of the storage policy
190    */
191   public void setStoragePolicy(String familyName, String policyName) throws IOException {
192     if (this.fs instanceof HFileSystem) {
193       FSUtils.setStoragePolicy(
194           ((HFileSystem) this.fs).getBackingFs(),
195           getStoreDir(familyName), policyName);
196     } else {
197       FSUtils.setStoragePolicy(this.fs, getStoreDir(familyName), policyName);
198     }
199   }
200 
201   /**
202    * Get the storage policy of the directory of CF.
203    * @param familyName The name of column family.
204    * @return Storage policy name, or {@code null} if not using {@link HFileSystem} or exception
205    *         thrown when trying to get policy
206    */
207   public String getStoragePolicyName(String familyName) {
208     if (this.fs instanceof HFileSystem) {
209       Path storeDir = getStoreDir(familyName);
210       return ((HFileSystem) this.fs).getStoragePolicyName(storeDir);
211     }
212 
213     return null;
214   }
215 
  /**
   * Returns the store files available for the family.
   * This methods performs the filtering based on the valid store files.
   * @param familyName Column Family Name
   * @return a set of {@link StoreFileInfo} for the specified family, or {@code null}
   *         if the family directory cannot be listed.
   */
  public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
    return getStoreFiles(Bytes.toString(familyName));
  }

  /**
   * Returns the store files available for the family, with validation enabled.
   * @param familyName Column Family Name
   * @return a set of {@link StoreFileInfo} for the specified family, or {@code null}
   *         if the family directory cannot be listed.
   */
  public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
    return getStoreFiles(familyName, true);
  }
229 
230   /**
231    * Returns the store files available for the family.
232    * This methods performs the filtering based on the valid store files.
233    * @param familyName Column Family Name
234    * @return a set of {@link StoreFileInfo} for the specified family.
235    */
236   public Collection<StoreFileInfo> getStoreFiles(final String familyName, final boolean validate)
237       throws IOException {
238     Path familyDir = getStoreDir(familyName);
239     FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
240     if (files == null) {
241       if (LOG.isTraceEnabled()) {
242         LOG.trace("No StoreFiles for: " + familyDir);
243       }
244       return null;
245     }
246 
247     ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
248     for (FileStatus status: files) {
249       if (validate && !StoreFileInfo.isValid(status)) {
250         LOG.warn("Invalid StoreFile: " + status.getPath());
251         continue;
252       }
253       StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
254         regionInfoForFs, familyName, status.getPath());
255       storeFiles.add(info);
256 
257     }
258     return storeFiles;
259   }
260 
261   /**
262    * Returns the store files' LocatedFileStatus which available for the family.
263    * This methods performs the filtering based on the valid store files.
264    * @param familyName Column Family Name
265    * @return a list of store files' LocatedFileStatus for the specified family.
266    */
267   public static List<LocatedFileStatus> getStoreFilesLocatedStatus(
268       final HRegionFileSystem regionfs, final String familyName,
269       final boolean validate) throws IOException {
270     Path familyDir = regionfs.getStoreDir(familyName);
271     List<LocatedFileStatus> locatedFileStatuses = FSUtils.listLocatedStatus(
272         regionfs.getFileSystem(), familyDir);
273     if (locatedFileStatuses == null) {
274       if (LOG.isTraceEnabled()) {
275         LOG.trace("No StoreFiles for: " + familyDir);
276       }
277       return null;
278     }
279 
280     List<LocatedFileStatus> validStoreFiles = Lists.newArrayList();
281     for (LocatedFileStatus status : locatedFileStatuses) {
282       if (validate && !StoreFileInfo.isValid(status)) {
283         LOG.warn("Invalid StoreFile: " + status.getPath());
284       } else {
285         validStoreFiles.add(status);
286       }
287     }
288     return validStoreFiles;
289   }
290 
291   /**
292    * Return Qualified Path of the specified family/file
293    *
294    * @param familyName Column Family Name
295    * @param fileName File Name
296    * @return The qualified Path for the specified family/file
297    */
298   Path getStoreFilePath(final String familyName, final String fileName) {
299     Path familyDir = getStoreDir(familyName);
300     return new Path(familyDir, fileName).makeQualified(this.fs);
301   }
302 
  /**
   * Return the store file information of the specified family/file.
   *
   * @param familyName Column Family Name
   * @param fileName File Name
   * @return The {@link StoreFileInfo} for the specified family/file
   */
  StoreFileInfo getStoreFileInfo(final String familyName, final String fileName)
      throws IOException {
    Path familyDir = getStoreDir(familyName);
    // Goes through the replica util: replica regions may resolve the file
    // against the primary replica's directory.
    return ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo,
      regionInfoForFs, familyName, new Path(familyDir, fileName));
  }
316 
317   /**
318    * Returns true if the specified family has reference files
319    * @param familyName Column Family Name
320    * @return true if family contains reference files
321    * @throws IOException
322    */
323   public boolean hasReferences(final String familyName) throws IOException {
324     FileStatus[] files = FSUtils.listStatus(fs, getStoreDir(familyName));
325     if (files != null) {
326       for(FileStatus stat: files) {
327         if(stat.isDirectory()) {
328           continue;
329         }
330         if(StoreFileInfo.isReference(stat.getPath())) {
331           return true;
332         }
333       }
334     }
335     return false;
336   }
337 
338   /**
339    * Check whether region has Reference file
340    * @param htd table desciptor of the region
341    * @return true if region has reference file
342    * @throws IOException
343    */
344   public boolean hasReferences(final HTableDescriptor htd) throws IOException {
345     for (HColumnDescriptor family : htd.getFamilies()) {
346       if (hasReferences(family.getNameAsString())) {
347         return true;
348       }
349     }
350     return false;
351   }
352 
353   /**
354    * @return the set of families present on disk
355    * @throws IOException
356    */
357   public Collection<String> getFamilies() throws IOException {
358     FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
359     if (fds == null) return null;
360 
361     ArrayList<String> families = new ArrayList<String>(fds.length);
362     for (FileStatus status: fds) {
363       families.add(status.getPath().getName());
364     }
365 
366     return families;
367   }
368 
369   /**
370    * Remove the region family from disk, archiving the store files.
371    * @param familyName Column Family Name
372    * @throws IOException if an error occours during the archiving
373    */
374   public void deleteFamily(final String familyName) throws IOException {
375     // archive family store files
376     HFileArchiver.archiveFamily(fs, conf, regionInfoForFs, tableDir, Bytes.toBytes(familyName));
377 
378     // delete the family folder
379     Path familyDir = getStoreDir(familyName);
380     if(fs.exists(familyDir) && !deleteDir(familyDir))
381       throw new IOException("Could not delete family " + familyName
382           + " from FileSystem for region " + regionInfoForFs.getRegionNameAsString() + "("
383           + regionInfoForFs.getEncodedName() + ")");
384   }
385 
386   /**
387    * Generate a unique file name, used by createTempName() and commitStoreFile()
388    * @param suffix extra information to append to the generated name
389    * @return Unique file name
390    */
391   private static String generateUniqueName(final String suffix) {
392     String name = UUID.randomUUID().toString().replaceAll("-", "");
393     if (suffix != null) name += suffix;
394     return name;
395   }
396 
  /**
   * Generate a unique temporary Path. Used in conjunction with commitStoreFile()
   * to get a safer file creation.
   * <code>
   * Path file = fs.createTempName();
   * ...StoreFile.Writer(file)...
   * fs.commitStoreFile("family", file);
   * </code>
   *
   * @return Unique {@link Path} of the temporary file
   */
  public Path createTempName() {
    return createTempName(null);
  }

  /**
   * Generate a unique temporary Path. Used in conjunction with commitStoreFile()
   * to get a safer file creation.
   * <code>
   * Path file = fs.createTempName();
   * ...StoreFile.Writer(file)...
   * fs.commitStoreFile("family", file);
   * </code>
   *
   * @param suffix extra information to append to the generated name, may be {@code null}
   * @return Unique {@link Path} of the temporary file
   */
  public Path createTempName(final String suffix) {
    return new Path(getTempDir(), generateUniqueName(suffix));
  }
427 
  /**
   * Move the file from a build/temp location to the main family store directory.
   * @param familyName Family that will gain the file
   * @param buildPath {@link Path} to the file to commit.
   * @return The new {@link Path} of the committed file
   * @throws IOException
   */
  public Path commitStoreFile(final String familyName, final Path buildPath) throws IOException {
    // seqNum -1 and generateNewName=false: keep the build file's own name.
    Path dstPath = preCommitStoreFile(familyName, buildPath, -1, false);
    return commitStoreFile(buildPath, dstPath);
  }
439 
440   /**
441    * Generate the filename in the main family store directory for moving the file from a build/temp
442    *  location.
443    * @param familyName Family that will gain the file
444    * @param buildPath {@link Path} to the file to commit.
445    * @param seqNum Sequence Number to append to the file name (less then 0 if no sequence number)
446    * @param generateNewName False if you want to keep the buildPath name
447    * @return The new {@link Path} of the to be committed file
448    * @throws IOException
449    */
450   private Path preCommitStoreFile(final String familyName, final Path buildPath,
451       final long seqNum, final boolean generateNewName) throws IOException {
452     Path storeDir = getStoreDir(familyName);
453     if(!fs.exists(storeDir) && !createDir(storeDir))
454       throw new IOException("Failed creating " + storeDir);
455 
456     String name = buildPath.getName();
457     if (generateNewName) {
458       name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_");
459     }
460     Path dstPath = new Path(storeDir, name);
461     if (!fs.exists(buildPath)) {
462       throw new FileNotFoundException(buildPath.toString());
463     }
464     if (LOG.isDebugEnabled()) {
465       LOG.debug("Committing store file " + buildPath + " as " + dstPath);
466     }
467     return dstPath;
468   }
469 
  /**
   * Moves file from staging dir to region dir.
   * @param buildPath {@link Path} to the file to commit.
   * @param dstPath {@link Path} to the file under region dir
   * @return The {@link Path} of the committed file
   * @throws IOException
   */
  Path commitStoreFile(final Path buildPath, Path dstPath) throws IOException {
    // buildPath exists, therefore not doing an exists() check.
    if (!rename(buildPath, dstPath)) {
      throw new IOException("Failed rename of " + buildPath + " to " + dstPath);
    }
    return dstPath;
  }
484 
485   /**
486    * Moves multiple store files to the relative region's family store directory.
487    * @param storeFiles list of store files divided by family
488    * @throws IOException
489    */
490   void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles) throws IOException {
491     for (Map.Entry<byte[], List<StoreFile>> es: storeFiles.entrySet()) {
492       String familyName = Bytes.toString(es.getKey());
493       for (StoreFile sf: es.getValue()) {
494         commitStoreFile(familyName, sf.getPath());
495       }
496     }
497   }
498 
  /**
   * Archives the specified store file from the specified family.
   * The file is moved to the archive, not deleted outright.
   * @param familyName Family that contains the store files
   * @param filePath {@link Path} to the store file to remove
   * @throws IOException if the archiving fails
   */
  public void removeStoreFile(final String familyName, final Path filePath)
      throws IOException {
    HFileArchiver.archiveStoreFile(this.conf, this.fs, this.regionInfoForFs,
        this.tableDir, Bytes.toBytes(familyName), filePath);
  }

  /**
   * Closes and archives the specified store files from the specified family.
   * The files are moved to the archive, not deleted outright.
   * @param familyName Family that contains the store files
   * @param storeFiles set of store files to remove
   * @throws IOException if the archiving fails
   */
  public void removeStoreFiles(final String familyName, final Collection<StoreFile> storeFiles)
      throws IOException {
    HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfoForFs,
        this.tableDir, Bytes.toBytes(familyName), storeFiles);
  }
522 
  /**
   * Bulk load: Add a specified store file to the specified family.
   * If the source file is on the same file-system it is moved from the
   * source location to the destination location, otherwise it is copied over.
   *
   * @param familyName Family that will gain the file
   * @param srcPath {@link Path} to the file to import
   * @param seqNum Bulk Load sequence number
   * @return a {@link Pair} of the (possibly copied) source path and the
   *         pre-committed destination path of the bulk loaded file
   * @throws IOException
   */
  Pair<Path, Path> bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
      throws IOException {
    // Copy the file if it's on another filesystem
    FileSystem srcFs = srcPath.getFileSystem(conf);
    // Resolve links so the filesystem comparison below sees the real source.
    srcPath = srcFs.resolvePath(srcPath);
    FileSystem realSrcFs = srcPath.getFileSystem(conf);
    FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;

    // We can't compare FileSystem instances as equals() includes UGI instance
    // as part of the comparison and won't work when doing SecureBulkLoad
    // TODO deal with viewFS
    if (!FSHDFSUtils.isSameHdfs(conf, realSrcFs, desFs)) {
      LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
          "the destination store. Copying file over to destination filesystem.");
      Path tmpPath = createTempName();
      FileUtil.copy(realSrcFs, srcPath, fs, tmpPath, false, conf);
      LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
      srcPath = tmpPath;
    }

    return new Pair<>(srcPath, preCommitStoreFile(familyName, srcPath, seqNum, true));
  }
556 
  // ===========================================================================
  //  Splits Helpers
  // ===========================================================================
  /** @return {@link Path} to the temp directory used during split operations */
  Path getSplitsDir() {
    return new Path(getRegionDir(), REGION_SPLITS_DIR);
  }

  /** @return {@link Path} to the given daughter region's directory under the splits dir */
  Path getSplitsDir(final HRegionInfo hri) {
    return new Path(getSplitsDir(), hri.getEncodedName());
  }
568 
  /**
   * Clean up any split detritus that may have been left around from previous split attempts.
   * Removes the region's {@link #REGION_SPLITS_DIR} directory and everything under it.
   */
  void cleanupSplitsDir() throws IOException {
    deleteDir(getSplitsDir());
  }
575 
576   /**
577    * Clean up any split detritus that may have been left around from previous
578    * split attempts.
579    * Call this method on initial region deploy.
580    * @throws IOException
581    */
582   void cleanupAnySplitDetritus() throws IOException {
583     Path splitdir = this.getSplitsDir();
584     if (!fs.exists(splitdir)) return;
585     // Look at the splitdir.  It could have the encoded names of the daughter
586     // regions we tried to make.  See if the daughter regions actually got made
587     // out under the tabledir.  If here under splitdir still, then the split did
588     // not complete.  Try and do cleanup.  This code WILL NOT catch the case
589     // where we successfully created daughter a but regionserver crashed during
590     // the creation of region b.  In this case, there'll be an orphan daughter
591     // dir in the filesystem.  TOOD: Fix.
592     FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
593     if (daughters != null) {
594       for (FileStatus daughter: daughters) {
595         Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
596         if (fs.exists(daughterDir) && !deleteDir(daughterDir)) {
597           throw new IOException("Failed delete of " + daughterDir);
598         }
599       }
600     }
601     cleanupSplitsDir();
602     LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
603   }
604 
605   /**
606    * Remove daughter region
607    * @param regionInfo daughter {@link HRegionInfo}
608    * @throws IOException
609    */
610   void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
611     Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
612     if (this.fs.exists(regionDir) && !deleteDir(regionDir)) {
613       throw new IOException("Failed delete of " + regionDir);
614     }
615   }
616 
617   /**
618    * Commit a daughter region, moving it from the split temporary directory
619    * to the proper location in the filesystem.
620    *
621    * @param regionInfo                 daughter {@link org.apache.hadoop.hbase.HRegionInfo}
622    * @throws IOException
623    */
624   Path commitDaughterRegion(final HRegionInfo regionInfo)
625       throws IOException {
626     Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
627     Path daughterTmpDir = this.getSplitsDir(regionInfo);
628 
629     if (fs.exists(daughterTmpDir)) {
630 
631       // Write HRI to a file in case we need to recover hbase:meta
632       Path regionInfoFile = new Path(daughterTmpDir, REGION_INFO_FILE);
633       byte[] regionInfoContent = getRegionInfoFileContent(regionInfo);
634       writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
635 
636       // Move the daughter temp dir to the table dir
637       if (!rename(daughterTmpDir, regionDir)) {
638         throw new IOException("Unable to rename " + daughterTmpDir + " to " + regionDir);
639       }
640     }
641 
642     return regionDir;
643   }
644 
645   /**
646    * Create the region splits directory.
647    */
648   void createSplitsDir(HRegionInfo daughterA, HRegionInfo daughterB) throws IOException {
649     Path splitdir = getSplitsDir();
650     if (fs.exists(splitdir)) {
651       LOG.info("The " + splitdir + " directory exists.  Hence deleting it to recreate it");
652       if (!deleteDir(splitdir)) {
653         throw new IOException("Failed deletion of " + splitdir + " before creating them again.");
654       }
655     }
656     // splitDir doesn't exists now. No need to do an exists() call for it.
657     if (!createDir(splitdir)) {
658       throw new IOException("Failed create of " + splitdir);
659     }
660     Path daughterATmpDir = getSplitsDir(daughterA);
661     if (!createDir(daughterATmpDir)) {
662       throw new IOException("Failed create of " + daughterATmpDir);
663     }
664     Path daughterBTmpDir = getSplitsDir(daughterB);
665     if (!createDir(daughterBTmpDir)) {
666       throw new IOException("Failed create of " + daughterBTmpDir);
667     }
668   }
669 
  /**
   * Write out a split reference. Package local so it doesnt leak out of
   * regionserver.
   * @param hri {@link HRegionInfo} of the destination
   * @param familyName Column Family Name
   * @param f File to split.
   * @param splitRow Split Row
   * @param top True if we are referring to the top half of the hfile.
   * @return Path to created reference, or {@code null} if the split row falls
   *         outside the file's key range (no reference needed).
   * @param splitPolicy split policy; when it opts to skip the range check, a
   *        reference is always written.
   * @throws IOException
   */
  Path splitStoreFile(final HRegionInfo hri, final String familyName, final StoreFile f,
      final byte[] splitRow, final boolean top, RegionSplitPolicy splitPolicy) throws IOException {

    if (splitPolicy == null || !splitPolicy.skipStoreFileRangeCheck(familyName)) {
      // Check whether the split row lies in the range of the store file
      // If it is outside the range, return directly.
      try {
        if (top) {
          //check if larger than last key.
          KeyValue splitKey = KeyValueUtil.createFirstOnRow(splitRow);
          byte[] lastKey = f.getLastKey();
          // If lastKey is null means storefile is empty.
          if (lastKey == null) {
            return null;
          }
          if (f.getComparator().compareFlatKey(splitKey.getBuffer(),
            splitKey.getKeyOffset(), splitKey.getKeyLength(), lastKey, 0, lastKey.length) > 0) {
            return null;
          }
        } else {
          //check if smaller than first key
          KeyValue splitKey = KeyValueUtil.createLastOnRow(splitRow);
          byte[] firstKey = f.getFirstKey();
          // If firstKey is null means storefile is empty.
          if (firstKey == null) {
            return null;
          }
          if (f.getComparator().compareFlatKey(splitKey.getBuffer(),
            splitKey.getKeyOffset(), splitKey.getKeyLength(), firstKey, 0, firstKey.length) < 0) {
            return null;
          }
        }
      } finally {
        // Always release the store file reader used for the key lookups above,
        // including on the early-return paths.
        f.closeReader(f.getCacheConf() != null ? f.getCacheConf().shouldEvictOnClose() : true);
      }
    }

    Path splitDir = new Path(getSplitsDir(hri), familyName);
    // A reference to the bottom half of the hsf store file.
    Reference r =
      top ? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
    // Add the referred-to regions name as a dot separated suffix.
    // See REF_NAME_REGEX regex above.  The referred-to regions name is
    // up in the path of the passed in <code>f</code> -- parentdir is family,
    // then the directory above is the region name.
    String parentRegionName = regionInfoForFs.getEncodedName();
    // Write reference with same file id only with the other region name as
    // suffix and into the new region location (under same family).
    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
    return r.write(fs, p);
  }
733 
  // ===========================================================================
  //  Merge Helpers
  // ===========================================================================
  /** @return {@link Path} to the temp directory used during merge operations */
  Path getMergesDir() {
    return new Path(getRegionDir(), REGION_MERGES_DIR);
  }

  /** @return {@link Path} to the given merging region's directory under the merges dir */
  Path getMergesDir(final HRegionInfo hri) {
    return new Path(getMergesDir(), hri.getEncodedName());
  }

  /**
   * Clean up any merge detritus that may have been left around from previous merge attempts.
   * Removes the region's {@link #REGION_MERGES_DIR} directory and everything under it.
   */
  void cleanupMergesDir() throws IOException {
    deleteDir(getMergesDir());
  }
752 
753   /**
754    * Remove merged region
755    * @param mergedRegion {@link HRegionInfo}
756    * @throws IOException
757    */
758   void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException {
759     Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName());
760     if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
761       throw new IOException("Failed delete of " + regionDir);
762     }
763   }
764 
765   static boolean mkdirs(FileSystem fs, Configuration conf, Path dir) throws IOException {
766     if (FSUtils.isDistributedFileSystem(fs) ||
767         !conf.getBoolean(HConstants.ENABLE_DATA_FILE_UMASK, false)) {
768       return fs.mkdirs(dir);
769     }
770     FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
771     return fs.mkdirs(dir, perms);
772   }
773   /**
774    * Create the region merges directory.
775    * @throws IOException If merges dir already exists or we fail to create it.
776    * @see HRegionFileSystem#cleanupMergesDir()
777    */
778   void createMergesDir() throws IOException {
779     Path mergesdir = getMergesDir();
780     if (fs.exists(mergesdir)) {
781       LOG.info("The " + mergesdir
782           + " directory exists.  Hence deleting it to recreate it");
783       if (!fs.delete(mergesdir, true)) {
784         throw new IOException("Failed deletion of " + mergesdir
785             + " before creating them again.");
786       }
787     }
788     if (!mkdirs(fs, conf, mergesdir))
789       throw new IOException("Failed create of " + mergesdir);
790   }
791 
792   /**
793    * Write out a merge reference under the given merges directory. Package local
794    * so it doesnt leak out of regionserver.
795    * @param mergedRegion {@link HRegionInfo} of the merged region
796    * @param familyName Column Family Name
797    * @param f File to create reference.
798    * @param mergedDir
799    * @return Path to created reference.
800    * @throws IOException
801    */
802   Path mergeStoreFile(final HRegionInfo mergedRegion, final String familyName,
803       final StoreFile f, final Path mergedDir)
804       throws IOException {
805     Path referenceDir = new Path(new Path(mergedDir,
806         mergedRegion.getEncodedName()), familyName);
807     // A whole reference to the store file.
808     Reference r = Reference.createTopReference(regionInfoForFs.getStartKey());
809     // Add the referred-to regions name as a dot separated suffix.
810     // See REF_NAME_REGEX regex above. The referred-to regions name is
811     // up in the path of the passed in <code>f</code> -- parentdir is family,
812     // then the directory above is the region name.
813     String mergingRegionName = regionInfoForFs.getEncodedName();
814     // Write reference with same file id only with the other region name as
815     // suffix and into the new region location (under same family).
816     Path p = new Path(referenceDir, f.getPath().getName() + "."
817         + mergingRegionName);
818     return r.write(fs, p);
819   }
820 
821   /**
822    * Commit a merged region, moving it from the merges temporary directory to
823    * the proper location in the filesystem.
824    * @param mergedRegionInfo merged region {@link HRegionInfo}
825    * @throws IOException
826    */
827   void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException {
828     Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName());
829     Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo);
830     // Move the tmp dir in the expected location
831     if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) {
832       if (!fs.rename(mergedRegionTmpDir, regionDir)) {
833         throw new IOException("Unable to rename " + mergedRegionTmpDir + " to "
834             + regionDir);
835       }
836     }
837   }
838 
839   // ===========================================================================
840   //  Create/Open/Delete Helpers
841   // ===========================================================================
842   /**
843    * Log the current state of the region
844    * @param LOG log to output information
845    * @throws IOException if an unexpected exception occurs
846    */
847   void logFileSystemState(final Log LOG) throws IOException {
848     FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
849   }
850 
851   /**
852    * @param hri
853    * @return Content of the file we write out to the filesystem under a region
854    * @throws IOException
855    */
856   private static byte[] getRegionInfoFileContent(final HRegionInfo hri) throws IOException {
857     return hri.toDelimitedByteArray();
858   }
859 
860   /**
861    * Create a {@link HRegionInfo} from the serialized version on-disk.
862    * @param fs {@link FileSystem} that contains the Region Info file
863    * @param regionDir {@link Path} to the Region Directory that contains the Info file
864    * @return An {@link HRegionInfo} instance gotten from the Region Info file.
865    * @throws IOException if an error occurred during file open/read operation.
866    */
867   public static HRegionInfo loadRegionInfoFileContent(final FileSystem fs, final Path regionDir)
868       throws IOException {
869     FSDataInputStream in = fs.open(new Path(regionDir, REGION_INFO_FILE));
870     try {
871       return HRegionInfo.parseFrom(in);
872     } finally {
873       in.close();
874     }
875   }
876 
877   /**
878    * Write the .regioninfo file on-disk.
879    */
880   private static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs,
881       final Path regionInfoFile, final byte[] content) throws IOException {
882     // First check to get the permissions
883     FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
884     // Write the RegionInfo file content
885     FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null);
886     try {
887       out.write(content);
888     } finally {
889       out.close();
890     }
891   }
892 
893   /**
894    * Write out an info file under the stored region directory. Useful recovering mangled regions.
895    * If the regionInfo already exists on-disk, then we fast exit.
896    */
897   void checkRegionInfoOnFilesystem() throws IOException {
898     // Compose the content of the file so we can compare to length in filesystem. If not same,
899     // rewrite it (it may have been written in the old format using Writables instead of pb). The
900     // pb version is much shorter -- we write now w/o the toString version -- so checking length
901     // only should be sufficient. I don't want to read the file every time to check if it pb
902     // serialized.
903     byte[] content = getRegionInfoFileContent(regionInfoForFs);
904 
905     // Verify if the region directory exists before opening a region. We need to do this since if
906     // the region directory doesn't exist we will re-create the region directory and a new HRI
907     // when HRegion.openHRegion() is called.
908     try {
909       FileStatus status = fs.getFileStatus(getRegionDir());
910     } catch (FileNotFoundException e) {
911       LOG.warn(getRegionDir() + " doesn't exist for region: " + regionInfoForFs.getEncodedName() +
912           " on table " + regionInfo.getTable());
913     }
914 
915     try {
916       Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
917       FileStatus status = fs.getFileStatus(regionInfoFile);
918       if (status != null && status.getLen() == content.length) {
919         // Then assume the content good and move on.
920         // NOTE: that the length is not sufficient to define the the content matches.
921         return;
922       }
923 
924       LOG.info("Rewriting .regioninfo file at: " + regionInfoFile);
925       if (!fs.delete(regionInfoFile, false)) {
926         throw new IOException("Unable to remove existing " + regionInfoFile);
927       }
928     } catch (FileNotFoundException e) {
929       LOG.warn(REGION_INFO_FILE + " file not found for region: " + regionInfoForFs.getEncodedName() +
930           " on table " + regionInfo.getTable());
931     }
932 
933     // Write HRI to a file in case we need to recover hbase:meta
934     writeRegionInfoOnFilesystem(content, true);
935   }
936 
937   /**
938    * Write out an info file under the region directory. Useful recovering mangled regions.
939    * @param useTempDir indicate whether or not using the region .tmp dir for a safer file creation.
940    */
941   private void writeRegionInfoOnFilesystem(boolean useTempDir) throws IOException {
942     byte[] content = getRegionInfoFileContent(regionInfoForFs);
943     writeRegionInfoOnFilesystem(content, useTempDir);
944   }
945 
946   /**
947    * Write out an info file under the region directory. Useful recovering mangled regions.
948    * @param regionInfoContent serialized version of the {@link HRegionInfo}
949    * @param useTempDir indicate whether or not using the region .tmp dir for a safer file creation.
950    */
951   private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent,
952       final boolean useTempDir) throws IOException {
953     Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
954     if (useTempDir) {
955       // Create in tmpDir and then move into place in case we crash after
956       // create but before close. If we don't successfully close the file,
957       // subsequent region reopens will fail the below because create is
958       // registered in NN.
959 
960       // And then create the file
961       Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);
962 
963       // If datanode crashes or if the RS goes down just before the close is called while trying to
964       // close the created regioninfo file in the .tmp directory then on next
965       // creation we will be getting AlreadyCreatedException.
966       // Hence delete and create the file if exists.
967       if (FSUtils.isExists(fs, tmpPath)) {
968         FSUtils.delete(fs, tmpPath, true);
969       }
970 
971       // Write HRI to a file in case we need to recover hbase:meta
972       writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);
973 
974       // Move the created file to the original path
975       if (fs.exists(tmpPath) &&  !rename(tmpPath, regionInfoFile)) {
976         throw new IOException("Unable to rename " + tmpPath + " to " + regionInfoFile);
977       }
978     } else {
979       // Write HRI to a file in case we need to recover hbase:meta
980       writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
981     }
982   }
983 
984   /**
985    * Create a new Region on file-system.
986    * @param conf the {@link Configuration} to use
987    * @param fs {@link FileSystem} from which to add the region
988    * @param tableDir {@link Path} to where the table is being stored
989    * @param regionInfo {@link HRegionInfo} for region to be added
990    * @throws IOException if the region creation fails due to a FileSystem exception.
991    */
992   public static HRegionFileSystem createRegionOnFileSystem(final Configuration conf,
993       final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
994     HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
995 
996     // We only create a .regioninfo and the region directory if this is the default region replica
997     if (regionInfo.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
998       Path regionDir = regionFs.getRegionDir();
999       if (fs.exists(regionDir)) {
1000         LOG.warn("Trying to create a region that already exists on disk: " + regionDir);
1001         throw new IOException("The specified region already exists on disk: " + regionDir);
1002       }
1003 
1004       // Create the region directory
1005       if (!createDirOnFileSystem(fs, conf, regionDir)) {
1006         LOG.warn("Unable to create the region directory: " + regionDir);
1007         throw new IOException("Unable to create region directory: " + regionDir);
1008       }
1009 
1010       // Write HRI to a file in case we need to recover hbase:meta
1011       regionFs.writeRegionInfoOnFilesystem(false);
1012     } else {
1013       if (LOG.isDebugEnabled())
1014         LOG.debug("Skipping creation of .regioninfo file for " + regionInfo);
1015     }
1016     return regionFs;
1017   }
1018 
1019   /**
1020    * Open Region from file-system.
1021    * @param conf the {@link Configuration} to use
1022    * @param fs {@link FileSystem} from which to add the region
1023    * @param tableDir {@link Path} to where the table is being stored
1024    * @param regionInfo {@link HRegionInfo} for region to be added
1025    * @param readOnly True if you don't want to edit the region data
1026    * @throws IOException if the region creation fails due to a FileSystem exception.
1027    */
1028   public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
1029       final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, boolean readOnly)
1030       throws IOException {
1031     HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
1032     Path regionDir = regionFs.getRegionDir();
1033 
1034     if (!fs.exists(regionDir)) {
1035       LOG.warn("Trying to open a region that do not exists on disk: " + regionDir);
1036       throw new IOException("The specified region do not exists on disk: " + regionDir);
1037     }
1038 
1039     if (!readOnly) {
1040       // Cleanup temporary directories
1041       regionFs.cleanupTempDir();
1042       regionFs.cleanupSplitsDir();
1043       regionFs.cleanupMergesDir();
1044 
1045       // If it doesn't exists, Write HRI to a file, in case we need to recover hbase:meta
1046       // Only create HRI if we are the default replica
1047       if (regionInfo.getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
1048         regionFs.checkRegionInfoOnFilesystem();
1049       } else {
1050         if (LOG.isDebugEnabled()) {
1051           LOG.debug("Skipping creation of .regioninfo file for " + regionInfo);
1052         }
1053       }
1054     }
1055 
1056     return regionFs;
1057   }
1058 
1059   /**
1060    * Remove the region from the table directory, archiving the region's hfiles.
1061    * @param conf the {@link Configuration} to use
1062    * @param fs {@link FileSystem} from which to remove the region
1063    * @param tableDir {@link Path} to where the table is being stored
1064    * @param regionInfo {@link HRegionInfo} for region to be deleted
1065    * @throws IOException if the request cannot be completed
1066    */
1067   public static void deleteRegionFromFileSystem(final Configuration conf,
1068       final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
1069     HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
1070     Path regionDir = regionFs.getRegionDir();
1071 
1072     if (!fs.exists(regionDir)) {
1073       LOG.warn("Trying to delete a region that do not exists on disk: " + regionDir);
1074       return;
1075     }
1076 
1077     if (LOG.isDebugEnabled()) {
1078       LOG.debug("DELETING region " + regionDir);
1079     }
1080 
1081     // Archive region
1082     Path rootDir = FSUtils.getRootDir(conf);
1083     HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);
1084 
1085     // Delete empty region dir
1086     if (!fs.delete(regionDir, true)) {
1087       LOG.warn("Failed delete of " + regionDir);
1088     }
1089   }
1090 
1091   /**
1092    * Creates a directory. Assumes the user has already checked for this directory existence.
1093    * @param dir
1094    * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
1095    *         whether the directory exists or not, and returns true if it exists.
1096    * @throws IOException
1097    */
1098   boolean createDir(Path dir) throws IOException {
1099     int i = 0;
1100     IOException lastIOE = null;
1101     do {
1102       try {
1103         return mkdirs(fs, conf, dir);
1104       } catch (IOException ioe) {
1105         lastIOE = ioe;
1106         if (fs.exists(dir)) return true; // directory is present
1107         try {
1108           sleepBeforeRetry("Create Directory", i+1);
1109         } catch (InterruptedException e) {
1110           throw (InterruptedIOException)new InterruptedIOException().initCause(e);
1111         }
1112       }
1113     } while (++i <= hdfsClientRetriesNumber);
1114     throw new IOException("Exception in createDir", lastIOE);
1115   }
1116 
1117   /**
1118    * Renames a directory. Assumes the user has already checked for this directory existence.
1119    * @param srcpath
1120    * @param dstPath
1121    * @return true if rename is successful.
1122    * @throws IOException
1123    */
1124   boolean rename(Path srcpath, Path dstPath) throws IOException {
1125     IOException lastIOE = null;
1126     int i = 0;
1127     do {
1128       try {
1129         return fs.rename(srcpath, dstPath);
1130       } catch (IOException ioe) {
1131         lastIOE = ioe;
1132         if (!fs.exists(srcpath) && fs.exists(dstPath)) return true; // successful move
1133         // dir is not there, retry after some time.
1134         try {
1135           sleepBeforeRetry("Rename Directory", i+1);
1136         } catch (InterruptedException e) {
1137           throw (InterruptedIOException)new InterruptedIOException().initCause(e);
1138         }
1139       }
1140     } while (++i <= hdfsClientRetriesNumber);
1141 
1142     throw new IOException("Exception in rename", lastIOE);
1143   }
1144 
1145   /**
1146    * Deletes a directory. Assumes the user has already checked for this directory existence.
1147    * @param dir
1148    * @return true if the directory is deleted.
1149    * @throws IOException
1150    */
1151   boolean deleteDir(Path dir) throws IOException {
1152     IOException lastIOE = null;
1153     int i = 0;
1154     do {
1155       try {
1156         return fs.delete(dir, true);
1157       } catch (IOException ioe) {
1158         lastIOE = ioe;
1159         if (!fs.exists(dir)) return true;
1160         // dir is there, retry deleting after some time.
1161         try {
1162           sleepBeforeRetry("Delete Directory", i+1);
1163         } catch (InterruptedException e) {
1164           throw (InterruptedIOException)new InterruptedIOException().initCause(e);
1165         }
1166       }
1167     } while (++i <= hdfsClientRetriesNumber);
1168 
1169     throw new IOException("Exception in DeleteDir", lastIOE);
1170   }
1171 
1172   /**
1173    * sleeping logic; handles the interrupt exception.
1174    */
1175   private void sleepBeforeRetry(String msg, int sleepMultiplier) throws InterruptedException {
1176     sleepBeforeRetry(msg, sleepMultiplier, baseSleepBeforeRetries, hdfsClientRetriesNumber);
1177   }
1178 
1179   /**
1180    * Creates a directory for a filesystem and configuration object. Assumes the user has already
1181    * checked for this directory existence.
1182    * @param fs
1183    * @param conf
1184    * @param dir
1185    * @return the result of fs.mkdirs(). In case underlying fs throws an IOException, it checks
1186    *         whether the directory exists or not, and returns true if it exists.
1187    * @throws IOException
1188    */
1189   private static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir)
1190       throws IOException {
1191     int i = 0;
1192     IOException lastIOE = null;
1193     int hdfsClientRetriesNumber = conf.getInt("hdfs.client.retries.number",
1194       DEFAULT_HDFS_CLIENT_RETRIES_NUMBER);
1195     int baseSleepBeforeRetries = conf.getInt("hdfs.client.sleep.before.retries",
1196       DEFAULT_BASE_SLEEP_BEFORE_RETRIES);
1197     do {
1198       try {
1199         return fs.mkdirs(dir);
1200       } catch (IOException ioe) {
1201         lastIOE = ioe;
1202         if (fs.exists(dir)) return true; // directory is present
1203         try {
1204           sleepBeforeRetry("Create Directory", i+1, baseSleepBeforeRetries, hdfsClientRetriesNumber);
1205         } catch (InterruptedException e) {
1206           throw (InterruptedIOException)new InterruptedIOException().initCause(e);
1207         }
1208       }
1209     } while (++i <= hdfsClientRetriesNumber);
1210 
1211     throw new IOException("Exception in createDir", lastIOE);
1212   }
1213 
1214   /**
1215    * sleeping logic for static methods; handles the interrupt exception. Keeping a static version
1216    * for this to avoid re-looking for the integer values.
1217    */
1218   private static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSleepBeforeRetries,
1219       int hdfsClientRetriesNumber) throws InterruptedException {
1220     if (sleepMultiplier > hdfsClientRetriesNumber) {
1221       if (LOG.isDebugEnabled()) {
1222         LOG.debug(msg + ", retries exhausted");
1223       }
1224       return;
1225     }
1226     if (LOG.isDebugEnabled()) {
1227       LOG.debug(msg + ", sleeping " + baseSleepBeforeRetries + " times " + sleepMultiplier);
1228     }
1229     Thread.sleep((long)baseSleepBeforeRetries * sleepMultiplier);
1230   }
1231 }