1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.util;
20
21 import com.google.common.base.Throwables;
22 import com.google.common.collect.Iterators;
23 import com.google.common.primitives.Ints;
24
25 import edu.umd.cs.findbugs.annotations.CheckForNull;
26
27 import java.io.ByteArrayInputStream;
28 import java.io.DataInputStream;
29 import java.io.EOFException;
30 import java.io.FileNotFoundException;
31 import java.io.IOException;
32 import java.io.InputStream;
33 import java.io.InterruptedIOException;
34 import java.lang.reflect.InvocationTargetException;
35 import java.lang.reflect.Method;
36 import java.net.InetSocketAddress;
37 import java.util.ArrayList;
38 import java.util.Arrays;
39 import java.util.Collections;
40 import java.util.HashMap;
41 import java.util.Iterator;
42 import java.util.LinkedList;
43 import java.util.List;
44 import java.util.Locale;
45 import java.util.Map;
46 import java.util.Vector;
47 import java.util.concurrent.ArrayBlockingQueue;
48 import java.util.concurrent.ConcurrentHashMap;
49 import java.util.concurrent.ExecutionException;
50 import java.util.concurrent.ExecutorService;
51 import java.util.concurrent.Future;
52 import java.util.concurrent.FutureTask;
53 import java.util.concurrent.ThreadPoolExecutor;
54 import java.util.concurrent.TimeUnit;
55 import java.util.regex.Pattern;
56
57 import org.apache.commons.logging.Log;
58 import org.apache.commons.logging.LogFactory;
59 import org.apache.hadoop.conf.Configuration;
60 import org.apache.hadoop.fs.BlockLocation;
61 import org.apache.hadoop.fs.FSDataInputStream;
62 import org.apache.hadoop.fs.FSDataOutputStream;
63 import org.apache.hadoop.fs.FileStatus;
64 import org.apache.hadoop.fs.FileSystem;
65 import org.apache.hadoop.fs.Path;
66 import org.apache.hadoop.fs.PathFilter;
67 import org.apache.hadoop.fs.permission.FsAction;
68 import org.apache.hadoop.fs.permission.FsPermission;
69 import org.apache.hadoop.hbase.ClusterId;
70 import org.apache.hadoop.hbase.HColumnDescriptor;
71 import org.apache.hadoop.hbase.HConstants;
72 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
73 import org.apache.hadoop.hbase.HRegionInfo;
74 import org.apache.hadoop.hbase.RemoteExceptionHandler;
75 import org.apache.hadoop.hbase.TableName;
76 import org.apache.hadoop.hbase.classification.InterfaceAudience;
77 import org.apache.hadoop.hbase.exceptions.DeserializationException;
78 import org.apache.hadoop.hbase.fs.HFileSystem;
79 import org.apache.hadoop.hbase.io.HFileLink;
80 import org.apache.hadoop.hbase.master.HMaster;
81 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
82 import org.apache.hadoop.hbase.security.AccessDeniedException;
83 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
84 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
85 import org.apache.hadoop.hbase.protobuf.generated.FSProtos;
86 import org.apache.hadoop.hdfs.DFSClient;
87 import org.apache.hadoop.hdfs.DFSHedgedReadMetrics;
88 import org.apache.hadoop.hdfs.DistributedFileSystem;
89 import org.apache.hadoop.io.IOUtils;
90 import org.apache.hadoop.io.SequenceFile;
91 import org.apache.hadoop.security.UserGroupInformation;
92 import org.apache.hadoop.util.Progressable;
93 import org.apache.hadoop.util.ReflectionUtils;
94 import org.apache.hadoop.util.StringUtils;
95
96
97
98
@InterfaceAudience.Private
public abstract class FSUtils extends CommonFSUtils {
  private static final Log LOG = LogFactory.getLog(FSUtils.class);

  // Config key for the thread-pool size used by locality checking.
  private static final String THREAD_POOLSIZE = "hbase.client.localityCheck.threadPoolSize";
  private static final int DEFAULT_THREAD_POOLSIZE = 2;

  // True when this JVM runs on Windows (detected from the os.name system property).
  public static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");

  protected FSUtils() {
    super();
  }
113
114
115
116
117
118 public static boolean isDistributedFileSystem(final FileSystem fs) throws IOException {
119 FileSystem fileSystem = fs;
120
121
122 if (fs instanceof HFileSystem) {
123 fileSystem = ((HFileSystem)fs).getBackingFs();
124 }
125 return fileSystem instanceof DistributedFileSystem;
126 }
127
128
129
130
131
132
133
134
135
  /**
   * Compares the two paths name-by-name from the leaf upward and reports whether
   * {@code pathToSearch} matches {@code pathTail}. Paths of different depth never match.
   *
   * @param pathToSearch the path being examined
   * @param pathTail the tail to look for
   * @return true when every component of the tail equals the corresponding component
   *   of the searched path
   */
  public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
    if (pathToSearch.depth() != pathTail.depth()) return false;
    Path tailPath = pathTail;
    String tailName;
    Path toSearch = pathToSearch;
    String toSearchName;
    boolean result = false;
    do {
      tailName = tailPath.getName();
      // Tail exhausted without a mismatch: everything matched.
      if (tailName == null || tailName.length() <= 0) {
        result = true;
        break;
      }
      toSearchName = toSearch.getName();
      // Searched path exhausted first: no match.
      if (toSearchName == null || toSearchName.length() <= 0) break;
      // Walk both paths one component toward the root.
      tailPath = tailPath.getParent();
      toSearch = toSearch.getParent();
    } while(tailName.equals(toSearchName));
    return result;
  }
157
158 public static FSUtils getInstance(FileSystem fs, Configuration conf) {
159 String scheme = fs.getUri().getScheme();
160 if (scheme == null) {
161 LOG.warn("Could not find scheme for uri " +
162 fs.getUri() + ", default to hdfs");
163 scheme = "hdfs";
164 }
165 Class<?> fsUtilsClass = conf.getClass("hbase.fsutil." +
166 scheme + ".impl", FSHDFSUtils.class);
167 FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf);
168 return fsUtils;
169 }
170
171
172
173
174
175
176
177
  /**
   * Deletes the region directory (table dir + encoded region name) under the
   * configured hbase root dir.
   *
   * @param conf configuration used to locate the root dir and filesystem
   * @param hri the region whose directory should be removed
   * @return true if the directory was deleted
   * @throws IOException on filesystem error
   */
  public static boolean deleteRegionDir(final Configuration conf, final HRegionInfo hri)
  throws IOException {
    Path rootDir = getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    return deleteDirectory(fs,
      new Path(getTableDir(rootDir, hri.getTable()), hri.getEncodedName()));
  }
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205 public static FSDataOutputStream create(Configuration conf, FileSystem fs, Path path,
206 FsPermission perm, InetSocketAddress[] favoredNodes) throws IOException {
207 if (fs instanceof HFileSystem) {
208 FileSystem backingFs = ((HFileSystem)fs).getBackingFs();
209 if (backingFs instanceof DistributedFileSystem) {
210
211
212 short replication = Short.parseShort(conf.get(HColumnDescriptor.DFS_REPLICATION,
213 String.valueOf(HColumnDescriptor.DEFAULT_DFS_REPLICATION)));
214 try {
215 return (FSDataOutputStream) (DistributedFileSystem.class.getDeclaredMethod("create",
216 Path.class, FsPermission.class, boolean.class, int.class, short.class, long.class,
217 Progressable.class, InetSocketAddress[].class).invoke(backingFs, path, perm, true,
218 getDefaultBufferSize(backingFs),
219 replication > 0 ? replication : getDefaultReplication(backingFs, path),
220 getDefaultBlockSize(backingFs, path), null, favoredNodes));
221 } catch (InvocationTargetException ite) {
222
223 throw new IOException(ite.getCause());
224 } catch (NoSuchMethodException e) {
225 LOG.debug("DFS Client does not support most favored nodes create; using default create");
226 if (LOG.isTraceEnabled()) LOG.trace("Ignoring; use default create", e);
227 } catch (IllegalArgumentException e) {
228 LOG.debug("Ignoring (most likely Reflection related exception) " + e);
229 } catch (SecurityException e) {
230 LOG.debug("Ignoring (most likely Reflection related exception) " + e);
231 } catch (IllegalAccessException e) {
232 LOG.debug("Ignoring (most likely Reflection related exception) " + e);
233 }
234 }
235 }
236 return create(fs, path, perm, true);
237 }
238
239
240
241
242
243
244
  /**
   * Checks that the specified filesystem is available. No-op for non-HDFS filesystems;
   * for HDFS, probes the root path and, when unavailable, closes the filesystem and
   * throws with the probe failure attached as the cause.
   *
   * @param fs filesystem to probe
   * @throws IOException when the filesystem is not available
   */
  public static void checkFileSystemAvailable(final FileSystem fs)
  throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    IOException exception = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      if (dfs.exists(new Path("/"))) {
        return;
      }
    } catch (IOException e) {
      exception = RemoteExceptionHandler.checkIOException(e);
    }
    try {
      // Best-effort close; a failure here is only logged so the real error below wins.
      fs.close();
    } catch (Exception e) {
      LOG.error("file system close failed: ", e);
    }
    throw new IOException("File system is not available", exception);
  }
266
267
268
269
270
271
272
273
274
275 private static boolean isInSafeMode(DistributedFileSystem dfs) throws IOException {
276 boolean inSafeMode = false;
277 try {
278 Method m = DistributedFileSystem.class.getMethod("setSafeMode", new Class<?> []{
279 org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction.class, boolean.class});
280 inSafeMode = (Boolean) m.invoke(dfs,
281 org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction.SAFEMODE_GET, true);
282 } catch (Exception e) {
283 if (e instanceof IOException) throw (IOException) e;
284
285
286 inSafeMode = dfs.setSafeMode(
287 org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction.SAFEMODE_GET);
288 }
289 return inSafeMode;
290 }
291
292
293
294
295
296
297 public static void checkDfsSafeMode(final Configuration conf)
298 throws IOException {
299 boolean isInSafeMode = false;
300 FileSystem fs = FileSystem.get(conf);
301 if (fs instanceof DistributedFileSystem) {
302 DistributedFileSystem dfs = (DistributedFileSystem)fs;
303 isInSafeMode = isInSafeMode(dfs);
304 }
305 if (isInSafeMode) {
306 throw new IOException("File system is in safemode, it can't be written now");
307 }
308 }
309
310
311
312
313
314
315
316
317
318
  /**
   * Reads the content of the hbase.version file under {@code rootdir}. Understands both
   * the protobuf format (PB magic prefix) and the legacy writeUTF format.
   *
   * @param fs filesystem holding the root dir
   * @param rootdir hbase root directory
   * @return the version string, or null when the file does not exist (or is zero-length)
   * @throws IOException if the file cannot be read
   * @throws DeserializationException if the protobuf content cannot be parsed
   */
  public static String getVersion(FileSystem fs, Path rootdir)
  throws IOException, DeserializationException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    FileStatus[] status = null;
    try {
      // listStatus gives existence and length in one call; FileNotFound means no file.
      status = fs.listStatus(versionFile);
    } catch (FileNotFoundException fnfe) {
      return null;
    }
    if (status == null || status.length == 0) return null;
    String version = null;
    byte [] content = new byte [(int)status[0].getLen()];
    FSDataInputStream s = fs.open(versionFile);
    try {
      IOUtils.readFully(s, content, 0, content.length);
      if (ProtobufUtil.isPBMagicPrefix(content)) {
        version = parseVersionFrom(content);
      } else {
        // Legacy, pre-protobuf format: a single UTF string.
        InputStream is = new ByteArrayInputStream(content);
        DataInputStream dis = new DataInputStream(is);
        try {
          version = dis.readUTF();
        } finally {
          dis.close();
        }
      }
    } catch (EOFException eof) {
      LOG.warn("Version file was empty, odd, will try to set it.");
    } finally {
      s.close();
    }
    return version;
  }
355
356
357
358
359
360
361
  /**
   * Parses the version string out of a protobuf-serialized hbase.version file content.
   *
   * @param bytes file content, expected to start with the PB magic prefix
   * @return the version string
   * @throws DeserializationException if the prefix is missing or the protobuf is invalid
   */
  static String parseVersionFrom(final byte [] bytes)
  throws DeserializationException {
    ProtobufUtil.expectPBMagicPrefix(bytes);
    int pblen = ProtobufUtil.lengthOfPBMagic();
    FSProtos.HBaseVersionFileContent.Builder builder =
      FSProtos.HBaseVersionFileContent.newBuilder();
    try {
      // Skip the magic prefix, then parse the remainder as the version message.
      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
      return builder.getVersion();
    } catch (IOException e) {
      // Convert parse failure into the checked deserialization exception callers expect.
      throw new DeserializationException(e);
    }
  }
376
377
378
379
380
381
382 static byte [] toVersionByteArray(final String version) {
383 FSProtos.HBaseVersionFileContent.Builder builder =
384 FSProtos.HBaseVersionFileContent.newBuilder();
385 return ProtobufUtil.prependPBMagic(builder.setVersion(version).build().toByteArray());
386 }
387
388
389
390
391
392
393
394
395
396
397
  /**
   * Verifies the current filesystem layout version, delegating to the five-argument
   * overload with no wait and the default number of write attempts.
   *
   * @param fs filesystem holding the root dir
   * @param rootdir hbase root directory
   * @param message if true, print a warning to stdout on mismatch before throwing
   * @throws IOException on filesystem error
   * @throws DeserializationException if the version file cannot be parsed
   */
  public static void checkVersion(FileSystem fs, Path rootdir, boolean message)
  throws IOException, DeserializationException {
    checkVersion(fs, rootdir, message, 0, HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
  }
402
403
404
405
406
407
408
409
410
411
412
413
414
  /**
   * Verifies that the filesystem layout version equals HConstants.FILE_SYSTEM_VERSION.
   * When no version file exists and hbase:meta is also absent (a fresh install), a new
   * version file is written instead of failing. Any other mismatch raises
   * FileSystemVersionException.
   *
   * @param fs filesystem holding the root dir
   * @param rootdir hbase root directory
   * @param message if true, print the warning to stdout before throwing
   * @param wait milliseconds to sleep between version-file write attempts
   * @param retries number of write attempts when creating the version file
   * @throws IOException on filesystem error
   * @throws DeserializationException if the version file cannot be parsed
   */
  public static void checkVersion(FileSystem fs, Path rootdir,
      boolean message, int wait, int retries)
  throws IOException, DeserializationException {
    String version = getVersion(fs, rootdir);
    if (version == null) {
      if (!metaRegionExists(fs, rootdir)) {
        // rootDir is empty (no version file and no meta region): new install. Set the
        // version file and return.
        setVersion(fs, rootdir, wait, retries);
        return;
      }
    } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) return;

    // Version file present but wrong, or missing on a non-empty root dir.
    String msg = "HBase file layout needs to be upgraded."
      + " You have version " + version
      + " and I want version " + HConstants.FILE_SYSTEM_VERSION
      + ". Consult http://hbase.apache.org/book.html for further information about upgrading HBase."
      + " Is your hbase.rootdir valid? If so, you may need to run "
      + "'hbase hbck -fixVersionFile'.";
    if (message) {
      System.out.println("WARNING! " + msg);
    }
    throw new FileSystemVersionException(msg);
  }
441
442
443
444
445
446
447
448
  /**
   * Writes the current filesystem version (HConstants.FILE_SYSTEM_VERSION) with no
   * wait between attempts and the default number of attempts.
   *
   * @param fs filesystem holding the root dir
   * @param rootdir hbase root directory
   * @throws IOException if the version file cannot be written
   */
  public static void setVersion(FileSystem fs, Path rootdir)
  throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0,
      HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS);
  }
454
455
456
457
458
459
460
461
462
463
  /**
   * Writes the current filesystem version (HConstants.FILE_SYSTEM_VERSION) with the
   * given retry behavior.
   *
   * @param fs filesystem holding the root dir
   * @param rootdir hbase root directory
   * @param wait milliseconds to sleep between attempts
   * @param retries number of attempts before giving up
   * @throws IOException if the version file cannot be written
   */
  public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries)
  throws IOException {
    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries);
  }
468
469
470
471
472
473
474
475
476
477
478
479
  /**
   * Writes the given version string to the hbase.version file by creating a temp file
   * under the hbase temp directory and renaming it into place, retrying on IOException
   * up to {@code retries} times with {@code wait} ms between attempts.
   *
   * @param fs filesystem holding the root dir
   * @param rootdir hbase root directory
   * @param version version string to record
   * @param wait milliseconds to sleep between attempts
   * @param retries number of attempts before rethrowing the failure
   * @throws IOException if all attempts fail, or InterruptedIOException if the sleep
   *   is interrupted
   */
  public static void setVersion(FileSystem fs, Path rootdir, String version,
      int wait, int retries) throws IOException {
    Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
    Path tempVersionFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY + Path.SEPARATOR +
      HConstants.VERSION_FILE_NAME);
    while (true) {
      try {
        // Write to a temp file first so readers never observe a partially written file.
        FSDataOutputStream s = fs.create(tempVersionFile);
        try {
          s.write(toVersionByteArray(version));
          s.close();
          s = null;
          // Move the complete temp file into its final location.
          if (!fs.rename(tempVersionFile, versionFile)) {
            throw new IOException("Unable to move temp version file to " + versionFile);
          }
        } finally {
          // Close quietly if the stream is still open; a close failure here must not
          // mask the primary exception from the try block.
          try {
            if (s != null) s.close();
          } catch (IOException ignore) { }
        }
        LOG.info("Created version file at " + rootdir.toString() + " with version=" + version);
        return;
      } catch (IOException e) {
        if (retries > 0) {
          // Remove any partial destination file, sleep, and try again.
          LOG.debug("Unable to create version file at " + rootdir.toString() + ", retrying", e);
          fs.delete(versionFile, false);
          try {
            if (wait > 0) {
              Thread.sleep(wait);
            }
          } catch (InterruptedException ie) {
            // Convert the interrupt into the IO-flavored form callers expect.
            throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
          }
          retries--;
        } else {
          throw e;
        }
      }
    }
  }
528
529
530
531
532
533
534
535
536
  /**
   * Checks whether the hbase.id cluster ID file exists under {@code rootdir}.
   * On IOException, retries indefinitely while {@code wait} is positive (sleeping
   * {@code wait} ms between attempts); with a non-positive wait the error is rethrown.
   *
   * @param fs filesystem holding the root dir
   * @param rootdir hbase root directory
   * @param wait milliseconds to sleep between retries
   * @return true when the cluster ID file exists
   * @throws IOException if the check fails and no retry is allowed
   */
  public static boolean checkClusterIdExists(FileSystem fs, Path rootdir,
      int wait) throws IOException {
    while (true) {
      try {
        Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        return fs.exists(filePath);
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to check cluster ID file in " + rootdir.toString() +
            ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException e) {
            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
          }
        } else {
          throw ioe;
        }
      }
    }
  }
558
559
560
561
562
563
564
565
  /**
   * Returns the unique cluster ID stored in the hbase.id file under {@code rootdir},
   * or null when the file does not exist. A legacy (non-protobuf) file is re-read as
   * a UTF string and rewritten in protobuf form.
   *
   * @param fs filesystem holding the root dir
   * @param rootdir hbase root directory
   * @return the parsed cluster ID, or null if the file is absent
   * @throws IOException if the file cannot be read or parsed
   */
  public static ClusterId getClusterId(FileSystem fs, Path rootdir)
  throws IOException {
    Path idPath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
    ClusterId clusterId = null;
    FileStatus status = fs.exists(idPath)? fs.getFileStatus(idPath): null;
    if (status != null) {
      int len = Ints.checkedCast(status.getLen());
      byte [] content = new byte[len];
      FSDataInputStream in = fs.open(idPath);
      try {
        in.readFully(content);
      } catch (EOFException eof) {
        LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
      } finally{
        in.close();
      }
      try {
        clusterId = ClusterId.parseFrom(content);
      } catch (DeserializationException e) {
        throw new IOException("content=" + Bytes.toString(content), e);
      }
      // If the on-disk content is not protobuf yet, re-read it as a legacy UTF string
      // and rewrite the file in protobuf form.
      if (!ProtobufUtil.isPBMagicPrefix(content)) {
        String cid = null;
        in = fs.open(idPath);
        try {
          cid = in.readUTF();
          clusterId = new ClusterId(cid);
        } catch (EOFException eof) {
          LOG.warn("Cluster ID file " + idPath.toString() + " was empty");
        } finally {
          in.close();
        }
        rewriteAsPb(fs, rootdir, idPath, clusterId);
      }
      return clusterId;
    } else {
      LOG.warn("Cluster ID file does not exist at " + idPath.toString());
    }
    return clusterId;
  }
607
608
609
610
611
  /**
   * Rewrites a legacy hbase.id file in protobuf form: moves the old file aside
   * (suffixing the current time), writes a fresh protobuf file via setClusterId,
   * then deletes the moved-aside copy.
   *
   * @throws IOException if the rename, rewrite, or delete fails
   */
  private static void rewriteAsPb(final FileSystem fs, final Path rootdir, final Path p,
      final ClusterId cid)
  throws IOException {
    // Move the current file aside before writing the replacement.
    Path movedAsideName = new Path(p + "." + System.currentTimeMillis());
    if (!fs.rename(p, movedAsideName)) throw new IOException("Failed rename of " + p);
    setClusterId(fs, rootdir, cid, 100);
    if (!fs.delete(movedAsideName, false)) {
      throw new IOException("Failed delete of " + movedAsideName);
    }
    LOG.debug("Rewrote the hbase.id file as pb");
  }
625
626
627
628
629
630
631
632
633
634
  /**
   * Writes the cluster ID to the hbase.id file by creating a temp file under the hbase
   * temp directory and renaming it into place. On IOException, retries indefinitely
   * while {@code wait} is positive (sleeping between attempts); with a non-positive
   * wait the error is rethrown.
   *
   * @param fs filesystem holding the root dir
   * @param rootdir hbase root directory
   * @param clusterId the ID to persist
   * @param wait milliseconds to sleep between retries
   * @throws IOException if the write fails and no retry is allowed
   */
  public static void setClusterId(FileSystem fs, Path rootdir, ClusterId clusterId,
      int wait) throws IOException {
    while (true) {
      try {
        Path idFile = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME);
        Path tempIdFile = new Path(rootdir, HConstants.HBASE_TEMP_DIRECTORY +
          Path.SEPARATOR + HConstants.CLUSTER_ID_FILE_NAME);
        // Write to a temp file first so readers never observe a partial file.
        FSDataOutputStream s = fs.create(tempIdFile);
        try {
          s.write(clusterId.toByteArray());
          s.close();
          s = null;
          // Move the complete temp file into its final location.
          if (!fs.rename(tempIdFile, idFile)) {
            throw new IOException("Unable to move temp version file to " + idFile);
          }
        } finally {
          // Close quietly if still open; don't let a close failure mask the real error.
          try {
            if (s != null) s.close();
          } catch (IOException ignore) { }
        }
        if (LOG.isDebugEnabled()) {
          LOG.debug("Created cluster ID file at " + idFile.toString() + " with ID: " + clusterId);
        }
        return;
      } catch (IOException ioe) {
        if (wait > 0) {
          LOG.warn("Unable to create cluster ID file in " + rootdir.toString() +
            ", retrying in " + wait + "msec: " + StringUtils.stringifyException(ioe));
          try {
            Thread.sleep(wait);
          } catch (InterruptedException e) {
            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
          }
        } else {
          throw ioe;
        }
      }
    }
  }
678
679
680
681
682
683
684
  /**
   * Blocks until HDFS leaves safe mode, polling every {@code wait} milliseconds.
   * No-op when the default filesystem is not HDFS.
   *
   * @param conf configuration used to obtain the default filesystem
   * @param wait milliseconds to sleep between safe-mode checks
   * @throws IOException if a check fails, or InterruptedIOException if interrupted
   */
  public static void waitOnSafeMode(final Configuration conf,
    final long wait)
  throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) return;
    DistributedFileSystem dfs = (DistributedFileSystem)fs;
    // Poll until the NameNode reports it has left safe mode.
    while (isInSafeMode(dfs)) {
      LOG.info("Waiting for dfs to exit safe mode...");
      try {
        Thread.sleep(wait);
      } catch (InterruptedException e) {
        throw (InterruptedIOException)new InterruptedIOException().initCause(e);
      }
    }
  }
701
702
703
704
705
706
707
708
709 public static Path getWALRegionDir(final Configuration conf, final HRegionInfo regionInfo)
710 throws IOException {
711 return new Path(getWALTableDir(conf, regionInfo.getTable()),
712 regionInfo.getEncodedName());
713 }
714
715
716
717
718
719
720
721
722
  /**
   * Returns the WAL directory for the given table/encoded-region-name pair.
   *
   * @param conf configuration used to locate the WAL root
   * @param tableName the table
   * @param encodedRegionName the region's encoded name
   * @throws IOException if the WAL root cannot be determined
   */
  public static Path getWALRegionDir(final Configuration conf, final TableName tableName,
      final String encodedRegionName) throws IOException {
    return new Path(getWALTableDir(conf, tableName), encodedRegionName);
  }
727
728
729
730
731
732
733
734
735
  /**
   * Checks whether the hbase:meta region directory exists under the root dir.
   *
   * @param fs filesystem holding the root dir
   * @param rootDir hbase root directory
   * @return true when the meta region directory exists
   * @throws IOException on filesystem error
   */
  @SuppressWarnings("deprecation")
  public static boolean metaRegionExists(FileSystem fs, Path rootDir) throws IOException {
    Path metaRegionDir = getRegionDirFromRootDir(rootDir, HRegionInfo.FIRST_META_REGIONINFO);
    return fs.exists(metaRegionDir);
  }
741
742
743
744
745
746
747
748
749
750 static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
751 final FileSystem fs, FileStatus status, long start, long length)
752 throws IOException {
753 HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
754 BlockLocation [] blockLocations =
755 fs.getFileBlockLocations(status, start, length);
756 for(BlockLocation bl : blockLocations) {
757 String [] hosts = bl.getHosts();
758 long len = bl.getLength();
759 blocksDistribution.addHostsAndBlockWeight(hosts, len);
760 }
761
762 return blocksDistribution;
763 }
764
765
766
767
768
769
770 static public void addToHDFSBlocksDistribution(
771 HDFSBlocksDistribution blocksDistribution, BlockLocation[] blockLocations)
772 throws IOException {
773 for (BlockLocation bl : blockLocations) {
774 String[] hosts = bl.getHosts();
775 long len = bl.getLength();
776 blocksDistribution.addHostsAndBlockWeight(hosts, len);
777 }
778 }
779
780
781
782
783
784
785
786
787
788
  /**
   * Returns the overall fragmentation percentage across all tables (the "-TOTAL-"
   * entry computed by getTableFragmentation), or -1 when nothing was computed.
   *
   * @param master master to obtain configuration and root dir from
   * @throws IOException on filesystem error
   */
  public static int getTotalTableFragmentation(final HMaster master)
  throws IOException {
    Map<String, Integer> map = getTableFragmentation(master);
    return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
  }
794
795
796
797
798
799
800
801
802
803
804
  /**
   * Computes per-table fragmentation using the master's configured root dir and
   * filesystem; see the (fs, rootdir) overload for the map contents.
   *
   * @param master master to obtain configuration and root dir from
   * @throws IOException on filesystem error
   */
  public static Map<String, Integer> getTableFragmentation(
    final HMaster master)
  throws IOException {
    Path path = getRootDir(master.getConfiguration());
    // Use the filesystem the root dir lives on, not necessarily the default one.
    FileSystem fs = path.getFileSystem(master.getConfiguration());
    return getTableFragmentation(fs, path);
  }
813
814
815
816
817
818
819
820
821
822
823
  /**
   * Computes, for every table under the root dir, the percentage of column-family
   * directories that hold more than one file (rounded). The returned map is keyed by
   * table name, plus a "-TOTAL-" entry aggregating across all tables.
   *
   * @param fs filesystem holding the root dir
   * @param hbaseRootDir hbase root directory
   * @return table name to fragmentation percentage, with a "-TOTAL-" aggregate
   * @throws IOException on filesystem error
   */
  public static Map<String, Integer> getTableFragmentation(
    final FileSystem fs, final Path hbaseRootDir)
  throws IOException {
    Map<String, Integer> frags = new HashMap<String, Integer>();
    int cfCountTotal = 0;
    int cfFragTotal = 0;
    PathFilter regionFilter = new RegionDirFilter(fs);
    PathFilter familyFilter = new FamilyDirFilter(fs);
    List<Path> tableDirs = getTableDirs(fs, hbaseRootDir);
    for (Path d : tableDirs) {
      int cfCount = 0;
      int cfFrag = 0;
      FileStatus[] regionDirs = fs.listStatus(d, regionFilter);
      for (FileStatus regionDir : regionDirs) {
        Path dd = regionDir.getPath();
        // Visit every column-family directory of this region.
        FileStatus[] familyDirs = fs.listStatus(dd, familyFilter);
        for (FileStatus familyDir : familyDirs) {
          cfCount++;
          cfCountTotal++;
          Path family = familyDir.getPath();
          // A family with more than one entry counts as fragmented.
          FileStatus[] familyStatus = fs.listStatus(family);
          if (familyStatus.length > 1) {
            cfFrag++;
            cfFragTotal++;
          }
        }
      }
      // Per-table percentage (0 when the table has no families).
      frags.put(FSUtils.getTableName(d).getNameAsString(),
        cfCount == 0? 0: Math.round((float) cfFrag / cfCount * 100));
    }
    // Cluster-wide aggregate.
    frags.put("-TOTAL-",
      cfCountTotal == 0? 0: Math.round((float) cfFragTotal / cfCountTotal * 100));
    return frags;
  }
862
863
864
865
866
867
868
869
  /**
   * Returns the WAL directory for the given table:
   * &lt;walRoot&gt;/&lt;BASE_NAMESPACE_DIR&gt;/&lt;namespace&gt;/&lt;qualifier&gt;.
   *
   * @param conf configuration used to locate the WAL root
   * @param tableName the table
   * @throws IOException if the WAL root cannot be determined
   */
  public static Path getWALTableDir(final Configuration conf, final TableName tableName)
      throws IOException {
    Path baseDir = new Path(getWALRootDir(conf), HConstants.BASE_NAMESPACE_DIR);
    return new Path(new Path(baseDir, tableName.getNamespaceAsString()),
      tableName.getQualifierAsString());
  }
876
877
878
879
880
881
  /**
   * Returns the historical, incorrectly-placed WAL region dir (namespace directly under
   * the WAL root, without the base namespace dir). Kept only so old layouts can be
   * found; do not use for new data.
   *
   * @deprecated only for locating data written under the old, wrong layout
   */
  @Deprecated
  public static Path getWrongWALRegionDir(final Configuration conf, final TableName tableName,
      final String encodedRegionName) throws IOException {
    Path wrongTableDir = new Path(new Path(getWALRootDir(conf), tableName.getNamespaceAsString()),
      tableName.getQualifierAsString());
    return new Path(wrongTableDir, encodedRegionName);
  }
889
890
891
892
  /**
   * Status filter that accepts only regular files; paths that cannot be stat'ed are
   * rejected (with a warning) rather than failing the listing.
   */
  static class FileFilter extends AbstractFileStatusFilter {
    private final FileSystem fs;

    public FileFilter(final FileSystem fs) {
      this.fs = fs;
    }

    @Override
    protected boolean accept(Path p, @CheckForNull Boolean isDir) {
      try {
        return isFile(fs, isDir, p);
      } catch (IOException e) {
        // Treat an unverifiable path as "not a file" instead of aborting the scan.
        LOG.warn("unable to verify if path=" + p + " is a regular file", e);
        return false;
      }
    }
  }
910
911
912
913
  /**
   * Directory filter that accepts all directories except those whose names appear on a
   * supplied blacklist.
   */
  public static class BlackListDirFilter extends AbstractFileStatusFilter {
    private final FileSystem fs;
    private List<String> blacklist;

    /**
     * @param fs filesystem used to verify that candidate paths are directories
     * @param directoryNameBlackList directory names to reject; null means nothing is
     *   blacklisted
     */
    @SuppressWarnings("unchecked")
    public BlackListDirFilter(final FileSystem fs, final List<String> directoryNameBlackList) {
      this.fs = fs;
      blacklist =
        (List<String>) (directoryNameBlackList == null ? Collections.emptyList()
          : directoryNameBlackList);
    }

    @Override
    protected boolean accept(Path p, @CheckForNull Boolean isDir) {
      if (!isValidName(p.getName())) {
        return false;
      }

      try {
        return isDirectory(fs, isDir, p);
      } catch (IOException e) {
        // Treat an unverifiable path as invalid instead of aborting the scan.
        LOG.warn("An error occurred while verifying if [" + p.toString()
            + "] is a valid directory. Returning 'not valid' and continuing.", e);
        return false;
      }
    }

    /** Returns true when the name is not on the blacklist. Subclasses may tighten this. */
    protected boolean isValidName(final String name) {
      return !blacklist.contains(name);
    }
  }
951
952
953
954
  /**
   * Directory filter that accepts every directory (a BlackListDirFilter with an empty
   * blacklist).
   */
  public static class DirFilter extends BlackListDirFilter {

    public DirFilter(FileSystem fs) {
      super(fs, null);
    }
  }
961
962
963
964
965
966 public static class UserTableDirFilter extends BlackListDirFilter {
967 public UserTableDirFilter(FileSystem fs) {
968 super(fs, HConstants.HBASE_NON_TABLE_DIRS);
969 }
970
971 @Override
972 protected boolean isValidName(final String name) {
973 if (!super.isValidName(name))
974 return false;
975
976 try {
977 TableName.isLegalTableQualifierName(Bytes.toBytes(name));
978 } catch (IllegalArgumentException e) {
979 LOG.info("INVALID NAME " + name);
980 return false;
981 }
982 return true;
983 }
984 }
985
986
987
988
989
990
991
992
993 public static boolean isAppendSupported(final Configuration conf) {
994 boolean append = conf.getBoolean("dfs.support.append", false);
995 if (append) {
996 try {
997
998
999
1000 SequenceFile.Writer.class.getMethod("syncFs", new Class<?> []{});
1001 append = true;
1002 } catch (SecurityException e) {
1003 } catch (NoSuchMethodException e) {
1004 append = false;
1005 }
1006 }
1007 if (!append) {
1008
1009 try {
1010 FSDataOutputStream.class.getMethod("hflush", new Class<?> []{});
1011 append = true;
1012 } catch (NoSuchMethodException e) {
1013 append = false;
1014 }
1015 }
1016 return append;
1017 }
1018
1019
1020
1021
1022
1023
1024
1025
1026
  /**
   * Recovers the filesystem lease on the given file. The concrete behavior is
   * implementation-specific (see subclasses such as FSHDFSUtils); the reporter lets
   * callers cancel long-running recovery.
   *
   * @param fs filesystem holding the file
   * @param p file whose lease should be recovered
   * @param conf configuration for the recovery
   * @param reporter progress/cancellation hook
   * @throws IOException if recovery fails
   */
  public abstract void recoverFileLease(final FileSystem fs, final Path p,
      Configuration conf, CancelableProgressable reporter) throws IOException;
1029
  /**
   * Returns all table directories under the root dir: for every namespace directory
   * matching &lt;rootdir&gt;/&lt;BASE_NAMESPACE_DIR&gt;/*, collects its user-table
   * children.
   *
   * @param fs filesystem holding the root dir
   * @param rootdir hbase root directory
   * @throws IOException on filesystem error
   */
  public static List<Path> getTableDirs(final FileSystem fs, final Path rootdir)
      throws IOException {
    List<Path> tableDirs = new LinkedList<Path>();

    for(FileStatus status :
        fs.globStatus(new Path(rootdir,
            new Path(HConstants.BASE_NAMESPACE_DIR, "*")))) {
      tableDirs.addAll(FSUtils.getLocalTableDirs(fs, status.getPath()));
    }
    return tableDirs;
  }
1041
1042
1043
1044
1045
1046
1047
1048
1049 public static List<Path> getLocalTableDirs(final FileSystem fs, final Path rootdir)
1050 throws IOException {
1051
1052 FileStatus[] dirs = fs.listStatus(rootdir, new UserTableDirFilter(fs));
1053 List<Path> tabledirs = new ArrayList<Path>(dirs.length);
1054 for (FileStatus dir: dirs) {
1055 tabledirs.add(dir.getPath());
1056 }
1057 return tabledirs;
1058 }
1059
1060
1061
1062
  /**
   * Filter accepting directories whose names look like encoded region names
   * (lower-case hex).
   */
  public static class RegionDirFilter extends AbstractFileStatusFilter {
    // Encoded region names are lower-case hex strings.
    final public static Pattern regionDirPattern = Pattern.compile("^[0-9a-f]*$");
    final FileSystem fs;

    public RegionDirFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    protected boolean accept(Path p, @CheckForNull Boolean isDir) {
      if (!regionDirPattern.matcher(p.getName()).matches()) {
        return false;
      }

      try {
        return isDirectory(fs, isDir, p);
      } catch (IOException ioe) {
        // Skip rather than fail the whole listing when a path cannot be stat'ed.
        LOG.warn("Skipping file " + p +" due to IOException", ioe);
        return false;
      }
    }
  }
1087
  /**
   * Returns the region directory for {@code region} under the given hbase root dir
   * (root dir -> table dir -> encoded region name).
   */
  public static Path getRegionDirFromRootDir(Path rootDir, HRegionInfo region) {
    return getRegionDirFromTableDir(getTableDir(rootDir, region.getTable()), region);
  }
1091
  /**
   * Returns the region directory under the given table dir. Replica regions are
   * resolved to their on-filesystem (primary) region first via
   * ServerRegionReplicaUtil.getRegionInfoForFs.
   */
  public static Path getRegionDirFromTableDir(Path tableDir, HRegionInfo region) {
    return new Path(tableDir, ServerRegionReplicaUtil.getRegionInfoForFs(region).getEncodedName());
  }
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104 public static List<Path> getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException {
1105
1106 List<FileStatus> rds = listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs));
1107 if (rds == null) {
1108 return new ArrayList<Path>();
1109 }
1110 List<Path> regionDirs = new ArrayList<Path>(rds.size());
1111 for (FileStatus rdfs: rds) {
1112 Path rdPath = rdfs.getPath();
1113 regionDirs.add(rdPath);
1114 }
1115 return regionDirs;
1116 }
1117
1118
1119
1120
1121
  /**
   * Filter accepting directories whose names are legal column-family names.
   */
  public static class FamilyDirFilter extends AbstractFileStatusFilter {
    final FileSystem fs;

    public FamilyDirFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    protected boolean accept(Path p, @CheckForNull Boolean isDir) {
      try {
        // isLegalFamilyName throws IllegalArgumentException for illegal names.
        HColumnDescriptor.isLegalFamilyName(Bytes.toBytes(p.getName()));
      } catch (IllegalArgumentException iae) {
        // Not a legal family name; reject.
        return false;
      }

      try {
        return isDirectory(fs, isDir, p);
      } catch (IOException ioe) {
        // Skip rather than fail the whole listing when a path cannot be stat'ed.
        LOG.warn("Skipping file " + p +" due to IOException", ioe);
        return false;
      }
    }
  }
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157 public static List<Path> getFamilyDirs(final FileSystem fs, final Path regionDir) throws IOException {
1158
1159 FileStatus[] fds = fs.listStatus(regionDir, new FamilyDirFilter(fs));
1160 List<Path> familyDirs = new ArrayList<Path>(fds.length);
1161 for (FileStatus fdfs: fds) {
1162 Path fdPath = fdfs.getPath();
1163 familyDirs.add(fdPath);
1164 }
1165 return familyDirs;
1166 }
1167
1168 public static List<Path> getReferenceFilePaths(final FileSystem fs, final Path familyDir) throws IOException {
1169 List<FileStatus> fds = listStatusWithStatusFilter(fs, familyDir, new ReferenceFileFilter(fs));
1170 if (fds == null) {
1171 return new ArrayList<Path>();
1172 }
1173 List<Path> referenceFiles = new ArrayList<Path>(fds.size());
1174 for (FileStatus fdfs: fds) {
1175 Path fdPath = fdfs.getPath();
1176 referenceFiles.add(fdPath);
1177 }
1178 return referenceFiles;
1179 }
1180
1181
1182
1183
  /**
   * Filter accepting regular files whose names look like HFiles
   * (per StoreFileInfo.isHFile).
   */
  public static class HFileFilter extends AbstractFileStatusFilter {
    final FileSystem fs;

    public HFileFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    protected boolean accept(Path p, @CheckForNull Boolean isDir) {
      if (!StoreFileInfo.isHFile(p)) {
        return false;
      }

      try {
        return isFile(fs, isDir, p);
      } catch (IOException ioe) {
        // Skip rather than fail the whole listing when a path cannot be stat'ed.
        LOG.warn("Skipping file " + p +" due to IOException", ioe);
        return false;
      }
    }
  }
1206
1207
1208
1209
1210
  /**
   * PathFilter accepting paths that are HFile links (per HFileLink.isHFileLink).
   */
  public static class HFileLinkFilter implements PathFilter {

    @Override
    public boolean accept(Path p) {
      return HFileLink.isHFileLink(p);
    }
  }
1218
  /**
   * Filter accepting regular files that are reference files
   * (per StoreFileInfo.isReference).
   */
  public static class ReferenceFileFilter extends AbstractFileStatusFilter {

    private final FileSystem fs;

    public ReferenceFileFilter(FileSystem fs) {
      this.fs = fs;
    }

    @Override
    protected boolean accept(Path p, @CheckForNull Boolean isDir) {
      if (!StoreFileInfo.isReference(p)) {
        return false;
      }

      try {
        // Only accept references that are actual files.
        return isFile(fs, isDir, p);
      } catch (IOException ioe) {
        // Skip rather than fail the whole listing when a path cannot be stat'ed.
        LOG.warn("Skipping file " + p +" due to IOException", ioe);
        return false;
      }
    }
  }
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
  /**
   * Builds a map from store-file name to full path for the given table, delegating to
   * the full overload with no path filter, no executor, and no error reporter.
   *
   * @param map map to add entries to; may be null, in which case a new map is created
   * @param fs filesystem holding the root dir
   * @param hbaseRootDir hbase root directory
   * @param tableName table whose store files are wanted
   * @throws IOException on filesystem error
   * @throws InterruptedException if interrupted while scanning
   */
  public static Map<String, Path> getTableStoreFilePathMap(Map<String, Path> map,
    final FileSystem fs, final Path hbaseRootDir, TableName tableName)
  throws IOException, InterruptedException {
    return getTableStoreFilePathMap(map, fs, hbaseRootDir, tableName, null, null, null);
  }
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
  /**
   * Runs through the HBase rootdir/tablename and creates a reverse lookup map from
   * table StoreFile name to its full Path.  One task is created per region
   * directory; tasks run on {@code executor} when one is supplied, otherwise each
   * task is executed inline on the calling thread via a FutureTask.
   *
   * @param resultMap map to add values to; if null a ConcurrentHashMap is created
   * @param fs the file system to use
   * @param hbaseRootDir the root directory to scan
   * @param tableName name of the table to scan
   * @param sfFilter optional filter applied to each store file path; null accepts all
   * @param executor optional executor used to process region directories in parallel
   * @param errors optional reporter whose progress() is called periodically
   * @return map keyed by StoreFile name with a value of the full Path
   * @throws IOException when scanning the directory fails (first recorded failure is rethrown)
   * @throws InterruptedException if waiting on a task is interrupted
   */
  public static Map<String, Path> getTableStoreFilePathMap(
      Map<String, Path> resultMap,
      final FileSystem fs, final Path hbaseRootDir, TableName tableName, final PathFilter sfFilter,
      ExecutorService executor, final ErrorReporter errors) throws IOException, InterruptedException {

    // A concurrent map is required because region tasks may write to it in parallel.
    final Map<String, Path> finalResultMap =
        resultMap == null ? new ConcurrentHashMap<String, Path>(128, 0.75f, 32) : resultMap;

    // Only look under this table's directory.
    Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);

    // Shared across tasks; Vector provides synchronized access to recorded failures.
    final FamilyDirFilter familyFilter = new FamilyDirFilter(fs);
    final Vector<Exception> exceptions = new Vector<Exception>();

    try {
      List<FileStatus> regionDirs = FSUtils.listStatusWithStatusFilter(fs, tableDir, new RegionDirFilter(fs));
      if (regionDirs == null) {
        return finalResultMap;
      }

      final List<Future<?>> futures = new ArrayList<Future<?>>(regionDirs.size());

      for (FileStatus regionDir : regionDirs) {
        if (null != errors) {
          errors.progress();
        }
        final Path dd = regionDir.getPath();

        // Stop submitting new work as soon as any task has recorded a failure.
        if (!exceptions.isEmpty()) {
          break;
        }

        Runnable getRegionStoreFileMapCall = new Runnable() {
          @Override
          public void run() {
            try {
              // Accumulate this region's entries locally, then publish in one putAll.
              HashMap<String,Path> regionStoreFileMap = new HashMap<String, Path>();
              List<FileStatus> familyDirs = FSUtils.listStatusWithStatusFilter(fs, dd, familyFilter);
              if (familyDirs == null) {
                // Distinguish a region deleted mid-scan from one with no families.
                if (!fs.exists(dd)) {
                  LOG.warn("Skipping region because it no longer exists: " + dd);
                } else {
                  LOG.warn("Skipping region because it has no family dirs: " + dd);
                }
                return;
              }
              for (FileStatus familyDir : familyDirs) {
                if (null != errors) {
                  errors.progress();
                }
                Path family = familyDir.getPath();
                // Recovered edits sit beside family dirs but contain no store files.
                if (family.getName().equals(HConstants.RECOVERED_EDITS_DIR)) {
                  continue;
                }

                // Map each (optionally filtered) store file name to its full path.
                FileStatus[] familyStatus = fs.listStatus(family);
                for (FileStatus sfStatus : familyStatus) {
                  if (null != errors) {
                    errors.progress();
                  }
                  Path sf = sfStatus.getPath();
                  if (sfFilter == null || sfFilter.accept(sf)) {
                    regionStoreFileMap.put( sf.getName(), sf);
                  }
                }
              }
              finalResultMap.putAll(regionStoreFileMap);
            } catch (Exception e) {
              // Record the failure; the submit loop and finally block react to it.
              LOG.error("Could not get region store file map for region: " + dd, e);
              exceptions.add(e);
            }
          }
        };

        // Run asynchronously when an executor is available, otherwise inline
        // (FutureTask.run executes on this thread) so the join loop is uniform.
        if (executor != null) {
          Future<?> future = executor.submit(getRegionStoreFileMapCall);
          futures.add(future);
        } else {
          FutureTask<?> future = new FutureTask<Object>(getRegionStoreFileMapCall, null);
          future.run();
          futures.add(future);
        }
      }

      // Wait for all submitted tasks; bail out early once any task has failed.
      for (Future<?> f : futures) {
        if (!exceptions.isEmpty()) {
          break;
        }
        try {
          f.get();
        } catch (ExecutionException e) {
          // Task bodies catch their own exceptions, so this should not happen.
          LOG.error("Unexpected exec exception! Should've been caught already. (Bug?)", e);

        }
      }
    } catch (IOException e) {
      LOG.error("Cannot execute getTableStoreFilePathMap for " + tableName, e);
      exceptions.add(e);
    } finally {
      if (!exceptions.isEmpty()) {
        // Rethrow the first recorded failure: IOExceptions as-is, anything else
        // wrapped by Throwables.propagate.
        Throwables.propagateIfInstanceOf(exceptions.firstElement(), IOException.class);
        throw Throwables.propagate(exceptions.firstElement());
      }
    }

    return finalResultMap;
  }
1402
1403 public static int getRegionReferenceFileCount(final FileSystem fs, final Path p) {
1404 int result = 0;
1405 try {
1406 for (Path familyDir:getFamilyDirs(fs, p)){
1407 result += getReferenceFilePaths(fs, familyDir).size();
1408 }
1409 } catch (IOException e) {
1410 LOG.warn("Error Counting reference files.", e);
1411 }
1412 return result;
1413 }
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
  /**
   * Runs through the HBase rootdir and creates a reverse lookup map from StoreFile
   * name to full Path for every table.  Delegates to the fully-parameterized
   * overload with no filter, no executor and no error reporter.
   *
   * @param fs the file system to use
   * @param hbaseRootDir the root directory to scan
   * @return map keyed by StoreFile name with a value of the full Path
   * @throws IOException when scanning the directory fails
   * @throws InterruptedException if the scan is interrupted
   */
  public static Map<String, Path> getTableStoreFilePathMap(
      final FileSystem fs, final Path hbaseRootDir)
      throws IOException, InterruptedException {
    return getTableStoreFilePathMap(fs, hbaseRootDir, null, null, null);
  }
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452 public static Map<String, Path> getTableStoreFilePathMap(
1453 final FileSystem fs, final Path hbaseRootDir, PathFilter sfFilter,
1454 ExecutorService executor, ErrorReporter errors)
1455 throws IOException, InterruptedException {
1456 ConcurrentHashMap<String, Path> map = new ConcurrentHashMap<String, Path>(1024, 0.75f, 32);
1457
1458
1459
1460
1461
1462 for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) {
1463 getTableStoreFilePathMap(map, fs, hbaseRootDir,
1464 FSUtils.getTableName(tableDir), sfFilter, executor, errors);
1465 }
1466 return map;
1467 }
1468
1469
1470
1471
1472
1473
1474
1475
1476 public static List<FileStatus> filterFileStatuses(FileStatus[] input,
1477 FileStatusFilter filter) {
1478 if (input == null) return null;
1479 return filterFileStatuses(Iterators.forArray(input), filter);
1480 }
1481
1482
1483
1484
1485
1486
1487
1488
1489 public static List<FileStatus> filterFileStatuses(Iterator<FileStatus> input,
1490 FileStatusFilter filter) {
1491 if (input == null) return null;
1492 ArrayList<FileStatus> results = new ArrayList<FileStatus>();
1493 while (input.hasNext()) {
1494 FileStatus f = input.next();
1495 if (filter.accept(f)) {
1496 results.add(f);
1497 }
1498 }
1499 return results;
1500 }
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513 public static List<FileStatus> listStatusWithStatusFilter(final FileSystem fs,
1514 final Path dir, final FileStatusFilter filter) throws IOException {
1515 FileStatus [] status = null;
1516 try {
1517 status = fs.listStatus(dir);
1518 } catch (FileNotFoundException fnfe) {
1519
1520 if (LOG.isTraceEnabled()) {
1521 LOG.trace(dir + " doesn't exist");
1522 }
1523 }
1524
1525 if (status == null || status.length < 1) {
1526 return null;
1527 }
1528
1529 if (filter == null) {
1530 return Arrays.asList(status);
1531 } else {
1532 List<FileStatus> status2 = filterFileStatuses(status, filter);
1533 if (status2 == null || status2.isEmpty()) {
1534 return null;
1535 } else {
1536 return status2;
1537 }
1538 }
1539 }
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
  /**
   * Throws an {@link AccessDeniedException} unless the user represented by
   * {@code ugi} may perform {@code action} on {@code file}, using POSIX-style
   * evaluation: the owner is judged only by the user bits, a group member only by
   * the group bits, and everyone else only by the "other" bits.
   *
   * @param ugi the user whose access is being checked
   * @param file status whose owner, group and permission are consulted
   * @param action the action (read/write/execute) to check
   * @throws AccessDeniedException if the applicable permission bits do not imply the action
   */
  public static void checkAccess(UserGroupInformation ugi, FileStatus file,
      FsAction action) throws AccessDeniedException {
    if (ugi.getShortUserName().equals(file.getOwner())) {
      // Owner: only the user bits apply; group/other bits are deliberately ignored.
      if (file.getPermission().getUserAction().implies(action)) {
        return;
      }
    } else if (contains(ugi.getGroupNames(), file.getGroup())) {
      // Group member (but not owner): only the group bits apply.
      if (file.getPermission().getGroupAction().implies(action)) {
        return;
      }
    } else if (file.getPermission().getOtherAction().implies(action)) {
      // Neither owner nor group member: the "other" bits apply.
      return;
    }
    throw new AccessDeniedException("Permission denied:" + " action=" + action
        + " path=" + file.getPath() + " user=" + ugi.getShortUserName());
  }
1567
1568 private static boolean contains(String[] groups, String user) {
1569 for (String group : groups) {
1570 if (group.equals(user)) {
1571 return true;
1572 }
1573 }
1574 return false;
1575 }
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
  /**
   * Computes the degree-of-locality mapping using the thread pool size taken from
   * the configuration key {@code THREAD_POOLSIZE} (falling back to
   * {@code DEFAULT_THREAD_POOLSIZE}).  Delegates to the three-argument overload
   * with no table restriction.
   *
   * @param conf configuration used to locate the filesystem and thread pool size
   * @return mapping from region name to a map of host to locality fraction
   * @throws IOException on filesystem errors
   */
  public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
      final Configuration conf) throws IOException {
    return getRegionDegreeLocalityMappingFromFS(
        conf, null,
        conf.getInt(THREAD_POOLSIZE, DEFAULT_THREAD_POOLSIZE));

  }
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614 public static Map<String, Map<String, Float>> getRegionDegreeLocalityMappingFromFS(
1615 final Configuration conf, final String desiredTable, int threadPoolSize)
1616 throws IOException {
1617 Map<String, Map<String, Float>> regionDegreeLocalityMapping =
1618 new ConcurrentHashMap<String, Map<String, Float>>();
1619 getRegionLocalityMappingFromFS(conf, desiredTable, threadPoolSize, null,
1620 regionDegreeLocalityMapping);
1621 return regionDegreeLocalityMapping;
1622 }
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
  /**
   * Scans the filesystem to populate region locality information into the output
   * maps; callers pass null for whichever output they do not need.
   * NOTE(review): the bulk of this method's body is not visible in this chunk —
   * confirm the exact semantics of each output map against the full implementation.
   *
   * @param conf configuration used to obtain the filesystem and HBase root dir
   * @param desiredTable if non-null, presumably limits the scan to that table — verify
   * @param threadPoolSize size of the pool used to parallelize the scan
   * @param regionToBestLocalityRSMapping output map (region to region server), may be null
   * @param regionDegreeLocalityMapping output map (region to host/locality), may be null
   * @throws IOException on filesystem errors
   */
  private static void getRegionLocalityMappingFromFS(
      final Configuration conf, final String desiredTable,
      int threadPoolSize,
      Map<String, String> regionToBestLocalityRSMapping,
      Map<String, Map<String, Float>> regionDegreeLocalityMapping)
      throws IOException {
    // Resolve the filesystem and HBase root; elapsed-time measurement starts here.
    FileSystem fs = FileSystem.get(conf);
    Path rootPath = FSUtils.getRootDir(conf);
    long startTime = EnvironmentEdgeManager.currentTime();
    Path queryPath;
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754 public static void setupShortCircuitRead(final Configuration conf) {
1755
1756 boolean shortCircuitSkipChecksum =
1757 conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
1758 boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
1759 if (shortCircuitSkipChecksum) {
1760 LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
1761 "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
1762 "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
1763 assert !shortCircuitSkipChecksum;
1764 }
1765 checkShortCircuitReadBufferSize(conf);
1766 }
1767
1768
1769
1770
1771
1772 public static void checkShortCircuitReadBufferSize(final Configuration conf) {
1773 final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
1774 final int notSet = -1;
1775
1776 final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
1777 int size = conf.getInt(dfsKey, notSet);
1778
1779 if (size != notSet) return;
1780
1781 int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
1782 conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
1783 }
1784
1785
1786
1787
1788 public static DFSHedgedReadMetrics getDFSHedgedReadMetrics(final Configuration c)
1789 throws IOException {
1790 if (!isHDFS(c)) {
1791 return null;
1792 }
1793
1794
1795
1796 final String name = "getHedgedReadMetrics";
1797 DFSClient dfsclient = ((DistributedFileSystem)FileSystem.get(c)).getClient();
1798 Method m;
1799 try {
1800 m = dfsclient.getClass().getDeclaredMethod(name);
1801 } catch (NoSuchMethodException e) {
1802 LOG.warn("Failed find method " + name + " in dfsclient; no hedged read metrics: " +
1803 e.getMessage());
1804 return null;
1805 } catch (SecurityException e) {
1806 LOG.warn("Failed find method " + name + " in dfsclient; no hedged read metrics: " +
1807 e.getMessage());
1808 return null;
1809 }
1810 m.setAccessible(true);
1811 try {
1812 return (DFSHedgedReadMetrics)m.invoke(dfsclient);
1813 } catch (IllegalAccessException e) {
1814 LOG.warn("Failed invoking method " + name + " on dfsclient; no hedged read metrics: " +
1815 e.getMessage());
1816 return null;
1817 } catch (IllegalArgumentException e) {
1818 LOG.warn("Failed invoking method " + name + " on dfsclient; no hedged read metrics: " +
1819 e.getMessage());
1820 return null;
1821 } catch (InvocationTargetException e) {
1822 LOG.warn("Failed invoking method " + name + " on dfsclient; no hedged read metrics: " +
1823 e.getMessage());
1824 return null;
1825 }
1826 }
1827 }