1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.util;
20
21 import com.google.common.collect.Lists;
22
23 import java.io.FileNotFoundException;
24 import java.io.IOException;
25 import java.lang.reflect.InvocationTargetException;
26 import java.lang.reflect.Method;
27 import java.net.URI;
28 import java.net.URISyntaxException;
29 import java.util.List;
30 import java.util.Locale;
31 import java.util.Map;
32 import java.util.concurrent.ConcurrentHashMap;
33
34 import org.apache.commons.logging.Log;
35 import org.apache.commons.logging.LogFactory;
36 import org.apache.hadoop.HadoopIllegalArgumentException;
37 import org.apache.hadoop.conf.Configuration;
38 import org.apache.hadoop.fs.FSDataOutputStream;
39 import org.apache.hadoop.fs.FileStatus;
40 import org.apache.hadoop.fs.FileSystem;
41 import org.apache.hadoop.fs.LocatedFileStatus;
42 import org.apache.hadoop.fs.Path;
43 import org.apache.hadoop.fs.PathFilter;
44 import org.apache.hadoop.fs.RemoteIterator;
45 import org.apache.hadoop.fs.permission.FsPermission;
46 import org.apache.hadoop.hbase.HConstants;
47 import org.apache.hadoop.hbase.TableName;
48 import org.apache.hadoop.hbase.classification.InterfaceAudience;
49 import org.apache.hadoop.ipc.RemoteException;
50
51
52
53
54 @InterfaceAudience.Private
55 public abstract class CommonFSUtils {
56 private static final Log LOG = LogFactory.getLog(CommonFSUtils.class);
57
58
59 public static final String HBASE_WAL_DIR = "hbase.wal.dir";
60
61
62 public static final String UNSAFE_STREAM_CAPABILITY_ENFORCE =
63 "hbase.unsafe.stream.capability.enforce";
64
65
66 public static final String FULL_RWX_PERMISSIONS = "777";
67
68 protected CommonFSUtils() {
69 super();
70 }
71
72
73
74
75
76
77
78
79
80 public static boolean isStartingWithPath(final Path rootPath, final String path) {
81 String uriRootPath = rootPath.toUri().getPath();
82 String tailUriPath = (new Path(path)).toUri().getPath();
83 return tailUriPath.startsWith(uriRootPath);
84 }
85
86
87
88
89
90
91
92
93
94 public static boolean isMatchingTail(final Path pathToSearch, String pathTail) {
95 return isMatchingTail(pathToSearch, new Path(pathTail));
96 }
97
98
99
100
101
102
103
104
105
  /**
   * Compares the two paths component-by-component from the right, returning true when every
   * non-empty component of {@code pathTail} equals the corresponding component of
   * {@code pathToSearch}. Scheme and authority are not considered, so a fully-qualified path
   * can match an unqualified tail of the same depth.
   * @param pathToSearch the (possibly fully-qualified) path to examine
   * @param pathTail the trailing components that must match
   * @return true if the trailing components match, false otherwise
   */
  public static boolean isMatchingTail(final Path pathToSearch, final Path pathTail) {
    // Differing depths can never fully match component-for-component.
    if (pathToSearch.depth() != pathTail.depth()) {
      return false;
    }
    Path tailPath = pathTail;
    String tailName;
    Path toSearch = pathToSearch;
    String toSearchName;
    boolean result = false;
    do {
      tailName = tailPath.getName();
      // Tail exhausted without finding a mismatch: everything compared equal.
      if (tailName == null || tailName.length() <= 0) {
        result = true;
        break;
      }
      toSearchName = toSearch.getName();
      // Search path exhausted while tail still has components: no match.
      if (toSearchName == null || toSearchName.length() <= 0) {
        break;
      }
      // Step one level up on both sides; the loop continues while components agree.
      tailPath = tailPath.getParent();
      toSearch = toSearch.getParent();
    } while(tailName.equals(toSearchName));
    return result;
  }
131
132
133
134
135
136
137
138
139 public static boolean deleteDirectory(final FileSystem fs, final Path dir) throws IOException {
140 return fs.exists(dir) && fs.delete(dir, true);
141 }
142
143
144
145
146
147
148
149
150 public static long getDefaultBlockSize(final FileSystem fs, final Path path) {
151 return fs.getDefaultBlockSize(path);
152 }
153
154
155
156
157
158
159
160
161 public static short getDefaultReplication(final FileSystem fs, final Path path) {
162 return fs.getDefaultReplication(path);
163 }
164
165
166
167
168
169
170
171
172
173
174
175 public static int getDefaultBufferSize(final FileSystem fs) {
176 return fs.getConf().getInt("io.file.buffer.size", 4096);
177 }
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196 public static FSDataOutputStream create(FileSystem fs, Path path,
197 FsPermission perm, boolean overwrite) throws IOException {
198 if (LOG.isTraceEnabled()) {
199 LOG.trace("Creating file=" + path + " with permission=" + perm + ", overwrite=" + overwrite);
200 }
201 return fs.create(path, perm, overwrite, getDefaultBufferSize(fs),
202 getDefaultReplication(fs, path), getDefaultBlockSize(fs, path), null);
203 }
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218 public static FsPermission getFilePermissions(final FileSystem fs,
219 final Configuration conf, final String permssionConfKey) {
220 boolean enablePermissions = conf.getBoolean(
221 HConstants.ENABLE_DATA_FILE_UMASK, false);
222
223 if (enablePermissions) {
224 try {
225 FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS);
226
227 String mask = conf.get(permssionConfKey);
228 if (mask == null) {
229 return FsPermission.getFileDefault();
230 }
231
232 FsPermission umask = new FsPermission(mask);
233 return perm.applyUMask(umask);
234 } catch (IllegalArgumentException e) {
235 LOG.warn(
236 "Incorrect umask attempted to be created: "
237 + conf.get(permssionConfKey)
238 + ", using default file permissions.", e);
239 return FsPermission.getFileDefault();
240 }
241 }
242 return FsPermission.getFileDefault();
243 }
244
245
246
247
248
249
250
251
252 public static Path validateRootPath(Path root) throws IOException {
253 try {
254 URI rootURI = new URI(root.toString());
255 String scheme = rootURI.getScheme();
256 if (scheme == null) {
257 throw new IOException("Root directory does not have a scheme");
258 }
259 return root;
260 } catch (URISyntaxException e) {
261 throw new IOException("Root directory path is not a valid " +
262 "URI -- check your " + HConstants.HBASE_DIR + " configuration", e);
263 }
264 }
265
266
267
268
269
270
271
272
273
274
275 public static String removeWALRootPath(Path path, final Configuration conf) throws IOException {
276 Path root = getWALRootDir(conf);
277 String pathStr = path.toString();
278
279 if (!pathStr.startsWith(root.toString())) {
280 return pathStr;
281 }
282
283 return pathStr.substring(root.toString().length() + 1);
284 }
285
286
287
288
289
290
291
292
293
294
295
296 public static String getPath(Path p) {
297 return p.toUri().getPath();
298 }
299
300
301
302
303
304
305
306 public static Path getRootDir(final Configuration c) throws IOException {
307 Path p = new Path(c.get(HConstants.HBASE_DIR));
308 FileSystem fs = p.getFileSystem(c);
309 return p.makeQualified(fs);
310 }
311
312 public static void setRootDir(final Configuration c, final Path root) {
313 c.set(HConstants.HBASE_DIR, root.toString());
314 }
315
316 public static void setFsDefault(final Configuration c, final Path root) {
317 c.set("fs.defaultFS", root.toString());
318 }
319
320 public static FileSystem getRootDirFileSystem(final Configuration c) throws IOException {
321 Path p = getRootDir(c);
322 return p.getFileSystem(c);
323 }
324
325
326
327
328
329
330
331 public static Path getWALRootDir(final Configuration c) throws IOException {
332 Path p = new Path(c.get(HBASE_WAL_DIR, c.get(HConstants.HBASE_DIR)));
333 if (!isValidWALRootDir(p, c)) {
334 return getRootDir(c);
335 }
336 FileSystem fs = p.getFileSystem(c);
337 return p.makeQualified(fs);
338 }
339
340 public static void setWALRootDir(final Configuration c, final Path root) {
341 c.set(HBASE_WAL_DIR, root.toString());
342 }
343
344 public static FileSystem getWALFileSystem(final Configuration c) throws IOException {
345 Path p = getWALRootDir(c);
346 FileSystem fs = p.getFileSystem(c);
347
348 String enforceStreamCapability = c.get(UNSAFE_STREAM_CAPABILITY_ENFORCE);
349 if (enforceStreamCapability != null) {
350 fs.getConf().set(UNSAFE_STREAM_CAPABILITY_ENFORCE, enforceStreamCapability);
351 }
352 return fs;
353 }
354
355 private static boolean isValidWALRootDir(Path walDir, final Configuration c) throws IOException {
356 Path rootDir = getRootDir(c);
357 if (walDir != rootDir) {
358 if (walDir.toString().startsWith(rootDir.toString() + "/")) {
359 throw new IllegalStateException("Illegal WAL directory specified. " +
360 "WAL directories are not permitted to be under the root directory if set.");
361 }
362 }
363 return true;
364 }
365
366
367
368
369
370
371
372
373
374 public static Path getTableDir(Path rootdir, final TableName tableName) {
375 return new Path(getNamespaceDir(rootdir, tableName.getNamespaceAsString()),
376 tableName.getQualifierAsString());
377 }
378
379
380
381
382
383
384
385
386
387
388 public static Path getRegionDir(Path rootdir, TableName tableName, String regionName) {
389 return new Path(getTableDir(rootdir, tableName), regionName);
390 }
391
392 public static Path getWALTableDir(Configuration c, TableName tableName) throws IOException {
393 return new Path(getNamespaceDir(getWALRootDir(c), tableName.getNamespaceAsString()),
394 tableName.getQualifierAsString());
395 }
396
397
398
399
400
401
402
403
404
405 public static TableName getTableName(Path tablePath) {
406 return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
407 }
408
409
410
411
412
413
414
415
416
417 public static Path getNamespaceDir(Path rootdir, final String namespace) {
418 return new Path(rootdir, new Path(HConstants.BASE_NAMESPACE_DIR,
419 new Path(namespace)));
420 }
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440 public static void setStoragePolicy(final FileSystem fs, final Configuration conf,
441 final Path path, final String policyKey, final String defaultPolicy) {
442 String storagePolicy = conf.get(policyKey, defaultPolicy).toUpperCase(Locale.ROOT);
443 if (storagePolicy.equals(defaultPolicy)) {
444 if (LOG.isTraceEnabled()) {
445 LOG.trace("default policy of " + defaultPolicy + " requested, exiting early.");
446 }
447 return;
448 }
449 setStoragePolicy(fs, path, storagePolicy);
450 }
451
452 private static final Map<FileSystem, Boolean> warningMap = new ConcurrentHashMap<>();
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469 public static void setStoragePolicy(final FileSystem fs, final Path path,
470 final String storagePolicy) {
471 try {
472 setStoragePolicy(fs, path, storagePolicy, false);
473 } catch (IOException e) {
474
475 LOG.warn("We have chosen not to throw exception but some unexpectedly thrown out", e);
476 }
477 }
478
479 static void setStoragePolicy(final FileSystem fs, final Path path, final String storagePolicy,
480 boolean throwException) throws IOException {
481 if (storagePolicy == null) {
482 if (LOG.isTraceEnabled()) {
483 LOG.trace("We were passed a null storagePolicy, exiting early.");
484 }
485 return;
486 }
487 String trimmedStoragePolicy = storagePolicy.trim();
488 if (trimmedStoragePolicy.isEmpty()) {
489 if (LOG.isTraceEnabled()) {
490 LOG.trace("We were passed an empty storagePolicy, exiting early.");
491 }
492 return;
493 } else {
494 trimmedStoragePolicy = trimmedStoragePolicy.toUpperCase(Locale.ROOT);
495 }
496 if (trimmedStoragePolicy.equals(HConstants.DEFER_TO_HDFS_STORAGE_POLICY)) {
497 if (LOG.isTraceEnabled()) {
498 LOG.trace(
499 "We were passed the defer-to-hdfs policy " + trimmedStoragePolicy + ", exiting early.");
500 }
501 return;
502 }
503 try {
504 invokeSetStoragePolicy(fs, path, trimmedStoragePolicy);
505 } catch (IOException e) {
506 if (!warningMap.containsKey(fs)) {
507 warningMap.put(fs, true);
508 LOG.warn("Failed to invoke set storage policy API on FS; presuming it doesn't "
509 + "support setStoragePolicy. Unable to set storagePolicy=" + trimmedStoragePolicy
510 + " on path=" + path);
511 } else if (LOG.isDebugEnabled()) {
512 LOG.debug("Failed to invoke set storage policy API on FS; presuming it doesn't "
513 + "support setStoragePolicy. Unable to set storagePolicy=" + trimmedStoragePolicy
514 + " on path=" + path);
515 }
516 if (throwException) {
517 throw e;
518 }
519 }
520 }
521
522
523
524
525 private static void invokeSetStoragePolicy(final FileSystem fs, final Path path,
526 final String storagePolicy) throws IOException {
527 Exception toThrow = null;
528
529 try {
530 fs.setStoragePolicy(path, storagePolicy);
531
532 if (LOG.isDebugEnabled()) {
533 LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + path);
534 }
535 } catch (Exception e) {
536 toThrow = e;
537
538
539 if (!warningMap.containsKey(fs)) {
540 warningMap.put(fs, true);
541 LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
542 } else if (LOG.isDebugEnabled()) {
543 LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
544 }
545
546
547 if (e instanceof RemoteException &&
548 HadoopIllegalArgumentException.class.getName().equals(
549 ((RemoteException)e).getClassName())) {
550 if (LOG.isDebugEnabled()) {
551 LOG.debug("Given storage policy, '" +storagePolicy +"', was rejected and probably " +
552 "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " +
553 "trying to use SSD related policies then you're likely missing HDFS-7228. For " +
554 "more information see the 'ArchivalStorage' docs for your Hadoop release.");
555 }
556 }
557 }
558
559 if (toThrow != null) {
560 throw new IOException(toThrow);
561 }
562 }
563
564
565
566
567
568
569 public static boolean isHDFS(final Configuration conf) throws IOException {
570 FileSystem fs = FileSystem.get(conf);
571 String scheme = fs.getUri().getScheme();
572 return scheme.equalsIgnoreCase("hdfs");
573 }
574
575
576
577
578
579
580 public static boolean isRecoveredEdits(Path path) {
581 return path.toString().contains(HConstants.RECOVERED_EDITS_DIR);
582 }
583
584
585
586
587
588
589 public static FileSystem getCurrentFileSystem(Configuration conf) throws IOException {
590 return getRootDir(conf).getFileSystem(conf);
591 }
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607 public static FileStatus[] listStatus(final FileSystem fs,
608 final Path dir, final PathFilter filter) throws IOException {
609 FileStatus [] status = null;
610 try {
611 status = filter == null ? fs.listStatus(dir) : fs.listStatus(dir, filter);
612 } catch (FileNotFoundException fnfe) {
613
614 if (LOG.isTraceEnabled()) {
615 LOG.trace(dir + " doesn't exist");
616 }
617 }
618 if (status == null || status.length < 1) {
619 return null;
620 }
621 return status;
622 }
623
624
625
626
627
628
629
630
631
632 public static FileStatus[] listStatus(final FileSystem fs, final Path dir) throws IOException {
633 return listStatus(fs, dir, null);
634 }
635
636
637
638
639
640
641
642
643 public static List<LocatedFileStatus> listLocatedStatus(final FileSystem fs,
644 final Path dir) throws IOException {
645 List<LocatedFileStatus> status = null;
646 try {
647 RemoteIterator<LocatedFileStatus> locatedFileStatusRemoteIterator = fs
648 .listFiles(dir, false);
649 while (locatedFileStatusRemoteIterator.hasNext()) {
650 if (status == null) {
651 status = Lists.newArrayList();
652 }
653 status.add(locatedFileStatusRemoteIterator.next());
654 }
655 } catch (FileNotFoundException fnfe) {
656
657 if (LOG.isTraceEnabled()) {
658 LOG.trace(dir + " doesn't exist");
659 }
660 }
661 return status;
662 }
663
664
665
666
667
668
669
670
671
672
673 public static boolean delete(final FileSystem fs, final Path path, final boolean recursive)
674 throws IOException {
675 return fs.delete(path, recursive);
676 }
677
678
679
680
681
682
683
684
685
686 public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
687 return fs.exists(path);
688 }
689
690
691
692
693
694
695
696
697 public static void logFileSystemState(final FileSystem fs, final Path root, Log log)
698 throws IOException {
699 log.debug("Current file system:");
700 logFSTree(log, fs, root, "|-");
701 }
702
703
704
705
706
707
708 private static void logFSTree(Log log, final FileSystem fs, final Path root, String prefix)
709 throws IOException {
710 FileStatus[] files = listStatus(fs, root, null);
711 if (files == null) {
712 return;
713 }
714
715 for (FileStatus file : files) {
716 if (file.isDirectory()) {
717 log.debug(prefix + file.getPath().getName() + "/");
718 logFSTree(log, fs, file.getPath(), prefix + "---");
719 } else {
720 log.debug(prefix + file.getPath().getName());
721 }
722 }
723 }
724
725 public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src, final Path dest)
726 throws IOException {
727
728 fs.setTimes(src, EnvironmentEdgeManager.currentTime(), -1);
729 return fs.rename(src, dest);
730 }
731
732
733
734
735
736 public static void checkShortCircuitReadBufferSize(final Configuration conf) {
737 final int defaultSize = HConstants.DEFAULT_BLOCKSIZE * 2;
738 final int notSet = -1;
739
740 final String dfsKey = "dfs.client.read.shortcircuit.buffer.size";
741 int size = conf.getInt(dfsKey, notSet);
742
743 if (size != notSet) {
744 return;
745 }
746
747 int hbaseSize = conf.getInt("hbase." + dfsKey, defaultSize);
748 conf.setIfUnset(dfsKey, Integer.toString(hbaseSize));
749 }
750
751
752
  /**
   * Reflection holder for org.apache.hadoop.fs.StreamCapabilities, which only exists in
   * Hadoop versions that include HDFS-11644. PRESENT records whether the class and its
   * hasCapability(String) method were found; CLASS and METHOD hold the reflected handles
   * (both null when absent). Resolution happens once, at class-load time.
   */
  private static class StreamCapabilities {
    public static final boolean PRESENT;
    public static final Class<?> CLASS;
    public static final Method METHOD;
    static {
      boolean tmp = false;
      Class<?> clazz = null;
      Method method = null;
      try {
        clazz = Class.forName("org.apache.hadoop.fs.StreamCapabilities");
        method = clazz.getMethod("hasCapability", String.class);
        tmp = true;
      } catch(ClassNotFoundException|NoSuchMethodException|SecurityException exception) {
        // Missing class/method just means an older Hadoop; warn once and carry on with
        // PRESENT=false so hasCapability() falls back to assuming support.
        LOG.warn("Your Hadoop installation does not include the StreamCapabilities class from " +
                 "HDFS-11644, so we will skip checking if any FSDataOutputStreams actually " +
                 "support hflush/hsync. If you are running on top of HDFS this probably just " +
                 "means you have an older version and this can be ignored. If you are running on " +
                 "top of an alternate FileSystem implementation you should manually verify that " +
                 "hflush and hsync are implemented; otherwise you risk data loss and hard to " +
                 "diagnose errors when our assumptions are violated.");
        LOG.debug("The first request to check for StreamCapabilities came from this stacktrace.",
            exception);
      } finally {
        // Assign in finally so the blank finals are definitely set on both paths.
        PRESENT = tmp;
        CLASS = clazz;
        METHOD = method;
      }
    }
  }
782
783
784
785
786
787
788
789
790
791
792 public static boolean hasCapability(FSDataOutputStream stream, String capability) {
793
794 if (stream == null) {
795 throw new NullPointerException("stream parameter must not be null.");
796 }
797
798
799 boolean result = true;
800 if (StreamCapabilities.PRESENT) {
801
802
803
804 result = false;
805 if (StreamCapabilities.CLASS.isAssignableFrom(stream.getClass())) {
806 try {
807 result = ((Boolean)StreamCapabilities.METHOD.invoke(stream, capability)).booleanValue();
808 } catch (IllegalAccessException|IllegalArgumentException|InvocationTargetException
809 exception) {
810 LOG.warn("Your Hadoop installation's StreamCapabilities implementation doesn't match " +
811 "our understanding of how it's supposed to work. Please file a JIRA and include " +
812 "the following stack trace. In the mean time we're interpreting this behavior " +
813 "difference as a lack of capability support, which will probably cause a failure.",
814 exception);
815 }
816 }
817 }
818 return result;
819 }
820
821
822
823
824
825 public static class StreamLacksCapabilityException extends IOException {
826 private static final long serialVersionUID = 1L;
827 public StreamLacksCapabilityException(String message, Throwable cause) {
828 super(message, cause);
829 }
830 public StreamLacksCapabilityException(String message) {
831 super(message);
832 }
833 }
834 }