1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.util;
20
21 import static org.junit.Assert.assertEquals;
22 import static org.junit.Assert.assertFalse;
23 import static org.junit.Assert.assertNotEquals;
24 import static org.junit.Assert.assertNotNull;
25 import static org.junit.Assert.assertNull;
26 import static org.junit.Assert.assertTrue;
27
28 import java.io.File;
29 import java.io.IOException;
30 import java.util.Random;
31 import java.util.UUID;
32
33 import org.apache.commons.logging.Log;
34 import org.apache.commons.logging.LogFactory;
35 import org.apache.hadoop.conf.Configuration;
36 import org.apache.hadoop.fs.FSDataInputStream;
37 import org.apache.hadoop.fs.FSDataOutputStream;
38 import org.apache.hadoop.fs.FileStatus;
39 import org.apache.hadoop.fs.FileSystem;
40 import org.apache.hadoop.fs.Path;
41 import org.apache.hadoop.fs.permission.FsPermission;
42 import org.apache.hadoop.hbase.HBaseTestingUtility;
43 import org.apache.hadoop.hbase.HConstants;
44 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
45 import org.apache.hadoop.hbase.exceptions.DeserializationException;
46 import org.apache.hadoop.hbase.fs.HFileSystem;
47 import org.apache.hadoop.hbase.testclassification.MediumTests;
48 import org.apache.hadoop.hdfs.DFSConfigKeys;
49 import org.apache.hadoop.hdfs.DFSHedgedReadMetrics;
50 import org.apache.hadoop.hdfs.DFSTestUtil;
51 import org.apache.hadoop.hdfs.MiniDFSCluster;
52 import org.junit.Assert;
53 import org.junit.Before;
54 import org.junit.Test;
55 import org.junit.experimental.categories.Category;
56
57
58
59
60 @Category(MediumTests.class)
61 public class TestFSUtils {
  // Commons-logging logger shared by all tests in this class.
  private static final Log LOG = LogFactory.getLog(TestFSUtils.class);

  // Test utility and its configuration; re-created for each test in setUp().
  private HBaseTestingUtility htu;
  private Configuration conf;
66
67 @Before
68 public void setUp() throws IOException {
69 htu = new HBaseTestingUtility();
70 conf = htu.getConfiguration();
71 }
72
73 @Test
74 public void testIsHDFS() throws Exception {
75 HBaseTestingUtility htu = new HBaseTestingUtility();
76 htu.getConfiguration().setBoolean("dfs.support.append", false);
77 assertFalse(FSUtils.isHDFS(htu.getConfiguration()));
78 htu.getConfiguration().setBoolean("dfs.support.append", true);
79 MiniDFSCluster cluster = null;
80 try {
81 cluster = htu.startMiniDFSCluster(1);
82 assertTrue(FSUtils.isHDFS(htu.getConfiguration()));
83 assertTrue(FSUtils.isAppendSupported(htu.getConfiguration()));
84 } finally {
85 if (cluster != null) cluster.shutdown();
86 }
87 }
88
89 private void WriteDataToHDFS(FileSystem fs, Path file, int dataSize)
90 throws Exception {
91 FSDataOutputStream out = fs.create(file);
92 byte [] data = new byte[dataSize];
93 out.write(data, 0, dataSize);
94 out.close();
95 }
96
  /**
   * Verifies FSUtils.computeHDFSBlocksDistribution against a live mini DFS
   * cluster in three layouts: 3 hosts / 2-block file, 4 hosts / 3-block file,
   * and 4 hosts / 1-block file. Each phase polls for up to ~2 seconds because
   * datanode block reports arrive asynchronously after a write.
   */
  @Test public void testcomputeHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final int DEFAULT_BLOCK_SIZE = 1024;
    htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
    MiniDFSCluster cluster = null;
    Path testFile = null;

    try {
      // Phase 1: three datanodes and a two-block file. With replication
      // covering all hosts, every host should eventually carry every block,
      // i.e. each host's weight equals the unique-blocks total weight.
      String hosts[] = new String[] { "host1", "host2", "host3" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      testFile = new Path("/test1.txt");
      WriteDataToHDFS(fs, testFile, 2*DEFAULT_BLOCK_SIZE);

      // Poll until the distribution stabilizes or the 2s deadline passes.
      final long maxTime = System.currentTimeMillis() + 2000;
      boolean ok;
      do {
        ok = true;
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        long uniqueBlocksTotalWeight =
          blocksDistribution.getUniqueBlocksTotalWeight();
        for (String host : hosts) {
          long weight = blocksDistribution.getWeight(host);
          ok = (ok && uniqueBlocksTotalWeight == weight);
        }
      } while (!ok && System.currentTimeMillis() < maxTime);
      assertTrue(ok);
    } finally {
      htu.shutdownMiniDFSCluster();
    }

    try {
      // Phase 2: four datanodes, three-block file. Not every host holds all
      // blocks, but the top host should: its weight must reach the
      // unique-blocks total weight.
      String hosts[] = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      testFile = new Path("/test2.txt");
      WriteDataToHDFS(fs, testFile, 3*DEFAULT_BLOCK_SIZE);

      // Poll until the top host reports full weight or the deadline passes.
      final long maxTime = System.currentTimeMillis() + 2000;
      long weight;
      long uniqueBlocksTotalWeight;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        HDFSBlocksDistribution blocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
        uniqueBlocksTotalWeight = blocksDistribution.getUniqueBlocksTotalWeight();

        String tophost = blocksDistribution.getTopHosts().get(0);
        weight = blocksDistribution.getWeight(tophost);

      } while (uniqueBlocksTotalWeight != weight && System.currentTimeMillis() < maxTime);
      assertTrue(uniqueBlocksTotalWeight == weight);

    } finally {
      htu.shutdownMiniDFSCluster();
    }

    try {
      // Phase 3: four datanodes, single-block file. The assertion expects the
      // block on exactly three hosts (presumably the default replication
      // factor of 3 — the cluster config here does not override it).
      String hosts[] = new String[] { "host1", "host2", "host3", "host4" };
      cluster = htu.startMiniDFSCluster(hosts);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      testFile = new Path("/test3.txt");
      WriteDataToHDFS(fs, testFile, DEFAULT_BLOCK_SIZE);

      // Poll until three hosts report the block or the deadline passes.
      final long maxTime = System.currentTimeMillis() + 2000;
      HDFSBlocksDistribution blocksDistribution;
      do {
        FileStatus status = fs.getFileStatus(testFile);
        blocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
      }
      while (blocksDistribution.getTopHosts().size() != 3 && System.currentTimeMillis() < maxTime);
      assertEquals("Wrong number of hosts distributing blocks.", 3,
        blocksDistribution.getTopHosts().size());
    } finally {
      htu.shutdownMiniDFSCluster();
    }
  }
200
201 @Test
202 public void testVersion() throws DeserializationException, IOException {
203 final Path rootdir = htu.getDataTestDir();
204 final FileSystem fs = rootdir.getFileSystem(conf);
205 assertNull(FSUtils.getVersion(fs, rootdir));
206
207 Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
208 FSDataOutputStream s = fs.create(versionFile);
209 final String version = HConstants.FILE_SYSTEM_VERSION;
210 s.writeUTF(version);
211 s.close();
212 assertTrue(fs.exists(versionFile));
213 FileStatus [] status = fs.listStatus(versionFile);
214 assertNotNull(status);
215 assertTrue(status.length > 0);
216 String newVersion = FSUtils.getVersion(fs, rootdir);
217 assertEquals(version.length(), newVersion.length());
218 assertEquals(version, newVersion);
219
220 assertEquals(version, FSUtils.getVersion(fs, rootdir));
221 FSUtils.checkVersion(fs, rootdir, true);
222 }
223
224 @Test
225 public void testPermMask() throws Exception {
226 final Path rootdir = htu.getDataTestDir();
227 final FileSystem fs = rootdir.getFileSystem(conf);
228
229
230 FsPermission defaultFsPerm = FSUtils.getFilePermissions(fs, conf,
231 HConstants.DATA_FILE_UMASK_KEY);
232
233 assertEquals(FsPermission.getFileDefault(), defaultFsPerm);
234
235 conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
236
237 FsPermission defaultStartPerm = FSUtils.getFilePermissions(fs, conf,
238 HConstants.DATA_FILE_UMASK_KEY);
239
240
241
242
243 assertEquals(new FsPermission(FSUtils.FULL_RWX_PERMISSIONS), defaultStartPerm);
244
245 conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077");
246
247 FsPermission filePerm = FSUtils.getFilePermissions(fs, conf,
248 HConstants.DATA_FILE_UMASK_KEY);
249 assertEquals(new FsPermission("700"), filePerm);
250
251
252 Path p = new Path("target" + File.separator + UUID.randomUUID().toString());
253 try {
254 FSDataOutputStream out = FSUtils.create(conf, fs, p, filePerm, null);
255 out.close();
256 FileStatus stat = fs.getFileStatus(p);
257 assertEquals(new FsPermission("700"), stat.getPermission());
258
259 } finally {
260 fs.delete(p, true);
261 }
262 }
263
264 @Test
265 public void testDeleteAndExists() throws Exception {
266 final Path rootdir = htu.getDataTestDir();
267 final FileSystem fs = rootdir.getFileSystem(conf);
268 conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
269 FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
270
271 String file = UUID.randomUUID().toString();
272 Path p = new Path(htu.getDataTestDir(), "temptarget" + File.separator + file);
273 Path p1 = new Path(htu.getDataTestDir(), "temppath" + File.separator + file);
274 try {
275 FSDataOutputStream out = FSUtils.create(conf, fs, p, perms, null);
276 out.close();
277 assertTrue("The created file should be present", FSUtils.isExists(fs, p));
278
279 FSUtils.delete(fs, p, false);
280
281 FSDataOutputStream out1 = FSUtils.create(conf, fs, p1, perms, null);
282 out1.close();
283
284 FSUtils.delete(fs, p1, true);
285 assertFalse("The created file should be present", FSUtils.isExists(fs, p1));
286
287 } finally {
288 FSUtils.delete(fs, p, true);
289 FSUtils.delete(fs, p1, true);
290 }
291 }
292
293
294 @Test
295 public void testFilteredStatusDoesNotThrowOnNotFound() throws Exception {
296 HBaseTestingUtility htu = new HBaseTestingUtility();
297 MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
298 try {
299 assertNull(FSUtils.listStatusWithStatusFilter(cluster.getFileSystem(), new Path("definitely/doesn't/exist"), null));
300 } finally {
301 cluster.shutdown();
302 }
303
304 }
305
306 @Test
307 public void testRenameAndSetModifyTime() throws Exception {
308 HBaseTestingUtility htu = new HBaseTestingUtility();
309 Configuration conf = htu.getConfiguration();
310
311 MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
312 assertTrue(FSUtils.isHDFS(conf));
313
314 FileSystem fs = FileSystem.get(conf);
315 Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
316
317 String file = UUID.randomUUID().toString();
318 Path p = new Path(testDir, file);
319
320 FSDataOutputStream out = fs.create(p);
321 out.close();
322 assertTrue("The created file should be present", FSUtils.isExists(fs, p));
323
324 long expect = System.currentTimeMillis() + 1000;
325 assertNotEquals(expect, fs.getFileStatus(p).getModificationTime());
326
327 ManualEnvironmentEdge mockEnv = new ManualEnvironmentEdge();
328 mockEnv.setValue(expect);
329 EnvironmentEdgeManager.injectEdge(mockEnv);
330 try {
331 String dstFile = UUID.randomUUID().toString();
332 Path dst = new Path(testDir , dstFile);
333
334 assertTrue(FSUtils.renameAndSetModifyTime(fs, p, dst));
335 assertFalse("The moved file should not be present", FSUtils.isExists(fs, p));
336 assertTrue("The dst file should be present", FSUtils.isExists(fs, dst));
337
338 assertEquals(expect, fs.getFileStatus(dst).getModificationTime());
339 cluster.shutdown();
340 } finally {
341 EnvironmentEdgeManager.reset();
342 }
343 }
344
  /** Exercises the storage-policy flow with the default WAL storage policy. */
  @Test
  public void testSetStoragePolicyDefault() throws Exception {
    verifyFileInDirWithStoragePolicy(HConstants.DEFAULT_WAL_STORAGE_POLICY);
  }
349
350
  /**
   * Exercises the storage-policy flow with a syntactically valid policy name
   * (ALL_SSD) that the test filesystem may or may not actually support.
   */
  @Test
  public void testSetStoragePolicyValidButMaybeNotPresent() throws Exception {
    verifyFileInDirWithStoragePolicy("ALL_SSD");
  }
355
356
  /**
   * Exercises the storage-policy flow with an invalid (numeric) policy name;
   * verifyFileInDirWithStoragePolicy expects the HDFS default to apply then.
   */
  @Test
  public void testSetStoragePolicyInvalid() throws Exception {
    verifyFileInDirWithStoragePolicy(INVALID_STORAGE_POLICY);
  }
361
362 final String INVALID_STORAGE_POLICY = "1772";
363
364
  /**
   * Starts a mini DFS cluster, applies {@code policy} as the WAL storage
   * policy to a test directory, writes a file beneath it, and checks the
   * policy the filesystem reports for that file. For the "defer to HDFS"
   * marker policy or the invalid numeric policy, the file is expected to
   * carry the HDFS default policy instead of {@code policy}.
   */
  private void verifyFileInDirWithStoragePolicy(final String policy) throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();
    conf.set(HConstants.WAL_STORAGE_POLICY, policy);

    MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
    try {
      assertTrue(FSUtils.isHDFS(conf));

      FileSystem fs = FileSystem.get(conf);
      Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
      fs.mkdirs(testDir);

      // Set the policy on the directory before creating the file under it.
      String storagePolicy =
          conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
      FSUtils.setStoragePolicy(fs, testDir, storagePolicy);

      String file = UUID.randomUUID().toString();
      Path p = new Path(testDir, file);
      WriteDataToHDFS(fs, p, 4096);
      try (HFileSystem hfs = new HFileSystem(fs)) {
        String policySet = hfs.getStoragePolicyName(p);
        LOG.debug("The storage policy of path " + p + " is " + policySet);
        if (policy.equals(HConstants.DEFER_TO_HDFS_STORAGE_POLICY)
            || policy.equals(INVALID_STORAGE_POLICY)) {
          // "Defer" and invalid policies should leave the HDFS default in
          // place; the home directory is used as a stand-in for an unset path.
          String hdfsDefaultPolicy = hfs.getStoragePolicyName(hfs.getHomeDirectory());
          LOG.debug("The default hdfs storage policy (indicated by home path: "
              + hfs.getHomeDirectory() + ") is " + hdfsDefaultPolicy);
          Assert.assertEquals(hdfsDefaultPolicy, policySet);
        } else {
          Assert.assertEquals(policy, policySet);
        }

        cleanupFile(fs, testDir);
      }
    } finally {
      cluster.shutdown();
    }
  }
404
405 private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
406 assertTrue(fileSys.exists(name));
407 assertTrue(fileSys.delete(name, true));
408 assertTrue(!fileSys.exists(name));
409 }
410
  // True iff org.apache.hadoop.fs.StreamCapabilities is on the classpath;
  // probed once at class-load time via reflection so the test can assert
  // different expectations depending on the Hadoop version in use.
  private static final boolean STREAM_CAPABILITIES_IS_PRESENT;

  static {
    boolean tmp = false;
    try {
      Class.forName("org.apache.hadoop.fs.StreamCapabilities");
      tmp = true;
      LOG.debug("Test thought StreamCapabilities class was present.");
    } catch (ClassNotFoundException exception) {
      // Expected on older Hadoop classpaths; the flag simply stays false.
      LOG.debug("Test didn't think StreamCapabilities class was present.");
    } finally {
      STREAM_CAPABILITIES_IS_PRESENT = tmp;
    }
  }
425
426
427 @Test
428 public void checkStreamCapabilitiesOnHdfsDataOutputStream() throws Exception {
429 MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
430 try (FileSystem filesystem = cluster.getFileSystem()) {
431 FSDataOutputStream stream = filesystem.create(new Path("/tmp/foobar"));
432 assertTrue(FSUtils.hasCapability(stream, "hsync"));
433 assertTrue(FSUtils.hasCapability(stream, "hflush"));
434 assertNotEquals("We expect HdfsDataOutputStream to say it has a dummy capability iff the " +
435 "StreamCapabilities class is not defined.",
436 STREAM_CAPABILITIES_IS_PRESENT,
437 FSUtils.hasCapability(stream, "a capability that hopefully HDFS doesn't add."));
438 } finally {
439 cluster.shutdown();
440 }
441 }
442
443
444
445
446
  /**
   * Exercises FSUtils.getDFSHedgedReadMetrics: with a hedged-read thread pool
   * enabled and the hedge threshold at 0ms, positional reads of a 12-block
   * file should trigger hedged reads and increment the metrics counter.
   */
  @Test public void testDFSHedgedReadMetrics() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();

    // Enable hedged reads and make them fire immediately (threshold 0ms);
    // small block/prefetch sizes keep the test file cheap to create.
    Configuration conf = htu.getConfiguration();
    conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE, 5);
    conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS, 0);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
    conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
    // NOTE(review): retry-window and transferTo tuning presumably needed for
    // the hedged read path to engage — confirm before changing.
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
    conf.setBoolean("dfs.datanode.transferTo.allowed", false);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

    // Counter must start at zero before any reads are issued.
    DFSHedgedReadMetrics metrics = FSUtils.getDFSHedgedReadMetrics(conf);
    assertEquals(0, metrics.getHedgedReadOps());
    FileSystem fileSys = cluster.getFileSystem();
    try {
      Path p = new Path("preadtest.dat");
      // 12 blocks of deterministic pseudo-random data (see blockSize/seed).
      DFSTestUtil.createFile(fileSys, p, 12 * blockSize, 12 * blockSize,
        blockSize, (short) 3, seed);
      pReadFile(fileSys, p);
      cleanupFile(fileSys, p);
      assertTrue(metrics.getHedgedReadOps() > 0);
    } finally {
      fileSys.close();
      cluster.shutdown();
    }
  }
477
478
  // Block size for the hedged-read test file; matches DFS_BLOCK_SIZE_KEY set
  // in testDFSHedgedReadMetrics.
  static final int blockSize = 4096;
  // Fixed seed so the file contents are reproducible and verifiable in pReadFile.
  static final long seed = 0xDEADBEEFL;
481
  /**
   * Reads the 12-block file created by testDFSHedgedReadMetrics both
   * sequentially and positionally, verifying contents against the same
   * seeded pseudo-random data, and finally checks that reading past EOF throws.
   */
  private void pReadFile(FileSystem fileSys, Path name) throws IOException {
    FSDataInputStream stm = fileSys.open(name);
    // Regenerate the expected file contents from the shared seed.
    byte[] expected = new byte[12 * blockSize];
    Random rand = new Random(seed);
    rand.nextBytes(expected);

    // Sequential read of the first block.
    byte[] actual = new byte[4096];
    stm.readFully(actual);
    checkAndEraseData(actual, 0, expected, "Read Sanity Test");

    // A positional read must not move the stream position...
    actual = new byte[8192];
    doPread(stm, 0L, actual, 0, 8192);
    checkAndEraseData(actual, 0, expected, "Pread Test 1");

    // ...so the next sequential read continues at offset 4096.
    actual = new byte[4096];
    stm.readFully(actual);
    checkAndEraseData(actual, 4096, expected, "Pread Test 2");

    // Positional read straddling the first block boundary.
    stm.readFully(blockSize - 2048, actual, 0, 4096);
    checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 3");

    // Larger-than-block positional reads across boundaries.
    actual = new byte[blockSize + 4096];
    stm.readFully(blockSize - 2048, actual);
    checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 4");

    actual = new byte[blockSize + 4096];
    stm.readFully(10 * blockSize - 2048, actual);
    checkAndEraseData(actual, (10 * blockSize - 2048), expected, "Pread Test 5");

    // Sequential position is still 8192 despite all the preads above.
    actual = new byte[4096];
    stm.readFully(actual);
    checkAndEraseData(actual, 8192, expected, "Pread Test 6");

    stm.close();

    // Reopen and issue scattered preads, then verify two larger ranges.
    stm = fileSys.open(name);
    stm.readFully(1, actual, 0, 4096);
    stm.readFully(4 * blockSize, actual, 0, 4096);
    stm.readFully(7 * blockSize, actual, 0, 4096);
    actual = new byte[3 * 4096];
    stm.readFully(0 * blockSize, actual, 0, 3 * 4096);
    checkAndEraseData(actual, 0, expected, "Pread Test 7");
    actual = new byte[8 * 4096];
    stm.readFully(3 * blockSize, actual, 0, 8 * 4096);
    checkAndEraseData(actual, 3 * blockSize, expected, "Pread Test 8");

    // Reading the last half-block succeeds; reading a full block from the
    // same offset runs past EOF and must throw IOException.
    stm.readFully(11 * blockSize + blockSize / 2, actual, 0, blockSize / 2);
    IOException res = null;
    try {
      stm.readFully(11 * blockSize + blockSize / 2, actual, 0, blockSize);
    } catch (IOException e) {
      // Expected: end of file reached before `blockSize` bytes were available.
      res = e;
    }
    assertTrue("Error reading beyond file boundary.", res != null);

    stm.close();
  }
544
545 private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
546 for (int idx = 0; idx < actual.length; idx++) {
547 assertEquals(message + " byte " + (from + idx) + " differs. expected " +
548 expected[from + idx] + " actual " + actual[idx],
549 actual[idx], expected[from + idx]);
550 actual[idx] = 0;
551 }
552 }
553
554 private void doPread(FSDataInputStream stm, long position, byte[] buffer,
555 int offset, int length) throws IOException {
556 int nread = 0;
557
558 while (nread < length) {
559 int nbytes =
560 stm.read(position + nread, buffer, offset + nread, length - nread);
561 assertTrue("Error in pread", nbytes > 0);
562 nread += nbytes;
563 }
564 }
565 }