View Javadoc

1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase.backup;
19  
20  import java.io.FileNotFoundException;
21  import java.io.IOException;
22  import java.util.ArrayList;
23  import java.util.Arrays;
24  import java.util.Collection;
25  import java.util.Collections;
26  import java.util.List;
27  
28  import org.apache.commons.logging.Log;
29  import org.apache.commons.logging.LogFactory;
30  import org.apache.hadoop.conf.Configuration;
31  import org.apache.hadoop.fs.FileStatus;
32  import org.apache.hadoop.fs.FileSystem;
33  import org.apache.hadoop.fs.Path;
34  import org.apache.hadoop.fs.PathFilter;
35  import org.apache.hadoop.hbase.HRegionInfo;
36  import org.apache.hadoop.hbase.classification.InterfaceAudience;
37  import org.apache.hadoop.hbase.regionserver.StoreFile;
38  import org.apache.hadoop.hbase.util.Bytes;
39  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
40  import org.apache.hadoop.hbase.util.FSUtils;
41  import org.apache.hadoop.hbase.util.HFileArchiveUtil;
42  import org.apache.hadoop.io.MultipleIOException;
43  
44  import com.google.common.base.Function;
45  import com.google.common.base.Preconditions;
46  import com.google.common.collect.Collections2;
47  import com.google.common.collect.Lists;
48  
49  /**
50   * Utility class to handle the removal of HFiles (or the respective {@link StoreFile StoreFiles})
51   * for a HRegion from the {@link FileSystem}. The hfiles will be archived or deleted, depending on
52   * the state of the system. 
53   */
54  @InterfaceAudience.Private
55  public class HFileArchiver {
  private static final Log LOG = LogFactory.getLog(HFileArchiver.class);
  /**
   * Separator between a filename and the timestamp suffix appended when an archive-name
   * conflict is backed up (e.g. {@code somefile.1234567890}); see resolveAndArchiveFile.
   */
  private static final String SEPARATOR = ".";

  /** Number of retries in case of fs operation failure */
  private static final int DEFAULT_RETRIES_NUMBER = 3;

  /**
   * Null-safe mapping from the internal {@link File} wrapper to its underlying {@link Path};
   * used to report the paths that failed to archive in a {@link FailedArchiveException}.
   */
  private static final Function<File, Path> FUNC_FILE_TO_PATH =
      new Function<File, Path>() {
        @Override
        public Path apply(File file) {
          return file == null ? null : file.getPath();
        }
      };

  private HFileArchiver() {
    // hidden ctor since this is just a util
  }
73  
74    /**
75     * Cleans up all the files for a HRegion by archiving the HFiles to the
76     * archive directory
77     * @param conf the configuration to use
78     * @param fs the file system object
79     * @param info HRegionInfo for region to be deleted
80     * @throws IOException
81     */
82    public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info)
83        throws IOException {
84      Path rootDir = FSUtils.getRootDir(conf);
85      archiveRegion(fs, rootDir, FSUtils.getTableDir(rootDir, info.getTable()),
86        FSUtils.getRegionDirFromRootDir(rootDir, info));
87    }
88  
89    /**
90     * Remove an entire region from the table directory via archiving the region's hfiles.
91     * @param fs {@link FileSystem} from which to remove the region
92     * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
93     *          the archive path)
94     * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
95     * @param regionDir {@link Path} to where a region is being stored (for building the archive path)
96     * @return <tt>true</tt> if the region was sucessfully deleted. <tt>false</tt> if the filesystem
97     *         operations could not complete.
98     * @throws IOException if the request cannot be completed
99     */
100   public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
101       throws IOException {
102     if (LOG.isDebugEnabled()) {
103       LOG.debug("ARCHIVING " + regionDir.toString());
104     }
105 
106     // otherwise, we archive the files
107     // make sure we can archive
108     if (tableDir == null || regionDir == null) {
109       LOG.error("No archive directory could be found because tabledir (" + tableDir
110           + ") or regiondir (" + regionDir + "was null. Deleting files instead.");
111       deleteRegionWithoutArchiving(fs, regionDir);
112       // we should have archived, but failed to. Doesn't matter if we deleted
113       // the archived files correctly or not.
114       return false;
115     }
116 
117     // make sure the regiondir lives under the tabledir
118     Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
119     Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir,
120         FSUtils.getTableName(tableDir),
121         regionDir.getName());
122 
123     FileStatusConverter getAsFile = new FileStatusConverter(fs);
124     // otherwise, we attempt to archive the store files
125 
126     // build collection of just the store directories to archive
127     Collection<File> toArchive = new ArrayList<File>();
128     final PathFilter dirFilter = new FSUtils.DirFilter(fs);
129     PathFilter nonHidden = new PathFilter() {
130       @Override
131       public boolean accept(Path file) {
132         return dirFilter.accept(file) && !file.getName().toString().startsWith(".");
133       }
134     };
135     FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden);
136     // if there no files, we can just delete the directory and return;
137     if (storeDirs == null) {
138       LOG.debug("Region directory (" + regionDir + ") was empty, just deleting and returning!");
139       return deleteRegionWithoutArchiving(fs, regionDir);
140     }
141 
142     // convert the files in the region to a File
143     toArchive.addAll(Lists.transform(Arrays.asList(storeDirs), getAsFile));
144     LOG.debug("Archiving " + toArchive);
145     List<File> failedArchive = resolveAndArchive(fs, regionArchiveDir, toArchive,
146         EnvironmentEdgeManager.currentTime());
147     if (!failedArchive.isEmpty()) {
148       throw new FailedArchiveException("Failed to archive/delete all the files for region:"
149           + regionDir.getName() + " into " + regionArchiveDir
150           + ". Something is probably awry on the filesystem.",
151           Collections2.transform(failedArchive, FUNC_FILE_TO_PATH));
152     }
153     // if that was successful, then we delete the region
154     return deleteRegionWithoutArchiving(fs, regionDir);
155   }
156 
157   /**
158    * Remove from the specified region the store files of the specified column family,
159    * either by archiving them or outright deletion
160    * @param fs the filesystem where the store files live
161    * @param conf {@link Configuration} to examine to determine the archive directory
162    * @param parent Parent region hosting the store files
163    * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
164    * @param family the family hosting the store files
165    * @throws IOException if the files could not be correctly disposed.
166    */
167   public static void archiveFamily(FileSystem fs, Configuration conf,
168       HRegionInfo parent, Path tableDir, byte[] family) throws IOException {
169     Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family)));
170     FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
171     if (storeFiles == null) {
172       LOG.debug("No store files to dispose for region=" + parent.getRegionNameAsString() +
173           ", family=" + Bytes.toString(family));
174       return;
175     }
176 
177     FileStatusConverter getAsFile = new FileStatusConverter(fs);
178     Collection<File> toArchive = Lists.transform(Arrays.asList(storeFiles), getAsFile);
179     Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, tableDir, family);
180 
181     // do the actual archive
182     List<File> failedArchive = resolveAndArchive(fs, storeArchiveDir, toArchive,
183         EnvironmentEdgeManager.currentTime());
184     if (!failedArchive.isEmpty()){
185       throw new FailedArchiveException("Failed to archive/delete all the files for region:"
186           + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family)
187           + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.",
188           Collections2.transform(failedArchive, FUNC_FILE_TO_PATH));
189     }
190   }
191 
192   /**
193    * Remove the store files, either by archiving them or outright deletion
194    * @param conf {@link Configuration} to examine to determine the archive directory
195    * @param fs the filesystem where the store files live
196    * @param regionInfo {@link HRegionInfo} of the region hosting the store files
197    * @param family the family hosting the store files
198    * @param compactedFiles files to be disposed of. No further reading of these files should be
199    *          attempted; otherwise likely to cause an {@link IOException}
200    * @throws IOException if the files could not be correctly disposed.
201    */
202   public static void archiveStoreFiles(Configuration conf, FileSystem fs, HRegionInfo regionInfo,
203       Path tableDir, byte[] family, Collection<StoreFile> compactedFiles)
204       throws IOException, FailedArchiveException {
205 
206     // sometimes in testing, we don't have rss, so we need to check for that
207     if (fs == null) {
208       LOG.warn("Passed filesystem is null, so just deleting the files without archiving for region:"
209           + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family));
210       deleteStoreFilesWithoutArchiving(compactedFiles);
211       return;
212     }
213 
214     // short circuit if we don't have any files to delete
215     if (compactedFiles.size() == 0) {
216       LOG.debug("No store files to dispose, done!");
217       return;
218     }
219 
220     // build the archive path
221     if (regionInfo == null || family == null) throw new IOException(
222         "Need to have a region and a family to archive from.");
223 
224     Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
225 
226     // make sure we don't archive if we can't and that the archive dir exists
227     if (!fs.mkdirs(storeArchiveDir)) {
228       throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
229           + Bytes.toString(family) + ", deleting compacted files instead.");
230     }
231 
232     // otherwise we attempt to archive the store files
233     if (LOG.isDebugEnabled()) LOG.debug("Archiving compacted store files.");
234 
235     // Wrap the storefile into a File
236     StoreToFile getStorePath = new StoreToFile(fs);
237     Collection<File> storeFiles = Collections2.transform(compactedFiles, getStorePath);
238 
239     // do the actual archive
240     List<File> failedArchive = resolveAndArchive(fs, storeArchiveDir, storeFiles,
241         EnvironmentEdgeManager.currentTime());
242 
243     if (!failedArchive.isEmpty()){
244       throw new FailedArchiveException("Failed to archive/delete all the files for region:"
245           + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family)
246           + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.",
247           Collections2.transform(failedArchive, FUNC_FILE_TO_PATH));
248     }
249   }
250 
251   /**
252    * Archive the store file
253    * @param fs the filesystem where the store files live
254    * @param regionInfo region hosting the store files
255    * @param conf {@link Configuration} to examine to determine the archive directory
256    * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
257    * @param family the family hosting the store files
258    * @param storeFile file to be archived
259    * @throws IOException if the files could not be correctly disposed.
260    */
261   public static void archiveStoreFile(Configuration conf, FileSystem fs, HRegionInfo regionInfo,
262       Path tableDir, byte[] family, Path storeFile) throws IOException {
263     Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
264     // make sure we don't archive if we can't and that the archive dir exists
265     if (!fs.mkdirs(storeArchiveDir)) {
266       throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
267           + Bytes.toString(family) + ", deleting compacted files instead.");
268     }
269 
270     // do the actual archive
271     long start = EnvironmentEdgeManager.currentTime();
272     File file = new FileablePath(fs, storeFile);
273     if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
274       throw new IOException("Failed to archive/delete the file for region:"
275           + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family)
276           + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
277     }
278   }
279 
  /**
   * Resolve any conflict with an existing archive file via timestamp-append
   * renaming of the existing file and then archive the passed in files.
   * <p>
   * Recurses into directories: each directory's name is appended to the base archive
   * directory, creating a parallel structure, and its children are archived under it.
   * Failures are collected rather than aborting, so a best effort is made for every file.
   * @param fs {@link FileSystem} on which to archive the files
   * @param baseArchiveDir base archive directory to store the files. If any of
   *          the files to archive are directories, will append the name of the
   *          directory to the base archive directory name, creating a parallel
   *          structure.
   * @param toArchive files/directories that need to be archived
   * @param start time the archiving started - used for resolving archive
   *          conflicts (the same stamp is reused for all levels of the recursion).
   * @return the list of files that could not be archived (empty on full success).
   * @throws IOException if an unexpected file operation exception occurred
   */
  private static List<File> resolveAndArchive(FileSystem fs, Path baseArchiveDir,
      Collection<File> toArchive, long start) throws IOException {
    // short circuit if no files to move
    if (toArchive.size() == 0) return Collections.emptyList();

    if (LOG.isTraceEnabled()) LOG.trace("moving files to the archive directory: " + baseArchiveDir);

    // make sure the archive directory exists
    if (!fs.exists(baseArchiveDir)) {
      if (!fs.mkdirs(baseArchiveDir)) {
        throw new IOException("Failed to create the archive directory:" + baseArchiveDir
            + ", quitting archive attempt.");
      }
      if (LOG.isTraceEnabled()) LOG.trace("Created archive directory:" + baseArchiveDir);
    }

    List<File> failures = new ArrayList<File>();
    String startTime = Long.toString(start);
    for (File file : toArchive) {
      // if its a file archive it
      try {
        if (LOG.isTraceEnabled()) LOG.trace("Archiving: " + file);
        if (file.isFile()) {
          // attempt to archive the file
          if (!resolveAndArchiveFile(baseArchiveDir, file, startTime)) {
            LOG.warn("Couldn't archive " + file + " into backup directory: " + baseArchiveDir);
            failures.add(file);
          }
        } else {
          // otherwise its a directory and we need to archive all files
          if (LOG.isTraceEnabled()) LOG.trace(file + " is a directory, archiving children files");
          // so we add the directory name to the one base archive
          Path parentArchiveDir = new Path(baseArchiveDir, file.getName());
          // and then get all the files from that directory and attempt to
          // archive those too
          Collection<File> children = file.getChildren();
          failures.addAll(resolveAndArchive(fs, parentArchiveDir, children, start));
        }
      } catch (IOException e) {
        // a per-file failure is recorded, not rethrown, so the rest can still be tried
        LOG.warn("Failed to archive " + file, e);
        failures.add(file);
      }
    }
    return failures;
  }
339 
340   /**
341    * Attempt to archive the passed in file to the archive directory.
342    * <p>
343    * If the same file already exists in the archive, it is moved to a timestamped directory under
344    * the archive directory and the new file is put in its place.
345    * @param archiveDir {@link Path} to the directory that stores the archives of the hfiles
346    * @param currentFile {@link Path} to the original HFile that will be archived
347    * @param archiveStartTime time the archiving started, to resolve naming conflicts
348    * @return <tt>true</tt> if the file is successfully archived. <tt>false</tt> if there was a
349    *         problem, but the operation still completed.
350    * @throws IOException on failure to complete {@link FileSystem} operations.
351    */
352   private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile,
353       String archiveStartTime) throws IOException {
354     // build path as it should be in the archive
355     String filename = currentFile.getName();
356     Path archiveFile = new Path(archiveDir, filename);
357     FileSystem fs = currentFile.getFileSystem();
358 
359     // if the file already exists in the archive, move that one to a timestamped backup. This is a
360     // really, really unlikely situtation, where we get the same name for the existing file, but
361     // is included just for that 1 in trillion chance.
362     if (fs.exists(archiveFile)) {
363       if (LOG.isDebugEnabled()) {
364         LOG.debug("File:" + archiveFile + " already exists in archive, moving to "
365             + "timestamped backup and overwriting current.");
366       }
367 
368       // move the archive file to the stamped backup
369       Path backedupArchiveFile = new Path(archiveDir, filename + SEPARATOR + archiveStartTime);
370       if (!fs.rename(archiveFile, backedupArchiveFile)) {
371         LOG.error("Could not rename archive file to backup: " + backedupArchiveFile
372             + ", deleting existing file in favor of newer.");
373         // try to delete the exisiting file, if we can't rename it
374         if (!fs.delete(archiveFile, false)) {
375           throw new IOException("Couldn't delete existing archive file (" + archiveFile
376               + ") or rename it to the backup file (" + backedupArchiveFile
377               + ") to make room for similarly named file.");
378         }
379       }
380       LOG.debug("Backed up archive file from " + archiveFile);
381     }
382 
383     if (LOG.isTraceEnabled()) {
384       LOG.trace("No existing file in archive for: " + archiveFile +
385         ", free to archive original file.");
386     }
387 
388     // at this point, we should have a free spot for the archive file
389     boolean success = false;
390     for (int i = 0; !success && i < DEFAULT_RETRIES_NUMBER; ++i) {
391       if (i > 0) {
392         // Ensure that the archive directory exists.
393         // The previous "move to archive" operation has failed probably because
394         // the cleaner has removed our archive directory (HBASE-7643).
395         // (we're in a retry loop, so don't worry too much about the exception)
396         try {
397           if (!fs.exists(archiveDir)) {
398             if (fs.mkdirs(archiveDir)) {
399               LOG.debug("Created archive directory:" + archiveDir);
400             }
401           }
402         } catch (IOException e) {
403           LOG.warn("Failed to create directory: " + archiveDir, e);
404         }
405       }
406 
407       try {
408         success = currentFile.moveAndClose(archiveFile);
409       } catch (FileNotFoundException fnfe) {
410         LOG.warn("Failed to archive " + currentFile +
411             " because it does not exist! Skipping and continuing on.", fnfe);
412         success = true;
413       } catch (IOException e) {
414         LOG.warn("Failed to archive " + currentFile + " on try #" + i, e);
415         success = false;
416       }
417     }
418 
419     if (!success) {
420       LOG.error("Failed to archive " + currentFile);
421       return false;
422     }
423 
424     if (LOG.isDebugEnabled()) {
425       LOG.debug("Finished archiving from " + currentFile + ", to " + archiveFile);
426     }
427     return true;
428   }
429 
430   /**
431    * Without regard for backup, delete a region. Should be used with caution.
432    * @param regionDir {@link Path} to the region to be deleted.
433    * @param fs FileSystem from which to delete the region
434    * @return <tt>true</tt> on successful deletion, <tt>false</tt> otherwise
435    * @throws IOException on filesystem operation failure
436    */
437   private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path regionDir)
438       throws IOException {
439     if (fs.delete(regionDir, true)) {
440       LOG.debug("Deleted all region files in: " + regionDir);
441       return true;
442     }
443     LOG.debug("Failed to delete region directory:" + regionDir);
444     return false;
445   }
446 
447   /**
448    * Just do a simple delete of the given store files
449    * <p>
450    * A best effort is made to delete each of the files, rather than bailing on the first failure.
451    * <p>
452    * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it consumes
453    * less resources, but is limited in terms of usefulness
454    * @param compactedFiles store files to delete from the file system.
455    * @throws IOException if a file cannot be deleted. All files will be attempted to deleted before
456    *           throwing the exception, rather than failing at the first file.
457    */
458   private static void deleteStoreFilesWithoutArchiving(Collection<StoreFile> compactedFiles)
459       throws IOException {
460     LOG.debug("Deleting store files without archiving.");
461     List<IOException> errors = new ArrayList<IOException>(0);
462     for (StoreFile hsf : compactedFiles) {
463       try {
464         hsf.deleteReader();
465       } catch (IOException e) {
466         LOG.error("Failed to delete store file:" + hsf.getPath());
467         errors.add(e);
468       }
469     }
470     if (errors.size() > 0) {
471       throw MultipleIOException.createIOException(errors);
472     }
473   }
474 
  /**
   * Adapt a type to match the {@link File} interface, which is used internally for handling
   * archival/removal of files
   * @param <T> type to adapt to the {@link File} interface
   */
  private static abstract class FileConverter<T> implements Function<T, File> {
    // filesystem on which the converted files live; shared with the produced wrappers
    protected final FileSystem fs;

    public FileConverter(FileSystem fs) {
      this.fs = fs;
    }
  }
487 
488   /**
489    * Convert a FileStatus to something we can manage in the archiving
490    */
491   private static class FileStatusConverter extends FileConverter<FileStatus> {
492     public FileStatusConverter(FileSystem fs) {
493       super(fs);
494     }
495 
496     @Override
497     public File apply(FileStatus input) {
498       return new FileablePath(fs, input.getPath());
499     }
500   }
501 
502   /**
503    * Convert the {@link StoreFile} into something we can manage in the archive
504    * methods
505    */
506   private static class StoreToFile extends FileConverter<StoreFile> {
507     public StoreToFile(FileSystem fs) {
508       super(fs);
509     }
510 
511     @Override
512     public File apply(StoreFile input) {
513       return new FileableStoreFile(fs, input);
514     }
515   }
516 
  /**
   * Wrapper to handle file operations uniformly, whether the underlying thing being
   * archived is a raw {@link Path} or a {@link StoreFile}.
   */
  private static abstract class File {
    // filesystem on which this file lives
    protected final FileSystem fs;

    public File(FileSystem fs) {
      this.fs = fs;
    }

    /**
     * Delete the file
     * @throws IOException on failure
     */
    abstract void delete() throws IOException;

    /**
     * Check to see if this is a file or a directory
     * @return <tt>true</tt> if it is a file, <tt>false</tt> otherwise
     * @throws IOException on {@link FileSystem} connection error
     */
    abstract boolean isFile() throws IOException;

    /**
     * @return if this is a directory, returns all the children in the
     *         directory, otherwise returns an empty list
     * @throws IOException on failure to list the directory contents
     */
    abstract Collection<File> getChildren() throws IOException;

    /**
     * close any outside readers of the file
     * @throws IOException on failure to close the readers
     */
    abstract void close() throws IOException;

    /**
     * @return the name of the file (not the full fs path, just the individual
     *         file name)
     */
    abstract String getName();

    /**
     * @return the path to this file
     */
    abstract Path getPath();

    /**
     * Move the file to the given destination
     * @param dest destination {@link Path} to move this file to
     * @return <tt>true</tt> on success
     * @throws IOException on failure to close the file or to rename it
     */
    public boolean moveAndClose(Path dest) throws IOException {
      // close readers first so the rename is not performed on an open file
      this.close();
      Path p = this.getPath();
      return FSUtils.renameAndSetModifyTime(fs, p, dest);
    }

    /**
     * @return the {@link FileSystem} on which this file resides
     */
    public FileSystem getFileSystem() {
      return this.fs;
    }

    @Override
    public String toString() {
      return this.getClass() + ", file:" + getPath().toString();
    }
  }
588 
589   /**
590    * A {@link File} that wraps a simple {@link Path} on a {@link FileSystem}.
591    */
592   private static class FileablePath extends File {
593     private final Path file;
594     private final FileStatusConverter getAsFile;
595 
596     public FileablePath(FileSystem fs, Path file) {
597       super(fs);
598       this.file = file;
599       this.getAsFile = new FileStatusConverter(fs);
600     }
601 
602     @Override
603     public void delete() throws IOException {
604       if (!fs.delete(file, true)) throw new IOException("Failed to delete:" + this.file);
605     }
606 
607     @Override
608     public String getName() {
609       return file.getName();
610     }
611 
612     @Override
613     public Collection<File> getChildren() throws IOException {
614       if (fs.isFile(file)) return Collections.emptyList();
615       return Collections2.transform(Arrays.asList(fs.listStatus(file)), getAsFile);
616     }
617 
618     @Override
619     public boolean isFile() throws IOException {
620       return fs.isFile(file);
621     }
622 
623     @Override
624     public void close() throws IOException {
625       // NOOP - files are implicitly closed on removal
626     }
627 
628     @Override
629     Path getPath() {
630       return file;
631     }
632   }
633 
634   /**
635    * {@link File} adapter for a {@link StoreFile} living on a {@link FileSystem}
636    * .
637    */
638   private static class FileableStoreFile extends File {
639     StoreFile file;
640 
641     public FileableStoreFile(FileSystem fs, StoreFile store) {
642       super(fs);
643       this.file = store;
644     }
645 
646     @Override
647     public void delete() throws IOException {
648       file.deleteReader();
649     }
650 
651     @Override
652     public String getName() {
653       return file.getPath().getName();
654     }
655 
656     @Override
657     public boolean isFile() {
658       return true;
659     }
660 
661     @Override
662     public Collection<File> getChildren() throws IOException {
663       // storefiles don't have children
664       return Collections.emptyList();
665     }
666 
667     @Override
668     public void close() throws IOException {
669       file.closeReader(true);
670     }
671 
672     @Override
673     Path getPath() {
674       return file.getPath();
675     }
676   }
677 }