/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import com.google.protobuf.Message;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.conf.ConfigurationObserver;
import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
53  /**
54   * Regions store data for a certain region of a table.  It stores all columns
55   * for each row. A given table consists of one or more Regions.
56   *
57   * <p>An Region is defined by its table and its key extent.
58   *
59   * <p>Locking at the Region level serves only one purpose: preventing the
60   * region from being closed (and consequently split) while other operations
61   * are ongoing. Each row level operation obtains both a row lock and a region
62   * read lock for the duration of the operation. While a scanner is being
63   * constructed, getScanner holds a read lock. If the scanner is successfully
64   * constructed, it holds a read lock until it is closed. A close takes out a
65   * write lock and consequently will block for ongoing operations and will block
66   * new operations from starting while the close is in progress.
67   */
68  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
69  @InterfaceStability.Evolving
70  public interface Region extends ConfigurationObserver {
71  
72    ///////////////////////////////////////////////////////////////////////////
73    // Region state
74  
75    /** @return region information for this region */
76    HRegionInfo getRegionInfo();
77  
78    /** @return table descriptor for this region */
79    HTableDescriptor getTableDesc();
80  
81    /** @return true if region is available (not closed and not closing) */
82    boolean isAvailable();
83  
84    /** @return true if region is closed */
85    boolean isClosed();
86  
87    /** @return True if closing process has started */
88    boolean isClosing();
89  
90    /** @return True if region is in recovering state */
91    boolean isRecovering();
92  
93    /** @return True if region is read only */
94    boolean isReadOnly();
95  
96    /**
97     * Return the list of Stores managed by this region
98     * <p>Use with caution.  Exposed for use of fixup utilities.
99     * @return a list of the Stores managed by this region
100    */
101   List<Store> getStores();
102 
103   /**
104    * Return the Store for the given family
105    * <p>Use with caution.  Exposed for use of fixup utilities.
106    * @return the Store for the given family
107    */
108   Store getStore(byte[] family);
109 
110   /** @return list of store file names for the given families */
111   List<String> getStoreFileList(byte [][] columns);
112 
113   /**
114    * Check the region's underlying store files, open the files that have not
115    * been opened yet, and remove the store file readers for store files no
116    * longer available.
117    * @throws IOException
118    */
119   boolean refreshStoreFiles() throws IOException;
120 
121   /** @return the latest sequence number that was read from storage when this region was opened */
122   long getOpenSeqNum();
123 
124   /** @return the max sequence id of flushed data on this region; no edit in memory will have
125    * a sequence id that is less that what is returned here.
126    */
127   long getMaxFlushedSeqId();
128 
129   /** @return the oldest flushed sequence id for the given family; can be beyond
130    * {@link #getMaxFlushedSeqId()} in case where we've flushed a subset of a regions column
131    * families
132    * @deprecated Since version 1.2.0. Exposes too much about our internals; shutting it down.
133    * Do not use.
134    */
135   @InterfaceAudience.Private
136   @Deprecated
137   public long getOldestSeqIdOfStore(byte[] familyName);
138 
139   /**
140    * This can be used to determine the last time all files of this region were major compacted.
141    * @param majorCompactionOnly Only consider HFile that are the result of major compaction
142    * @return the timestamp of the oldest HFile for all stores of this region
143    */
144   long getOldestHfileTs(boolean majorCompactionOnly) throws IOException;
145 
146   /**
147    * @return map of column family names to max sequence id that was read from storage when this
148    * region was opened
149    */
150   public Map<byte[], Long> getMaxStoreSeqId();
151 
152   /** @return true if loading column families on demand by default */
153   boolean isLoadingCfsOnDemandDefault();
154 
155   /** @return readpoint considering given IsolationLevel */
156   long getReadpoint(IsolationLevel isolationLevel);
157 
158   /**
159    * @return The earliest time a store in the region was flushed. All
160    *         other stores in the region would have been flushed either at, or
161    *         after this time.
162    */
163   long getEarliestFlushTimeForAllStores();
164 
165   ///////////////////////////////////////////////////////////////////////////
166   // Metrics
167 
168   /** @return read requests count for this region */
169   long getReadRequestsCount();
170 
171   /**
172    * Update the read request count for this region
173    * @param i increment
174    */
175   void updateReadRequestsCount(long i);
176 
177   /** @return write request count for this region */
178   long getWriteRequestsCount();
179 
180   /**
181    * Update the write request count for this region
182    * @param i increment
183    */
184   void updateWriteRequestsCount(long i);
185 
186   /** @return memstore size for this region, in bytes */
187   long getMemstoreSize();
188 
189   /** @return the number of mutations processed bypassing the WAL */
190   long getNumMutationsWithoutWAL();
191 
192   /** @return the size of data processed bypassing the WAL, in bytes */
193   long getDataInMemoryWithoutWAL();
194 
195   /** @return the number of blocked requests */
196   long getBlockedRequestsCount();
197 
198   /** @return the number of checkAndMutate guards that passed */
199   long getCheckAndMutateChecksPassed();
200 
201   /** @return the number of failed checkAndMutate guards */
202   long getCheckAndMutateChecksFailed();
203 
204   /** @return the MetricsRegion for this region */
205   MetricsRegion getMetrics();
206 
207   /** @return the block distribution for all Stores managed by this region */
208   HDFSBlocksDistribution getHDFSBlocksDistribution();
209 
210   ///////////////////////////////////////////////////////////////////////////
211   // Locking
212 
213   // Region read locks
214 
215   /**
216    * Operation enum is used in {@link Region#startRegionOperation} to provide context for
217    * various checks before any region operation begins.
218    */
219   enum Operation {
220     ANY, GET, PUT, DELETE, SCAN, APPEND, INCREMENT, SPLIT_REGION, MERGE_REGION, BATCH_MUTATE,
221     REPLAY_BATCH_MUTATE, COMPACT_REGION, REPLAY_EVENT, SNAPSHOT, COMPACT_SWITCH,
222     CHECK_AND_MUTATE
223   }
224 
225   /**
226    * This method needs to be called before any public call that reads or
227    * modifies data.
228    * Acquires a read lock and checks if the region is closing or closed.
229    * <p>{@link #closeRegionOperation} MUST then always be called after
230    * the operation has completed, whether it succeeded or failed.
231    * @throws IOException
232    */
233   void startRegionOperation() throws IOException;
234 
235   /**
236    * This method needs to be called before any public call that reads or
237    * modifies data.
238    * Acquires a read lock and checks if the region is closing or closed.
239    * <p>{@link #closeRegionOperation} MUST then always be called after
240    * the operation has completed, whether it succeeded or failed.
241    * @param op The operation is about to be taken on the region
242    * @throws IOException
243    */
244   void startRegionOperation(Operation op) throws IOException;
245 
246   /**
247    * Closes the region operation lock.
248    * @throws IOException
249    */
250   void closeRegionOperation() throws IOException;
251 
252   /**
253    * Closes the region operation lock. This needs to be called in the finally block corresponding
254    * to the try block of {@link #startRegionOperation(Operation)}
255    * @throws IOException
256    */
257   void closeRegionOperation(Operation op) throws IOException;
258 
259   // Row write locks
260 
261   /**
262    * Row lock held by a given thread.
263    * One thread may acquire multiple locks on the same row simultaneously.
264    * The locks must be released by calling release() from the same thread.
265    */
266   public interface RowLock {
267     /**
268      * Release the given lock.  If there are no remaining locks held by the current thread
269      * then unlock the row and allow other threads to acquire the lock.
270      * @throws IllegalArgumentException if called by a different thread than the lock owning
271      *     thread
272      */
273     void release();
274   }
275 
276   /**
277    *
278    * Get a row lock for the specified row. All locks are reentrant.
279    *
280    * Before calling this function make sure that a region operation has already been
281    * started (the calling thread has already acquired the region-close-guard lock).
282    * 
283    * NOTE: the boolean passed here has changed. It used to be a boolean that
284    * stated whether or not to wait on the lock. Now it is whether it an exclusive
285    * lock is requested.
286    * 
287    * @param row The row actions will be performed against
288    * @param readLock is the lock reader or writer. True indicates that a non-exclusive
289    * lock is requested
290    * @see #startRegionOperation()
291    * @see #startRegionOperation(Operation)
292    */
293   RowLock getRowLock(byte[] row, boolean readLock) throws IOException;
294 
295   /**
296    * If the given list of row locks is not null, releases all locks.
297    */
298   void releaseRowLocks(List<RowLock> rowLocks);
299 
300   ///////////////////////////////////////////////////////////////////////////
301   // Region operations
302 
303   /**
304    * Perform one or more append operations on a row.
305    * @param append
306    * @param nonceGroup
307    * @param nonce
308    * @return result of the operation
309    * @throws IOException
310    */
311   Result append(Append append, long nonceGroup, long nonce) throws IOException;
312 
313   /**
314    * Perform a batch of mutations.
315    * <p>
316    * Note this supports only Put and Delete mutations and will ignore other types passed.
317    * @param mutations the list of mutations
318    * @param nonceGroup
319    * @param nonce
320    * @return an array of OperationStatus which internally contains the
321    *         OperationStatusCode and the exceptionMessage if any.
322    * @throws IOException
323    */
324   OperationStatus[] batchMutate(Mutation[] mutations, long nonceGroup, long nonce)
325       throws IOException;
326 
327   /**
328    * Replay a batch of mutations.
329    * @param mutations mutations to replay.
330    * @param replaySeqId
331    * @return an array of OperationStatus which internally contains the
332    *         OperationStatusCode and the exceptionMessage if any.
333    * @throws IOException
334    */
335    OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqId) throws IOException;
336 
337   /**
338    * Atomically checks if a row/family/qualifier value matches the expected val
339    * If it does, it performs the row mutations.  If the passed value is null, t
340    * is for the lack of column (ie: non-existence)
341    * @param row to check
342    * @param family column family to check
343    * @param qualifier column qualifier to check
344    * @param compareOp the comparison operator
345    * @param comparator
346    * @param mutation
347    * @param writeToWAL
348    * @return true if mutation was applied, false otherwise
349    * @throws IOException
350    */
351   boolean checkAndMutate(byte [] row, byte [] family, byte [] qualifier, CompareOp compareOp,
352       ByteArrayComparable comparator, Mutation mutation, boolean writeToWAL) throws IOException;
353 
354   /**
355    * Atomically checks if a row/family/qualifier value matches the expected val
356    * If it does, it performs the row mutations.  If the passed value is null, t
357    * is for the lack of column (ie: non-existence)
358    * @param row to check
359    * @param family column family to check
360    * @param qualifier column qualifier to check
361    * @param compareOp the comparison operator
362    * @param comparator
363    * @param mutations
364    * @param writeToWAL
365    * @return true if mutation was applied, false otherwise
366    * @throws IOException
367    */
368   boolean checkAndRowMutate(byte [] row, byte [] family, byte [] qualifier, CompareOp compareOp,
369       ByteArrayComparable comparator, RowMutations mutations, boolean writeToWAL)
370       throws IOException;
371 
372   /**
373    * Deletes the specified cells/row.
374    * @param delete
375    * @throws IOException
376    */
377   void delete(Delete delete) throws IOException;
378 
379   /**
380    * Do a get based on the get parameter.
381    * @param get query parameters
382    * @return result of the operation
383    */
384   Result get(Get get) throws IOException;
385 
386   /**
387    * Do a get based on the get parameter.
388    * @param get query parameters
389    * @param withCoprocessor invoke coprocessor or not. We don't want to
390    * always invoke cp.
391    * @return list of cells resulting from the operation
392    */
393   List<Cell> get(Get get, boolean withCoprocessor) throws IOException;
394 
395   /**
396    * Do a get for duplicate non-idempotent operation.
397    * @param get query parameters.
398    * @param withCoprocessor
399    * @param nonceGroup Nonce group.
400    * @param nonce Nonce.
401    * @return list of cells resulting from the operation
402    * @throws IOException
403    */
404   List<Cell> get(Get get, boolean withCoprocessor, long nonceGroup, long nonce) throws IOException;
405 
406   /**
407    * Return all the data for the row that matches <i>row</i> exactly,
408    * or the one that immediately preceeds it, at or immediately before
409    * <i>ts</i>.
410    * @param row
411    * @param family
412    * @return result of the operation
413    * @throws IOException
414    */
415   Result getClosestRowBefore(byte[] row, byte[] family) throws IOException;
416 
417   /**
418    * Return an iterator that scans over the HRegion, returning the indicated
419    * columns and rows specified by the {@link Scan}.
420    * <p>
421    * This Iterator must be closed by the caller.
422    *
423    * @param scan configured {@link Scan}
424    * @return RegionScanner
425    * @throws IOException read exceptions
426    */
427   RegionScanner getScanner(Scan scan) throws IOException;
428 
429   /**
430    * Return an iterator that scans over the HRegion, returning the indicated columns and rows
431    * specified by the {@link Scan}. The scanner will also include the additional scanners passed
432    * along with the scanners for the specified Scan instance. Should be careful with the usage to
433    * pass additional scanners only within this Region
434    * <p>
435    * This Iterator must be closed by the caller.
436    *
437    * @param scan configured {@link Scan}
438    * @param additionalScanners Any additional scanners to be used
439    * @return RegionScanner
440    * @throws IOException read exceptions
441    */
442   RegionScanner getScanner(Scan scan, List<KeyValueScanner> additionalScanners) throws IOException;
443 
444   /**
445    * Perform one or more increment operations on a row.
446    * @param increment
447    * @param nonceGroup
448    * @param nonce
449    * @return result of the operation
450    * @throws IOException
451    */
452   Result increment(Increment increment, long nonceGroup, long nonce) throws IOException;
453 
454   /**
455    * Performs multiple mutations atomically on a single row. Currently
456    * {@link Put} and {@link Delete} are supported.
457    *
458    * @param mutations object that specifies the set of mutations to perform atomically
459    * @throws IOException
460    */
461   void mutateRow(RowMutations mutations) throws IOException;
462 
463   /**
464    * Perform atomic mutations within the region.
465    *
466    * @param mutations The list of mutations to perform.
467    * <code>mutations</code> can contain operations for multiple rows.
468    * Caller has to ensure that all rows are contained in this region.
469    * @param rowsToLock Rows to lock
470    * @param nonceGroup Optional nonce group of the operation (client Id)
471    * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence")
472    * If multiple rows are locked care should be taken that
473    * <code>rowsToLock</code> is sorted in order to avoid deadlocks.
474    * @throws IOException
475    */
476   void mutateRowsWithLocks(Collection<Mutation> mutations, Collection<byte[]> rowsToLock,
477       long nonceGroup, long nonce) throws IOException;
478 
479   /**
480    * Performs atomic multiple reads and writes on a given row.
481    *
482    * @param processor The object defines the reads and writes to a row.
483    */
484   void processRowsWithLocks(RowProcessor<?,?> processor) throws IOException;
485 
486   /**
487    * Performs atomic multiple reads and writes on a given row.
488    *
489    * @param processor The object defines the reads and writes to a row.
490    * @param nonceGroup Optional nonce group of the operation (client Id)
491    * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence")
492    */
493   void processRowsWithLocks(RowProcessor<?,?> processor, long nonceGroup, long nonce)
494       throws IOException;
495 
496   /**
497    * Performs atomic multiple reads and writes on a given row.
498    *
499    * @param processor The object defines the reads and writes to a row.
500    * @param timeout The timeout of the processor.process() execution
501    *                Use a negative number to switch off the time bound
502    * @param nonceGroup Optional nonce group of the operation (client Id)
503    * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence")
504    */
505   void processRowsWithLocks(RowProcessor<?,?> processor, long timeout, long nonceGroup, long nonce)
506       throws IOException;
507 
508   /**
509    * Puts some data in the table.
510    * @param put
511    * @throws IOException
512    */
513   void put(Put put) throws IOException;
514 
515   /**
516    * Listener class to enable callers of
517    * bulkLoadHFile() to perform any necessary
518    * pre/post processing of a given bulkload call
519    */
520   interface BulkLoadListener {
521 
522     /**
523      * Called before an HFile is actually loaded
524      * @param family family being loaded to
525      * @param srcPath path of HFile
526      * @return final path to be used for actual loading
527      * @throws IOException
528      */
529     String prepareBulkLoad(byte[] family, String srcPath) throws IOException;
530 
531     /**
532      * Called after a successful HFile load
533      * @param family family being loaded to
534      * @param srcPath path of HFile
535      * @throws IOException
536      */
537     void doneBulkLoad(byte[] family, String srcPath) throws IOException;
538 
539     /**
540      * Called after a failed HFile load
541      * @param family family being loaded to
542      * @param srcPath path of HFile
543      * @throws IOException
544      */
545     void failedBulkLoad(byte[] family, String srcPath) throws IOException;
546   }
547 
548   /**
549    * Attempts to atomically load a group of hfiles.  This is critical for loading
550    * rows with multiple column families atomically.
551    *
552    * @param familyPaths List of Pair&lt;byte[] column family, String hfilePath&gt;
553    * @param assignSeqId
554    * @param bulkLoadListener Internal hooks enabling massaging/preparation of a
555    * file about to be bulk loaded
556    * @param clusterIds
557    * @return true if successful, false if failed recoverably
558    * @throws IOException if failed unrecoverably.
559    */
560   boolean bulkLoadHFiles(Collection<Pair<byte[], String>> familyPaths, boolean assignSeqId,
561     BulkLoadListener bulkLoadListener, List<String> clusterIds) throws IOException;
562 
563   /**
564    * Attempts to atomically load a group of hfiles.  This is critical for loading
565    * rows with multiple column families atomically. Deprecated - do not use.
566    *
567    * @param familyPaths List of Pair&lt;byte[] column family, String hfilePath&gt;
568    * @param assignSeqId
569    * @param bulkLoadListener Internal hooks enabling massaging/preparation of a
570    * file about to be bulk loaded
571    * @return true if successful, false if failed recoverably
572    * @throws IOException if failed unrecoverably.
573    * @deprecated Do not use, see HBASE-22380
574    */
575   @Deprecated
576   boolean bulkLoadHFiles(Collection<Pair<byte[], String>> familyPaths, boolean assignSeqId,
577     BulkLoadListener bulkLoadListener) throws IOException;
578 
579   ///////////////////////////////////////////////////////////////////////////
580   // Coprocessors
581 
582   /** @return the coprocessor host */
583   RegionCoprocessorHost getCoprocessorHost();
584 
585   /**
586    * Executes a single protocol buffer coprocessor endpoint {@link Service} method using
587    * the registered protocol handlers.  {@link Service} implementations must be registered via the
588    * {@link Region#registerService(com.google.protobuf.Service)}
589    * method before they are available.
590    *
591    * @param controller an {@code RpcContoller} implementation to pass to the invoked service
592    * @param call a {@code CoprocessorServiceCall} instance identifying the service, method,
593    *     and parameters for the method invocation
594    * @return a protocol buffer {@code Message} instance containing the method's result
595    * @throws IOException if no registered service handler is found or an error
596    *     occurs during the invocation
597    * @see org.apache.hadoop.hbase.regionserver.Region#registerService(com.google.protobuf.Service)
598    */
599   Message execService(RpcController controller, CoprocessorServiceCall call) throws IOException;
600 
601   /**
602    * Registers a new protocol buffer {@link Service} subclass as a coprocessor endpoint to
603    * be available for handling
604    * {@link Region#execService(com.google.protobuf.RpcController,
605    *    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall)}} calls.
606    *
607    * <p>
608    * Only a single instance may be registered per region for a given {@link Service} subclass (the
609    * instances are keyed on {@link com.google.protobuf.Descriptors.ServiceDescriptor#getFullName()}.
610    * After the first registration, subsequent calls with the same service name will fail with
611    * a return value of {@code false}.
612    * </p>
613    * @param instance the {@code Service} subclass instance to expose as a coprocessor endpoint
614    * @return {@code true} if the registration was successful, {@code false}
615    * otherwise
616    */
617   boolean registerService(Service instance);
618 
619   ///////////////////////////////////////////////////////////////////////////
620   // RowMutation processor support
621 
622   /**
623    * Check the collection of families for validity.
624    * @param families
625    * @throws NoSuchColumnFamilyException
626    */
627   void checkFamilies(Collection<byte[]> families) throws NoSuchColumnFamilyException;
628 
629   /**
630    * Check the collection of families for valid timestamps
631    * @param familyMap
632    * @param now current timestamp
633    * @throws FailedSanityCheckException
634    */
635   void checkTimestamps(Map<byte[], List<Cell>> familyMap, long now)
636       throws FailedSanityCheckException;
637 
638   /**
639    * Prepare a delete for a row mutation processor
640    * @param delete The passed delete is modified by this method. WARNING!
641    * @throws IOException
642    */
643   void prepareDelete(Delete delete) throws IOException;
644 
645   /**
646    * Set up correct timestamps in the KVs in Delete object.
647    * <p>Caller should have the row and region locks.
648    * @param mutation
649    * @param familyCellMap
650    * @param now
651    * @throws IOException
652    */
653   void prepareDeleteTimestamps(Mutation mutation, Map<byte[], List<Cell>> familyCellMap,
654       byte[] now) throws IOException;
655 
656   /**
657    * Replace any cell timestamps set to HConstants#LATEST_TIMESTAMP with the
658    * provided current timestamp.
659    * @param values
660    * @param now
661    */
662   void updateCellTimestamps(final Iterable<List<Cell>> values, final byte[] now)
663       throws IOException;
664 
665   ///////////////////////////////////////////////////////////////////////////
666   // Flushes, compactions, splits, etc.
667   // Wizards only, please
668 
669   interface FlushResult {
670     enum Result {
671       FLUSHED_NO_COMPACTION_NEEDED,
672       FLUSHED_COMPACTION_NEEDED,
673       // Special case where a flush didn't run because there's nothing in the memstores. Used when
674       // bulk loading to know when we can still load even if a flush didn't happen.
675       CANNOT_FLUSH_MEMSTORE_EMPTY,
676       CANNOT_FLUSH
677     }
678 
679     /** @return the detailed result code */
680     Result getResult();
681 
682     /** @return true if the memstores were flushed, else false */
683     boolean isFlushSucceeded();
684 
685     /** @return True if the flush requested a compaction, else false */
686     boolean isCompactionNeeded();
687   }
688 
689   /**
690    * Flush the cache.
691    *
692    * <p>When this method is called the cache will be flushed unless:
693    * <ol>
694    *   <li>the cache is empty</li>
695    *   <li>the region is closed.</li>
696    *   <li>a flush is already in progress</li>
697    *   <li>writes are disabled</li>
698    * </ol>
699    *
700    * <p>This method may block for some time, so it should not be called from a
701    * time-sensitive thread.
702    * @param force whether we want to force a flush of all stores
703    * @return FlushResult indicating whether the flush was successful or not and if
704    * the region needs compacting
705    *
706    * @throws IOException general io exceptions
707    * because a snapshot was not properly persisted.
708    */
709   FlushResult flush(boolean force) throws IOException;
710 
711   /**
712    * Synchronously compact all stores in the region.
713    * <p>This operation could block for a long time, so don't call it from a
714    * time-sensitive thread.
715    * <p>Note that no locks are taken to prevent possible conflicts between
716    * compaction and splitting activities. The regionserver does not normally compact
717    * and split in parallel. However by calling this method you may introduce
718    * unexpected and unhandled concurrency. Don't do this unless you know what
719    * you are doing.
720    *
721    * @param majorCompaction True to force a major compaction regardless of thresholds
722    * @throws IOException
723    */
724   void compact(final boolean majorCompaction) throws IOException;
725 
726   /**
727    * Trigger major compaction on all stores in the region.
728    * <p>
729    * Compaction will be performed asynchronously to this call by the RegionServer's
730    * CompactSplitThread. See also {@link Store#triggerMajorCompaction()}
731    * @throws IOException
732    */
733   void triggerMajorCompaction() throws IOException;
734 
735   /**
736    * @return if a given region is in compaction now.
737    */
738   CompactionState getCompactionState();
739 
740   /** Wait for all current flushes and compactions of the region to complete */
741   void waitForFlushesAndCompactions();
742 
743   /** Wait for all current flushes of the region to complete
744    */
745   void waitForFlushes();
746 }