View Javadoc

1   /**
2    *
3    * Licensed to the Apache Software Foundation (ASF) under one
4    * or more contributor license agreements.  See the NOTICE file
5    * distributed with this work for additional information
6    * regarding copyright ownership.  The ASF licenses this file
7    * to you under the Apache License, Version 2.0 (the
8    * "License"); you may not use this file except in compliance
9    * with the License.  You may obtain a copy of the License at
10   *
11   *     http://www.apache.org/licenses/LICENSE-2.0
12   *
13   * Unless required by applicable law or agreed to in writing, software
14   * distributed under the License is distributed on an "AS IS" BASIS,
15   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16   * See the License for the specific language governing permissions and
17   * limitations under the License.
18   */
19  package org.apache.hadoop.hbase.regionserver;
20  
21  import com.google.common.cache.Cache;
22  import com.google.common.cache.CacheBuilder;
23  import com.google.protobuf.ByteString;
24  import com.google.protobuf.Message;
25  import com.google.protobuf.RpcController;
26  import com.google.protobuf.ServiceException;
27  import com.google.protobuf.TextFormat;
28  
29  import java.io.FileNotFoundException;
30  import java.io.IOException;
31  import java.io.InterruptedIOException;
32  import java.lang.reflect.InvocationTargetException;
33  import java.lang.reflect.Method;
34  import java.net.BindException;
35  import java.net.InetSocketAddress;
36  import java.net.UnknownHostException;
37  import java.util.ArrayList;
38  import java.util.Arrays;
39  import java.util.Collections;
40  import java.util.HashMap;
41  import java.util.Iterator;
42  import java.util.List;
43  import java.util.Map;
44  import java.util.Map.Entry;
45  import java.util.NavigableMap;
46  import java.util.Set;
47  import java.util.TreeSet;
48  import java.util.concurrent.ConcurrentHashMap;
49  import java.util.concurrent.ConcurrentMap;
50  import java.util.concurrent.TimeUnit;
51  import java.util.concurrent.atomic.AtomicLong;
52  
53  import org.apache.commons.lang.mutable.MutableObject;
54  import org.apache.commons.logging.Log;
55  import org.apache.commons.logging.LogFactory;
56  import org.apache.hadoop.conf.Configuration;
57  import org.apache.hadoop.hbase.Cell;
58  import org.apache.hadoop.hbase.CellScannable;
59  import org.apache.hadoop.hbase.CellScanner;
60  import org.apache.hadoop.hbase.CellUtil;
61  import org.apache.hadoop.hbase.DoNotRetryIOException;
62  import org.apache.hadoop.hbase.DroppedSnapshotException;
63  import org.apache.hadoop.hbase.HBaseIOException;
64  import org.apache.hadoop.hbase.HConstants;
65  import org.apache.hadoop.hbase.HRegionInfo;
66  import org.apache.hadoop.hbase.HTableDescriptor;
67  import org.apache.hadoop.hbase.MetaTableAccessor;
68  import org.apache.hadoop.hbase.MultiActionResultTooLarge;
69  import org.apache.hadoop.hbase.NotServingRegionException;
70  import org.apache.hadoop.hbase.ServerName;
71  import org.apache.hadoop.hbase.TableName;
72  import org.apache.hadoop.hbase.UnknownScannerException;
73  import org.apache.hadoop.hbase.classification.InterfaceAudience;
74  import org.apache.hadoop.hbase.client.Append;
75  import org.apache.hadoop.hbase.client.ConnectionUtils;
76  import org.apache.hadoop.hbase.client.Delete;
77  import org.apache.hadoop.hbase.client.Durability;
78  import org.apache.hadoop.hbase.client.Get;
79  import org.apache.hadoop.hbase.client.Increment;
80  import org.apache.hadoop.hbase.client.Mutation;
81  import org.apache.hadoop.hbase.client.Put;
82  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
83  import org.apache.hadoop.hbase.client.Result;
84  import org.apache.hadoop.hbase.client.RowMutations;
85  import org.apache.hadoop.hbase.client.Scan;
86  import org.apache.hadoop.hbase.client.VersionInfoUtil;
87  import org.apache.hadoop.hbase.conf.ConfigurationObserver;
88  import org.apache.hadoop.hbase.coordination.CloseRegionCoordination;
89  import org.apache.hadoop.hbase.coordination.OpenRegionCoordination;
90  import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
91  import org.apache.hadoop.hbase.exceptions.MergeRegionException;
92  import org.apache.hadoop.hbase.exceptions.OperationConflictException;
93  import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
94  import org.apache.hadoop.hbase.exceptions.ScannerResetException;
95  import org.apache.hadoop.hbase.filter.ByteArrayComparable;
96  import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
97  import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
98  import org.apache.hadoop.hbase.ipc.HBaseRpcController;
99  import org.apache.hadoop.hbase.ipc.PriorityFunction;
100 import org.apache.hadoop.hbase.ipc.QosPriority;
101 import org.apache.hadoop.hbase.ipc.RpcCallContext;
102 import org.apache.hadoop.hbase.ipc.RpcServer;
103 import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
104 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
105 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
106 import org.apache.hadoop.hbase.ipc.ServerRpcController;
107 import org.apache.hadoop.hbase.master.HMaster;
108 import org.apache.hadoop.hbase.master.MasterRpcServices;
109 import org.apache.hadoop.hbase.namequeues.NamedQueuePayload;
110 import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder;
111 import org.apache.hadoop.hbase.namequeues.RpcLogDetails;
112 import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest;
113 import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse;
114 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
115 import org.apache.hadoop.hbase.protobuf.RequestConverter;
116 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
117 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
118 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearSlowLogResponseRequest;
119 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ClearSlowLogResponses;
120 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
121 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
122 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
123 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse;
124 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchRequest;
125 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactionSwitchResponse;
126 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest;
127 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse;
128 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
129 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
130 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
131 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
132 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest;
133 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse;
134 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest;
135 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse;
136 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest;
137 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse;
138 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
139 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
140 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
141 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
142 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
143 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
144 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest;
145 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
146 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SlowLogResponseRequest;
147 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SlowLogResponses;
148 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest;
149 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse;
150 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
151 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse;
152 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest;
153 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse;
154 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
155 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
156 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry;
157 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest;
158 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse;
159 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
160 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Action;
161 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
162 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath;
163 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
164 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
165 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition;
166 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
167 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
168 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
169 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
170 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRegionLoadStats;
171 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
172 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse;
173 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
174 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
175 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
176 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
177 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction;
178 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
179 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
180 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
181 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
182 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
183 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
184 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair;
185 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
186 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
187 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
188 import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetrics;
189 import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
190 import org.apache.hadoop.hbase.protobuf.generated.TooSlowLog.SlowLogPayload;
191 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
192 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
193 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
194 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
195 import org.apache.hadoop.hbase.quotas.OperationQuota;
196 import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
197 import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
198 import org.apache.hadoop.hbase.regionserver.Region.Operation;
199 import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
200 import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
201 import org.apache.hadoop.hbase.regionserver.handler.OpenPriorityRegionHandler;
202 import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
203 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
204 import org.apache.hadoop.hbase.security.User;
205 import org.apache.hadoop.hbase.security.access.AccessChecker;
206 import org.apache.hadoop.hbase.security.access.Permission;
207 import org.apache.hadoop.hbase.util.Bytes;
208 import org.apache.hadoop.hbase.util.Counter;
209 import org.apache.hadoop.hbase.util.DNS;
210 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
211 import org.apache.hadoop.hbase.util.Pair;
212 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
213 import org.apache.hadoop.hbase.util.Strings;
214 import org.apache.hadoop.hbase.wal.WAL;
215 import org.apache.hadoop.hbase.wal.WALKey;
216 import org.apache.hadoop.hbase.wal.WALSplitter;
217 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
218 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
219 import org.apache.zookeeper.KeeperException;
220 
221 /**
222  * Implements the regionserver RPC services.
223  */
224 @InterfaceAudience.Private
225 @SuppressWarnings("deprecation")
226 public class RSRpcServices implements HBaseRPCErrorHandler,
227     AdminService.BlockingInterface, ClientService.BlockingInterface, PriorityFunction,
228     ConfigurationObserver {
  protected static final Log LOG = LogFactory.getLog(RSRpcServices.class);

  /** RPC scheduler to use for the region server. */
  public static final String REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS =
    "hbase.region.server.rpc.scheduler.factory.class";

  /**
   * Minimum allowable time limit delta (in milliseconds) that can be enforced during scans. This
   * configuration exists to prevent the scenario where a time limit is specified to be so
   * restrictive that the time limit is reached immediately (before any cells are scanned).
   */
  private static final String REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA =
      "hbase.region.server.rpc.minimum.scan.time.limit.delta";
  /**
   * Default value of {@link RSRpcServices#REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA}
   */
  private static final long DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA = 10;

  /*
   * Whether to reject rows with size > threshold defined by
   * {@link RSRpcServices#BATCH_ROWS_THRESHOLD_NAME}
   */
  private static final String REJECT_BATCH_ROWS_OVER_THRESHOLD =
    "hbase.rpc.rows.size.threshold.reject";

  /*
   * Default value of config {@link RSRpcServices#REJECT_BATCH_ROWS_OVER_THRESHOLD}
   */
  private static final boolean DEFAULT_REJECT_BATCH_ROWS_OVER_THRESHOLD = false;

  // Request counter. (Includes requests that are not serviced by regions.)
  // Count only once for requests with multiple actions like multi/caching-scan/replayBatch
  final Counter requestCount = new Counter();

  // Request counter for rpc get
  final Counter rpcGetRequestCount = new Counter();

  // Request counter for rpc scan
  final Counter rpcScanRequestCount = new Counter();

  // Request counter for rpc multi
  final Counter rpcMultiRequestCount = new Counter();

  // Request counter for rpc mutate
  final Counter rpcMutateRequestCount = new Counter();

  // Server to handle client requests.
  final RpcServerInterface rpcServer;
  // Socket address of the RPC endpoint (presumably the bind address — assigned
  // outside this view, likely in the constructor; confirm there).
  final InetSocketAddress isa;

  // Owning region server; most handlers delegate region lookup and state to it.
  protected final HRegionServer regionServer;
  private final long maxScannerResultSize;

  // The reference to the priority extraction function
  private final PriorityFunction priority;

  private ScannerIdGenerator scannerIdGenerator;
  // Live scanners keyed by scanner name; entries are removed on close or lease expiry.
  private final ConcurrentMap<String, RegionScannerHolder> scanners = new ConcurrentHashMap<>();
  // Hold the name of a closed scanner for a while. This is used to keep compatible for old clients
  // which may send next or close request to a region scanner which has already been exhausted. The
  // entries will be removed automatically after scannerLeaseTimeoutPeriod.
  private final Cache<String, String> closedScanners;
  /**
   * The lease timeout period for client scanners (milliseconds).
   */
  private final int scannerLeaseTimeoutPeriod;

  /**
   * The RPC timeout period (milliseconds)
   */
  private final int rpcTimeout;

  /**
   * The minimum allowable delta to use for the scan limit
   */
  private final long minimumScanTimeLimitDelta;

  /**
   * Row size threshold for multi requests above which a warning is logged
   */
  private final int rowSizeWarnThreshold;
  /*
   * Whether we should reject requests with very high no of rows i.e. beyond threshold
   * defined by rowSizeWarnThreshold
   */
  private final boolean rejectRowsWithSizeOverThreshold;

  // We want to vet all accesses at the point of entry itself; limiting scope of access checker
  // instance to only this class to prevent its use from spreading deeper into implementation.
  // Initialized in start() since AccessChecker needs ZKWatcher which is created by HRegionServer
  // after RSRpcServices constructor and before start() is called.
  // Initialized only if authorization is enabled, else remains null.
  private AccessChecker accessChecker;
322 
323   /**
324    * Holder class which holds the RegionScanner, nextCallSeq and RpcCallbacks together.
325    */
326   private static final class RegionScannerHolder {
327 
328     private final AtomicLong nextCallSeq = new AtomicLong(0);
329     private final String scannerName;
330     private final RegionScanner s;
331     private final Region r;
332     private byte[] rowOfLastPartialResult;
333     private boolean needCursor;
334 
335     public RegionScannerHolder(String scannerName, RegionScanner s, Region r, boolean needCursor) {
336       this.scannerName = scannerName;
337       this.s = s;
338       this.r = r;
339       this.needCursor = needCursor;
340     }
341 
342     public long getNextCallSeq() {
343       return nextCallSeq.get();
344     }
345 
346     public boolean incNextCallSeq(long currentSeq) {
347       // Use CAS to prevent multiple scan request running on the same scanner.
348       return nextCallSeq.compareAndSet(currentSeq, currentSeq + 1);
349     }
350   }
351 
352   /**
353    * Instantiated as a scanner lease. If the lease times out, the scanner is
354    * closed
355    */
356   private class ScannerListener implements LeaseListener {
357     private final String scannerName;
358 
359     ScannerListener(final String n) {
360       this.scannerName = n;
361     }
362 
363     @Override
364     public void leaseExpired() {
365       RegionScannerHolder rsh = scanners.remove(this.scannerName);
366       if (rsh != null) {
367         RegionScanner s = rsh.s;
368         LOG.info("Scanner " + this.scannerName + " lease expired on region "
369           + s.getRegionInfo().getRegionNameAsString());
370         Region region = null;
371         try {
372           region = regionServer.getRegion(s.getRegionInfo().getRegionName());
373           if (region != null && region.getCoprocessorHost() != null) {
374             region.getCoprocessorHost().preScannerClose(s);
375           }
376         } catch (IOException e) {
377           LOG.error("Closing scanner for " + s.getRegionInfo().getRegionNameAsString(), e);
378         } finally {
379           try {
380             s.close();
381             if (region != null && region.getCoprocessorHost() != null) {
382               region.getCoprocessorHost().postScannerClose(s);
383             }
384           } catch (IOException e) {
385             LOG.error("Closing scanner for " + s.getRegionInfo().getRegionNameAsString(), e);
386           }
387         }
388       } else {
389         LOG.warn("Scanner " + this.scannerName + " lease expired, but no related" +
390           " scanner found, hence no chance to close that related scanner!");
391       }
392     }
393   }
394 
395   private static ResultOrException getResultOrException(final ClientProtos.Result r,
396                                                         final int index){
397     return getResultOrException(ResponseConverter.buildActionResult(r), index);
398   }
399 
400   private static ResultOrException getResultOrException(final Exception e, final int index) {
401     return getResultOrException(ResponseConverter.buildActionResult(e), index);
402   }
403 
404   private static ResultOrException getResultOrException(
405       final ResultOrException.Builder builder, final int index) {
406     return builder.setIndex(index).build();
407   }
408 
409   /**
410    * Checks for the following pre-checks in order:
411    * <ol>
412    *   <li>RegionServer is running</li>
413    *   <li>If authorization is enabled, then RPC caller has ADMIN permissions</li>
414    * </ol>
415    * @param requestName name of rpc request. Used in reporting failures to provide context.
416    * @throws ServiceException If any of the above listed pre-check fails.
417    */
418   private void rpcPreCheck(String requestName) throws ServiceException {
419     try {
420       checkOpen();
421       requirePermission(requestName, Permission.Action.ADMIN);
422     } catch (IOException ioe) {
423       throw new ServiceException(ioe);
424     }
425   }
426 
427   /**
428    * Starts the nonce operation for a mutation, if needed.
429    * @param mutation Mutation.
430    * @param nonceGroup Nonce group from the request.
431    * @return whether to proceed this mutation.
432    */
433   private boolean startNonceOperation(final MutationProto mutation, long nonceGroup)
434       throws IOException, OperationConflictException {
435     if (regionServer.nonceManager == null || !mutation.hasNonce()) return true;
436     boolean canProceed = false;
437     try {
438       canProceed = regionServer.nonceManager.startOperation(
439         nonceGroup, mutation.getNonce(), regionServer);
440     } catch (InterruptedException ex) {
441       throw new InterruptedIOException("Nonce start operation interrupted");
442     }
443     return canProceed;
444   }
445 
446   /**
447    * Ends nonce operation for a mutation, if needed.
448    * @param mutation Mutation.
449    * @param nonceGroup Nonce group from the request. Always 0 in initial implementation.
450    * @param success Whether the operation for this nonce has succeeded.
451    */
452   private void endNonceOperation(final MutationProto mutation,
453       long nonceGroup, boolean success) {
454     if (regionServer.nonceManager != null && mutation.hasNonce()) {
455       regionServer.nonceManager.endOperation(nonceGroup, mutation.getNonce(), success);
456     }
457   }
458 
459   /**
460    * @return True if current call supports cellblocks
461    */
462   private boolean isClientCellBlockSupport() {
463     return isClientCellBlockSupport(RpcServer.getCurrentCall());
464   }
465 
466   private boolean isClientCellBlockSupport(RpcCallContext context) {
467     return context != null && context.isClientCellBlockSupported();
468   }
469 
470   private void addResult(final MutateResponse.Builder builder,
471       final Result result, final HBaseRpcController rpcc) {
472     if (result == null) return;
473     if (isClientCellBlockSupport()) {
474       builder.setResult(ProtobufUtil.toResultNoData(result));
475       rpcc.setCellScanner(result.cellScanner());
476     } else {
477       ClientProtos.Result pbr = ProtobufUtil.toResult(result);
478       builder.setResult(pbr);
479     }
480   }
481 
482   private void addResults(final ScanResponse.Builder builder, final List<Result> results,
483       final HBaseRpcController controller, boolean isDefaultRegion) {
484     builder.setStale(!isDefaultRegion);
485     if (results.isEmpty()) {
486       return;
487     }
488     if (isClientCellBlockSupport()) {
489       for (Result res : results) {
490         builder.addCellsPerResult(res.size());
491         builder.addPartialFlagPerResult(res.mayHaveMoreCellsInRow());
492       }
493       controller.setCellScanner(CellUtil.createCellScanner(results));
494     } else {
495       for (Result res : results) {
496         ClientProtos.Result pbr = ProtobufUtil.toResult(res);
497         builder.addResults(pbr);
498       }
499     }
500   }
501 
502   /**
503    * Mutate a list of rows atomically.
504    *
505    * @param region
506    * @param actions
507    * @param cellScanner if non-null, the mutation data -- the Cell content.
508    * @throws IOException
509    */
510   private void mutateRows(final Region region,
511       final List<ClientProtos.Action> actions,
512       final CellScanner cellScanner, RegionActionResult.Builder builder) throws IOException {
513     int countOfCompleteMutation = 0;
514     try {
515       if (!region.getRegionInfo().isMetaTable()) {
516         regionServer.cacheFlusher.reclaimMemStoreMemory();
517       }
518       RowMutations rm = null;
519       int i = 0;
520       ClientProtos.ResultOrException.Builder resultOrExceptionOrBuilder =
521         ClientProtos.ResultOrException.newBuilder();
522       for (ClientProtos.Action action: actions) {
523         if (action.hasGet()) {
524           throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" +
525             action.getGet());
526         }
527         MutationType type = action.getMutation().getMutateType();
528         if (rm == null) {
529           rm = new RowMutations(action.getMutation().getRow().toByteArray());
530         }
531         switch (type) {
532           case PUT:
533             Put put = ProtobufUtil.toPut(action.getMutation(), cellScanner);
534             ++countOfCompleteMutation;
535             checkCellSizeLimit(region, put);
536             rm.add(put);
537             break;
538           case DELETE:
539             Delete delete = ProtobufUtil.toDelete(action.getMutation(), cellScanner);
540             ++countOfCompleteMutation;
541             rm.add(delete);
542             break;
543           default:
544             throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name());
545         }
546         // To unify the response format with doNonAtomicRegionMutation and read through client's
547         // AsyncProcess we have to add an empty result instance per operation
548         resultOrExceptionOrBuilder.clear();
549         resultOrExceptionOrBuilder.setIndex(i++);
550         builder.addResultOrException(
551           resultOrExceptionOrBuilder.build());
552       }
553       region.mutateRow(rm);
554     } finally {
555       // Currently, the checkAndMutate isn't supported by batch so it won't mess up the cell scanner
556       // even if the malformed cells are not skipped.
557       for (int i = countOfCompleteMutation; i < actions.size(); ++i) {
558         skipCellsForMutation(actions.get(i), cellScanner);
559       }
560     }
561   }
562 
563   /**
564    * Mutate a list of rows atomically.
565    *
566    * @param region
567    * @param actions
568    * @param cellScanner if non-null, the mutation data -- the Cell content.
569    * @param row
570    * @param family
571    * @param qualifier
572    * @param compareOp
573    * @param comparator @throws IOException
574    */
575   private boolean checkAndRowMutate(final Region region, final List<ClientProtos.Action> actions,
576       final CellScanner cellScanner, byte[] row, byte[] family, byte[] qualifier,
577       CompareOp compareOp, ByteArrayComparable comparator,
578                                     RegionActionResult.Builder builder) throws IOException {
579     int countOfCompleteMutation = 0;
580     try {
581       if (!region.getRegionInfo().isMetaTable()) {
582         regionServer.cacheFlusher.reclaimMemStoreMemory();
583       }
584       RowMutations rm = null;
585       int i = 0;
586       ClientProtos.ResultOrException.Builder resultOrExceptionOrBuilder =
587         ClientProtos.ResultOrException.newBuilder();
588       for (ClientProtos.Action action: actions) {
589         if (action.hasGet()) {
590           throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" +
591             action.getGet());
592         }
593         MutationType type = action.getMutation().getMutateType();
594         if (rm == null) {
595           rm = new RowMutations(action.getMutation().getRow().toByteArray());
596         }
597         switch (type) {
598           case PUT:
599             Put put = ProtobufUtil.toPut(action.getMutation(), cellScanner);
600             ++countOfCompleteMutation;
601             checkCellSizeLimit(region, put);
602             rm.add(put);
603             break;
604           case DELETE:
605             Delete delete = ProtobufUtil.toDelete(action.getMutation(), cellScanner);
606             ++countOfCompleteMutation;
607             rm.add(delete);
608             break;
609           default:
610             throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name());
611         }
612         // To unify the response format with doNonAtomicRegionMutation and read through client's
613         // AsyncProcess we have to add an empty result instance per operation
614         resultOrExceptionOrBuilder.clear();
615         resultOrExceptionOrBuilder.setIndex(i++);
616         builder.addResultOrException(
617           resultOrExceptionOrBuilder.build());
618       }
619       return region.checkAndRowMutate(row, family, qualifier, compareOp,
620         comparator, rm, Boolean.TRUE);
621     } finally {
622       // Currently, the checkAndMutate isn't supported by batch so it won't mess up the cell scanner
623       // even if the malformed cells are not skipped.
624       for (int i = countOfCompleteMutation; i < actions.size(); ++i) {
625         skipCellsForMutation(actions.get(i), cellScanner);
626       }
627     }
628   }
629 
630   /**
631    * Execute an append mutation.
632    *
633    * @param region
634    * @param mutation
635    * @param cellScanner
636    * @param nonce group
637    * @return result to return to client if default operation should be
638    * bypassed as indicated by RegionObserver, null otherwise
639    * @throws IOException
640    */
641   private Result append(final Region region, final OperationQuota quota,
642       final MutationProto mutation, final CellScanner cellScanner, long nonceGroup)
643       throws IOException {
644     long before = EnvironmentEdgeManager.currentTime();
645     Append append = ProtobufUtil.toAppend(mutation, cellScanner);
646     checkCellSizeLimit(region, append);
647     quota.addMutation(append);
648     Result r = null;
649     if (region.getCoprocessorHost() != null) {
650       r = region.getCoprocessorHost().preAppend(append);
651     }
652     if (r == null) {
653       boolean canProceed = startNonceOperation(mutation, nonceGroup);
654       boolean success = false;
655       try {
656         long nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
657         if (canProceed) {
658           r = region.append(append, nonceGroup, nonce);
659         } else {
660           // convert duplicate append to get
661           List<Cell> results = region.get(ProtobufUtil.toGet(mutation, cellScanner), false,
662             nonceGroup, nonce);
663           r = Result.create(results);
664         }
665         success = true;
666       } finally {
667         if (canProceed) {
668           endNonceOperation(mutation, nonceGroup, success);
669         }
670       }
671       if (region.getCoprocessorHost() != null) {
672         r = region.getCoprocessorHost().postAppend(append, r);
673       }
674     }
675     if (regionServer.metricsRegionServer != null) {
676       regionServer.metricsRegionServer.updateAppend(
677           region.getTableDesc().getTableName(),
678         EnvironmentEdgeManager.currentTime() - before);
679     }
680     return r;
681   }
682 
  /**
   * Execute an increment mutation.
   *
   * @param region region the increment is applied to
   * @param quota operation quota the increment's size is charged against
   * @param mutation the INCREMENT mutation proto
   * @param cellScanner carries the cells for the increment, consumed here
   * @param nonceGroup the client's nonce group, used to detect duplicate requests
   * @return the Result
   * @throws IOException
   */
  private Result increment(final Region region, final OperationQuota quota,
      final MutationProto mutation, final CellScanner cellScanner, long nonceGroup)
      throws IOException {
    long before = EnvironmentEdgeManager.currentTime();
    Increment increment = ProtobufUtil.toIncrement(mutation, cellScanner);
    checkCellSizeLimit(region, increment);
    quota.addMutation(increment);
    Result r = null;
    if (region.getCoprocessorHost() != null) {
      r = region.getCoprocessorHost().preIncrement(increment);
    }
    if (r == null) {
      // preIncrement did not bypass the operation; run it under nonce protection.
      boolean canProceed = startNonceOperation(mutation, nonceGroup);
      boolean success = false;
      try {
        long nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
        if (canProceed) {
          r = region.increment(increment, nonceGroup, nonce);
        } else {
          // convert duplicate increment to get: the original operation already ran,
          // so read back the current row state instead of incrementing again
          List<Cell> results = region.get(ProtobufUtil.toGet(mutation, cellScanner), false,
            nonceGroup, nonce);
          r = Result.create(results);
        }
        success = true;
      } finally {
        // Always release the nonce we started, recording whether the op succeeded.
        if (canProceed) {
          endNonceOperation(mutation, nonceGroup, success);
        }
      }
      if (region.getCoprocessorHost() != null) {
        r = region.getCoprocessorHost().postIncrement(increment, r);
      }
    }
    if (regionServer.metricsRegionServer != null) {
      regionServer.metricsRegionServer.updateIncrement(
          region.getTableDesc().getTableName(),
          EnvironmentEdgeManager.currentTime() - before);
    }
    return r;
  }
734 
  /**
   * Run through the regionMutation <code>rm</code> and per Mutation, do the work, and then when
   * done, add an instance of a {@link ResultOrException} that corresponds to each Mutation.
   * @param region region all the actions target
   * @param quota operation quota each mutation is charged against
   * @param actions the list of actions (gets, service calls, mutations) to execute
   * @param cellScanner data cells accompanying the actions, consumed strictly in order
   * @param builder response builder receiving one ResultOrException per action
   * @param cellsToReturn  Could be null. May be allocated in this method.  This is what this
   * method returns as a 'result'.
   * @param nonceGroup client nonce group forwarded to append/increment for deduplication
   * @return the <code>cellsToReturn</code> list (possibly allocated here) holding Results
   * whose cells are shipped back via the cell block rather than the protobuf response
   */
  private List<CellScannable> doNonAtomicRegionMutation(final Region region,
      final OperationQuota quota, final RegionAction actions, final CellScanner cellScanner,
      final RegionActionResult.Builder builder, List<CellScannable> cellsToReturn, long nonceGroup) {
    // Gather up CONTIGUOUS Puts and Deletes in this mutations List.  Idea is that rather than do
    // one at a time, we instead pass them in batch.  Be aware that the corresponding
    // ResultOrException instance that matches each Put or Delete is then added down in the
    // doBatchOp call.  We should be staying aligned though the Put and Delete are deferred/batched
    List<ClientProtos.Action> mutations = null;
    long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
    RpcCallContext context = RpcServer.getCurrentCall();
    IOException sizeIOE = null;
    // Tracks the last value-array seen so response block size isn't double-counted; see addSize().
    Object lastBlock = null;
    ClientProtos.ResultOrException.Builder resultOrExceptionBuilder = ResultOrException.newBuilder();
    boolean hasResultOrException = false;
    for (ClientProtos.Action action : actions.getActionList()) {
      hasResultOrException = false;
      resultOrExceptionBuilder.clear();
      try {
        Result r = null;

        // Once the accumulated response crosses the size limit, fail each remaining
        // action with MultiActionResultTooLarge so the client retries them separately.
        if (context != null
            && context.isRetryImmediatelySupported()
            && (context.getResponseCellSize() > maxQuotaResultSize
              || context.getResponseBlockSize() + context.getResponseExceptionSize()
              > maxQuotaResultSize)) {

          // We're storing the exception since the exception and reason string won't
          // change after the response size limit is reached.
          if (sizeIOE == null ) {
            // We don't need the stack un-winding so don't throw the exception.
            // Throwing will kill the JVM's JIT.
            //
            // Instead just create the exception and then store it.
            sizeIOE = new MultiActionResultTooLarge("Max size exceeded"
                + " CellSize: " + context.getResponseCellSize()
                + " BlockSize: " + context.getResponseBlockSize());

            // Only report the exception once since there's only one request that
            // caused the exception. Otherwise this number will dominate the exceptions count.
            rpcServer.getMetrics().exception(sizeIOE);
          }

          // Now that there's an exception is known to be created
          // use it for the response.
          //
          // This will create a copy in the builder.
          hasResultOrException = true;
          NameBytesPair pair = ResponseConverter.buildException(sizeIOE);
          resultOrExceptionBuilder.setException(pair);
          context.incrementResponseExceptionSize(pair.getSerializedSize());
          resultOrExceptionBuilder.setIndex(action.getIndex());
          builder.addResultOrException(resultOrExceptionBuilder.build());
          // Still advance the shared cell scanner past this action's cells.
          skipCellsForMutation(action, cellScanner);
          continue;
        }
        if (action.hasGet()) {
          long before = EnvironmentEdgeManager.currentTime();
          try {
            Get get = ProtobufUtil.toGet(action.getGet());
            r = region.get(get);
          } finally {
            if (regionServer.metricsRegionServer != null) {
              regionServer.metricsRegionServer.updateGet(
                  region.getTableDesc().getTableName(),
                  EnvironmentEdgeManager.currentTime() - before);
            }
          }
        } else if (action.hasServiceCall()) {
          hasResultOrException = true;
          try {
            Message result = execServiceOnRegion(region, action.getServiceCall());
            ClientProtos.CoprocessorServiceResult.Builder serviceResultBuilder =
                ClientProtos.CoprocessorServiceResult.newBuilder();
            resultOrExceptionBuilder.setServiceResult(
                serviceResultBuilder.setValue(
                  serviceResultBuilder.getValueBuilder()
                    .setName(result.getClass().getName())
                    .setValue(result.toByteString())));
          } catch (IOException ioe) {
            rpcServer.getMetrics().exception(ioe);
            NameBytesPair pair = ResponseConverter.buildException(ioe);
            resultOrExceptionBuilder.setException(pair);
            // NOTE(review): context was null-checked above but is dereferenced here
            // unconditionally -- confirm this path only runs inside an RPC call.
            context.incrementResponseExceptionSize(pair.getSerializedSize());
          }
        } else if (action.hasMutation()) {
          MutationType type = action.getMutation().getMutateType();
          if (type != MutationType.PUT && type != MutationType.DELETE && mutations != null &&
              !mutations.isEmpty()) {
            // Flush out any Puts or Deletes already collected.
            doBatchOp(builder, region, quota, mutations, cellScanner);
            mutations.clear();
          }
          switch (type) {
          case APPEND:
            r = append(region, quota, action.getMutation(), cellScanner, nonceGroup);
            break;
          case INCREMENT:
            r = increment(region, quota, action.getMutation(), cellScanner,  nonceGroup);
            break;
          case PUT:
          case DELETE:
            // Collect the individual mutations and apply in a batch
            if (mutations == null) {
              mutations = new ArrayList<ClientProtos.Action>(actions.getActionCount());
            }
            mutations.add(action);
            break;
          default:
            throw new DoNotRetryIOException("Unsupported mutate type: " + type.name());
          }
        } else {
          throw new HBaseIOException("Unexpected Action type");
        }
        if (r != null) {
          ClientProtos.Result pbResult = null;
          if (isClientCellBlockSupport()) {
            // Client can read a cell block: ship data cells out-of-band and send
            // only the cell-less Result proto in the response.
            pbResult = ProtobufUtil.toResultNoData(r);
            //  Hard to guess the size here.  Just make a rough guess.
            if (cellsToReturn == null) {
              cellsToReturn = new ArrayList<CellScannable>();
            }
            cellsToReturn.add(r);
          } else {
            pbResult = ProtobufUtil.toResult(r);
          }
          lastBlock = addSize(context, r, lastBlock);
          hasResultOrException = true;
          resultOrExceptionBuilder.setResult(pbResult);
        }
        // Could get to here and there was no result and no exception.  Presumes we added
        // a Put or Delete to the collecting Mutations List for adding later.  In this
        // case the corresponding ResultOrException instance for the Put or Delete will be added
        // down in the doBatchOp method call rather than up here.
      } catch (IOException ie) {
        rpcServer.getMetrics().exception(ie);
        hasResultOrException = true;
        NameBytesPair pair = ResponseConverter.buildException(ie);
        resultOrExceptionBuilder.setException(pair);
        // NOTE(review): same unconditional context dereference as above -- verify
        // context cannot be null when an action throws.
        context.incrementResponseExceptionSize(pair.getSerializedSize());
      }
      if (hasResultOrException) {
        // Propagate index.
        resultOrExceptionBuilder.setIndex(action.getIndex());
        builder.addResultOrException(resultOrExceptionBuilder.build());
      }
    }
    // Finish up any outstanding mutations
    if (mutations != null && !mutations.isEmpty()) {
      doBatchOp(builder, region, quota, mutations, cellScanner);
    }
    return cellsToReturn;
  }
898 
899   private void checkCellSizeLimit(final Region region, final Mutation m) throws IOException {
900     if (!(region instanceof HRegion)) {
901       return;
902     }
903     HRegion r = (HRegion)region;
904     if (r.maxCellSize > 0) {
905       CellScanner cells = m.cellScanner();
906       while (cells.advance()) {
907         int size = CellUtil.estimatedSerializedSizeOf(cells.current());
908         if (size > r.maxCellSize) {
909           String msg = "Cell with size " + size + " exceeds limit of " + r.maxCellSize + " bytes";
910           if (LOG.isDebugEnabled()) {
911             LOG.debug(msg);
912           }
913           throw new DoNotRetryIOException(msg);
914         }
915       }
916     }
917   }
918 
919   /**
920    * Execute a list of Put/Delete mutations.
921    *
922    * @param builder
923    * @param region
924    * @param mutations
925    */
926   private void doBatchOp(final RegionActionResult.Builder builder, final Region region,
927       final OperationQuota quota,
928       final List<ClientProtos.Action> mutations, final CellScanner cells) {
929     Mutation[] mArray = new Mutation[mutations.size()];
930     long before = EnvironmentEdgeManager.currentTime();
931     boolean batchContainsPuts = false, batchContainsDelete = false;
932     try {
933       /** HBASE-17924
934        * mutationActionMap is a map to map the relation between mutations and actions
935        * since mutation array may have been reoredered.In order to return the right
936        * result or exception to the corresponding actions, We need to know which action
937        * is the mutation belong to. We can't sort ClientProtos.Action array, since they
938        * are bonded to cellscanners.
939        */
940       Map<Mutation, ClientProtos.Action> mutationActionMap = new HashMap<Mutation, ClientProtos.Action>();
941       int i = 0;
942       for (ClientProtos.Action action: mutations) {
943         MutationProto m = action.getMutation();
944         Mutation mutation;
945         if (m.getMutateType() == MutationType.PUT) {
946           mutation = ProtobufUtil.toPut(m, cells);
947           batchContainsPuts = true;
948         } else {
949           mutation = ProtobufUtil.toDelete(m, cells);
950           batchContainsDelete = true;
951         }
952         mutationActionMap.put(mutation, action);
953         mArray[i++] = mutation;
954         checkCellSizeLimit(region, mutation);
955         quota.addMutation(mutation);
956       }
957 
958       if (!region.getRegionInfo().isMetaTable()) {
959         regionServer.cacheFlusher.reclaimMemStoreMemory();
960       }
961 
962       // HBASE-17924
963       // sort to improve lock efficiency
964       Arrays.sort(mArray);
965 
966       OperationStatus[] codes = region.batchMutate(mArray, HConstants.NO_NONCE,
967         HConstants.NO_NONCE);
968       for (i = 0; i < codes.length; i++) {
969         Mutation currentMutation = mArray[i];
970         ClientProtos.Action currentAction = mutationActionMap.get(currentMutation);
971         int index = currentAction.getIndex();
972         Exception e = null;
973         switch (codes[i].getOperationStatusCode()) {
974           case BAD_FAMILY:
975             e = new NoSuchColumnFamilyException(codes[i].getExceptionMsg());
976             builder.addResultOrException(getResultOrException(e, index));
977             break;
978 
979           case SANITY_CHECK_FAILURE:
980             e = new FailedSanityCheckException(codes[i].getExceptionMsg());
981             builder.addResultOrException(getResultOrException(e, index));
982             break;
983 
984           default:
985             e = new DoNotRetryIOException(codes[i].getExceptionMsg());
986             builder.addResultOrException(getResultOrException(e, index));
987             break;
988 
989           case SUCCESS:
990             builder.addResultOrException(getResultOrException(
991               ClientProtos.Result.getDefaultInstance(), index));
992             break;
993         }
994       }
995     } catch (IOException ie) {
996       int processedMutationIndex = 0;
997       for (Action mutation : mutations) {
998         // The non-null mArray[i] means the cell scanner has been read.
999         if (mArray[processedMutationIndex++] == null) {
1000           skipCellsForMutation(mutation, cells);
1001         }
1002         builder.addResultOrException(getResultOrException(ie, mutation.getIndex()));
1003       }
1004     }
1005     if (regionServer.metricsRegionServer != null) {
1006       long after = EnvironmentEdgeManager.currentTime();
1007       if (batchContainsPuts) {
1008         regionServer.metricsRegionServer.updatePutBatch(
1009             region.getTableDesc().getTableName(), after - before);
1010       }
1011       if (batchContainsDelete) {
1012         regionServer.metricsRegionServer.updateDeleteBatch(
1013             region.getTableDesc().getTableName(), after - before);
1014       }
1015     }
1016   }
1017 
  /**
   * Execute a list of Put/Delete mutations. The function returns OperationStatus instead of
   * constructing MultiResponse to save a possible loop if caller doesn't need MultiResponse.
   * Meta-family WAL edits (compaction/flush/region-event/bulk-load markers) are replayed
   * here directly and removed from the list before the remaining data edits are batched.
   * @param region region the replayed mutations are applied to
   * @param mutations replayed WAL entries; entries carrying only meta edits are removed
   * @param replaySeqId sequence id the replayed edits are applied under
   * @return an array of OperationStatus which internally contains the OperationStatusCode and the
   *         exceptionMessage if any
   * @throws IOException
   */
  private OperationStatus [] doReplayBatchOp(final Region region,
      final List<WALSplitter.MutationReplay> mutations, long replaySeqId) throws IOException {
    long before = EnvironmentEdgeManager.currentTime();
    boolean batchContainsPuts = false, batchContainsDelete = false;
    try {
      // Explicit iterator so meta-only entries can be removed mid-iteration below.
      for (Iterator<WALSplitter.MutationReplay> it = mutations.iterator(); it.hasNext();) {
        WALSplitter.MutationReplay m = it.next();

        if (m.type == MutationType.PUT) {
          batchContainsPuts = true;
        } else {
          batchContainsDelete = true;
        }

        NavigableMap<byte[], List<Cell>> map = m.mutation.getFamilyCellMap();
        List<Cell> metaCells = map.get(WALEdit.METAFAMILY);
        if (metaCells != null && !metaCells.isEmpty()) {
          // This entry carries marker cells rather than data: dispatch each marker
          // to the matching replay handler, then drop the entry from the batch.
          for (Cell metaCell : metaCells) {
            CompactionDescriptor compactionDesc = WALEdit.getCompaction(metaCell);
            boolean isDefaultReplica = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
            HRegion hRegion = (HRegion)region;
            if (compactionDesc != null) {
              // replay the compaction. Remove the files from stores only if we are the primary
              // region replica (thus own the files)
              hRegion.replayWALCompactionMarker(compactionDesc, !isDefaultReplica, isDefaultReplica,
                replaySeqId);
              continue;
            }
            FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(metaCell);
            if (flushDesc != null && !isDefaultReplica) {
              // Flush markers are only replayed on non-primary replicas.
              hRegion.replayWALFlushMarker(flushDesc, replaySeqId);
              continue;
            }
            RegionEventDescriptor regionEvent = WALEdit.getRegionEventDescriptor(metaCell);
            if (regionEvent != null && !isDefaultReplica) {
              // Region open/close events are only replayed on non-primary replicas.
              hRegion.replayWALRegionEventMarker(regionEvent);
              continue;
            }
            BulkLoadDescriptor bulkLoadEvent = WALEdit.getBulkLoadDescriptor(metaCell);
            if (bulkLoadEvent != null) {
              hRegion.replayWALBulkLoadEventMarker(bulkLoadEvent);
              continue;
            }
          }
          it.remove();
        }
      }
      requestCount.increment();
      if (!region.getRegionInfo().isMetaTable()) {
        regionServer.cacheFlusher.reclaimMemStoreMemory();
      }
      return region.batchReplay(mutations.toArray(
        new WALSplitter.MutationReplay[mutations.size()]), replaySeqId);
    } finally {
      // Update batch metrics whether or not the replay succeeded.
      if (regionServer.metricsRegionServer != null) {
        long after = EnvironmentEdgeManager.currentTime();
        if (batchContainsPuts) {
          regionServer.metricsRegionServer.updatePutBatch(
              region.getTableDesc().getTableName(), after - before);
        }
        if (batchContainsDelete) {
          regionServer.metricsRegionServer.updateDeleteBatch(
              region.getTableDesc().getTableName(), after - before);
        }
      }
    }
  }
1095 
1096   private void closeAllScanners() {
1097     // Close any outstanding scanners. Means they'll get an UnknownScanner
1098     // exception next time they come in.
1099     for (Map.Entry<String, RegionScannerHolder> e : scanners.entrySet()) {
1100       try {
1101         e.getValue().s.close();
1102       } catch (IOException ioe) {
1103         LOG.warn("Closing scanner " + e.getKey(), ioe);
1104       }
1105     }
1106   }
1107 
1108   // Exposed for testing
1109   static interface LogDelegate {
1110     void logBatchWarning(String firstRegionName, int sum, int rowSizeWarnThreshold);
1111   }
1112 
1113   private static LogDelegate DEFAULT_LOG_DELEGATE = new LogDelegate() {
1114     @Override
1115     public void logBatchWarning(String firstRegionName, int sum, int rowSizeWarnThreshold) {
1116       if (LOG.isWarnEnabled()) {
1117         LOG.warn("Large batch operation detected (greater than " + rowSizeWarnThreshold
1118             + ") (HBASE-18023)." + " Requested Number of Rows: " + sum + " Client: "
1119             + RpcServer.getRequestUserName() + "/" + RpcServer.getRemoteAddress()
1120             + " first region in multi=" + firstRegionName);
1121       }
1122     }
1123   };
1124 
  /** Delegate used to emit large-batch warnings; replaced by tests to capture them. */
  private final LogDelegate ld;

  /** Production constructor: uses the default, LOG-backed warning delegate. */
  public RSRpcServices(HRegionServer rs) throws IOException {
    this(rs, DEFAULT_LOG_DELEGATE);
  }
1130 
  /**
   * Build the RPC services for a region server (or master, via subclass): read the
   * server's configuration, construct and bind the RpcServer, and cache the timeout
   * and size limits used by scan/mutate handling.
   *
   * @param rs the hosting server whose configuration drives all settings here
   * @param ld delegate for large-batch warnings (tests pass a capturing stub)
   * @throws IOException if the listener port cannot be bound or resolved
   */
  // Directly invoked only for testing
  RSRpcServices(HRegionServer rs, LogDelegate ld) throws IOException {
    this.ld = ld;
    regionServer = rs;
    rowSizeWarnThreshold = rs.conf.getInt(
      HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT);
    RpcSchedulerFactory rpcSchedulerFactory;
    rejectRowsWithSizeOverThreshold = rs.conf
      .getBoolean(REJECT_BATCH_ROWS_OVER_THRESHOLD, DEFAULT_REJECT_BATCH_ROWS_OVER_THRESHOLD);

    try {
      // Scheduler factory is pluggable via configuration; instantiated reflectively.
      Class<?> rpcSchedulerFactoryClass = rs.conf.getClass(
          REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
          SimpleRpcSchedulerFactory.class);
      rpcSchedulerFactory = (RpcSchedulerFactory)
          rpcSchedulerFactoryClass.getDeclaredConstructor().newInstance();
    } catch (Exception e) {
      throw new IllegalArgumentException(e);
    }
    // Server to handle client requests.
    InetSocketAddress initialIsa;
    InetSocketAddress bindAddress;
    if(this instanceof MasterRpcServices) {
      String hostname = getHostname(rs.conf, true);
      int port = rs.conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
      // Creation of a HSA will force a resolve.
      initialIsa = new InetSocketAddress(hostname, port);
      bindAddress = new InetSocketAddress(rs.conf.get("hbase.master.ipc.address", hostname), port);
    } else {
      String hostname = getHostname(rs.conf, false);
      int port = rs.conf.getInt(HConstants.REGIONSERVER_PORT,
        HConstants.DEFAULT_REGIONSERVER_PORT);
      // Creation of a HSA will force a resolve.
      initialIsa = new InetSocketAddress(hostname, port);
      bindAddress = new InetSocketAddress(
        rs.conf.get("hbase.regionserver.ipc.address", hostname), port);
    }
    if (initialIsa.getAddress() == null) {
      throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }
    priority = createPriority();
    String name = rs.getProcessName() + "/" + initialIsa.toString();
    // Set how many times to retry talking to another server over HConnection.
    ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG);
    try {
      rpcServer = new RpcServer(rs, name, getServices(),
          bindAddress, // use final bindAddress for this server.
          rs.conf,
          rpcSchedulerFactory.create(rs.conf, this, rs));
      rpcServer.setRsRpcServices(this);
    } catch (BindException be) {
      String configName = (this instanceof MasterRpcServices) ? HConstants.MASTER_PORT :
          HConstants.REGIONSERVER_PORT;
      throw new IOException(be.getMessage() + ". To switch ports use the '" + configName +
          "' configuration property.", be.getCause() != null ? be.getCause() : be);
    }

    if (!(rs instanceof HMaster)) {
      rpcServer.setNamedQueueRecorder(rs.getNamedQueueRecorder());
    }
    scannerLeaseTimeoutPeriod = rs.conf.getInt(
      HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
      HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
    maxScannerResultSize = rs.conf.getLong(
      HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY,
      HConstants.DEFAULT_HBASE_SERVER_SCANNER_MAX_RESULT_SIZE);
    rpcTimeout = rs.conf.getInt(
      HConstants.HBASE_RPC_TIMEOUT_KEY,
      HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
    minimumScanTimeLimitDelta = rs.conf.getLong(
      REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA,
      DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA);

    InetSocketAddress address = rpcServer.getListenerAddress();
    if (address == null) {
      throw new IOException("Listener channel is closed");
    }
    // Set our address, however we need the final port that was given to rpcServer
    isa = new InetSocketAddress(initialIsa.getHostName(), address.getPort());
    rpcServer.setErrorHandler(this);
    rs.setName(name);

    // Scanners closed this recently are remembered so late clients get a clean miss.
    closedScanners = CacheBuilder.newBuilder()
        .expireAfterAccess(scannerLeaseTimeoutPeriod, TimeUnit.MILLISECONDS).build();
  }
1216 
1217   @Override
1218   public void onConfigurationChange(Configuration newConf) {
1219     if (rpcServer instanceof ConfigurationObserver) {
1220       ((ConfigurationObserver)rpcServer).onConfigurationChange(newConf);
1221     }
1222   }
1223 
  /**
   * Build the function used to compute request dispatch priority. Overridable so
   * subclasses (e.g. master RPC services) can supply their own priority scheme.
   */
  protected PriorityFunction createPriority() {
    return new AnnotationReadingPriorityFunction(this);
  }
1227 
1228   protected void requirePermission(String request, Permission.Action perm) throws IOException {
1229     if (accessChecker != null) {
1230       accessChecker.requirePermission(RpcServer.getRequestUser(), request, perm);
1231     }
1232   }
1233 
1234 
1235   public static String getHostname(Configuration conf, boolean isMaster)
1236       throws UnknownHostException {
1237     String hostname = conf.get(isMaster? HRegionServer.MASTER_HOSTNAME_KEY :
1238       HRegionServer.RS_HOSTNAME_KEY);
1239     if (hostname == null || hostname.isEmpty()) {
1240       String masterOrRS = isMaster ? "master" : "regionserver";
1241       return Strings.domainNamePointerToHostName(DNS.getDefaultHost(
1242         conf.get("hbase." + masterOrRS + ".dns.interface", "default"),
1243         conf.get("hbase." + masterOrRS + ".dns.nameserver", "default")));
1244     } else {
1245       LOG.info("hostname is configured to be " + hostname);
1246       return hostname;
1247     }
1248   }
1249 
1250   public
1251   RegionScanner getScanner(long scannerId) {
1252     String scannerIdString = Long.toString(scannerId);
1253     RegionScannerHolder scannerHolder = scanners.get(scannerIdString);
1254     if (scannerHolder != null) {
1255       return scannerHolder.s;
1256     }
1257     return null;
1258   }
1259 
1260   public String getScanDetailsWithId(long scannerId) {
1261     RegionScanner scanner = getScanner(scannerId);
1262     if (scanner == null) {
1263       return null;
1264     }
1265     StringBuilder builder = new StringBuilder();
1266     builder.append("table: ").append(scanner.getRegionInfo().getTable().getNameAsString());
1267     builder.append(" region: ").append(scanner.getRegionInfo().getRegionNameAsString());
1268     return builder.toString();
1269   }
1270 
1271   public String getScanDetailsWithRequest(ScanRequest request) {
1272     try {
1273       if (!request.hasRegion()) {
1274         return null;
1275       }
1276       Region region = getRegion(request.getRegion());
1277       StringBuilder builder = new StringBuilder();
1278       builder.append("table: ").append(region.getRegionInfo().getTable().getNameAsString());
1279       builder.append(" region: ").append(region.getRegionInfo().getRegionNameAsString());
1280       return builder.toString();
1281     } catch (IOException ignored) {
1282       return null;
1283     }
1284   }
1285 
1286   /**
1287    * Get the vtime associated with the scanner.
1288    * Currently the vtime is the number of "next" calls.
1289    */
1290   long getScannerVirtualTime(long scannerId) {
1291     String scannerIdString = Long.toString(scannerId);
1292     RegionScannerHolder scannerHolder = scanners.get(scannerIdString);
1293     if (scannerHolder != null) {
1294       return scannerHolder.getNextCallSeq();
1295     }
1296     return 0L;
1297   }
1298 
1299   /**
1300    * Method to account for the size of retained cells and retained data blocks.
1301    * @return an object that represents the last referenced block from this response.
1302    */
1303   Object addSize(RpcCallContext context, Result r, Object lastBlock) {
1304     if (context != null && r != null && !r.isEmpty()) {
1305       for (Cell c : r.rawCells()) {
1306         context.incrementResponseCellSize(CellUtil.estimatedHeapSizeOf(c));
1307         // We're using the last block being the same as the current block as
1308         // a proxy for pointing to a new block. This won't be exact.
1309         // If there are multiple gets that bounce back and forth
1310         // Then it's possible that this will over count the size of
1311         // referenced blocks. However it's better to over count and
1312         // use two RPC's than to OOME the RegionServer.
1313         byte[] valueArray = c.getValueArray();
1314         if (valueArray != lastBlock) {
1315           context.incrementResponseBlockSize(valueArray.length);
1316           lastBlock = valueArray;
1317         }
1318       }
1319     }
1320     return lastBlock;
1321   }
1322 
1323   private RegionScannerHolder addScanner(String scannerName, RegionScanner s, Region r,
1324       boolean needCursor) throws LeaseStillHeldException {
1325     regionServer.leases.createLease(scannerName, this.scannerLeaseTimeoutPeriod,
1326       new ScannerListener(scannerName));
1327     RegionScannerHolder rsh = new RegionScannerHolder(scannerName, s, r, needCursor);
1328     RegionScannerHolder existing = scanners.putIfAbsent(scannerName, rsh);
1329     assert existing == null : "scannerId must be unique within regionserver's whole lifecycle!";
1330     return rsh;
1331   }
1332 
1333   /**
1334    * Find the HRegion based on a region specifier
1335    *
1336    * @param regionSpecifier the region specifier
1337    * @return the corresponding region
1338    * @throws IOException if the specifier is not null,
1339    *    but failed to find the region
1340    */
1341   public Region getRegion(
1342       final RegionSpecifier regionSpecifier) throws IOException {
1343     ByteString value = regionSpecifier.getValue();
1344     RegionSpecifierType type = regionSpecifier.getType();
1345     switch (type) {
1346       case REGION_NAME:
1347         byte[] regionName = value.toByteArray();
1348         String encodedRegionName = HRegionInfo.encodeRegionName(regionName);
1349         return regionServer.getRegionByEncodedName(regionName, encodedRegionName);
1350       case ENCODED_REGION_NAME:
1351         return regionServer.getRegionByEncodedName(value.toStringUtf8());
1352       default:
1353         throw new DoNotRetryIOException(
1354           "Unsupported region specifier type: " + type);
1355     }
1356   }
1357 
  /** @return the function used to compute the priority of incoming requests */
  public PriorityFunction getPriority() {
    return priority;
  }
1361 
  /** @return the region server's configuration */
  public Configuration getConfiguration() {
    return regionServer.getConfiguration();
  }
1365 
  /** @return the region server's quota manager */
  private RegionServerQuotaManager getQuotaManager() {
    return regionServer.getRegionServerQuotaManager();
  }
1369 
1370   void start(ZooKeeperWatcher zkWatcher) {
1371     if (AccessChecker.isAuthorizationSupported(getConfiguration())) {
1372       accessChecker = new AccessChecker(getConfiguration(), zkWatcher);
1373     }
1374     this.scannerIdGenerator = new ScannerIdGenerator(this.regionServer.serverName);
1375     rpcServer.start();
1376   }
1377 
  /**
   * Shut down the RPC services: stop the access checker if one was created, close any
   * scanners still open, then stop the RPC server itself.
   */
  void stop() {
    if (accessChecker != null) {
      accessChecker.stop();
    }
    // Release scanner leases/resources before the RPC server goes away.
    closeAllScanners();
    rpcServer.stop();
  }
1385 
1386   /**
1387    * Called to verify that this server is up and running.
1388    */
1389   // TODO : Rename this and HMaster#checkInitialized to isRunning() (or a better name).
1390   protected void checkOpen() throws IOException {
1391     if (regionServer.isAborted()) {
1392       throw new RegionServerAbortedException("Server " + regionServer.serverName + " aborting");
1393     }
1394     if (regionServer.isStopped()) {
1395       throw new RegionServerStoppedException("Server " + regionServer.serverName + " stopping");
1396     }
1397     if (!regionServer.fsOk) {
1398       throw new RegionServerStoppedException("File system not available");
1399     }
1400     if (!regionServer.isOnline()) {
1401       throw new ServerNotRunningYetException("Server " + regionServer.serverName
1402           + " is not running yet");
1403     }
1404   }
1405 
1406   /**
1407    * @return list of blocking services and their security info classes that this server supports
1408    */
1409   protected List<BlockingServiceAndInterface> getServices() {
1410     List<BlockingServiceAndInterface> bssi = new ArrayList<BlockingServiceAndInterface>(2);
1411     bssi.add(new BlockingServiceAndInterface(
1412       ClientService.newReflectiveBlockingService(this),
1413       ClientService.BlockingInterface.class));
1414     bssi.add(new BlockingServiceAndInterface(
1415       AdminService.newReflectiveBlockingService(this),
1416       AdminService.BlockingInterface.class));
1417     return bssi;
1418   }
1419 
  /** @return the socket address this RPC server is bound to */
  public InetSocketAddress getSocketAddress() {
    return isa;
  }
1423 
  // Delegates request-priority computation to the configured PriorityFunction.
  @Override
  public int getPriority(RequestHeader header, Message param, User user) {
    return priority.getPriority(header, param, user);
  }
1428 
  // Delegates deadline computation to the configured PriorityFunction.
  @Override
  public long getDeadline(RequestHeader header, Message param) {
    return priority.getDeadline(header, param);
  }
1433 
  /*
   * Check if an OOME and, if so, abort immediately to avoid creating more objects.
   *
   * @param e the throwable to inspect
   *
   * @return True if we OOME'd and are aborting.
   */
  @Override
  public boolean checkOOME(final Throwable e) {
    // Delegates to the static helper so other callers can share the same OOME handling.
    return exitIfOOME(e);
  }
1445 
1446   public static boolean exitIfOOME(final Throwable e ){
1447     boolean stop = false;
1448     try {
1449       if (e instanceof OutOfMemoryError
1450           || (e.getCause() != null && e.getCause() instanceof OutOfMemoryError)
1451           || (e.getMessage() != null && e.getMessage().contains(
1452               "java.lang.OutOfMemoryError"))) {
1453         stop = true;
1454         LOG.fatal("Run out of memory; " + RSRpcServices.class.getSimpleName()
1455           + " will abort itself immediately", e);
1456       }
1457     } finally {
1458       if (stop) {
1459         Runtime.getRuntime().halt(1);
1460       }
1461     }
1462     return stop;
1463   }
1464 
  /**
   * Close a region on the region server.
   *
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException
   */
  @Override
  @QosPriority(priority=HConstants.ADMIN_QOS)
  public CloseRegionResponse closeRegion(final RpcController controller,
      final CloseRegionRequest request) throws ServiceException {
    // The destination server is only present when the close is part of a region move.
    final ServerName sn = (request.hasDestinationServer() ?
      ProtobufUtil.toServerName(request.getDestinationServer()) : null);

    try {
      checkOpen();
      if (request.hasServerStartCode()) {
        // check that we are the same server that this RPC is intended for.
        long serverStartCode = request.getServerStartCode();
        if (regionServer.serverName.getStartcode() !=  serverStartCode) {
          throw new ServiceException(new DoNotRetryIOException("This RPC was intended for a " +
              "different server with startCode: " + serverStartCode + ", this server is: "
              + regionServer.serverName));
        }
      }
      final String encodedRegionName = ProtobufUtil.getRegionEncodedName(request.getRegion());

      requestCount.increment();
      LOG.info("Close " + encodedRegionName + ", moving to " + sn);
      // Let the coordination implementation extract its close details from the request.
      CloseRegionCoordination.CloseRegionDetails crd = regionServer.getCoordinatedStateManager()
        .getCloseRegionCoordination().parseFromProtoRequest(request);

      boolean closed = regionServer.closeRegion(encodedRegionName, false, crd, sn);
      CloseRegionResponse.Builder builder = CloseRegionResponse.newBuilder().setClosed(closed);
      return builder.build();
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }
1504 
  /**
   * Compact a region on the region server.
   *
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException
   */
  @Override
  @QosPriority(priority=HConstants.ADMIN_QOS)
  public CompactRegionResponse compactRegion(final RpcController controller,
      final CompactRegionRequest request) throws ServiceException {
    try {
      checkOpen();
      requestCount.increment();
      Region region = getRegion(request.getRegion());
      // NOTE(review): no matching closeRegionOperation is visible in this method —
      // confirm the compaction request path is responsible for closing the operation.
      region.startRegionOperation(Operation.COMPACT_REGION);
      LOG.info("Compacting " + region.getRegionInfo().getRegionNameAsString());
      boolean major = false;
      byte [] family = null;
      Store store = null;
      if (request.hasFamily()) {
        // Compaction was requested for a single column family; resolve its store.
        family = request.getFamily().toByteArray();
        store = region.getStore(family);
        if (store == null) {
          throw new ServiceException(new IOException("column family " + Bytes.toString(family)
            + " does not exist in region " + region.getRegionInfo().getRegionNameAsString()));
        }
      }
      if (request.hasMajor()) {
        major = request.getMajor();
      }
      if (major) {
        // Flag the store (or the whole region) so the next compaction is promoted to major.
        if (family != null) {
          store.triggerMajorCompaction();
        } else {
          region.triggerMajorCompaction();
        }
      }

      String familyLogMsg = (family != null)?" for column family: " + Bytes.toString(family):"";
      if (LOG.isTraceEnabled()) {
        LOG.trace("User-triggered compaction requested for region "
          + region.getRegionInfo().getRegionNameAsString() + familyLogMsg);
      }
      String log = "User-triggered " + (major ? "major " : "") + "compaction" + familyLogMsg;
      // Queue the compaction asynchronously at user priority; the RPC returns immediately.
      if(family != null) {
        regionServer.compactSplitThread.requestCompaction(region, store, log,
          Store.PRIORITY_USER, null, RpcServer.getRequestUser());
      } else {
        regionServer.compactSplitThread.requestCompaction(region, log,
          Store.PRIORITY_USER, null, RpcServer.getRequestUser());
      }
      return CompactRegionResponse.newBuilder().build();
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }
1562 
1563   @Override
1564   public CompactionSwitchResponse compactionSwitch(RpcController controller,
1565       CompactionSwitchRequest request) throws ServiceException {
1566     try {
1567       checkOpen();
1568       requestCount.increment();
1569       boolean prevState = regionServer.compactSplitThread.isCompactionsEnabled();
1570       CompactionSwitchResponse response =
1571           CompactionSwitchResponse.newBuilder().setPrevState(prevState).build();
1572       if (prevState == request.getEnabled()) {
1573         // passed in requested state is same as current state. No action required
1574         return response;
1575       }
1576       regionServer.compactSplitThread.switchCompaction(request.getEnabled());
1577       return response;
1578     } catch (IOException ie) {
1579       throw new ServiceException(ie);
1580     }
1581   }
1582 
  /**
   * Flush a region on the region server.
   *
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException
   */
  @Override
  @QosPriority(priority=HConstants.ADMIN_QOS)
  public FlushRegionResponse flushRegion(final RpcController controller,
      final FlushRegionRequest request) throws ServiceException {
    try {
      checkOpen();
      requestCount.increment();
      Region region = getRegion(request.getRegion());
      LOG.info("Flushing " + region.getRegionInfo().getRegionNameAsString());
      boolean shouldFlush = true;
      if (request.hasIfOlderThanTs()) {
        // Skip the flush if every store has already flushed since the given timestamp.
        shouldFlush = region.getEarliestFlushTimeForAllStores() < request.getIfOlderThanTs();
      }
      FlushRegionResponse.Builder builder = FlushRegionResponse.newBuilder();
      if (shouldFlush) {
        boolean writeFlushWalMarker =  request.hasWriteFlushWalMarker() ?
            request.getWriteFlushWalMarker() : false;
        // Go behind the curtain so we can manage writing of the flush WAL marker
        HRegion.FlushResultImpl flushResult = (HRegion.FlushResultImpl)
            ((HRegion)region).flushcache(true, writeFlushWalMarker);
        boolean compactionNeeded = flushResult.isCompactionNeeded();
        if (compactionNeeded) {
          regionServer.compactSplitThread.requestSystemCompaction(region,
            "Compaction through user triggered flush");
        }
        builder.setFlushed(flushResult.isFlushSucceeded());
        builder.setWroteFlushWalMarker(flushResult.wroteFlushWalMarker);
      }
      builder.setLastFlushTime(region.getEarliestFlushTimeForAllStores());
      return builder.build();
    } catch (DroppedSnapshotException ex) {
      // Cache flush can fail in a few places. If it fails in a critical
      // section, we get a DroppedSnapshotException and a replay of wal
      // is required. Currently the only way to do this is a restart of
      // the server.
      regionServer.abort("Replay of WAL required. Forcing server shutdown", ex);
      throw new ServiceException(ex);
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }
1631 
1632   @Override
1633   @QosPriority(priority=HConstants.ADMIN_QOS)
1634   public GetOnlineRegionResponse getOnlineRegion(final RpcController controller,
1635       final GetOnlineRegionRequest request) throws ServiceException {
1636     try {
1637       checkOpen();
1638       requestCount.increment();
1639       Map<String, Region> onlineRegions = regionServer.onlineRegions;
1640       List<HRegionInfo> list = new ArrayList<HRegionInfo>(onlineRegions.size());
1641       for (Region region: onlineRegions.values()) {
1642         list.add(region.getRegionInfo());
1643       }
1644       Collections.sort(list);
1645       return ResponseConverter.buildGetOnlineRegionResponse(list);
1646     } catch (IOException ie) {
1647       throw new ServiceException(ie);
1648     }
1649   }
1650 
1651   @Override
1652   @QosPriority(priority=HConstants.ADMIN_QOS)
1653   public GetRegionInfoResponse getRegionInfo(final RpcController controller,
1654       final GetRegionInfoRequest request) throws ServiceException {
1655     try {
1656       checkOpen();
1657       requestCount.increment();
1658       Region region = getRegion(request.getRegion());
1659       HRegionInfo info = region.getRegionInfo();
1660       GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
1661       builder.setRegionInfo(HRegionInfo.convert(info));
1662       if (request.hasCompactionState() && request.getCompactionState()) {
1663         builder.setCompactionState(region.getCompactionState());
1664       }
1665       builder.setIsRecovering(region.isRecovering());
1666       return builder.build();
1667     } catch (IOException ie) {
1668       throw new ServiceException(ie);
1669     }
1670   }
1671 
1672   /**
1673    * Get some information of the region server.
1674    *
1675    * @param controller the RPC controller
1676    * @param request the request
1677    * @throws ServiceException
1678    */
1679   @Override
1680   @QosPriority(priority=HConstants.ADMIN_QOS)
1681   public GetServerInfoResponse getServerInfo(final RpcController controller,
1682       final GetServerInfoRequest request) throws ServiceException {
1683     try {
1684       checkOpen();
1685     } catch (IOException ie) {
1686       throw new ServiceException(ie);
1687     }
1688     requestCount.increment();
1689     int infoPort = regionServer.infoServer != null ? regionServer.infoServer.getPort() : -1;
1690     return ResponseConverter.buildGetServerInfoResponse(regionServer.serverName, infoPort);
1691   }
1692 
1693   @Override
1694   @QosPriority(priority=HConstants.ADMIN_QOS)
1695   public GetStoreFileResponse getStoreFile(final RpcController controller,
1696       final GetStoreFileRequest request) throws ServiceException {
1697     try {
1698       checkOpen();
1699       Region region = getRegion(request.getRegion());
1700       requestCount.increment();
1701       Set<byte[]> columnFamilies;
1702       if (request.getFamilyCount() == 0) {
1703         columnFamilies = region.getTableDesc().getFamiliesKeys();
1704       } else {
1705         columnFamilies = new TreeSet<byte[]>(Bytes.BYTES_RAWCOMPARATOR);
1706         for (ByteString cf: request.getFamilyList()) {
1707           columnFamilies.add(cf.toByteArray());
1708         }
1709       }
1710       int nCF = columnFamilies.size();
1711       List<String>  fileList = region.getStoreFileList(
1712         columnFamilies.toArray(new byte[nCF][]));
1713       GetStoreFileResponse.Builder builder = GetStoreFileResponse.newBuilder();
1714       builder.addAllStoreFile(fileList);
1715       return builder.build();
1716     } catch (IOException ie) {
1717       throw new ServiceException(ie);
1718     }
1719   }
1720 
  /**
   * Merge regions on the region server.
   *
   * @param controller the RPC controller
   * @param request the request
   * @return merge regions response
   * @throws ServiceException
   */
  @Override
  @QosPriority(priority = HConstants.ADMIN_QOS)
  public MergeRegionsResponse mergeRegions(final RpcController controller,
      final MergeRegionsRequest request) throws ServiceException {
    try {
      checkOpen();
      requestCount.increment();
      Region regionA = getRegion(request.getRegionA());
      Region regionB = getRegion(request.getRegionB());
      boolean forcible = request.getForcible();
      long masterSystemTime = request.hasMasterSystemTime() ? request.getMasterSystemTime() : -1;
      regionA.startRegionOperation(Operation.MERGE_REGION);
      regionB.startRegionOperation(Operation.MERGE_REGION);
      // Only default (primary) replicas may be merged.
      if (regionA.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID ||
          regionB.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
        throw new ServiceException(new MergeRegionException("Can't merge non-default replicas"));
      }
      LOG.info("Receiving merging request for  " + regionA + ", " + regionB
          + ",forcible=" + forcible);
      // Flush both regions before queueing the merge.
      regionA.flush(true);
      regionB.flush(true);
      // The merge itself runs asynchronously on the compact/split thread.
      regionServer.compactSplitThread.requestRegionsMerge(regionA, regionB, forcible,
          masterSystemTime, RpcServer.getRequestUser());
      return MergeRegionsResponse.newBuilder().build();
    } catch (DroppedSnapshotException ex) {
      // A flush failed in a critical section; WAL replay is required, so abort the server.
      regionServer.abort("Replay of WAL required. Forcing server shutdown", ex);
      throw new ServiceException(ex);
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }
1760 
  /**
   * Open asynchronously a region or a set of regions on the region server.
   *
   * The opening is coordinated by ZooKeeper, and this method requires the znode to be created
   *  before being called. As a consequence, this method should be called only from the master.
   * <p>
   * Different managed states for the region are:
   * </p><ul>
   *  <li>region not opened: the region opening will start asynchronously.</li>
   *  <li>a close is already in progress: this is considered as an error.</li>
   *  <li>an open is already in progress: this new open request will be ignored. This is important
   *  because the Master can do multiple requests if it crashes.</li>
   *  <li>the region is already opened:  this new open request will be ignored.</li>
   *  </ul>
   * <p>
   * Bulk assign: If there are more than 1 region to open, it will be considered as a bulk assign.
   * For a single region opening, errors are sent through a ServiceException. For bulk assign,
   * errors are put in the response as FAILED_OPENING.
   * </p>
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException
   */
  @Override
  @QosPriority(priority=HConstants.ADMIN_QOS)
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
    value="JLM_JSR166_UTILCONCURRENT_MONITORENTER",
    justification="We double up use of an atomic both as monitor and condition variable")
  public OpenRegionResponse openRegion(final RpcController controller,
      final OpenRegionRequest request) throws ServiceException {
    requestCount.increment();
    if (request.hasServerStartCode()) {
      // check that we are the same server that this RPC is intended for.
      long serverStartCode = request.getServerStartCode();
      if (regionServer.serverName.getStartcode() !=  serverStartCode) {
        throw new ServiceException(new DoNotRetryIOException("This RPC was intended for a " +
            "different server with startCode: " + serverStartCode + ", this server is: "
            + regionServer.serverName));
      }
    }

    OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder();
    final int regionCount = request.getOpenInfoCount();
    // Cache table descriptors so a bulk assign resolves each table only once.
    final Map<TableName, HTableDescriptor> htds =
        new HashMap<TableName, HTableDescriptor>(regionCount);
    final boolean isBulkAssign = regionCount > 1;
    try {
      checkOpen();
    } catch (IOException ie) {
      // If this is a single-region open of hbase:meta, give the server a chance to finish
      // coming online before failing the request; anything else fails immediately.
      TableName tableName = null;
      if (regionCount == 1) {
        RegionInfo ri = request.getOpenInfo(0).getRegion();
        if (ri != null) {
          tableName = ProtobufUtil.toTableName(ri.getTableName());
        }
      }
      if (!TableName.META_TABLE_NAME.equals(tableName)) {
        throw new ServiceException(ie);
      }
      // We are assigning meta, wait a little for regionserver to finish initialization.
      int timeout = regionServer.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
        HConstants.DEFAULT_HBASE_RPC_TIMEOUT) >> 2; // Quarter of RPC timeout
      long endTime = System.currentTimeMillis() + timeout;
      synchronized (regionServer.online) {
        try {
          while (System.currentTimeMillis() <= endTime
              && !regionServer.isStopped() && !regionServer.isOnline()) {
            regionServer.online.wait(regionServer.msgInterval);
          }
          checkOpen();
        } catch (InterruptedException t) {
          Thread.currentThread().interrupt();
          throw new ServiceException(t);
        } catch (IOException e) {
          throw new ServiceException(e);
        }
      }
    }

    long masterSystemTime = request.hasMasterSystemTime() ? request.getMasterSystemTime() : -1;

    for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) {
      final HRegionInfo region = HRegionInfo.convert(regionOpenInfo.getRegion());
      OpenRegionCoordination coordination = regionServer.getCoordinatedStateManager().
        getOpenRegionCoordination();
      OpenRegionCoordination.OpenRegionDetails ord =
        coordination.parseFromProtoRequest(regionOpenInfo);

      HTableDescriptor htd;
      try {
        final Region onlineRegion = regionServer.getFromOnlineRegions(region.getEncodedName());
        if (onlineRegion != null) {
          //Check if the region can actually be opened.
          if (onlineRegion.getCoprocessorHost() != null) {
            onlineRegion.getCoprocessorHost().preOpen();
          }
          // See HBASE-5094. Cross check with hbase:meta if still this RS is owning
          // the region.
          Pair<HRegionInfo, ServerName> p = MetaTableAccessor.getRegion(
            regionServer.getConnection(), region.getRegionName());
          if (regionServer.serverName.equals(p.getSecond())) {
            Boolean closing = regionServer.regionsInTransitionInRS.get(region.getEncodedNameAsBytes());
            // Map regionsInTransitionInRSOnly has an entry for a region only if the region
            // is in transition on this RS, so here closing can be null. If not null, it can
            // be true or false. True means the region is opening on this RS; while false
            // means the region is closing. Only return ALREADY_OPENED if not closing (i.e.
            // not in transition any more, or still transition to open.
            if (!Boolean.FALSE.equals(closing)
                && regionServer.getFromOnlineRegions(region.getEncodedName()) != null) {
              LOG.warn("Attempted open of " + region.getEncodedName()
                + " but already online on this server");
              builder.addOpeningState(RegionOpeningState.ALREADY_OPENED);
              continue;
            }
          } else {
            LOG.warn("The region " + region.getEncodedName() + " is online on this server"
              + " but hbase:meta does not have this server - continue opening.");
            regionServer.removeFromOnlineRegions(onlineRegion, null);
          }
        }
        LOG.info("Open " + region.getRegionNameAsString());
        htd = htds.get(region.getTable());
        if (htd == null) {
          htd = regionServer.tableDescriptors.get(region.getTable());
          htds.put(region.getTable(), htd);
        }

        final Boolean previous = regionServer.regionsInTransitionInRS.putIfAbsent(
          region.getEncodedNameAsBytes(), Boolean.TRUE);

        if (Boolean.FALSE.equals(previous)) {
          // There is a close in progress. We need to mark this open as failed in ZK.

          coordination.tryTransitionFromOfflineToFailedOpen(regionServer, region, ord);

          throw new RegionAlreadyInTransitionException("Received OPEN for the region:"
            + region.getRegionNameAsString() + " , which we are already trying to CLOSE ");
        }

        if (Boolean.TRUE.equals(previous)) {
          // An open is in progress. This is supported, but let's log this.
          LOG.info("Receiving OPEN for the region:" +
            region.getRegionNameAsString() + " , which we are already trying to OPEN"
              + " - ignoring this new request for this region.");
        }

        // We are opening this region. If it moves back and forth for whatever reason, we don't
        // want to keep returning the stale moved record while we are opening/if we close again.
        regionServer.removeFromMovedRegions(region.getEncodedName());

        if (previous == null) {
          // check if the region to be opened is marked in recovering state in ZK
          if (ZKSplitLog.isRegionMarkedRecoveringInZK(regionServer.getZooKeeper(),
              region.getEncodedName())) {
            // Check if current region open is for distributedLogReplay. This check is to support
            // rolling restart/upgrade where we want to Master/RS see same configuration
            if (!regionOpenInfo.hasOpenForDistributedLogReplay()
                  || regionOpenInfo.getOpenForDistributedLogReplay()) {
              regionServer.recoveringRegions.put(region.getEncodedName(), null);
            } else {
              // Remove stale recovery region from ZK when we open region not for recovering which
              // could happen when turn distributedLogReplay off from on.
              List<String> tmpRegions = new ArrayList<String>();
              tmpRegions.add(region.getEncodedName());
              ZKSplitLog.deleteRecoveringRegionZNodes(regionServer.getZooKeeper(),
                tmpRegions);
            }
          }
          if (htd == null) {
            throw new IOException("Missing table descriptor for " + region.getEncodedName());
          }
          // If there is no action in progress, we can submit a specific handler.
          // Need to pass the expected version in the constructor.
          if (region.isMetaRegion()) {
            regionServer.service.submit(new OpenMetaHandler(
              regionServer, regionServer, region, htd, masterSystemTime, coordination, ord));
          } else {
            regionServer.updateRegionFavoredNodesMapping(region.getEncodedName(),
              regionOpenInfo.getFavoredNodesList());
            if (htd.getPriority() >= HConstants.ADMIN_QOS || region.getTable().isSystemTable()) {
              regionServer.service.submit(new OpenPriorityRegionHandler(
                regionServer, regionServer, region, htd, masterSystemTime, coordination, ord));
            } else {
              regionServer.service.submit(new OpenRegionHandler(
                regionServer, regionServer, region, htd, masterSystemTime, coordination, ord));
            }
          }
        }

        builder.addOpeningState(RegionOpeningState.OPENED);

      } catch (KeeperException zooKeeperEx) {
        LOG.error("Can't retrieve recovering state from zookeeper", zooKeeperEx);
        throw new ServiceException(zooKeeperEx);
      } catch (IOException ie) {
        LOG.warn("Failed opening region " + region.getRegionNameAsString(), ie);
        // Bulk assign reports per-region failure; a single open fails the whole RPC.
        if (isBulkAssign) {
          builder.addOpeningState(RegionOpeningState.FAILED_OPENING);
        } else {
          throw new ServiceException(ie);
        }
      }
    }
    return builder.build();
  }
1966 
  /**
   * Warmup a region on this server.
   *
   * This method should only be called by Master. It synchronously opens the region and
   * closes the region bringing the most important pages in cache.
   * <p>
   *
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException
   */
  @Override
  public WarmupRegionResponse warmupRegion(final RpcController controller,
      final WarmupRegionRequest request) throws ServiceException {

    RegionInfo regionInfo = request.getRegionInfo();
    final HRegionInfo region = HRegionInfo.convert(regionInfo);
    HTableDescriptor htd;
    WarmupRegionResponse response = WarmupRegionResponse.getDefaultInstance();

    try {
      checkOpen();
      String encodedName = region.getEncodedName();
      byte[] encodedNameBytes = region.getEncodedNameAsBytes();
      final Region onlineRegion = regionServer.getFromOnlineRegions(encodedName);

      // Nothing to warm up if the region is already being served here.
      if (onlineRegion != null) {
        LOG.info("Region already online. Skipping warming up " + region);
        return response;
      }

      if (LOG.isDebugEnabled()) {
        LOG.debug("Warming up Region " + region.getRegionNameAsString());
      }

      htd = regionServer.tableDescriptors.get(region.getTable());

      // Don't warm up a region that is currently opening/closing on this server.
      if (regionServer.getRegionsInTransitionInRS().containsKey(encodedNameBytes)) {
        LOG.info("Region is in transition. Skipping warmup " + region);
        return response;
      }

      HRegion.warmupHRegion(region, htd, regionServer.getWAL(region),
          regionServer.getConfiguration(), regionServer, null);

    } catch (IOException ie) {
      LOG.error("Failed warming up region " + region.getRegionNameAsString(), ie);
      throw new ServiceException(ie);
    }

    return response;
  }
2019 
  /**
   * Replay the given changes when distributedLogReplay WAL edits from a failed RS. The guarantee is
   * that the given mutations will be durable on the receiving RS if this method returns without any
   * exception.
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException wrapping any IOException raised while applying the edits
   */
  @Override
  @QosPriority(priority = HConstants.REPLAY_QOS)
  public ReplicateWALEntryResponse replay(final RpcController controller,
      final ReplicateWALEntryRequest request) throws ServiceException {
    long before = EnvironmentEdgeManager.currentTime();
    // Cells travel out-of-band of the protobuf request via the RPC cell scanner.
    CellScanner cells = ((HBaseRpcController) controller).cellScanner();
    try {
      checkOpen();
      List<WALEntry> entries = request.getEntryList();
      if (entries == null || entries.isEmpty()) {
        // empty input
        return ReplicateWALEntryResponse.newBuilder().build();
      }
      ByteString regionName = entries.get(0).getKey().getEncodedRegionName();
      Region region = regionServer.getRegionByEncodedName(regionName.toStringUtf8());
      RegionCoprocessorHost coprocessorHost =
          ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())
            ? region.getCoprocessorHost()
            : null; // do not invoke coprocessors if this is a secondary region replica
      // Collects the (key, edit) pairs that were NOT bypassed, for postWALRestore below.
      List<Pair<WALKey, WALEdit>> walEntries = new ArrayList<Pair<WALKey, WALEdit>>();

      // Skip adding the edits to WAL if this is a secondary region replica
      boolean isPrimary = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
      Durability durability = isPrimary ? Durability.USE_DEFAULT : Durability.SKIP_WAL;

      for (WALEntry entry : entries) {
        // All entries in a single replay call must target the same region.
        if (!regionName.equals(entry.getKey().getEncodedRegionName())) {
          throw new NotServingRegionException("Replay request contains entries from multiple " +
              "regions. First region:" + regionName.toStringUtf8() + " , other region:"
              + entry.getKey().getEncodedRegionName());
        }
        // Report nonce-bearing operations so duplicate client retries are still
        // detected after the replay (primary replica only).
        if (regionServer.nonceManager != null && isPrimary) {
          long nonceGroup = entry.getKey().hasNonceGroup()
            ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
          long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
          regionServer.nonceManager.reportOperationFromWal(
              nonceGroup,
              nonce,
              entry.getKey().getWriteTime());
        }
        // The pair is only materialized when coprocessor hooks will consume it.
        Pair<WALKey, WALEdit> walEntry = (coprocessorHost == null) ? null :
          new Pair<WALKey, WALEdit>();
        List<WALSplitter.MutationReplay> edits = WALSplitter.getMutationsFromWALEntry(entry,
          cells, walEntry, durability);
        if (coprocessorHost != null) {
          // Start coprocessor replay here. The coprocessor is for each WALEdit instead of a
          // KeyValue.
          if (coprocessorHost.preWALRestore(region.getRegionInfo(), walEntry.getFirst(),
            walEntry.getSecond())) {
            // if bypass this log entry, ignore it ...
            continue;
          }
          walEntries.add(walEntry);
        }
        if(edits!=null && !edits.isEmpty()) {
          // HBASE-17924
          // sort to improve lock efficiency
          Collections.sort(edits);
          long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) ?
            entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber();
          OperationStatus[] result = doReplayBatchOp(region, edits, replaySeqId);
          // check if it's a partial success
          for (int i = 0; result != null && i < result.length; i++) {
            if (result[i] != OperationStatus.SUCCESS) {
              throw new IOException(result[i].getExceptionMsg());
            }
          }
        }
      }

      //sync wal at the end because ASYNC_WAL is used above
      WAL wal = getWAL(region);
      if (wal != null) {
        wal.sync();
      }

      if (coprocessorHost != null) {
        // Post-restore hooks run only for entries that were not bypassed above.
        for (Pair<WALKey, WALEdit> entry : walEntries) {
          coprocessorHost.postWALRestore(region.getRegionInfo(), entry.getFirst(),
            entry.getSecond());
        }
      }
      return ReplicateWALEntryResponse.newBuilder().build();
    } catch (IOException ie) {
      throw new ServiceException(ie);
    } finally {
      // Record replay latency even when the replay failed.
      if (regionServer.metricsRegionServer != null) {
        regionServer.metricsRegionServer.updateReplay(
          EnvironmentEdgeManager.currentTime() - before);
      }
    }
  }
2120 
  /** @return the WAL backing the given region. Package-private. */
  WAL getWAL(Region region) {
    return ((HRegion)region).getWAL();
  }
2124 
2125   /**
2126    * Replicate WAL entries on the region server.
2127    *
2128    * @param controller the RPC controller
2129    * @param request the request
2130    * @throws ServiceException
2131    */
2132   @Override
2133   @QosPriority(priority=HConstants.REPLICATION_QOS)
2134   public ReplicateWALEntryResponse replicateWALEntry(final RpcController controller,
2135       final ReplicateWALEntryRequest request) throws ServiceException {
2136     try {
2137       checkOpen();
2138       if (regionServer.replicationSinkHandler != null) {
2139         requestCount.increment();
2140         List<WALEntry> entries = request.getEntryList();
2141         CellScanner cellScanner = ((HBaseRpcController)controller).cellScanner();
2142         regionServer.getRegionServerCoprocessorHost().preReplicateLogEntries(entries, cellScanner);
2143         regionServer.replicationSinkHandler.replicateLogEntries(entries, cellScanner,
2144           request.getReplicationClusterId(), request.getSourceBaseNamespaceDirPath(),
2145           request.getSourceHFileArchiveDirPath());
2146         regionServer.getRegionServerCoprocessorHost().postReplicateLogEntries(entries, cellScanner);
2147         return ReplicateWALEntryResponse.newBuilder().build();
2148       } else {
2149         throw new ServiceException("Replication services are not initialized yet");
2150       }
2151     } catch (IOException ie) {
2152       throw new ServiceException(ie);
2153     }
2154   }
2155 
2156   /**
2157    * Roll the WAL writer of the region server.
2158    * @param controller the RPC controller
2159    * @param request the request
2160    * @throws ServiceException
2161    */
2162   @Override
2163   public RollWALWriterResponse rollWALWriter(final RpcController controller,
2164       final RollWALWriterRequest request) throws ServiceException {
2165     try {
2166       checkOpen();
2167       requestCount.increment();
2168       regionServer.getRegionServerCoprocessorHost().preRollWALWriterRequest();
2169       regionServer.walRoller.requestRollAll();
2170       regionServer.getRegionServerCoprocessorHost().postRollWALWriterRequest();
2171       RollWALWriterResponse.Builder builder = RollWALWriterResponse.newBuilder();
2172       return builder.build();
2173     } catch (IOException ie) {
2174       throw new ServiceException(ie);
2175     }
2176   }
2177 
  /**
   * Split a region on the region server.
   *
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException wrapping any IOException; also thrown after aborting the
   *           server when a flush drops a snapshot
   */
  @Override
  @QosPriority(priority=HConstants.ADMIN_QOS)
  public SplitRegionResponse splitRegion(final RpcController controller,
      final SplitRegionRequest request) throws ServiceException {
    try {
      checkOpen();
      requestCount.increment();
      Region region = getRegion(request.getRegion());
      region.startRegionOperation(Operation.SPLIT_REGION);
      // Only the primary replica can be split directly; replicas follow the primary.
      if (region.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
        throw new IOException("Can't split replicas directly. "
            + "Replicas are auto-split when their primary is split.");
      }
      LOG.info("Splitting " + region.getRegionInfo().getRegionNameAsString());
      // Flush first so the split daughters reference up-to-date store files.
      region.flush(true);
      byte[] splitPoint = null;
      if (request.hasSplitPoint()) {
        splitPoint = request.getSplitPoint().toByteArray();
      }
      // Mark the region for a forced split (optionally at the explicit point) and
      // hand the actual split work to the compact/split thread; the RPC returns
      // without waiting for the split to complete.
      ((HRegion)region).forceSplit(splitPoint);
      regionServer.compactSplitThread.requestSplit(region, ((HRegion)region).checkSplit(),
        RpcServer.getRequestUser());
      return SplitRegionResponse.newBuilder().build();
    } catch (DroppedSnapshotException ex) {
      // The flush failed after discarding a memstore snapshot; WAL replay is now the
      // only way to recover the data, so this server must shut down.
      regionServer.abort("Replay of WAL required. Forcing server shutdown", ex);
      throw new ServiceException(ex);
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }
  }
2215 
2216   /**
2217    * Stop the region server.
2218    *
2219    * @param controller the RPC controller
2220    * @param request the request
2221    * @throws ServiceException
2222    */
2223   @Override
2224   @QosPriority(priority=HConstants.ADMIN_QOS)
2225   public StopServerResponse stopServer(final RpcController controller,
2226       final StopServerRequest request) throws ServiceException {
2227     rpcPreCheck("stopServer");
2228     requestCount.increment();
2229     String reason = request.getReason();
2230     regionServer.stop(reason);
2231     return StopServerResponse.newBuilder().build();
2232   }
2233 
2234   @Override
2235   public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller,
2236       UpdateFavoredNodesRequest request) throws ServiceException {
2237     rpcPreCheck("updateFavoredNodes");
2238     List<UpdateFavoredNodesRequest.RegionUpdateInfo> openInfoList = request.getUpdateInfoList();
2239     UpdateFavoredNodesResponse.Builder respBuilder = UpdateFavoredNodesResponse.newBuilder();
2240     for (UpdateFavoredNodesRequest.RegionUpdateInfo regionUpdateInfo : openInfoList) {
2241       HRegionInfo hri = HRegionInfo.convert(regionUpdateInfo.getRegion());
2242       regionServer.updateRegionFavoredNodesMapping(hri.getEncodedName(),
2243         regionUpdateInfo.getFavoredNodesList());
2244     }
2245     respBuilder.setResponse(openInfoList.size());
2246     return respBuilder.build();
2247   }
2248 
  /**
   * Atomically bulk load several HFiles into an open region
   * @return true if successful, false is failed but recoverably (no action)
   * @throws ServiceException if failed unrecoverably
   */
  @Override
  public BulkLoadHFileResponse bulkLoadHFile(final RpcController controller,
      final BulkLoadHFileRequest request) throws ServiceException {
    long start = EnvironmentEdgeManager.currentTime();
    // Loop prevention for replicated bulk loads: if this cluster's id already appears
    // in the request, the load has visited here before — report success and do nothing.
    // Otherwise stamp our cluster id on the list before applying the load.
    List<String> clusterIds = new ArrayList<String>(request.getClusterIdsList());
    if(clusterIds.contains(this.regionServer.clusterId)){
      return BulkLoadHFileResponse.newBuilder().setLoaded(true).build();
    } else {
      clusterIds.add(this.regionServer.clusterId);
    }
    try {
      checkOpen();
      requestCount.increment();
      Region region = getRegion(request.getRegion());
      // Translate the protobuf (family, path) pairs into the form the region expects.
      List<Pair<byte[], String>> familyPaths = new ArrayList<Pair<byte[], String>>();
      for (FamilyPath familyPath: request.getFamilyPathList()) {
        familyPaths.add(new Pair<byte[], String>(familyPath.getFamily().toByteArray(),
          familyPath.getPath()));
      }
      // A coprocessor may perform the load itself, in which case the region is bypassed.
      boolean bypass = false;
      if (region.getCoprocessorHost() != null) {
        bypass = region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
      }
      boolean loaded = false;
      try {
        if (!bypass) {
          loaded = region.bulkLoadHFiles(familyPaths, request.getAssignSeqNum(), null, clusterIds);
        }
      } finally {
        // The post hook always runs (even on failure) and may override the loaded flag.
        if (region.getCoprocessorHost() != null) {
          loaded = region.getCoprocessorHost().postBulkLoadHFile(familyPaths, loaded);
        }
      }
      BulkLoadHFileResponse.Builder builder = BulkLoadHFileResponse.newBuilder();
      builder.setLoaded(loaded);
      return builder.build();
    } catch (IOException ie) {
      throw new ServiceException(ie);
    } finally {
      // Bulk-load latency metric covers success and failure alike.
      if (regionServer.metricsRegionServer != null) {
        regionServer.metricsRegionServer.updateBulkLoad(
            EnvironmentEdgeManager.currentTime() - start);
      }
    }
  }
2299 
2300   @Override
2301   public CoprocessorServiceResponse execService(final RpcController controller,
2302       final CoprocessorServiceRequest request) throws ServiceException {
2303     try {
2304       checkOpen();
2305       requestCount.increment();
2306       Region region = getRegion(request.getRegion());
2307       Message result = execServiceOnRegion(region, request.getCall());
2308       CoprocessorServiceResponse.Builder builder =
2309         CoprocessorServiceResponse.newBuilder();
2310       builder.setRegion(RequestConverter.buildRegionSpecifier(
2311         RegionSpecifierType.REGION_NAME, region.getRegionInfo().getRegionName()));
2312       builder.setValue(
2313         builder.getValueBuilder().setName(result.getClass().getName())
2314           .setValue(result.toByteString()));
2315       return builder.build();
2316     } catch (IOException ie) {
2317       throw new ServiceException(ie);
2318     }
2319   }
2320 
  /**
   * Dispatch a coprocessor service call to the given region, using a fresh
   * server-side controller rather than the one embedded in the serialized call.
   */
  private Message execServiceOnRegion(Region region,
      final ClientProtos.CoprocessorServiceCall serviceCall) throws IOException {
    // ignore the passed in controller (from the serialized call)
    ServerRpcController execController = new ServerRpcController();
    return region.execService(execController, serviceCall);
  }
2327 
  /**
   * Get data from a table.
   *
   * @param controller the RPC controller
   * @param request the get request
   * @throws ServiceException wrapping any IOException raised by the read
   */
  @Override
  public GetResponse get(final RpcController controller,
      final GetRequest request) throws ServiceException {
    long before = EnvironmentEdgeManager.currentTime();
    OperationQuota quota = null;
    Region region = null;
    try {
      checkOpen();
      requestCount.increment();
      rpcGetRequestCount.increment();
      region = getRegion(request.getRegion());

      GetResponse.Builder builder = GetResponse.newBuilder();
      ClientProtos.Get get = request.getGet();
      // existence stays null unless this is an existence-only (table.exists) call.
      Boolean existence = null;
      Result r = null;
      quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.GET);

      if (get.hasClosestRowBefore() && get.getClosestRowBefore()) {
        // Legacy closestRowBefore path: supported for exactly one family.
        if (get.getColumnCount() != 1) {
          throw new DoNotRetryIOException(
            "get ClosestRowBefore supports one and only one family now, not "
              + get.getColumnCount() + " families");
        }
        byte[] row = get.getRow().toByteArray();
        byte[] family = get.getColumn(0).getFamily().toByteArray();
        r = region.getClosestRowBefore(row, family);
      } else {
        Get clientGet = ProtobufUtil.toGet(get);
        // A coprocessor may answer an existence check outright, skipping the region read.
        if (get.getExistenceOnly() && region.getCoprocessorHost() != null) {
          existence = region.getCoprocessorHost().preExists(clientGet);
        }
        if (existence == null) {
          r = region.get(clientGet);
          if (get.getExistenceOnly()) {
            boolean exists = r.getExists();
            if (region.getCoprocessorHost() != null) {
              exists = region.getCoprocessorHost().postExists(clientGet, exists);
            }
            existence = exists;
          }
        }
      }
      if (existence != null){
        ClientProtos.Result pbr =
            ProtobufUtil.toResult(existence, region.getRegionInfo().getReplicaId() != 0);
        builder.setResult(pbr);
      } else  if (r != null) {
        ClientProtos.Result pbr;
        RpcCallContext call = RpcServer.getCurrentCall();
        // Ship cells out-of-band via the cell block when the client supports it
        // (1.3+); otherwise embed them directly in the protobuf result.
        if (isClientCellBlockSupport(call) && controller instanceof HBaseRpcController
            && VersionInfoUtil.hasMinimumVersion(call.getClientVersionInfo(), 1, 3)) {
          pbr = ProtobufUtil.toResultNoData(r);
          ((HBaseRpcController) controller)
              .setCellScanner(CellUtil.createCellScanner(r.rawCells()));
          addSize(call, r, null);
        } else {
          pbr = ProtobufUtil.toResult(r);
        }
        builder.setResult(pbr);
      }
      //r.cells is null when an table.exists(get) call
      if (r != null && r.rawCells() != null) {
        quota.addGetResult(r);
      }
      return builder.build();
    } catch (IOException ie) {
      throw new ServiceException(ie);
    } finally {
      // Update the per-table get latency metric; the table may be unknown if the
      // region lookup itself failed.
      MetricsRegionServer mrs = regionServer.metricsRegionServer;
      if (mrs != null) {
        HTableDescriptor td = region != null ? region.getTableDesc() : null;
        if (td != null) {
          mrs.updateGet(td.getTableName(), EnvironmentEdgeManager.currentTime() - before);
        }
      }
      if (quota != null) {
        quota.close();
      }
    }
  }
2416 
2417   private void checkBatchSizeAndLogLargeSize(MultiRequest request) throws ServiceException {
2418     int sum = 0;
2419     String firstRegionName = null;
2420     for (RegionAction regionAction : request.getRegionActionList()) {
2421       if (sum == 0) {
2422         firstRegionName = Bytes.toStringBinary(regionAction.getRegion().getValue().toByteArray());
2423       }
2424       sum += regionAction.getActionCount();
2425     }
2426     if (sum > rowSizeWarnThreshold) {
2427       ld.logBatchWarning(firstRegionName, sum, rowSizeWarnThreshold);
2428       if (rejectRowsWithSizeOverThreshold) {
2429         throw new ServiceException(
2430           "Rejecting large batch operation for current batch with firstRegionName: "
2431             + firstRegionName + " , Requested Number of Rows: " + sum + " , Size Threshold: "
2432             + rowSizeWarnThreshold);
2433       }
2434     }
2435   }
2436 
  /**
   * Execute multiple actions on a table: get, mutate, and/or execCoprocessor
   *
   * @param rpcc the RPC controller
   * @param request the multi request
   * @throws ServiceException if the server is not open or the batch is rejected as too large
   */
  @Override
  public MultiResponse multi(final RpcController rpcc, final MultiRequest request)
  throws ServiceException {
    try {
      checkOpen();
    } catch (IOException ie) {
      throw new ServiceException(ie);
    }

    checkBatchSizeAndLogLargeSize(request);

    // rpc controller is how we bring in data via the back door;  it is unprotobuf'ed data.
    // It is also the conduit via which we pass back data.
    HBaseRpcController controller = (HBaseRpcController)rpcc;
    CellScanner cellScanner = controller != null ? controller.cellScanner(): null;
    if (controller != null) {
      // Detach the scanner so we are not holding its reference across the call.
      controller.setCellScanner(null);
    }

    long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;

    // this will contain all the cells that we need to return. It's created later, if needed.
    List<CellScannable> cellsToReturn = null;
    MultiResponse.Builder responseBuilder = MultiResponse.newBuilder();
    RegionActionResult.Builder regionActionResultBuilder = RegionActionResult.newBuilder();
    // Only set by the atomic (checkAndRowMutate / mutateRows) path below.
    Boolean processed = null;

    this.rpcMultiRequestCount.increment();
    this.requestCount.increment();
    Map<RegionSpecifier, ClientProtos.RegionLoadStats> regionStats = new HashMap<>(request
      .getRegionActionCount());
    for (RegionAction regionAction : request.getRegionActionList()) {
      OperationQuota quota;
      Region region;
      // The result builder is reused across iterations; clear per RegionAction.
      regionActionResultBuilder.clear();
      RegionSpecifier regionSpecifier = regionAction.getRegion();
      try {
        region = getRegion(regionSpecifier);
        quota = getQuotaManager().checkQuota(region, regionAction.getActionList());
      } catch (IOException e) {
        rpcServer.getMetrics().exception(e);
        regionActionResultBuilder.setException(ResponseConverter.buildException(e));
        responseBuilder.addRegionActionResult(regionActionResultBuilder.build());
        // All Mutations in this RegionAction not executed as we can not see the Region online here
        // in this RS. Will be retried from Client. Skipping all the Cells in CellScanner
        // corresponding to these Mutations.
        skipCellsForMutations(regionAction.getActionList(), cellScanner);
        continue;  // For this region it's a failure.
      }

      if (regionAction.hasAtomic() && regionAction.getAtomic()) {
        // How does this call happen?  It may need some work to play well w/ the surroundings.
        // Need to return an item per Action along w/ Action index.  TODO.
        try {
          if (request.hasCondition()) {
            Condition condition = request.getCondition();
            byte[] row = condition.getRow().toByteArray();
            byte[] family = condition.getFamily().toByteArray();
            byte[] qualifier = condition.getQualifier().toByteArray();
            CompareOp compareOp = CompareOp.valueOf(condition.getCompareType().name());
            ByteArrayComparable comparator =
                ProtobufUtil.toComparator(condition.getComparator());
            processed = checkAndRowMutate(region, regionAction.getActionList(),
                  cellScanner, row, family, qualifier, compareOp,
                  comparator, regionActionResultBuilder);
          } else {
            mutateRows(region, regionAction.getActionList(), cellScanner,
                regionActionResultBuilder);
            processed = Boolean.TRUE;
          }
        } catch (IOException e) {
          rpcServer.getMetrics().exception(e);
          // As it's atomic, we may expect it's a global failure.
          regionActionResultBuilder.setException(ResponseConverter.buildException(e));
        }
      } else {
        // doNonAtomicRegionMutation manages the exception internally
        cellsToReturn = doNonAtomicRegionMutation(region, quota, regionAction, cellScanner,
            regionActionResultBuilder, cellsToReturn, nonceGroup);
      }
      responseBuilder.addRegionActionResult(regionActionResultBuilder.build());
      quota.close();
      // Piggy-back per-region load statistics so clients can throttle themselves.
      ClientProtos.RegionLoadStats regionLoadStats = ((HRegion)region).getLoadStatistics();
      if(regionLoadStats != null) {
        regionStats.put(regionSpecifier, regionLoadStats);
      }
    }
    // Load the controller with the Cells to return.
    if (cellsToReturn != null && !cellsToReturn.isEmpty() && controller != null) {
      controller.setCellScanner(CellUtil.createCellScanner(cellsToReturn));
    }

    if (processed != null) {
      responseBuilder.setProcessed(processed);
    }

    MultiRegionLoadStats.Builder builder = MultiRegionLoadStats.newBuilder();
    for(Entry<RegionSpecifier, ClientProtos.RegionLoadStats> stat: regionStats.entrySet()){
      builder.addRegion(stat.getKey());
      builder.addStat(stat.getValue());
    }
    responseBuilder.setRegionStatistics(builder);
    return responseBuilder.build();
  }
2548 
2549   private void skipCellsForMutations(List<Action> actions, CellScanner cellScanner) {
2550     if (cellScanner == null) {
2551       return;
2552     }
2553     for (Action action : actions) {
2554       skipCellsForMutation(action, cellScanner);
2555     }
2556   }
2557 
2558   private void skipCellsForMutation(Action action, CellScanner cellScanner) {
2559     if (cellScanner == null) {
2560       return;
2561     }
2562     try {
2563       if (action.hasMutation()) {
2564         MutationProto m = action.getMutation();
2565         if (m.hasAssociatedCellCount()) {
2566           for (int i = 0; i < m.getAssociatedCellCount(); i++) {
2567             cellScanner.advance();
2568           }
2569         }
2570       }
2571     } catch (IOException e) {
2572       // No need to handle these Individual Muatation level issue. Any way this entire RegionAction
2573       // marked as failed as we could not see the Region here. At client side the top level
2574       // RegionAction exception will be considered first.
2575       LOG.error("Error while skipping Cells in CellScanner for invalid Region Mutations", e);
2576     }
2577   }
2578 
  /**
   * Mutate data in a table.
   *
   * @param rpcc the RPC controller
   * @param request the mutate request
   * @throws ServiceException wrapping any IOException raised by the mutation
   */
  @Override
  public MutateResponse mutate(final RpcController rpcc,
      final MutateRequest request) throws ServiceException {
    // rpc controller is how we bring in data via the back door;  it is unprotobuf'ed data.
    // It is also the conduit via which we pass back data.
    HBaseRpcController controller = (HBaseRpcController)rpcc;
    CellScanner cellScanner = controller != null ? controller.cellScanner() : null;
    OperationQuota quota = null;
    RpcCallContext context = RpcServer.getCurrentCall();
    // type is declared outside the try so the finally block can pick the right metric.
    MutationType type = null;
    Region region = null;
    long before = EnvironmentEdgeManager.currentTime();
    // Clear scanner so we are not holding on to reference across call.
    if (controller != null) {
      controller.setCellScanner(null);
    }
    try {
      checkOpen();
      requestCount.increment();
      rpcMutateRequestCount.increment();
      region = getRegion(request.getRegion());
      MutateResponse.Builder builder = MutateResponse.newBuilder();
      MutationProto mutation = request.getMutation();
      if (!region.getRegionInfo().isMetaTable()) {
        // Give the flusher a chance to free memstore memory before taking the write.
        regionServer.cacheFlusher.reclaimMemStoreMemory();
      }
      long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
      Result r = null;
      // processed: null means "not a processed-flag operation"; for checkAnd* paths a
      // coprocessor pre-hook may set it directly, bypassing the region.
      Boolean processed = null;
      type = mutation.getMutateType();
      long mutationSize = 0;
      quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.MUTATE);
      switch (type) {
      case APPEND:
        // TODO: this doesn't actually check anything.
        r = append(region, quota, mutation, cellScanner, nonceGroup);
        break;
      case INCREMENT:
        // TODO: this doesn't actually check anything.
        r = increment(region, quota, mutation, cellScanner, nonceGroup);
        break;
      case PUT:
        Put put = ProtobufUtil.toPut(mutation, cellScanner);
        checkCellSizeLimit(region, put);
        quota.addMutation(put);
        if (request.hasCondition()) {
          // checkAndPut: the pre-hook may decide the outcome; otherwise the region runs
          // the atomic check-and-mutate and the post-hook may override the result.
          Condition condition = request.getCondition();
          byte[] row = condition.getRow().toByteArray();
          byte[] family = condition.getFamily().toByteArray();
          byte[] qualifier = condition.getQualifier().toByteArray();
          CompareOp compareOp = CompareOp.valueOf(condition.getCompareType().name());
          ByteArrayComparable comparator =
            ProtobufUtil.toComparator(condition.getComparator());
          if (region.getCoprocessorHost() != null) {
            processed = region.getCoprocessorHost().preCheckAndPut(
              row, family, qualifier, compareOp, comparator, put);
          }
          if (processed == null) {
            boolean result = region.checkAndMutate(row, family,
              qualifier, compareOp, comparator, put, true);
            if (region.getCoprocessorHost() != null) {
              result = region.getCoprocessorHost().postCheckAndPut(row, family,
                qualifier, compareOp, comparator, put, result);
            }
            processed = result;
          }
        } else {
          region.put(put);
          processed = Boolean.TRUE;
        }
        break;
      case DELETE:
        Delete delete = ProtobufUtil.toDelete(mutation, cellScanner);
        checkCellSizeLimit(region, delete);
        quota.addMutation(delete);
        if (request.hasCondition()) {
          // checkAndDelete: mirrors the checkAndPut flow above.
          Condition condition = request.getCondition();
          byte[] row = condition.getRow().toByteArray();
          byte[] family = condition.getFamily().toByteArray();
          byte[] qualifier = condition.getQualifier().toByteArray();
          CompareOp compareOp = CompareOp.valueOf(condition.getCompareType().name());
          ByteArrayComparable comparator =
            ProtobufUtil.toComparator(condition.getComparator());
          if (region.getCoprocessorHost() != null) {
            processed = region.getCoprocessorHost().preCheckAndDelete(
              row, family, qualifier, compareOp, comparator, delete);
          }
          if (processed == null) {
            boolean result = region.checkAndMutate(row, family,
              qualifier, compareOp, comparator, delete, true);
            if (region.getCoprocessorHost() != null) {
              result = region.getCoprocessorHost().postCheckAndDelete(row, family,
                qualifier, compareOp, comparator, delete, result);
            }
            processed = result;
          }
        } else {
          region.delete(delete);
          processed = Boolean.TRUE;
        }
        break;
      default:
          throw new DoNotRetryIOException(
            "Unsupported mutate type: " + type.name());
      }
      if (processed != null) builder.setProcessed(processed.booleanValue());
      addResult(builder, r, controller);
      // When the client supports cell blocks, account the result size on the call.
      boolean clientCellBlockSupported = isClientCellBlockSupport(context);
      if (clientCellBlockSupported) {
        addSize(context, r, null);
      }
      return builder.build();
    } catch (IOException ie) {
      regionServer.checkFileSystem();
      throw new ServiceException(ie);
    } finally {
      if (quota != null) {
        quota.close();
      }
      // Update metrics
      if (regionServer.metricsRegionServer != null && type != null) {
        long after = EnvironmentEdgeManager.currentTime();
        switch (type) {
        case DELETE:
          if (request.hasCondition()) {
            regionServer.metricsRegionServer.updateCheckAndDelete(
                region == null ? null : region.getRegionInfo().getTable(), after - before);
          } else {
            regionServer.metricsRegionServer.updateDelete(
                region == null ? null : region.getRegionInfo().getTable(), after - before);
          }
          break;
        case PUT:
          if (request.hasCondition()) {
            regionServer.metricsRegionServer.updateCheckAndPut(
                region == null ? null : region.getRegionInfo().getTable(), after - before);
          } else {
            regionServer.metricsRegionServer.updatePut(
                region == null ? null : region.getRegionInfo().getTable(), after - before);
          }
          break;
        default:
          break;
        }
      }
    }
  }
2733 
  // This is used to keep compatible with the old client implementation. Consider remove it if we
  // decide to drop the support of the client that still sends close request to a region scanner
  // which has already been exhausted.
  @Deprecated
  private static final IOException SCANNER_ALREADY_CLOSED = new IOException() {

    private static final long serialVersionUID = -4305297078988180130L;

    // Shared singleton exception: suppress stack-trace capture so throwing it
    // repeatedly is cheap and the (meaningless) trace of the single construction
    // site is never reported.
    @Override
    public synchronized Throwable fillInStackTrace() {
      return this;
    }
  };
2747 
2748   private RegionScannerHolder getRegionScanner(ScanRequest request) throws IOException {
2749     String scannerName = Long.toString(request.getScannerId());
2750     RegionScannerHolder rsh = scanners.get(scannerName);
2751     if (rsh == null) {
2752       // just ignore the next or close request if scanner does not exists.
2753       if (closedScanners.getIfPresent(scannerName) != null) {
2754         throw SCANNER_ALREADY_CLOSED;
2755       } else {
2756         LOG.warn("Client tried to access missing scanner " + scannerName);
2757         throw new UnknownScannerException(
2758             "Unknown scanner '" + scannerName + "'. This can happen due to any of the following " +
2759                 "reasons: a) Scanner id given is wrong, b) Scanner lease expired because of " +
2760                 "long wait between consecutive client checkins, c) Server may be closing down, " +
2761                 "d) RegionServer restart during upgrade.\nIf the issue is due to reason (b), a " +
2762                 "possible fix would be increasing the value of" +
2763                 "'hbase.client.scanner.timeout.period' configuration.");
2764       }
2765     }
2766     HRegionInfo hri = rsh.s.getRegionInfo();
2767     // Yes, should be the same instance
2768     if (regionServer.getOnlineRegion(hri.getRegionName()) != rsh.r) {
2769       String msg = "Region was re-opened after the scanner" + scannerName + " was created: "
2770           + hri.getRegionNameAsString();
2771       LOG.warn(msg + ", closing...");
2772       scanners.remove(scannerName);
2773       try {
2774         rsh.s.close();
2775       } catch (IOException e) {
2776         LOG.warn("Getting exception closing " + scannerName, e);
2777       } finally {
2778         try {
2779           regionServer.leases.cancelLease(scannerName);
2780         } catch (LeaseException e) {
2781           LOG.warn("Getting exception closing " + scannerName, e);
2782         }
2783       }
2784       throw new NotServingRegionException(msg);
2785     }
2786     return rsh;
2787   }
2788 
2789   private RegionScannerHolder newRegionScanner(ScanRequest request, ScanResponse.Builder builder)
2790       throws IOException {
2791     Region region = getRegion(request.getRegion());
2792     ClientProtos.Scan protoScan = request.getScan();
2793     boolean isLoadingCfsOnDemandSet = protoScan.hasLoadColumnFamiliesOnDemand();
2794     Scan scan = ProtobufUtil.toScan(protoScan);
2795     // if the request doesn't set this, get the default region setting.
2796     if (!isLoadingCfsOnDemandSet) {
2797       scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault());
2798     }
2799     if (!scan.hasFamilies()) {
2800       // Adding all families to scanner
2801       for (byte[] family : region.getTableDesc().getFamiliesKeys()) {
2802         scan.addFamily(family);
2803       }
2804     }
2805     RegionScanner scanner = null;
2806     if (region.getCoprocessorHost() != null) {
2807       scanner = region.getCoprocessorHost().preScannerOpen(scan);
2808     }
2809     if (scanner == null) {
2810       scanner = region.getScanner(scan);
2811     }
2812     if (region.getCoprocessorHost() != null) {
2813       scanner = region.getCoprocessorHost().postScannerOpen(scan, scanner);
2814     }
2815     long scannerId = scannerIdGenerator.generateNewScannerId();
2816     builder.setScannerId(scannerId);
2817     builder.setMvccReadPoint(scanner.getMvccReadPoint());
2818     builder.setTtl(scannerLeaseTimeoutPeriod);
2819     String scannerName = String.valueOf(scannerId);
2820     return addScanner(scannerName, scanner, region, scan.isNeedCursorResult());
2821   }
2822 
2823   private void checkScanNextCallSeq(ScanRequest request, RegionScannerHolder rsh)
2824       throws OutOfOrderScannerNextException {
2825     // if nextCallSeq does not match throw Exception straight away. This needs to be
2826     // performed even before checking of Lease.
2827     // See HBASE-5974
2828     if (request.hasNextCallSeq()) {
2829       long callSeq = request.getNextCallSeq();
2830       if (!rsh.incNextCallSeq(callSeq)) {
2831         throw new OutOfOrderScannerNextException("Expected nextCallSeq: " + rsh.getNextCallSeq()
2832             + " But the nextCallSeq got from client: " + request.getNextCallSeq() + "; request="
2833             + TextFormat.shortDebugString(request));
2834       }
2835     }
2836   }
2837 
2838   private void addScannerLeaseBack(Leases.Lease lease) {
2839     try {
2840       regionServer.leases.addLease(lease);
2841     } catch (LeaseStillHeldException e) {
2842       // should not happen as the scanner id is unique.
2843       throw new AssertionError(e);
2844     }
2845   }
2846 
2847   private long getTimeLimit(HBaseRpcController controller,
2848       boolean allowHeartbeatMessages) {
2849     // Set the time limit to be half of the more restrictive timeout value (one of the
2850     // timeout values must be positive). In the event that both values are positive, the
2851     // more restrictive of the two is used to calculate the limit.
2852     if (allowHeartbeatMessages && (scannerLeaseTimeoutPeriod > 0 || rpcTimeout > 0)) {
2853       long timeLimitDelta;
2854       if (scannerLeaseTimeoutPeriod > 0 && rpcTimeout > 0) {
2855         timeLimitDelta = Math.min(scannerLeaseTimeoutPeriod, rpcTimeout);
2856       } else {
2857         timeLimitDelta = scannerLeaseTimeoutPeriod > 0 ? scannerLeaseTimeoutPeriod : rpcTimeout;
2858       }
2859       if (controller != null && controller.getCallTimeout() > 0) {
2860         timeLimitDelta = Math.min(timeLimitDelta, controller.getCallTimeout());
2861       }
2862       // Use half of whichever timeout value was more restrictive... But don't allow
2863       // the time limit to be less than the allowable minimum (could cause an
2864       // immediatate timeout before scanning any data).
2865       timeLimitDelta = Math.max(timeLimitDelta / 2, minimumScanTimeLimitDelta);
2866       // XXX: Can not use EnvironmentEdge here because TestIncrementTimeRange use a
2867       // ManualEnvironmentEdge. Consider using System.nanoTime instead.
2868       return System.currentTimeMillis() + timeLimitDelta;
2869     }
2870     // Default value of timeLimit is negative to indicate no timeLimit should be
2871     // enforced.
2872     return -1L;
2873   }
2874 
2875   private void checkLimitOfRows(int numOfCompleteRows, int limitOfRows, boolean moreRows,
2876       ScannerContext scannerContext, ScanResponse.Builder builder) {
2877     if (numOfCompleteRows >= limitOfRows) {
2878       if (LOG.isTraceEnabled()) {
2879         LOG.trace("Done scanning, limit of rows reached, moreRows: " + moreRows +
2880             " scannerContext: " + scannerContext);
2881       }
2882       builder.setMoreResults(false);
2883     }
2884   }
2885 
  /**
   * Core of a scanner "next" call: repeatedly invokes {@code RegionScanner#nextRaw} until the
   * result-count, size or time limit is reached (or the region is exhausted), appending one
   * {@code Result} per row/batch to {@code results}. Whether more results remain in this region
   * is communicated via {@code builder.setMoreResultsInRegion(...)} rather than a return value.
   * Also sets heartbeat/cursor/metrics fields on the builder as requested, and updates
   * server-side scan metrics in the finally block even on error.
   */
  private void scan(HBaseRpcController controller, ScanRequest request, RegionScannerHolder rsh,
      long maxQuotaResultSize, int maxResults, int limitOfRows, List<Result> results,
      ScanResponse.Builder builder, MutableObject lastBlock, RpcCallContext context)
      throws IOException {
    Region region = rsh.r;
    RegionScanner scanner = rsh.s;
    // Effective size limit: the scanner's own limit capped by the quota, or just the quota
    // when the scanner declares no limit.
    long maxResultSize;
    if (scanner.getMaxResultSize() > 0) {
      maxResultSize = Math.min(scanner.getMaxResultSize(), maxQuotaResultSize);
    } else {
      maxResultSize = maxQuotaResultSize;
    }
    // This is cells inside a row. Default size is 10 so if many versions or many cfs,
    // then we'll resize. Resizings show in profiler. Set it higher than 10. For now
    // arbitrary 32. TODO: keep record of general size of results being returned.
    List<Cell> values = new ArrayList<Cell>(32);
    region.startRegionOperation(Operation.SCAN);
    long before = EnvironmentEdgeManager.currentTime();
    // Used to check if we've matched the row limit set on the Scan
    int numOfCompleteRows = 0;
    // Count of times we call nextRaw; can be > numOfCompleteRows.
    int numOfNextRawCalls = 0;
    try {
      int numOfResults = 0;
      synchronized (scanner) {
        // Results from non-default replicas are flagged stale for the client.
        boolean stale = (region.getRegionInfo().getReplicaId() != 0);
        boolean clientHandlesPartials =
            request.hasClientHandlesPartials() && request.getClientHandlesPartials();
        boolean clientHandlesHeartbeats =
            request.hasClientHandlesHeartbeats() && request.getClientHandlesHeartbeats();

        // On the server side we must ensure that the correct ordering of partial results is
        // returned to the client to allow them to properly reconstruct the partial results.
        // If the coprocessor host is adding to the result list, we cannot guarantee the
        // correct ordering of partial results and so we prevent partial results from being
        // formed.
        boolean serverGuaranteesOrderOfPartials = results.isEmpty();
        boolean allowPartialResults = clientHandlesPartials && serverGuaranteesOrderOfPartials;
        boolean moreRows = false;

        // Heartbeat messages occur when the processing of the ScanRequest is exceeds a
        // certain time threshold on the server. When the time threshold is exceeded, the
        // server stops the scan and sends back whatever Results it has accumulated within
        // that time period (may be empty). Since heartbeat messages have the potential to
        // create partial Results (in the event that the timeout occurs in the middle of a
        // row), we must only generate heartbeat messages when the client can handle both
        // heartbeats AND partials
        boolean allowHeartbeatMessages = clientHandlesHeartbeats && allowPartialResults;

        long timeLimit = getTimeLimit(controller, allowHeartbeatMessages);

        // BETWEEN_CELLS limits let the scan stop mid-row (partials/heartbeats);
        // BETWEEN_ROWS limits only stop at row boundaries.
        final LimitScope sizeScope =
            allowPartialResults ? LimitScope.BETWEEN_CELLS : LimitScope.BETWEEN_ROWS;
        final LimitScope timeScope =
            allowHeartbeatMessages ? LimitScope.BETWEEN_CELLS : LimitScope.BETWEEN_ROWS;

        boolean trackMetrics = request.hasTrackScanMetrics() && request.getTrackScanMetrics();

        // Configure with limits for this RPC. Set keep progress true since size progress
        // towards size limit should be kept between calls to nextRaw
        ScannerContext.Builder contextBuilder = ScannerContext.newBuilder(true);
        contextBuilder.setSizeLimit(sizeScope, maxResultSize);
        contextBuilder.setBatchLimit(scanner.getBatch());
        contextBuilder.setTimeLimit(timeScope, timeLimit);
        contextBuilder.setTrackMetrics(trackMetrics);
        ScannerContext scannerContext = contextBuilder.build();
        boolean limitReached = false;
        while (numOfResults < maxResults) {
          // Reset the batch progress to 0 before every call to RegionScanner#nextRaw. The
          // batch limit is a limit on the number of cells per Result. Thus, if progress is
          // being tracked (i.e. scannerContext.keepProgress() is true) then we need to
          // reset the batch progress between nextRaw invocations since we don't want the
          // batch progress from previous calls to affect future calls
          scannerContext.setBatchProgress(0);

          // Collect values to be returned here
          moreRows = scanner.nextRaw(values, scannerContext);
          numOfNextRawCalls++;

          if (!values.isEmpty()) {
            if (limitOfRows > 0) {
              // First we need to check if the last result is partial and we have a row change. If
              // so then we need to increase the numOfCompleteRows.
              if (results.isEmpty()) {
                if (rsh.rowOfLastPartialResult != null &&
                    !CellUtil.matchingRow(values.get(0), rsh.rowOfLastPartialResult)) {
                  numOfCompleteRows++;
                  checkLimitOfRows(numOfCompleteRows, limitOfRows, moreRows, scannerContext,
                    builder);
                }
              } else {
                Result lastResult = results.get(results.size() - 1);
                if (lastResult.mayHaveMoreCellsInRow() &&
                    !CellUtil.matchingRow(values.get(0), lastResult.getRow())) {
                  numOfCompleteRows++;
                  checkLimitOfRows(numOfCompleteRows, limitOfRows, moreRows, scannerContext,
                    builder);
                }
              }
              // checkLimitOfRows may have flipped moreResults to false; stop if so.
              if (builder.hasMoreResults() && !builder.getMoreResults()) {
                break;
              }
            }
            boolean mayHaveMoreCellsInRow = scannerContext.mayHaveMoreCellsInRow();
            Result r = Result.create(values, null, stale, mayHaveMoreCellsInRow);
            lastBlock.setValue(addSize(context, r, lastBlock.getValue()));
            results.add(r);
            numOfResults++;
            if (!mayHaveMoreCellsInRow && limitOfRows > 0) {
              numOfCompleteRows++;
              checkLimitOfRows(numOfCompleteRows, limitOfRows, moreRows, scannerContext, builder);
              if (builder.hasMoreResults() && !builder.getMoreResults()) {
                break;
              }
            }
          } else if (!moreRows && !results.isEmpty()) {
            // No more cells for the scan here, we need to ensure that the mayHaveMoreCellsInRow of
            // last result is false. Otherwise it's possible that: the first nextRaw returned
            // because BATCH_LIMIT_REACHED (BTW it happen to exhaust all cells of the scan),so the
            // last result's mayHaveMoreCellsInRow will be true. while the following nextRaw will
            // return with moreRows=false, which means moreResultsInRegion would be false, it will
            // be a contradictory state (HBASE-21206).
            int lastIdx = results.size() - 1;
            Result r = results.get(lastIdx);
            if (r.mayHaveMoreCellsInRow()) {
              results.set(lastIdx, Result.create(r.rawCells(), r.getExists(), r.isStale(), false));
            }
          }
          boolean sizeLimitReached = scannerContext.checkSizeLimit(LimitScope.BETWEEN_ROWS);
          boolean timeLimitReached = scannerContext.checkTimeLimit(LimitScope.BETWEEN_ROWS);
          boolean resultsLimitReached = numOfResults >= maxResults;
          limitReached = sizeLimitReached || timeLimitReached || resultsLimitReached;

          if (limitReached || !moreRows) {
            if (LOG.isTraceEnabled()) {
              LOG.trace("Done scanning. limitReached: " + limitReached + " moreRows: " + moreRows
                  + " scannerContext: " + scannerContext);
            }
            // We only want to mark a ScanResponse as a heartbeat message in the event that
            // there are more values to be read server side. If there aren't more values,
            // marking it as a heartbeat is wasteful because the client will need to issue
            // another ScanRequest only to realize that they already have all the values
            if (moreRows && timeLimitReached) {
              // Heartbeat messages occur when the time limit has been reached.
              builder.setHeartbeatMessage(true);
              if (rsh.needCursor) {
                Cell cursorCell = scannerContext.getLastPeekedCell();
                if (cursorCell != null) {
                  builder.setCursor(ProtobufUtil.toCursor(cursorCell));
                }
              }
            }
            break;
          }
          values.clear();
        }
        builder.setMoreResultsInRegion(moreRows);
        // Check to see if the client requested that we track metrics server side. If the
        // client requested metrics, retrieve the metrics from the scanner context.
        if (trackMetrics) {
          Map<String, Long> metrics = scannerContext.getMetrics().getMetricsMap();
          ScanMetrics.Builder metricBuilder = ScanMetrics.newBuilder();
          NameInt64Pair.Builder pairBuilder = NameInt64Pair.newBuilder();

          for (Entry<String, Long> entry : metrics.entrySet()) {
            pairBuilder.setName(entry.getKey());
            pairBuilder.setValue(entry.getValue());
            metricBuilder.addMetrics(pairBuilder.build());
          }

          builder.setScanMetrics(metricBuilder.build());
        }
      }
    } finally {
      region.closeRegionOperation();
      // Update serverside metrics, even on error.
      long end = EnvironmentEdgeManager.currentTime();
      long responseCellSize = context != null ? context.getResponseCellSize() : 0;
      region.getMetrics().updateScanTime(end - before);
      if (regionServer.metricsRegionServer != null) {
        regionServer.metricsRegionServer.updateScanSize(
            region.getTableDesc().getTableName(), responseCellSize);
        regionServer.metricsRegionServer.updateScanTime(
            region.getTableDesc().getTableName(), end - before);
        regionServer.metricsRegionServer
          .updateReadQueryMeter(region.getRegionInfo().getTable(), numOfNextRawCalls);
      }
    }
    // coprocessor postNext hook
    if (region.getCoprocessorHost() != null) {
      region.getCoprocessorHost().postScannerNext(scanner, results, maxResults, true);
    }
  }
3080 
3081   /**
3082    * Scan data in a table.
3083    *
3084    * @param controller the RPC controller
3085    * @param request the scan request
3086    * @throws ServiceException
3087    */
3088   @Override
3089   public ScanResponse scan(final RpcController controller, final ScanRequest request)
3090       throws ServiceException {
3091     if (controller != null && !(controller instanceof HBaseRpcController)) {
3092       throw new UnsupportedOperationException(
3093           "We only do HBaseRpcController! FIX IF A PROBLEM: " + controller);
3094     }
3095     if (!request.hasScannerId() && !request.hasScan()) {
3096       throw new ServiceException(
3097           new DoNotRetryIOException("Missing required input: scannerId or scan"));
3098     }
3099     try {
3100       checkOpen();
3101     } catch (IOException e) {
3102       if (request.hasScannerId()) {
3103         String scannerName = Long.toString(request.getScannerId());
3104         if (LOG.isDebugEnabled()) {
3105           LOG.debug(
3106             "Server shutting down and client tried to access missing scanner " + scannerName);
3107         }
3108         if (regionServer.leases != null) {
3109           try {
3110             regionServer.leases.cancelLease(scannerName);
3111           } catch (LeaseException le) {
3112             // No problem, ignore
3113             if (LOG.isTraceEnabled()) {
3114               LOG.trace("Un-able to cancel lease of scanner. It could already be closed.");
3115             }
3116           }
3117         }
3118       }
3119       throw new ServiceException(e);
3120     }
3121     requestCount.increment();
3122     rpcScanRequestCount.increment();
3123     RegionScannerHolder rsh;
3124     ScanResponse.Builder builder = ScanResponse.newBuilder();
3125     try {
3126       if (request.hasScannerId()) {
3127         // The downstream projects such as AsyncHBase in OpenTSDB need this value. See HBASE-18000
3128         // for more details.
3129         builder.setScannerId(request.getScannerId());
3130         rsh = getRegionScanner(request);
3131       } else {
3132         rsh = newRegionScanner(request, builder);
3133       }
3134     } catch (IOException e) {
3135       if (e == SCANNER_ALREADY_CLOSED) {
3136         // Now we will close scanner automatically if there are no more results for this region but
3137         // the old client will still send a close request to us. Just ignore it and return.
3138         return builder.build();
3139       }
3140       throw new ServiceException(e);
3141     }
3142     Region region = rsh.r;
3143     String scannerName = rsh.scannerName;
3144     Leases.Lease lease;
3145     try {
3146       // Remove lease while its being processed in server; protects against case
3147       // where processing of request takes > lease expiration time.
3148       lease = regionServer.leases.removeLease(scannerName);
3149     } catch (LeaseException e) {
3150       throw new ServiceException(e);
3151     }
3152     if (request.hasRenew() && request.getRenew()) {
3153       // add back and return
3154       addScannerLeaseBack(lease);
3155       try {
3156         checkScanNextCallSeq(request, rsh);
3157       } catch (OutOfOrderScannerNextException e) {
3158         throw new ServiceException(e);
3159       }
3160       return builder.build();
3161     }
3162     OperationQuota quota;
3163     try {
3164       quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.SCAN);
3165     } catch (IOException e) {
3166       addScannerLeaseBack(lease);
3167       throw new ServiceException(e);
3168     };
3169     try {
3170       checkScanNextCallSeq(request, rsh);
3171     } catch (OutOfOrderScannerNextException e) {
3172       addScannerLeaseBack(lease);
3173       throw new ServiceException(e);
3174     }
3175     // Now we have increased the next call sequence. If we give client an error, the retry will
3176     // never success. So we'd better close the scanner and return a DoNotRetryIOException to client
3177     // and then client will try to open a new scanner.
3178     boolean closeScanner = request.hasCloseScanner() ? request.getCloseScanner() : false;
3179     int rows; // this is scan.getCaching
3180     if (request.hasNumberOfRows()) {
3181       rows = request.getNumberOfRows();
3182     } else {
3183       rows = closeScanner ? 0 : 1;
3184     }
3185     RpcCallContext context = RpcServer.getCurrentCall();
3186     // now let's do the real scan.
3187     long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
3188     RegionScanner scanner = rsh.s;
3189     // this is the limit of rows for this scan, if we the number of rows reach this value, we will
3190     // close the scanner.
3191     int limitOfRows;
3192     if (request.hasLimitOfRows()) {
3193       limitOfRows = request.getLimitOfRows();
3194     } else {
3195       limitOfRows = -1;
3196     }
3197     MutableObject lastBlock = new MutableObject();
3198     boolean scannerClosed = false;
3199     try {
3200       List<Result> results = new ArrayList<>();
3201       if (rows > 0) {
3202         boolean done = false;
3203         // Call coprocessor. Get region info from scanner.
3204         if (region.getCoprocessorHost() != null) {
3205           Boolean bypass = region.getCoprocessorHost().preScannerNext(scanner, results, rows);
3206           if (!results.isEmpty()) {
3207             for (Result r : results) {
3208               lastBlock.setValue(addSize(context, r, lastBlock.getValue()));
3209             }
3210           }
3211           if (bypass != null && bypass.booleanValue()) {
3212             done = true;
3213           }
3214         }
3215         if (!done) {
3216           scan((HBaseRpcController) controller, request, rsh, maxQuotaResultSize, rows, limitOfRows,
3217             results, builder, lastBlock, context);
3218         } else {
3219           builder.setMoreResultsInRegion(!results.isEmpty());
3220         }
3221       } else {
3222         // This is a open scanner call with numberOfRow = 0, so set more results in region to true.
3223         builder.setMoreResultsInRegion(true);
3224       }
3225 
3226       quota.addScanResult(results);
3227       addResults(builder, results, (HBaseRpcController) controller,
3228         RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()));
3229       if (scanner.isFilterDone() && results.isEmpty()) {
3230         // If the scanner's filter - if any - is done with the scan
3231         // only set moreResults to false if the results is empty. This is used to keep compatible
3232         // with the old scan implementation where we just ignore the returned results if moreResults
3233         // is false. Can remove the isEmpty check after we get rid of the old implementation.
3234         builder.setMoreResults(false);
3235       }
3236       // Later we may close the scanner depending on this flag so here we need to make sure that we
3237       // have already set this flag.
3238       assert builder.hasMoreResultsInRegion();
3239       // we only set moreResults to false in the above code, so set it to true if we haven't set it
3240       // yet.
3241       if (!builder.hasMoreResults()) {
3242         builder.setMoreResults(true);
3243       }
3244       if (builder.getMoreResults() && builder.getMoreResultsInRegion() && !results.isEmpty()) {
3245         // Record the last cell of the last result if it is a partial result
3246         // We need this to calculate the complete rows we have returned to client as the
3247         // mayHaveMoreCellsInRow is true does not mean that there will be extra cells for the
3248         // current row. We may filter out all the remaining cells for the current row and just
3249         // return the cells of the nextRow when calling RegionScanner.nextRaw. So here we need to
3250         // check for row change.
3251         Result lastResult = results.get(results.size() - 1);
3252         if (lastResult.mayHaveMoreCellsInRow()) {
3253           rsh.rowOfLastPartialResult = lastResult.getRow();
3254         } else {
3255           rsh.rowOfLastPartialResult = null;
3256         }
3257       }
3258       if (!builder.getMoreResults() || !builder.getMoreResultsInRegion() || closeScanner) {
3259         scannerClosed = true;
3260         closeScanner(region, scanner, scannerName, context);
3261       }
3262       return builder.build();
3263     } catch (IOException e) {
3264       try {
3265         // scanner is closed here
3266         scannerClosed = true;
3267         // The scanner state might be left in a dirty state, so we will tell the Client to
3268         // fail this RPC and close the scanner while opening up another one from the start of
3269         // row that the client has last seen.
3270         closeScanner(region, scanner, scannerName, context);
3271 
3272         // If it is a DoNotRetryIOException already, throw as it is. Unfortunately, DNRIOE is
3273         // used in two different semantics.
3274         // (1) The first is to close the client scanner and bubble up the exception all the way
3275         // to the application. This is preferred when the exception is really un-recoverable
3276         // (like CorruptHFileException, etc). Plain DoNotRetryIOException also falls into this
3277         // bucket usually.
3278         // (2) Second semantics is to close the current region scanner only, but continue the
3279         // client scanner by overriding the exception. This is usually UnknownScannerException,
3280         // OutOfOrderScannerNextException, etc where the region scanner has to be closed, but the
3281         // application-level ClientScanner has to continue without bubbling up the exception to
3282         // the client. See ClientScanner code to see how it deals with these special exceptions.
3283         if (e instanceof DoNotRetryIOException) {
3284           throw e;
3285         }
3286 
3287         // If it is a FileNotFoundException, wrap as a
3288         // DoNotRetryIOException. This can avoid the retry in ClientScanner.
3289         if (e instanceof FileNotFoundException) {
3290           throw new DoNotRetryIOException(e);
3291         }
3292 
3293         // We closed the scanner already. Instead of throwing the IOException, and client
3294         // retrying with the same scannerId only to get USE on the next RPC, we directly throw
3295         // a special exception to save an RPC.
3296         if (VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 4)) {
3297           // 1.4.0+ clients know how to handle
3298           throw new ScannerResetException("Scanner is closed on the server-side", e);
3299         } else {
3300           // older clients do not know about SRE. Just throw USE, which they will handle
3301           throw new UnknownScannerException("Throwing UnknownScannerException to reset the client"
3302               + " scanner state for clients older than 1.4.", e);
3303         }
3304       } catch (IOException ioe) {
3305         throw new ServiceException(ioe);
3306       }
3307     } finally {
3308       if (!scannerClosed) {
3309         // Adding resets expiration time on lease.
3310         addScannerLeaseBack(lease);
3311       }
3312       quota.close();
3313     }
3314   }
3315 
3316   private void closeScanner(Region region, RegionScanner scanner, String scannerName,
3317       RpcCallContext context) throws IOException {
3318     if (region.getCoprocessorHost() != null) {
3319       if (region.getCoprocessorHost().preScannerClose(scanner)) {
3320         // bypass the actual close.
3321         return;
3322       }
3323     }
3324     RegionScannerHolder rsh = scanners.remove(scannerName);
3325     if (rsh != null) {
3326       rsh.s.close();
3327       if (region.getCoprocessorHost() != null) {
3328         region.getCoprocessorHost().postScannerClose(scanner);
3329       }
3330       closedScanners.put(scannerName, scannerName);
3331     }
3332   }
3333 
3334   @Override
3335   public CoprocessorServiceResponse execRegionServerService(RpcController controller,
3336       CoprocessorServiceRequest request) throws ServiceException {
3337     rpcPreCheck("execRegionServerService");
3338     return regionServer.execRegionServerService(controller, request);
3339   }
3340 
3341   @Override
3342   public UpdateConfigurationResponse updateConfiguration(
3343       RpcController controller, UpdateConfigurationRequest request)
3344       throws ServiceException {
3345     try {
3346       requirePermission("updateConfiguration", Permission.Action.ADMIN);
3347       this.regionServer.updateConfiguration();
3348     } catch (Exception e) {
3349       throw new ServiceException(e);
3350     }
3351     return UpdateConfigurationResponse.getDefaultInstance();
3352   }
3353 
3354   private List<SlowLogPayload> getSlowLogPayloads(SlowLogResponseRequest request,
3355     NamedQueueRecorder namedQueueRecorder) {
3356     if (namedQueueRecorder == null) {
3357       return Collections.emptyList();
3358     }
3359     List<SlowLogPayload> slowLogPayloads;
3360     NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest();
3361     namedQueueGetRequest.setNamedQueueEvent(RpcLogDetails.SLOW_LOG_EVENT);
3362     namedQueueGetRequest.setSlowLogResponseRequest(request);
3363     NamedQueueGetResponse namedQueueGetResponse =
3364       namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest);
3365     slowLogPayloads = namedQueueGetResponse != null ?
3366       namedQueueGetResponse.getSlowLogPayloads() : new ArrayList<SlowLogPayload>();
3367     return slowLogPayloads;
3368   }
3369 
3370   @Override
3371   @QosPriority(priority=HConstants.ADMIN_QOS)
3372   public ClearSlowLogResponses clearSlowLogsResponses(RpcController controller,
3373       ClearSlowLogResponseRequest request) throws ServiceException {
3374     rpcPreCheck("clearSlowLogsResponses");
3375     final NamedQueueRecorder namedQueueRecorder =
3376       this.regionServer.getNamedQueueRecorder();
3377     boolean slowLogsCleaned = false;
3378     if (namedQueueRecorder != null) {
3379       namedQueueRecorder.clearNamedQueue(NamedQueuePayload.NamedQueueEvent.SLOW_LOG);
3380       slowLogsCleaned = true;
3381     }
3382     ClearSlowLogResponses clearSlowLogResponses = ClearSlowLogResponses.newBuilder()
3383       .setIsCleaned(slowLogsCleaned)
3384       .build();
3385     return clearSlowLogResponses;
3386   }
3387 
3388   @Override
3389   @QosPriority(priority = HConstants.ADMIN_QOS)
3390   public HBaseProtos.LogEntry getLogEntries(RpcController controller,
3391       HBaseProtos.LogRequest request) throws ServiceException {
3392     try {
3393       final String logClassName = request.getLogClassName();
3394       Class<?> logClass = Class.forName(logClassName)
3395         .asSubclass(Message.class);
3396       Method method = logClass.getMethod("parseFrom", ByteString.class);
3397       if (logClassName.contains("SlowLogResponseRequest")) {
3398         SlowLogResponseRequest slowLogResponseRequest =
3399           (SlowLogResponseRequest) method.invoke(null, request.getLogMessage());
3400         final NamedQueueRecorder namedQueueRecorder =
3401           this.regionServer.getNamedQueueRecorder();
3402         final List<SlowLogPayload> slowLogPayloads =
3403           getSlowLogPayloads(slowLogResponseRequest, namedQueueRecorder);
3404         SlowLogResponses slowLogResponses = SlowLogResponses.newBuilder()
3405           .addAllSlowLogPayloads(slowLogPayloads)
3406           .build();
3407         return HBaseProtos.LogEntry.newBuilder()
3408           .setLogClassName(slowLogResponses.getClass().getName())
3409           .setLogMessage(slowLogResponses.toByteString()).build();
3410       }
3411     } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException
3412       | InvocationTargetException e) {
3413       LOG.error("Error while retrieving log entries.", e);
3414       throw new ServiceException(e);
3415     }
3416     throw new ServiceException("Invalid request params");
3417   }
3418 }