View Javadoc

1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  
19  package org.apache.hadoop.hbase.ipc;
20  
21  import static org.apache.hadoop.hbase.ipc.IPCUtil.toIOE;
22  import static org.apache.hadoop.hbase.ipc.IPCUtil.wrapException;
23  
24  import com.google.common.base.Preconditions;
25  import com.google.common.cache.CacheBuilder;
26  import com.google.common.cache.CacheLoader;
27  import com.google.common.cache.LoadingCache;
28  import com.google.protobuf.BlockingRpcChannel;
29  import com.google.protobuf.Descriptors;
30  import com.google.protobuf.Message;
31  import com.google.protobuf.RpcCallback;
32  import com.google.protobuf.RpcChannel;
33  import com.google.protobuf.RpcController;
34  import com.google.protobuf.ServiceException;
35  
36  import io.netty.util.HashedWheelTimer;
37  
38  import java.io.IOException;
39  import java.net.InetSocketAddress;
40  import java.net.SocketAddress;
41  import java.net.UnknownHostException;
42  import java.util.Collection;
43  import java.util.HashMap;
44  import java.util.Map;
45  import java.util.concurrent.Executors;
46  import java.util.concurrent.ScheduledExecutorService;
47  import java.util.concurrent.ScheduledFuture;
48  import java.util.concurrent.TimeUnit;
49  import java.util.concurrent.atomic.AtomicInteger;
50  
51  import org.apache.commons.logging.Log;
52  import org.apache.commons.logging.LogFactory;
53  import org.apache.hadoop.conf.Configuration;
54  import org.apache.hadoop.hbase.HConstants;
55  import org.apache.hadoop.hbase.ServerName;
56  import org.apache.hadoop.hbase.classification.InterfaceAudience;
57  import org.apache.hadoop.hbase.client.MetricsConnection;
58  import org.apache.hadoop.hbase.codec.Codec;
59  import org.apache.hadoop.hbase.codec.KeyValueCodec;
60  import org.apache.hadoop.hbase.net.Address;
61  import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Kind;
62  import org.apache.hadoop.hbase.security.User;
63  import org.apache.hadoop.hbase.security.UserProvider;
64  import org.apache.hadoop.hbase.security.token.AuthenticationTokenSelector;
65  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
66  import org.apache.hadoop.hbase.util.PoolMap;
67  import org.apache.hadoop.hbase.util.Threads;
68  import org.apache.hadoop.io.compress.CompressionCodec;
69  import org.apache.hadoop.ipc.RemoteException;
70  import org.apache.hadoop.security.token.TokenIdentifier;
71  import org.apache.hadoop.security.token.TokenSelector;
72  
73  /**
 * Provides the basics for an RpcClient implementation such as configuration and logging.
75   * <p>
76   * Locking schema of the current IPC implementation
77   * <ul>
78   * <li>There is a lock in {@link AbstractRpcClient} to protect the fetching or creating
79   * connection.</li>
80   * <li>There is a lock in {@link Call} to make sure that we can only finish the call once.</li>
81   * <li>The same for {@link HBaseRpcController} as {@link Call}. And see the comment of
82   * {@link HBaseRpcController#notifyOnCancel(RpcCallback, HBaseRpcController.CancellationCallback)}
83   * of how to deal with cancel.</li>
84   * <li>For connection implementation, the construction of a connection should be as fast as possible
85   * because the creation is protected under a lock. Connect to remote side when needed. There is no
86   * forced locking schema for a connection implementation.</li>
87   * <li>For the locking order, the {@link Call} and {@link HBaseRpcController}'s lock should be held
 * at last. So the callbacks in {@link Call} and {@link HBaseRpcController} should be executed
89   * outside the lock in {@link Call} and {@link HBaseRpcController} which means the implementations
90   * of the callbacks are free to hold any lock.</li>
91   * </ul>
92   */
@InterfaceAudience.Private
public abstract class AbstractRpcClient<T extends RpcConnection> implements RpcClient {
  // Log level is being changed in tests
  public static final Log LOG = LogFactory.getLog(AbstractRpcClient.class);

  // Shared timer for connection implementations; daemon thread, 10ms tick resolution.
  protected static final HashedWheelTimer WHEEL_TIMER = new HashedWheelTimer(
      Threads.newDaemonThreadFactory("RpcClient-timer"), 10, TimeUnit.MILLISECONDS);

  // Single daemon thread that periodically runs cleanupIdleConnections() (scheduled in the
  // constructor at minIdleTimeBeforeClose intervals).
  private static final ScheduledExecutorService IDLE_CONN_SWEEPER = Executors
      .newScheduledThreadPool(1, Threads.newDaemonThreadFactory("Idle-Rpc-Conn-Sweeper"));

  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MS_MUTABLE_COLLECTION_PKGPROTECT",
      justification="the rest of the system which live in the different package can use")
  protected final static Map<Kind, TokenSelector<? extends TokenIdentifier>> TOKEN_HANDLERS = new HashMap<>();

  static {
    // Register the selector used to pick HBase delegation tokens for authentication.
    TOKEN_HANDLERS.put(Kind.HBASE_AUTH_TOKEN, new AuthenticationTokenSelector());
  }

  protected boolean running = true; // if client runs; read/written under the 'connections' lock

  protected final Configuration conf;
  protected final String clusterId;
  protected final SocketAddress localAddr; // client socket bind address; may be null
  protected final MetricsConnection metrics; // may be null (metrics disabled)

  protected final UserProvider userProvider;
  protected final CellBlockBuilder cellBlockBuilder;

  protected final int minIdleTimeBeforeClose; // if the connection is idle for more than this
  // time (in ms), it will be closed at any moment.
  protected final int maxRetries; // the max. no. of retries for socket connections
  protected final long failureSleep; // Time to sleep before retry on failure.
  protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
  protected final boolean tcpKeepAlive; // if T then use keepalives
  protected final Codec codec; // null means no cell-block support (see hasCellBlockSupport)
  protected final CompressionCodec compressor; // null means no cell-block compression
  protected final boolean fallbackAllowed;

  protected final FailedServers failedServers; // recently-failed servers; fail-fast on these

  // Socket timeouts in milliseconds.
  protected final int connectTO;
  protected final int readTO;
  protected final int writeTO;

  // Pooled connections keyed by ConnectionId; all access is synchronized on this map.
  protected final PoolMap<ConnectionId, T> connections;

  // Monotonic call-id generator; wraps back to 0 at Integer.MAX_VALUE (see nextCallId()).
  private final AtomicInteger callIdCnt = new AtomicInteger(0);

  private final ScheduledFuture<?> cleanupIdleConnectionTask;

  private int maxConcurrentCallsPerServer;

  // Per-server in-flight call counters used to enforce maxConcurrentCallsPerServer;
  // entries expire one hour after last access.
  private static final LoadingCache<Address, AtomicInteger> concurrentCounterCache =
      CacheBuilder.newBuilder().expireAfterAccess(1, TimeUnit.HOURS).
          build(new CacheLoader<Address, AtomicInteger>() {
            @Override public AtomicInteger load(Address key) throws Exception {
              return new AtomicInteger(0);
            }
          });
153 
  /**
   * Construct an IPC client for the cluster <code>clusterId</code>
   * @param conf configuration
   * @param clusterId the cluster id
   * @param localAddr client socket bind address.
   * @param metrics the connection metrics
   */
  public AbstractRpcClient(Configuration conf, String clusterId, SocketAddress localAddr,
      MetricsConnection metrics) {
    this.userProvider = UserProvider.instantiate(conf);
    this.localAddr = localAddr;
    this.tcpKeepAlive = conf.getBoolean("hbase.ipc.client.tcpkeepalive", true);
    this.clusterId = clusterId != null ? clusterId : HConstants.CLUSTER_ID_DEFAULT;
    this.failureSleep = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
    this.maxRetries = conf.getInt("hbase.ipc.client.connect.max.retries", 0);
    this.tcpNoDelay = conf.getBoolean("hbase.ipc.client.tcpnodelay", true);
    this.cellBlockBuilder = new CellBlockBuilder(conf);

    this.minIdleTimeBeforeClose = conf.getInt(IDLE_TIME, 120000); // 2 minutes
    // NOTE: this.conf must be assigned before getCodec(), which reads this.conf.
    this.conf = conf;
    this.codec = getCodec();
    this.compressor = getCompressor(conf);
    this.fallbackAllowed = conf.getBoolean(IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
      IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
    this.failedServers = new FailedServers(conf);
    this.connectTO = conf.getInt(SOCKET_TIMEOUT_CONNECT, DEFAULT_SOCKET_TIMEOUT_CONNECT);
    this.readTO = conf.getInt(SOCKET_TIMEOUT_READ, DEFAULT_SOCKET_TIMEOUT_READ);
    this.writeTO = conf.getInt(SOCKET_TIMEOUT_WRITE, DEFAULT_SOCKET_TIMEOUT_WRITE);
    this.metrics = metrics;
    this.maxConcurrentCallsPerServer = conf.getInt(
        HConstants.HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD,
        HConstants.DEFAULT_HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD);

    this.connections = new PoolMap<>(getPoolType(conf), getPoolSize(conf));

    // Sweep idle connections on the shared sweeper thread, once per idle-timeout period.
    this.cleanupIdleConnectionTask = IDLE_CONN_SWEEPER.scheduleAtFixedRate(new Runnable() {

      @Override
      public void run() {
        cleanupIdleConnections();
      }
    }, minIdleTimeBeforeClose, minIdleTimeBeforeClose, TimeUnit.MILLISECONDS);

    if (LOG.isDebugEnabled()) {
      LOG.debug("Codec=" + this.codec + ", compressor=" + this.compressor + ", tcpKeepAlive="
          + this.tcpKeepAlive + ", tcpNoDelay=" + this.tcpNoDelay + ", connectTO=" + this.connectTO
          + ", readTO=" + this.readTO + ", writeTO=" + this.writeTO + ", minIdleTimeBeforeClose="
          + this.minIdleTimeBeforeClose + ", maxRetries=" + this.maxRetries + ", fallbackAllowed="
          + this.fallbackAllowed + ", bind address="
          + (this.localAddr != null ? this.localAddr : "null"));
    }
  }
207 
208   private void cleanupIdleConnections() {
209     long closeBeforeTime = EnvironmentEdgeManager.currentTime() - minIdleTimeBeforeClose;
210     synchronized (connections) {
211       for (T conn : connections.values()) {
212         // remove connection if it has not been chosen by anyone for more than maxIdleTime, and the
213         // connection itself has already shutdown. The latter check is because that we may still
214         // have some pending calls on connection so we should not shutdown the connection outside.
215         // The connection itself will disconnect if there is no pending call for maxIdleTime.
216         if (conn.getLastTouched() < closeBeforeTime && !conn.isActive()) {
217           LOG.info("Cleanup idle connection to " + conn.remoteId().getAddress());
218           connections.removeValue(conn.remoteId(), conn);
219           conn.cleanupConnection();
220         }
221       }
222     }
223   }
224 
225   public static String getDefaultCodec(final Configuration c) {
226     // If "hbase.client.default.rpc.codec" is empty string -- you can't set it to null because
227     // Configuration will complain -- then no default codec (and we'll pb everything). Else
228     // default is KeyValueCodec
229     return c.get(DEFAULT_CODEC_CLASS, KeyValueCodec.class.getCanonicalName());
230   }
231 
232   /**
233    * Encapsulate the ugly casting and RuntimeException conversion in private method.
234    * @return Codec to use on this client.
235    */
236   Codec getCodec() {
237     // For NO CODEC, "hbase.client.rpc.codec" must be configured with empty string AND
238     // "hbase.client.default.rpc.codec" also -- because default is to do cell block encoding.
239     String className = conf.get(HConstants.RPC_CODEC_CONF_KEY, getDefaultCodec(this.conf));
240     if (className == null || className.length() == 0) {
241       return null;
242     }
243     try {
244       return (Codec) Class.forName(className).getDeclaredConstructor().newInstance();
245     } catch (Exception e) {
246       throw new RuntimeException("Failed getting codec " + className, e);
247     }
248   }
249 
  @Override
  public boolean hasCellBlockSupport() {
    // Cell blocks can only be built/parsed when a codec is configured (see getCodec()).
    return this.codec != null;
  }

  // for writing tests that want to throw exception when connecting.
  boolean isTcpNoDelay() {
    return tcpNoDelay;
  }
259 
260   /**
261    * Encapsulate the ugly casting and RuntimeException conversion in private method.
262    * @param conf configuration
263    * @return The compressor to use on this client.
264    */
265   private static CompressionCodec getCompressor(final Configuration conf) {
266     String className = conf.get("hbase.client.rpc.compressor", null);
267     if (className == null || className.isEmpty()) {
268       return null;
269     }
270     try {
271       return (CompressionCodec) Class.forName(className).getDeclaredConstructor().newInstance();
272     } catch (Exception e) {
273       throw new RuntimeException("Failed getting compressor " + className, e);
274     }
275   }
276 
  /**
   * Return the pool type specified in the configuration, which must be set to either
   * {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#RoundRobin} or
   * {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#ThreadLocal}, otherwise default to the
   * former. For applications with many user threads, use a small round-robin pool. For applications
   * with few user threads, you may want to try using a thread-local pool. In any case, the number
   * of {@link org.apache.hadoop.hbase.ipc.RpcClient} instances should not exceed the operating
   * system's hard limit on the number of connections.
   * @param config configuration
   * @return either a {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#RoundRobin} or
   *         {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#ThreadLocal}
   */
  private static PoolMap.PoolType getPoolType(Configuration config) {
    // PoolMap.PoolType.valueOf takes the configured name plus the default and the allowed types.
    return PoolMap.PoolType.valueOf(config.get(HConstants.HBASE_CLIENT_IPC_POOL_TYPE),
      PoolMap.PoolType.RoundRobin, PoolMap.PoolType.ThreadLocal);
  }

  /**
   * Return the pool size specified in the configuration, which is applicable only if the pool type
   * is {@link org.apache.hadoop.hbase.util.PoolMap.PoolType#RoundRobin}.
   * @param config configuration
   * @return the maximum pool size
   */
  private static int getPoolSize(Configuration config) {
    // Defaults to a single connection per ConnectionId.
    return config.getInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, 1);
  }
303 
304   private int nextCallId() {
305     int id, next;
306     do {
307       id = callIdCnt.get();
308       next = id < Integer.MAX_VALUE ? id + 1 : 0;
309     } while (!callIdCnt.compareAndSet(id, next));
310     return id;
311   }
312 
313   /**
314    * Make a blocking call. Throws exceptions if there are network problems or if the remote code
315    * threw an exception.
316    * @param ticket Be careful which ticket you pass. A new user will mean a new Connection.
317    *          {@link UserProvider#getCurrent()} makes a new instance of User each time so will be a
318    *          new Connection each time.
319    * @return A pair with the Message response and the Cell data (if any).
320    */
321   private Message callBlockingMethod(Descriptors.MethodDescriptor md, HBaseRpcController hrc,
322       Message param, Message returnType, final User ticket, final InetSocketAddress isa)
323       throws ServiceException {
324     BlockingRpcCallback<Message> done = new BlockingRpcCallback<>();
325     callMethod(md, hrc, param, returnType, ticket, isa, done);
326     Message val;
327     try {
328       val = done.get();
329     } catch (IOException e) {
330       throw new ServiceException(e);
331     }
332     if (hrc.failed()) {
333       throw new ServiceException(hrc.getFailed());
334     } else {
335       return val;
336     }
337   }
338 
339   /**
340    * Get a connection from the pool, or create a new one and add it to the pool. Connections to a
341    * given host/port are reused.
342    */
343   private T getConnection(ConnectionId remoteId) throws IOException {
344     if (failedServers.isFailedServer(remoteId.getAddress())) {
345       if (LOG.isDebugEnabled()) {
346         LOG.debug("Not trying to connect to " + remoteId.getAddress()
347             + " this server is in the failed servers list");
348       }
349       throw new FailedServerException(
350           "This server is in the failed servers list: " + remoteId.getAddress());
351     }
352     T conn;
353     synchronized (connections) {
354       if (!running) {
355         throw new StoppedRpcClientException();
356       }
357       conn = connections.get(remoteId);
358       if (conn == null) {
359         conn = createConnection(remoteId);
360         connections.put(remoteId, conn);
361       }
362       conn.setLastTouched(EnvironmentEdgeManager.currentTime());
363     }
364     return conn;
365   }
366 
  /**
   * Create (but do not connect) a connection object for the given remote id. Not connected:
   * construction must be fast because it happens under the 'connections' lock; the
   * implementation connects to the remote side lazily, when needed.
   */
  protected abstract T createConnection(ConnectionId remoteId) throws IOException;
371 
372   private void onCallFinished(Call call, HBaseRpcController hrc, Address addr,
373       RpcCallback<Message> callback) {
374     call.callStats.setCallTimeMs(EnvironmentEdgeManager.currentTime() - call.getStartTime());
375     if (metrics != null) {
376       metrics.updateRpc(call.md, call.param, call.callStats);
377     }
378     if (LOG.isTraceEnabled()) {
379       LOG.trace(
380         "Call: " + call.md.getName() + ", callTime: " + call.callStats.getCallTimeMs() + "ms");
381     }
382     if (call.error != null) {
383       if (call.error instanceof RemoteException) {
384         call.error.fillInStackTrace();
385         hrc.setFailed(call.error);
386       } else {
387         hrc.setFailed(wrapException(addr, call.error));
388       }
389       callback.run(null);
390     } else {
391       hrc.setDone(call.cells);
392       callback.run(call.response);
393     }
394   }
395 
  /**
   * Asynchronously invoke the given method against {@code inetAddr}, delivering the result (or
   * null on failure, with the failure recorded in {@code hrc}) through {@code callback}.
   */
  private void callMethod(final Descriptors.MethodDescriptor md, final HBaseRpcController hrc,
      final Message param, Message returnType, final User ticket,
      final InetSocketAddress inetAddr, final RpcCallback<Message> callback) {
    final MetricsConnection.CallStats cs = MetricsConnection.newCallStats();
    cs.setStartTime(EnvironmentEdgeManager.currentTime());
    final Address addr = Address.fromSocketAddress(inetAddr);
    // Per-server in-flight counter, incremented below and decremented by the finish callback.
    final AtomicInteger counter = concurrentCounterCache.getUnchecked(addr);
    Call call = new Call(nextCallId(), md, param, hrc.cellScanner(), returnType,
        hrc.getCallTimeout(), hrc.getPriority(), new RpcCallback<Call>() {
          @Override
          public void run(Call call) {
            // Release the concurrency slot before reporting the outcome.
            counter.decrementAndGet();
            onCallFinished(call, hrc, addr, callback);
          }
        }, cs);
    ConnectionId remoteId = new ConnectionId(ticket, md.getService().getName(), addr);
    int count = counter.incrementAndGet();
    try {
      if (count > maxConcurrentCallsPerServer) {
        // Too many outstanding calls to this server; fail fast. The exception is routed
        // through setException, which presumably completes the call and fires the finish
        // callback above so the counter is decremented — confirm in Call.setException.
        throw new ServerTooBusyException(addr, count);
      }
      cs.setConcurrentCallsPerServer(count);
      T connection = getConnection(remoteId);
      connection.sendRequest(call, hrc);
    } catch (Exception e) {
      call.setException(toIOE(e));
    }
  }
424 
  /** Builds an {@link Address} (host + port) from the given server name. */
  private static Address createAddr(ServerName sn) {
    return Address.fromParts(sn.getHostname(), sn.getPort());
  }
428 
429   /**
430    * Interrupt the connections to the given ip:port server. This should be called if the server is
431    * known as actually dead. This will not prevent current operation to be retried, and, depending
432    * on their own behavior, they may retry on the same server. This can be a feature, for example at
433    * startup. In any case, they're likely to get connection refused (if the process died) or no
434    * route to host: i.e. their next retries should be faster and with a safe exception.
435    */
436   @Override
437   public void cancelConnections(ServerName sn) {
438     synchronized (connections) {
439       for (T connection : connections.values()) {
440         ConnectionId remoteId = connection.remoteId();
441         if (remoteId.getAddress().getPort() == sn.getPort() &&
442             remoteId.getAddress().getHostname().equals(sn.getHostname())) {
443           LOG.info("The server on " + sn.toString() + " is dead - stopping the connection " +
444               connection.remoteId);
445           connections.removeValue(remoteId, connection);
446           connection.shutdown();
447         }
448       }
449     }
450   }
451 
452   /**
453    * Configure an hbase rpccontroller
454    * @param controller to configure
455    * @param channelOperationTimeout timeout for operation
456    * @return configured controller
457    */
458   static HBaseRpcController configureHBaseRpcController(
459       RpcController controller, int channelOperationTimeout) {
460     HBaseRpcController hrc;
461     if (controller != null && controller instanceof HBaseRpcController) {
462       hrc = (HBaseRpcController) controller;
463       if (!hrc.hasCallTimeout()) {
464         hrc.setCallTimeout(channelOperationTimeout);
465       }
466     } else {
467       hrc = new HBaseRpcControllerImpl();
468       hrc.setCallTimeout(channelOperationTimeout);
469     }
470     return hrc;
471   }
472 
  /**
   * Subclass hook invoked from {@link #close()} after every pooled connection has been shut
   * down but before the connections' resources are cleaned up.
   */
  protected abstract void closeInternal();
474 
475   @Override
476   public void close() {
477     if (LOG.isDebugEnabled()) {
478       LOG.debug("Stopping rpc client");
479     }
480     Collection<T> connToClose;
481     synchronized (connections) {
482       if (!running) {
483         return;
484       }
485       running = false;
486       connToClose = connections.values();
487       connections.clear();
488     }
489     cleanupIdleConnectionTask.cancel(true);
490     for (T conn : connToClose) {
491       conn.shutdown();
492     }
493     closeInternal();
494     for (T conn : connToClose) {
495       conn.cleanupConnection();
496     }
497   }
498 
  @Override
  public BlockingRpcChannel createBlockingRpcChannel(final ServerName sn, final User ticket,
      int rpcTimeout) {
    // Channel resolves the address lazily on first call (see BlockingRpcChannelImplementation).
    return new BlockingRpcChannelImplementation(this, createAddr(sn), ticket, rpcTimeout);
  }

  @Override
  public RpcChannel createRpcChannel(ServerName sn, User user, int rpcTimeout) {
    // Async counterpart; also resolves the address lazily on first call.
    return new RpcChannelImplementation(this, createAddr(sn), user, rpcTimeout);
  }
509 
510   private static class AbstractRpcChannel {
511 
512     protected final Address addr;
513 
514     // We cache the resolved InetSocketAddress for the channel so we do not do a DNS lookup
515     // per method call on the channel. If the remote target is removed or reprovisioned and
516     // its identity changes a new channel with a newly resolved InetSocketAddress will be
517     // created as part of retry, so caching here is fine.
518     // Normally, caching an InetSocketAddress is an anti-pattern.
519     protected InetSocketAddress isa;
520 
521     protected final AbstractRpcClient<?> rpcClient;
522 
523     protected final User ticket;
524 
525     protected final int rpcTimeout;
526 
527     protected AbstractRpcChannel(AbstractRpcClient<?> rpcClient, Address addr,
528         User ticket, int rpcTimeout) {
529       this.addr = addr;
530       this.rpcClient = rpcClient;
531       this.ticket = ticket;
532       this.rpcTimeout = rpcTimeout;
533     }
534 
535     /**
536      * Configure an rpc controller
537      * @param controller to configure
538      * @return configured rpc controller
539      */
540     protected HBaseRpcController configureRpcController(RpcController controller) {
541       HBaseRpcController hrc;
542       // TODO: Ideally we should not use an RpcController other than HBaseRpcController at client
543       // side. And now we may use ServerRpcController.
544       if (controller != null && controller instanceof HBaseRpcController) {
545         hrc = (HBaseRpcController) controller;
546         if (!hrc.hasCallTimeout()) {
547           hrc.setCallTimeout(rpcTimeout);
548         }
549       } else {
550         hrc = new HBaseRpcControllerImpl();
551         hrc.setCallTimeout(rpcTimeout);
552       }
553       return hrc;
554     }
555   }
556 
  /**
   * Blocking rpc channel that goes via hbase rpc. Resolves the target address lazily on the
   * first call and caches the result in {@code isa} (see AbstractRpcChannel for the rationale).
   */
  public static class BlockingRpcChannelImplementation extends AbstractRpcChannel
      implements BlockingRpcChannel {

    protected BlockingRpcChannelImplementation(AbstractRpcClient<?> rpcClient,
        Address addr, User ticket, int rpcTimeout) {
      super(rpcClient, addr, ticket, rpcTimeout);
    }

    @Override
    public Message callBlockingMethod(Descriptors.MethodDescriptor md, RpcController controller,
        Message param, Message returnType) throws ServiceException {
      // Look up remote address upon first call
      if (isa == null) {
        if (this.rpcClient.metrics != null) {
          this.rpcClient.metrics.incrNsLookups();
        }
        isa = addr.toSocketAddress();
        if (isa.isUnresolved()) {
          if (this.rpcClient.metrics != null) {
            this.rpcClient.metrics.incrNsLookupsFailed();
          }
          // Reset so the next invocation retries the DNS lookup instead of caching the failure.
          isa = null;
          throw new ServiceException(new UnknownHostException(addr + " could not be resolved"));
        }
      }
      return rpcClient.callBlockingMethod(md, configureRpcController(controller),
        param, returnType, ticket, isa);
    }
  }
589 
  /**
   * Async rpc channel that goes via hbase rpc. Resolves the target address lazily on the first
   * call and caches the result in {@code isa} (see AbstractRpcChannel for the rationale).
   */
  public static class RpcChannelImplementation extends AbstractRpcChannel implements
      RpcChannel {

    protected RpcChannelImplementation(AbstractRpcClient<?> rpcClient, Address addr,
        User ticket, int rpcTimeout) {
      super(rpcClient, addr, ticket, rpcTimeout);
    }

    @Override
    public void callMethod(Descriptors.MethodDescriptor md, RpcController controller,
        Message param, Message returnType, RpcCallback<Message> done) {
      // Unlike the blocking channel, a null controller is rejected: errors can only be
      // reported through the controller here, since this method throws nothing.
      HBaseRpcController configuredController =
        configureRpcController(Preconditions.checkNotNull(controller,
          "RpcController can not be null for async rpc call"));
      // Look up remote address upon first call
      if (isa == null || isa.isUnresolved()) {
        if (this.rpcClient.metrics != null) {
          this.rpcClient.metrics.incrNsLookups();
        }
        isa = addr.toSocketAddress();
        if (isa.isUnresolved()) {
          if (this.rpcClient.metrics != null) {
            this.rpcClient.metrics.incrNsLookupsFailed();
          }
          // Reset so the next invocation retries the DNS lookup.
          isa = null;
          // NOTE(review): on resolution failure the caller's controller is failed but 'done'
          // is never invoked — confirm callers poll the controller rather than await 'done'.
          controller.setFailed(addr + " could not be resolved");
          return;
        }
      }
      // This method does not throw any exceptions, so the caller must provide a
      // HBaseRpcController which is used to pass the exceptions.
      this.rpcClient.callMethod(md, configuredController, param, returnType, ticket, isa, done);
    }
  }
627 }