View Javadoc

1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase.client;
19  
20  import static org.apache.hadoop.hbase.HConstants.EMPTY_END_ROW;
21  import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
22  
23  import java.io.IOException;
24  import java.util.Arrays;
25  import java.util.Comparator;
26  import java.util.List;
27  import java.util.concurrent.ExecutorService;
28  import java.util.concurrent.ThreadLocalRandom;
29  
30  import org.apache.commons.logging.Log;
31  import org.apache.hadoop.conf.Configuration;
32  import org.apache.hadoop.hbase.Cell;
33  import org.apache.hadoop.hbase.CellComparator;
34  import org.apache.hadoop.hbase.CellUtil;
35  import org.apache.hadoop.hbase.HConstants;
36  import org.apache.hadoop.hbase.HRegionInfo;
37  import org.apache.hadoop.hbase.ServerName;
38  import org.apache.hadoop.hbase.TableName;
39  import org.apache.hadoop.hbase.classification.InterfaceAudience;
40  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
41  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
42  import org.apache.hadoop.hbase.security.User;
43  import org.apache.hadoop.hbase.security.UserProvider;
44  import org.apache.hadoop.hbase.util.Bytes;
45  
46  /**
47   * Utility used by client connections.
48   */
49  @InterfaceAudience.Private
50  public class ConnectionUtils {
51  
52    /**
53     * Calculate pause time.
54     * Built on {@link HConstants#RETRY_BACKOFF}.
55     * @param pause
56     * @param tries
57     * @return How long to wait after <code>tries</code> retries
58     */
59    public static long getPauseTime(final long pause, final int tries) {
60      int ntries = tries;
61      if (ntries >= HConstants.RETRY_BACKOFF.length) {
62        ntries = HConstants.RETRY_BACKOFF.length - 1;
63      }
64      if (ntries < 0) {
65        ntries = 0;
66      }
67  
68      long normalPause = pause * HConstants.RETRY_BACKOFF[ntries];
69      // 1% possible jitter
70      long jitter = (long) (normalPause * ThreadLocalRandom.current().nextFloat() * 0.01f);
71      return normalPause + jitter;
72    }
73  
74  
75    /**
76     * @param conn The connection for which to replace the generator.
77     * @param cnm Replaces the nonce generator used, for testing.
78     * @return old nonce generator.
79     */
80    public static NonceGenerator injectNonceGeneratorForTesting(
81        ClusterConnection conn, NonceGenerator cnm) {
82      return ConnectionManager.injectNonceGeneratorForTesting(conn, cnm);
83    }
84  
85    /**
86     * Changes the configuration to set the number of retries needed when using HConnection
87     * internally, e.g. for  updating catalog tables, etc.
88     * Call this method before we create any Connections.
89     * @param c The Configuration instance to set the retries into.
90     * @param log Used to log what we set in here.
91     */
92    public static void setServerSideHConnectionRetriesConfig(
93        final Configuration c, final String sn, final Log log) {
94      // TODO: Fix this. Not all connections from server side should have 10 times the retries.
95      int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
96        HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
97      // Go big.  Multiply by 10.  If we can't get to meta after this many retries
98      // then something seriously wrong.
99      int serversideMultiplier = c.getInt("hbase.client.serverside.retries.multiplier", 10);
100     int retries = hcRetries * serversideMultiplier;
101     c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
102     log.info(sn + " server-side HConnection retries=" + retries);
103   }
104 
105   /**
106    * Adapt a HConnection so that it can bypass the RPC layer (serialization,
107    * deserialization, networking, etc..) -- i.e. short-circuit -- when talking to a local server.
108    * @param conn the connection to adapt
109    * @param serverName the local server name
110    * @param admin the admin interface of the local server
111    * @param client the client interface of the local server
112    * @return an adapted/decorated HConnection
113    */
114   @Deprecated
115   public static ClusterConnection createShortCircuitHConnection(final Connection conn,
116       final ServerName serverName, final AdminService.BlockingInterface admin,
117       final ClientService.BlockingInterface client) {
118     return new ConnectionAdapter(conn) {
119       @Override
120       public AdminService.BlockingInterface getAdmin(
121           ServerName sn, boolean getMaster) throws IOException {
122         return serverName.equals(sn) ? admin : super.getAdmin(sn, getMaster);
123       }
124 
125       @Override
126       public ClientService.BlockingInterface getClient(
127           ServerName sn) throws IOException {
128         return serverName.equals(sn) ? client : super.getClient(sn);
129       }
130     };
131   }
132 
133   /**
134    * Creates a short-circuit connection that can bypass the RPC layer (serialization,
135    * deserialization, networking, etc..) when talking to a local server.
136    * @param conf the current configuration
137    * @param pool the thread pool to use for batch operations
138    * @param user the user the connection is for
139    * @param serverName the local server name
140    * @param admin the admin interface of the local server
141    * @param client the client interface of the local server
142    * @return a short-circuit connection.
143    * @throws IOException
144    */
145   public static ClusterConnection createShortCircuitConnection(final Configuration conf,
146     ExecutorService pool, User user, final ServerName serverName,
147     final AdminService.BlockingInterface admin, final ClientService.BlockingInterface client)
148     throws IOException {
149     if (user == null) {
150       user = UserProvider.instantiate(conf).getCurrent();
151     }
152     return new ConnectionManager.HConnectionImplementation(conf, false, pool, user) {
153       @Override
154       public AdminService.BlockingInterface getAdmin(ServerName sn, boolean getMaster)
155         throws IOException {
156         return serverName.equals(sn) ? admin : super.getAdmin(sn, getMaster);
157       }
158 
159       @Override
160       public ClientService.BlockingInterface getClient(ServerName sn) throws IOException {
161         return serverName.equals(sn) ? client : super.getClient(sn);
162       }
163     };
164   }
165 
166   /**
167    * Setup the connection class, so that it will not depend on master being online. Used for testing
168    * @param conf configuration to set
169    */
170   public static void setupMasterlessConnection(Configuration conf) {
171     conf.set(HConnection.HBASE_CLIENT_CONNECTION_IMPL,
172       MasterlessConnection.class.getName());
173   }
174 
175   /**
176    * Some tests shut down the master. But table availability is a master RPC which is performed on
177    * region re-lookups.
178    */
179   static class MasterlessConnection extends ConnectionManager.HConnectionImplementation {
180     MasterlessConnection(Configuration conf, boolean managed,
181       ExecutorService pool, User user) throws IOException {
182       super(conf, managed, pool, user);
183     }
184 
185     @Override
186     public boolean isTableDisabled(TableName tableName) throws IOException {
187       // treat all tables as enabled
188       return false;
189     }
190   }
191 
192   /**
193    * Get a unique key for the rpc stub to the given server.
194    */
195   static String getStubKey(String serviceName, ServerName serverName) {
196     return String.format("%s@%s", serviceName, serverName);
197   }
198 
  // A byte array in which all elements are the max byte, and it is used to
  // construct closest front row (see createCloseRowBefore below).
  static final byte[] MAX_BYTE_ARRAY = Bytes.createMaxByteArray(9);
202 
203   /**
204    * Create the closest row after the specified row
205    */
206   static byte[] createClosestRowAfter(byte[] row) {
207     return Arrays.copyOf(row, row.length + 1);
208   }
209 
  /**
   * Create a row before the specified row and very close to the specified row.
   */
  static byte[] createCloseRowBefore(byte[] row) {
    if (row.length == 0) {
      // Nothing sorts before the empty start row; return the fixed run of
      // 0xFF bytes as the fallback marker.
      return MAX_BYTE_ARRAY;
    }
    if (row[row.length - 1] == 0) {
      // Dropping a trailing 0x00 yields the row immediately before this one.
      return Arrays.copyOf(row, row.length - 1);
    } else {
      // Decrement the last byte, then pad with 0xFF bytes so the result stays
      // just below the original row in sort order.
      byte[] nextRow = new byte[row.length + MAX_BYTE_ARRAY.length];
      System.arraycopy(row, 0, nextRow, 0, row.length - 1);
      nextRow[row.length - 1] = (byte) ((row[row.length - 1] & 0xFF) - 1);
      System.arraycopy(MAX_BYTE_ARRAY, 0, nextRow, row.length, MAX_BYTE_ARRAY.length);
      return nextRow;
    }
  }
227 
  /** @return true if <code>row</code> equals {@link HConstants#EMPTY_START_ROW}. */
  static boolean isEmptyStartRow(byte[] row) {
    return Bytes.equals(row, EMPTY_START_ROW);
  }
231 
  /** @return true if <code>row</code> equals {@link HConstants#EMPTY_END_ROW}. */
  static boolean isEmptyStopRow(byte[] row) {
    return Bytes.equals(row, EMPTY_END_ROW);
  }
235 
  // Orders cells by everything except the row key; used by filterCells below to
  // locate a position inside a single row's cell array.
  private static final Comparator<Cell> COMPARE_WITHOUT_ROW = new Comparator<Cell>() {

    @Override
    public int compare(Cell o1, Cell o2) {
      return CellComparator.compareWithoutRow(o1, o2);
    }
  };
243 
  /**
   * Remove from <code>result</code> every cell that sorts at or before
   * <code>keepCellsAfter</code> when compared without the row key.
   * NOTE(review): presumably used to trim cells already delivered when a row is
   * re-read (e.g. resuming after a partial result) — confirm against callers.
   * @param result the result to filter
   * @param keepCellsAfter marker cell; null means keep everything
   * @return <code>result</code> unchanged when nothing applies, a trimmed copy,
   *         or null when every cell was trimmed away
   */
  static Result filterCells(Result result, Cell keepCellsAfter) {
    if (keepCellsAfter == null) {
      // do not need to filter
      return result;
    }
    // not the same row
    if (!CellUtil.matchingRow(keepCellsAfter, result.getRow(), 0, result.getRow().length)) {
      return result;
    }
    Cell[] rawCells = result.rawCells();
    int index = Arrays.binarySearch(rawCells, keepCellsAfter, COMPARE_WITHOUT_ROW);
    if (index < 0) {
      // Miss: binarySearch returns (-(insertion point) - 1); recover the index
      // of the first cell sorting after keepCellsAfter.
      index = -index - 1;
    } else {
      // Exact match: the matching cell itself must be skipped too.
      index++;
    }
    if (index == 0) {
      // No cell sorts at or before the marker; keep the result as-is.
      return result;
    }
    if (index == rawCells.length) {
      // Every cell was at or before the marker; nothing remains.
      return null;
    }
    return Result.create(Arrays.copyOfRange(rawCells, index, rawCells.length), null,
      result.isStale(), result.mayHaveMoreCellsInRow());
  }
269 
270   static boolean noMoreResultsForScan(Scan scan, HRegionInfo info) {
271     if (isEmptyStopRow(info.getEndKey())) {
272       return true;
273     }
274     if (isEmptyStopRow(scan.getStopRow())) {
275       return false;
276     }
277     int c = Bytes.compareTo(info.getEndKey(), scan.getStopRow());
278     // 1. if our stop row is less than the endKey of the region
279     // 2. if our stop row is equal to the endKey of the region and we do not include the stop row
280     // for scan.
281     return c > 0 || (c == 0 && !scan.includeStopRow());
282   }
283 
284   static boolean noMoreResultsForReverseScan(Scan scan, HRegionInfo info) {
285     if (isEmptyStartRow(info.getStartKey())) {
286       return true;
287     }
288     if (isEmptyStopRow(scan.getStopRow())) {
289       return false;
290     }
291     // no need to test the inclusive of the stop row as the start key of a region is included in
292     // the region.
293     return Bytes.compareTo(info.getStartKey(), scan.getStopRow()) <= 0;
294   }
295 
296   public static ScanResultCache createScanResultCache(Scan scan, List<Result> cache) {
297     if (scan.getAllowPartialResults()) {
298       return new AllowPartialScanResultCache(cache);
299     } else if (scan.getBatch() > 0) {
300       return new BatchScanResultCache(cache, scan.getBatch());
301     } else {
302       return new CompleteScanResultCache(cache);
303     }
304   }
305 }