View Javadoc

1   /**
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  package org.apache.hadoop.hbase;
19  
20  import com.google.protobuf.ServiceException;
21  
22  import org.apache.commons.logging.Log;
23  import org.apache.commons.logging.LogFactory;
24  import org.apache.hadoop.conf.Configuration;
25  import org.apache.hadoop.hbase.classification.InterfaceAudience;
26  import org.apache.hadoop.hbase.client.ClusterConnection;
27  import org.apache.hadoop.hbase.client.Connection;
28  import org.apache.hadoop.hbase.client.ConnectionFactory;
29  import org.apache.hadoop.hbase.client.Delete;
30  import org.apache.hadoop.hbase.client.Get;
31  import org.apache.hadoop.hbase.client.HTable;
32  import org.apache.hadoop.hbase.client.Mutation;
33  import org.apache.hadoop.hbase.client.Put;
34  import org.apache.hadoop.hbase.client.RegionLocator;
35  import org.apache.hadoop.hbase.client.RegionReplicaUtil;
36  import org.apache.hadoop.hbase.client.Result;
37  import org.apache.hadoop.hbase.client.ResultScanner;
38  import org.apache.hadoop.hbase.client.Scan;
39  import org.apache.hadoop.hbase.client.Table;
40  import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
41  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
42  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
43  import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
44  import org.apache.hadoop.hbase.util.Bytes;
45  import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
46  import org.apache.hadoop.hbase.util.Pair;
47  import org.apache.hadoop.hbase.util.PairOfSameType;
48  import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
49  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
50  
51  import java.io.IOException;
52  import java.io.InterruptedIOException;
53  import java.util.ArrayList;
54  import java.util.List;
55  import java.util.Map;
56  import java.util.NavigableMap;
57  import java.util.Set;
58  import java.util.SortedMap;
59  import java.util.TreeMap;
60  import java.util.regex.Matcher;
61  import java.util.regex.Pattern;
62  
63  /**
64   * Read/write operations on region and assignment information store in
65   * <code>hbase:meta</code>.
66   *
67   * Some of the methods of this class take ZooKeeperWatcher as a param. The only reason
68   * for this is because when used on client-side (like from HBaseAdmin), we want to use
69   * short-living connection (opened before each operation, closed right after), while
70   * when used on HM or HRS (like in AssignmentManager) we want permanent connection.
71   */
72  @InterfaceAudience.Private
73  public class MetaTableAccessor {
74  
75    /*
76     * HBASE-10070 adds a replicaId to HRI, meaning more than one HRI can be defined for the
77     * same table range (table, startKey, endKey). For every range, there will be at least one
78     * HRI defined which is called default replica.
79     *
80     * Meta layout (as of 0.98 + HBASE-10070) is like:
81     * For each table range, there is a single row, formatted like:
82     * <tableName>,<startKey>,<regionId>,<encodedRegionName>. This row corresponds to the regionName
83     * of the default region replica.
84     * Columns are:
85     * info:regioninfo         => contains serialized HRI for the default region replica
86     * info:server             => contains hostname:port (in string form) for the server hosting
87     *                            the default regionInfo replica
88     * info:server_<replicaId> => contains hostname:port (in string form) for the server hosting the
89     *                            regionInfo replica with replicaId
90     * info:serverstartcode    => contains server start code (in binary long form) for the server
91     *                            hosting the default regionInfo replica
92     * info:serverstartcode_<replicaId> => contains server start code (in binary long form) for the
93     *                                     server hosting the regionInfo replica with replicaId
94     * info:seqnumDuringOpen    => contains seqNum (in binary long form) for the region at the time
95     *                             the server opened the region with default replicaId
96     * info:seqnumDuringOpen_<replicaId> => contains seqNum (in binary long form) for the region at
97     *                             the time the server opened the region with replicaId
98     * info:splitA              => contains a serialized HRI for the first daughter region if the
99     *                             region is split
100    * info:splitB              => contains a serialized HRI for the second daughter region if the
101    *                             region is split
102    * info:mergeA              => contains a serialized HRI for the first parent region if the
103    *                             region is the result of a merge
104    * info:mergeB              => contains a serialized HRI for the second parent region if the
105    *                             region is the result of a merge
106    *
107    * The actual layout of meta should be encapsulated inside MetaTableAccessor methods,
108    * and should not leak out of it (through Result objects, etc)
109    */
110 
  private static final Log LOG = LogFactory.getLog(MetaTableAccessor.class);

  /** Row-key prefix shared by all hbase:meta region rows ('hbase:meta,'). */
  static final byte [] META_REGION_PREFIX;
  static {
    // Copy the prefix from FIRST_META_REGIONINFO into META_REGION_PREFIX.
    // FIRST_META_REGIONINFO == 'hbase:meta,,1'.  META_REGION_PREFIX == 'hbase:meta,'
    // i.e. drop the trailing two bytes (",1") of the full first-meta region name.
    int len = HRegionInfo.FIRST_META_REGIONINFO.getRegionName().length - 2;
    META_REGION_PREFIX = new byte [len];
    System.arraycopy(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), 0,
      META_REGION_PREFIX, 0, len);
  }

  /** The delimiter for meta columns for replicaIds &gt; 0 */
  protected static final char META_REPLICA_ID_DELIMITER = '_';

  /** A regex for parsing server columns from meta. See above javadoc for meta layout.
   *  Group 1 (optional) is the 4-hex-digit replicaId suffix, including the leading '_'. */
  private static final Pattern SERVER_COLUMN_PATTERN
    = Pattern.compile("^server(_[0-9a-fA-F]{4})?$");
129 
130   ////////////////////////
131   // Reading operations //
132   ////////////////////////
133 
134  /**
135    * Performs a full scan of a <code>hbase:meta</code> table.
136    * @return List of {@link org.apache.hadoop.hbase.client.Result}
137    * @throws IOException
138    */
139   public static List<Result> fullScanOfMeta(Connection connection)
140   throws IOException {
141     CollectAllVisitor v = new CollectAllVisitor();
142     fullScan(connection, v, null);
143     return v.getResults();
144   }
145 
146   /**
147    * Performs a full scan of <code>hbase:meta</code>.
148    * @param connection connection we're using
149    * @param visitor Visitor invoked against each row.
150    * @throws IOException
151    */
152   public static void fullScan(Connection connection,
153       final Visitor visitor)
154   throws IOException {
155     fullScan(connection, visitor, null);
156   }
157 
158   /**
159    * Performs a full scan of <code>hbase:meta</code>.
160    * @param connection connection we're using
161    * @return List of {@link Result}
162    * @throws IOException
163    */
164   public static List<Result> fullScan(Connection connection)
165     throws IOException {
166     CollectAllVisitor v = new CollectAllVisitor();
167     fullScan(connection, v, null);
168     return v.getResults();
169   }
170 
  /**
   * Callers should call close on the returned {@link Table} instance.
   * @param connection connection we're using to access Meta
   * @return An {@link Table} for <code>hbase:meta</code>
   * @throws IOException if the connection is closed or the table cannot be obtained
   * @throws NullPointerException if <code>connection</code> is null
   */
  static Table getMetaHTable(final Connection connection)
  throws IOException {
    // We used to pass whole CatalogTracker in here, now we just pass in Connection
    if (connection == null) {
      throw new NullPointerException("No connection");
    } else if (connection.isClosed()) {
      throw new IOException("connection is closed");
    }
    // If the passed in 'connection' is 'managed' -- i.e. every second test uses
    // a Table or an HBaseAdmin with managed connections -- then doing
    // connection.getTable will throw an exception saying you are NOT to use
    // managed connections getting tables.  Leaving this as it is for now. Will
    // revisit when inclined to change all tests.  User code probaby makes use of
    // managed connections too so don't change it till post hbase 1.0.
    //
    // There should still be a way to use this method with an unmanaged connection.
    if (connection instanceof ClusterConnection) {
      if (((ClusterConnection) connection).isManaged()) {
        // Managed connections refuse getTable(); construct the (deprecated)
        // HTable directly against the cluster connection instead.
        return new HTable(TableName.META_TABLE_NAME, (ClusterConnection) connection);
      }
    }
    return connection.getTable(TableName.META_TABLE_NAME);
  }
200 
201   /**
202    * @param t Table to use (will be closed when done).
203    * @param g Get to run
204    * @throws IOException
205    */
206   private static Result get(final Table t, final Get g) throws IOException {
207     try {
208       return t.get(g);
209     } finally {
210       t.close();
211     }
212   }
213 
214   /**
215    * Gets the region info and assignment for the specified region.
216    * @param connection connection we're using
217    * @param regionName Region to lookup.
218    * @return Location and HRegionInfo for <code>regionName</code>
219    * @throws IOException
220    * @deprecated use {@link #getRegionLocation(Connection, byte[])} instead
221    */
222   @Deprecated
223   public static Pair<HRegionInfo, ServerName> getRegion(Connection connection, byte [] regionName)
224     throws IOException {
225     HRegionLocation location = getRegionLocation(connection, regionName);
226     return location == null
227       ? null
228       : new Pair<HRegionInfo, ServerName>(location.getRegionInfo(), location.getServerName());
229   }
230 
231   /**
232    * Returns the HRegionLocation from meta for the given region
233    * @param connection connection we're using
234    * @param regionName region we're looking for
235    * @return HRegionLocation for the given region
236    * @throws IOException
237    */
238   public static HRegionLocation getRegionLocation(Connection connection,
239                                                   byte[] regionName) throws IOException {
240     byte[] row = regionName;
241     HRegionInfo parsedInfo = null;
242     try {
243       parsedInfo = parseRegionInfoFromRegionName(regionName);
244       row = getMetaKeyForRegion(parsedInfo);
245     } catch (Exception parseEx) {
246       // Ignore. This is used with tableName passed as regionName.
247     }
248     Get get = new Get(row);
249     get.addFamily(HConstants.CATALOG_FAMILY);
250     Result r = get(getMetaHTable(connection), get);
251     RegionLocations locations = getRegionLocations(r);
252     return locations == null
253       ? null
254       : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
255   }
256 
257   /**
258    * Returns the HRegionLocation from meta for the given region
259    * @param connection connection we're using
260    * @param regionInfo region information
261    * @return HRegionLocation for the given region
262    * @throws IOException
263    */
264   public static HRegionLocation getRegionLocation(Connection connection,
265                                                   HRegionInfo regionInfo) throws IOException {
266     byte[] row = getMetaKeyForRegion(regionInfo);
267     Get get = new Get(row);
268     get.addFamily(HConstants.CATALOG_FAMILY);
269     Result r = get(getMetaHTable(connection), get);
270     return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
271   }
272 
273   /** Returns the row key to use for this regionInfo */
274   public static byte[] getMetaKeyForRegion(HRegionInfo regionInfo) {
275     return RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo).getRegionName();
276   }
277 
278   /** Returns an HRI parsed from this regionName. Not all the fields of the HRI
279    * is stored in the name, so the returned object should only be used for the fields
280    * in the regionName.
281    */
282   protected static HRegionInfo parseRegionInfoFromRegionName(byte[] regionName)
283     throws IOException {
284     byte[][] fields = HRegionInfo.parseRegionName(regionName);
285     long regionId =  Long.parseLong(Bytes.toString(fields[2]));
286     int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
287     return new HRegionInfo(
288       TableName.valueOf(fields[0]), fields[1], fields[1], false, regionId, replicaId);
289   }
290 
291   /**
292    * Gets the result in hbase:meta for the specified region.
293    * @param connection connection we're using
294    * @param regionName region we're looking for
295    * @return result of the specified region
296    * @throws IOException
297    */
298   public static Result getRegionResult(Connection connection,
299       byte[] regionName) throws IOException {
300     Get get = new Get(regionName);
301     get.addFamily(HConstants.CATALOG_FAMILY);
302     return get(getMetaHTable(connection), get);
303   }
304 
305   /**
306    * Get regions from the merge qualifier of the specified merged region
307    * @return null if it doesn't contain merge qualifier, else two merge regions
308    * @throws IOException
309    */
310   public static Pair<HRegionInfo, HRegionInfo> getRegionsFromMergeQualifier(
311       Connection connection, byte[] regionName) throws IOException {
312     Result result = getRegionResult(connection, regionName);
313     HRegionInfo mergeA = getHRegionInfo(result, HConstants.MERGEA_QUALIFIER);
314     HRegionInfo mergeB = getHRegionInfo(result, HConstants.MERGEB_QUALIFIER);
315     if (mergeA == null && mergeB == null) {
316       return null;
317     }
318     return new Pair<HRegionInfo, HRegionInfo>(mergeA, mergeB);
319  }
320 
321   /**
322    * Checks if the specified table exists.  Looks at the hbase:meta table hosted on
323    * the specified server.
324    * @param connection connection we're using
325    * @param tableName table to check
326    * @return true if the table exists in meta, false if not
327    * @throws IOException
328    */
329   public static boolean tableExists(Connection connection,
330       final TableName tableName)
331   throws IOException {
332     if (tableName.equals(TableName.META_TABLE_NAME)) {
333       // Catalog tables always exist.
334       return true;
335     }
336     // Make a version of ResultCollectingVisitor that only collects the first
337     CollectingVisitor<HRegionInfo> visitor = new CollectingVisitor<HRegionInfo>() {
338       private HRegionInfo current = null;
339 
340       @Override
341       public boolean visit(Result r) throws IOException {
342         RegionLocations locations = getRegionLocations(r);
343         if (locations == null || locations.getRegionLocation().getRegionInfo() == null) {
344           LOG.warn("No serialized HRegionInfo in " + r);
345           return true;
346         }
347         this.current = locations.getRegionLocation().getRegionInfo();
348         if (this.current == null) {
349           LOG.warn("No serialized HRegionInfo in " + r);
350           return true;
351         }
352         if (!isInsideTable(this.current, tableName)) return false;
353         // Else call super and add this Result to the collection.
354         super.visit(r);
355         // Stop collecting regions from table after we get one.
356         return false;
357       }
358 
359       @Override
360       void add(Result r) {
361         // Add the current HRI.
362         this.results.add(this.current);
363       }
364     };
365     fullScan(connection, visitor, getTableStartRowForMeta(tableName));
366     // If visitor has results >= 1 then table exists.
367     return visitor.getResults().size() >= 1;
368   }
369 
370   /**
371    * Gets all of the regions of the specified table.
372    * @param zkw zookeeper connection to access meta table
373    * @param connection connection we're using
374    * @param tableName table we're looking for
375    * @return Ordered list of {@link HRegionInfo}.
376    * @throws IOException
377    */
378   public static List<HRegionInfo> getTableRegions(ZooKeeperWatcher zkw,
379       Connection connection, TableName tableName)
380   throws IOException {
381     return getTableRegions(zkw, connection, tableName, false);
382   }
383 
384   /**
385    * Gets all of the regions of the specified table.
386    * @param zkw zookeeper connection to access meta table
387    * @param connection connection we're using
388    * @param tableName table we're looking for
389    * @param excludeOfflinedSplitParents If true, do not include offlined split
390    * parents in the return.
391    * @return Ordered list of {@link HRegionInfo}.
392    * @throws IOException
393    */
394   public static List<HRegionInfo> getTableRegions(ZooKeeperWatcher zkw, Connection connection,
395       TableName tableName, final boolean excludeOfflinedSplitParents) throws IOException {
396     return getTableRegions(zkw, connection, tableName, excludeOfflinedSplitParents, false);
397   }
398 
399   /**
400    * Gets all of the regions of the specified table.
401    * @param zkw zookeeper connection to access meta table
402    * @param connection connection we're using
403    * @param tableName table we're looking for
404    * @param excludeOfflinedSplitParents If true, do not include offlined split parents in the
405    *          return.
406    * @param excludeReplicaRegions If true, do not include replica regions in the result.
407    * @return Ordered list of {@link HRegionInfo}.
408    */
409   public static List<HRegionInfo> getTableRegions(ZooKeeperWatcher zkw, Connection connection,
410       TableName tableName, final boolean excludeOfflinedSplitParents,
411       final boolean excludeReplicaRegions) throws IOException {
412     List<Pair<HRegionInfo, ServerName>> result = null;
413     result = getTableRegionsAndLocations(zkw, connection, tableName, excludeOfflinedSplitParents,
414       excludeReplicaRegions);
415     return getListOfHRegionInfos(result);
416   }
417 
418   static List<HRegionInfo> getListOfHRegionInfos(final List<Pair<HRegionInfo, ServerName>> pairs) {
419     if (pairs == null || pairs.isEmpty()) return null;
420     List<HRegionInfo> result = new ArrayList<HRegionInfo>(pairs.size());
421     for (Pair<HRegionInfo, ServerName> pair: pairs) {
422       result.add(pair.getFirst());
423     }
424     return result;
425   }
426 
427   /**
428    * @param current region of current table we're working with
429    * @param tableName table we're checking against
430    * @return True if <code>current</code> tablename is equal to
431    * <code>tableName</code>
432    */
433   static boolean isInsideTable(final HRegionInfo current, final TableName tableName) {
434     return tableName.equals(current.getTable());
435   }
436 
437   /**
438    * @param tableName table we're working with
439    * @return Place to start Scan in <code>hbase:meta</code> when passed a
440    * <code>tableName</code>; returns &lt;tableName&rt; &lt;,&rt; &lt;,&rt;
441    */
442   static byte [] getTableStartRowForMeta(TableName tableName) {
443     byte [] startRow = new byte[tableName.getName().length + 2];
444     System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length);
445     startRow[startRow.length - 2] = HConstants.DELIMITER;
446     startRow[startRow.length - 1] = HConstants.DELIMITER;
447     return startRow;
448   }
449 
450   /**
451    * This method creates a Scan object that will only scan catalog rows that
452    * belong to the specified table. It doesn't specify any columns.
453    * This is a better alternative to just using a start row and scan until
454    * it hits a new table since that requires parsing the HRI to get the table
455    * name.
456    * @param tableName bytes of table's name
457    * @return configured Scan object
458    */
459   public static Scan getScanForTableName(TableName tableName) {
460     String strName = tableName.getNameAsString();
461     // Start key is just the table name with delimiters
462     byte[] startKey = Bytes.toBytes(strName + ",,");
463     // Stop key appends the smallest possible char to the table name
464     byte[] stopKey = Bytes.toBytes(strName + " ,,");
465 
466     Scan scan = new Scan(startKey);
467     scan.setStopRow(stopKey);
468     return scan;
469   }
470 
471   /**
472    * @param zkw zookeeper connection to access meta table
473    * @param connection connection we're using
474    * @param tableName table we're looking for
475    * @return Return list of regioninfos and server.
476    * @throws IOException
477    */
478   public static List<Pair<HRegionInfo, ServerName>>
479   getTableRegionsAndLocations(ZooKeeperWatcher zkw,
480                               Connection connection, TableName tableName)
481   throws IOException {
482     return getTableRegionsAndLocations(zkw, connection, tableName, true);
483   }
484 
485   /**
486    * @param zkw ZooKeeperWatcher instance we're using to get hbase:meta location
487    * @param connection connection we're using
488    * @param tableName table to work with
489    * @return Return list of regioninfos and server addresses.
490    * @throws IOException
491    */
492   public static List<Pair<HRegionInfo, ServerName>> getTableRegionsAndLocations(
493       ZooKeeperWatcher zkw, Connection connection, final TableName tableName,
494       final boolean excludeOfflinedSplitParents) throws IOException {
495     return getTableRegionsAndLocations(zkw, connection, tableName, excludeOfflinedSplitParents,
496       false);
497   }
498 
  /**
   * Lists regions of <code>tableName</code> together with the server hosting each,
   * optionally filtering offlined split parents and replica regions.
   * @param zkw ZooKeeperWatcher instance we're using to get hbase:meta location
   * @param connection connection we're using
   * @param tableName table to work with
   * @param excludeOfflinedSplitParents Exclude offline regions
   * @param excludeReplicaRegions If true, do not include replica regions in the result.
   * @return List of regioninfos and server addresses.
   * @throws IOException if the meta scan (or meta location lookup) fails
   */
  public static List<Pair<HRegionInfo, ServerName>> getTableRegionsAndLocations(
      ZooKeeperWatcher zkw, Connection connection, final TableName tableName,
      final boolean excludeOfflinedSplitParents, final boolean excludeReplicaRegions)
          throws IOException {
    if (tableName.equals(TableName.META_TABLE_NAME)) {
      // If meta, do a bit of special handling.
      // Meta's own region is not listed in meta; ask ZooKeeper where it lives.
      ServerName serverName = new MetaTableLocator().getMetaRegionLocation(zkw);
      List<Pair<HRegionInfo, ServerName>> list =
        new ArrayList<Pair<HRegionInfo, ServerName>>();
      list.add(new Pair<HRegionInfo, ServerName>(HRegionInfo.FIRST_META_REGIONINFO,
        serverName));
      return list;
    }
    // Make a version of CollectingVisitor that collects HRegionInfo and ServerAddress.
    // NOTE: visit() stores the row's locations in 'current' and add() reads it —
    // the two are called in sequence per row, so the order is load-bearing.
    CollectingVisitor<Pair<HRegionInfo, ServerName>> visitor =
      new CollectingVisitor<Pair<HRegionInfo, ServerName>>() {
        private RegionLocations current = null;

        @Override
        public boolean visit(Result r) throws IOException {
          current = getRegionLocations(r);
          if (current == null || current.getRegionLocation().getRegionInfo() == null) {
            LOG.warn("No serialized HRegionInfo in " + r);
            return true;
          }
          HRegionInfo hri = current.getRegionLocation().getRegionInfo();
          // Stop the scan once rows belong to a different table.
          if (!isInsideTable(hri, tableName)) return false;
          if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
          // Else call super and add this Result to the collection.
          return super.visit(r);
        }

        @Override
        void add(Result r) {
          if (current == null) {
            return;
          }
          for (HRegionLocation loc : current.getRegionLocations()) {
            if (loc != null) {
              // Exclude replica region from the result
              if (excludeReplicaRegions
                  && !RegionReplicaUtil.isDefaultReplica(loc.getRegionInfo())) {
                continue;
              }
              this.results.add(new Pair<HRegionInfo, ServerName>(
                loc.getRegionInfo(), loc.getServerName()));
            }
          }
        }
      };
    fullScan(connection, visitor, getTableStartRowForMeta(tableName));
    return visitor.getResults();
  }
560 
561   /**
562    * @param connection connection we're using
563    * @param serverName server whose regions we're interested in
564    * @return List of user regions installed on this server (does not include
565    * catalog regions).
566    * @throws IOException
567    */
568   public static NavigableMap<HRegionInfo, Result>
569   getServerUserRegions(Connection connection, final ServerName serverName)
570     throws IOException {
571     final NavigableMap<HRegionInfo, Result> hris = new TreeMap<HRegionInfo, Result>();
572     // Fill the above hris map with entries from hbase:meta that have the passed
573     // servername.
574     CollectingVisitor<Result> v = new CollectingVisitor<Result>() {
575       @Override
576       void add(Result r) {
577         if (r == null || r.isEmpty()) return;
578         RegionLocations locations = getRegionLocations(r);
579         if (locations == null) return;
580         for (HRegionLocation loc : locations.getRegionLocations()) {
581           if (loc != null) {
582             if (loc.getServerName() != null && loc.getServerName().equals(serverName)) {
583               hris.put(loc.getRegionInfo(), r);
584             }
585           }
586         }
587       }
588     };
589     fullScan(connection, v);
590     return hris;
591   }
592 
593   public static void fullScanMetaAndPrint(Connection connection)
594     throws IOException {
595     Visitor v = new Visitor() {
596       @Override
597       public boolean visit(Result r) throws IOException {
598         if (r ==  null || r.isEmpty()) return true;
599         LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r);
600         RegionLocations locations = getRegionLocations(r);
601         if (locations == null) return true;
602         for (HRegionLocation loc : locations.getRegionLocations()) {
603           if (loc != null) {
604             LOG.info("fullScanMetaAndPrint.HRI Print= " + loc.getRegionInfo());
605           }
606         }
607         return true;
608       }
609     };
610     fullScan(connection, v);
611   }
612 
613   /**
614    * Performs a full scan of a catalog table.
615    * @param connection connection we're using
616    * @param visitor Visitor invoked against each row.
617    * @param startrow Where to start the scan. Pass null if want to begin scan
618    * at first row.
619    * <code>hbase:meta</code>, the default (pass false to scan hbase:meta)
620    * @throws IOException
621    */
622   public static void fullScan(Connection connection,
623     final Visitor visitor, final byte [] startrow)
624   throws IOException {
625     Scan scan = new Scan();
626     if (startrow != null) scan.setStartRow(startrow);
627     if (startrow == null) {
628       int caching = connection.getConfiguration()
629           .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
630       scan.setCaching(caching);
631     }
632     scan.addFamily(HConstants.CATALOG_FAMILY);
633     Table metaTable = getMetaHTable(connection);
634     ResultScanner scanner = null;
635     try {
636       scanner = metaTable.getScanner(scan);
637       Result data;
638       while((data = scanner.next()) != null) {
639         if (data.isEmpty()) continue;
640         // Break if visit returns false.
641         if (!visitor.visit(data)) break;
642       }
643     } finally {
644       if (scanner != null) scanner.close();
645       metaTable.close();
646     }
647   }
648 
  /**
   * Returns the column family used for meta columns.
   * All catalog data lives in this single family.
   * @return HConstants.CATALOG_FAMILY.
   */
  protected static byte[] getFamily() {
    return HConstants.CATALOG_FAMILY;
  }
656 
  /**
   * Returns the column qualifier for serialized region info.
   * Only the default replica's HRI is serialized under this qualifier.
   * @return HConstants.REGIONINFO_QUALIFIER
   */
  protected static byte[] getRegionInfoColumn() {
    return HConstants.REGIONINFO_QUALIFIER;
  }
664 
665   /**
666    * Returns the column qualifier for server column for replicaId
667    * @param replicaId the replicaId of the region
668    * @return a byte[] for server column qualifier
669    */
670   public static byte[] getServerColumn(int replicaId) {
671     return replicaId == 0
672       ? HConstants.SERVER_QUALIFIER
673       : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
674       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
675   }
676 
677   /**
678    * Returns the column qualifier for server start code column for replicaId
679    * @param replicaId the replicaId of the region
680    * @return a byte[] for server start code column qualifier
681    */
682   public static byte[] getStartCodeColumn(int replicaId) {
683     return replicaId == 0
684       ? HConstants.STARTCODE_QUALIFIER
685       : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
686       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
687   }
688 
689   /**
690    * Returns the column qualifier for seqNum column for replicaId
691    * @param replicaId the replicaId of the region
692    * @return a byte[] for seqNum column qualifier
693    */
694   public static byte[] getSeqNumColumn(int replicaId) {
695     return replicaId == 0
696       ? HConstants.SEQNUM_QUALIFIER
697       : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER
698       + String.format(HRegionInfo.REPLICA_ID_FORMAT, replicaId));
699   }
700 
701   /**
702    * Parses the replicaId from the server column qualifier. See top of the class javadoc
703    * for the actual meta layout
704    * @param serverColumn the column qualifier
705    * @return an int for the replicaId
706    */
707   static int parseReplicaIdFromServerColumn(byte[] serverColumn) {
708     String serverStr = Bytes.toString(serverColumn);
709 
710     Matcher matcher = SERVER_COLUMN_PATTERN.matcher(serverStr);
711     if (matcher.matches() && matcher.groupCount() > 0) {
712       String group = matcher.group(1);
713       if (group != null && group.length() > 0) {
714         return Integer.parseInt(group.substring(1), 16);
715       } else {
716         return 0;
717       }
718     }
719     return -1;
720   }
721 
722   /**
723    * Returns a {@link ServerName} from catalog table {@link Result}.
724    * @param r Result to pull from
725    * @return A ServerName instance or null if necessary fields not found or empty.
726    */
727   private static ServerName getServerName(final Result r, final int replicaId) {
728     byte[] serverColumn = getServerColumn(replicaId);
729     Cell cell = r.getColumnLatestCell(getFamily(), serverColumn);
730     if (cell == null || cell.getValueLength() == 0) return null;
731     String hostAndPort = Bytes.toString(
732       cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
733     byte[] startcodeColumn = getStartCodeColumn(replicaId);
734     cell = r.getColumnLatestCell(getFamily(), startcodeColumn);
735     if (cell == null || cell.getValueLength() == 0) return null;
736     return ServerName.valueOf(hostAndPort,
737       Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
738   }
739 
740   /**
741    * The latest seqnum that the server writing to meta observed when opening the region.
742    * E.g. the seqNum when the result of {@link #getServerName(Result, int)} was written.
743    * @param r Result to pull the seqNum from
744    * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written.
745    */
746   private static long getSeqNumDuringOpen(final Result r, final int replicaId) {
747     Cell cell = r.getColumnLatestCell(getFamily(), getSeqNumColumn(replicaId));
748     if (cell == null || cell.getValueLength() == 0) return HConstants.NO_SEQNUM;
749     return Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
750   }
751 
752   /**
753    * Returns an HRegionLocationList extracted from the result.
754    * @return an HRegionLocationList containing all locations for the region range or null if
755    *  we can't deserialize the result.
756    */
757   public static RegionLocations getRegionLocations(final Result r) {
758     if (r == null) return null;
759     HRegionInfo regionInfo = getHRegionInfo(r, getRegionInfoColumn());
760     if (regionInfo == null) return null;
761 
762     List<HRegionLocation> locations = new ArrayList<HRegionLocation>(1);
763     NavigableMap<byte[],NavigableMap<byte[],byte[]>> familyMap = r.getNoVersionMap();
764 
765     locations.add(getRegionLocation(r, regionInfo, regionInfo.getReplicaId()));
766 
767     NavigableMap<byte[], byte[]> infoMap = familyMap.get(getFamily());
768     if (infoMap == null) return new RegionLocations(locations);
769 
770     // iterate until all serverName columns are seen
771     int replicaId = 0;
772     byte[] serverColumn = getServerColumn(replicaId);
773     SortedMap<byte[], byte[]> serverMap = null;
774     serverMap = infoMap.tailMap(serverColumn, false);
775 
776     if (serverMap.isEmpty()) return new RegionLocations(locations);
777 
778     for (Map.Entry<byte[], byte[]> entry : serverMap.entrySet()) {
779       replicaId = parseReplicaIdFromServerColumn(entry.getKey());
780       if (replicaId < 0) {
781         break;
782       }
783       HRegionLocation location = getRegionLocation(r, regionInfo, replicaId);
784       // In case the region replica is newly created, it's location might be null. We usually do not
785       // have HRL's in RegionLocations object with null ServerName. They are handled as null HRLs.
786       if (location == null || location.getServerName() == null) {
787         locations.add(null);
788       } else {
789         locations.add(location);
790       }
791     }
792 
793     return new RegionLocations(locations);
794   }
795 
796   /**
797    * Returns the HRegionLocation parsed from the given meta row Result
798    * for the given regionInfo and replicaId. The regionInfo can be the default region info
799    * for the replica.
800    * @param r the meta row result
801    * @param regionInfo RegionInfo for default replica
802    * @param replicaId the replicaId for the HRegionLocation
803    * @return HRegionLocation parsed from the given meta row Result for the given replicaId
804    */
805   private static HRegionLocation getRegionLocation(final Result r, final HRegionInfo regionInfo,
806                                                    final int replicaId) {
807     ServerName serverName = getServerName(r, replicaId);
808     long seqNum = getSeqNumDuringOpen(r, replicaId);
809     HRegionInfo replicaInfo = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, replicaId);
810     return new HRegionLocation(replicaInfo, serverName, seqNum);
811   }
812 
813   /**
814    * Returns HRegionInfo object from the column
815    * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog
816    * table Result.
817    * @param data a Result object from the catalog table scan
818    * @return HRegionInfo or null
819    */
820   public static HRegionInfo getHRegionInfo(Result data) {
821     return getHRegionInfo(data, HConstants.REGIONINFO_QUALIFIER);
822   }
823 
824   /**
825    * Returns the HRegionInfo object from the column {@link HConstants#CATALOG_FAMILY} and
826    * <code>qualifier</code> of the catalog table result.
827    * @param r a Result object from the catalog table scan
828    * @param qualifier Column family qualifier
829    * @return An HRegionInfo instance or null.
830    */
831   private static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier) {
832     Cell cell = r.getColumnLatestCell(getFamily(), qualifier);
833     if (cell == null) return null;
834     return HRegionInfo.parseFromOrNull(cell.getValueArray(),
835       cell.getValueOffset(), cell.getValueLength());
836   }
837 
838   /**
839    * Returns the daughter regions by reading the corresponding columns of the catalog table
840    * Result.
841    * @param connection connection we're using
842    * @param parent region information of parent
843    * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split
844    *   parent
845    */
846   public static PairOfSameType<HRegionInfo> getDaughterRegionsFromParent(
847     final Connection connection, HRegionInfo parent) throws IOException {
848     Result parentResult = getRegionResult(connection, parent.getRegionName());
849     return getDaughterRegions(parentResult);
850   }
851 
852   /**
853    * Returns the daughter regions by reading the corresponding columns of the catalog table
854    * Result.
855    * @param data a Result object from the catalog table scan
856    * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split
857    * parent
858    */
859   public static PairOfSameType<HRegionInfo> getDaughterRegions(Result data) {
860     HRegionInfo splitA = getHRegionInfo(data, HConstants.SPLITA_QUALIFIER);
861     HRegionInfo splitB = getHRegionInfo(data, HConstants.SPLITB_QUALIFIER);
862 
863     return new PairOfSameType<HRegionInfo>(splitA, splitB);
864   }
865 
866   /**
867    * Returns the merge regions by reading the corresponding columns of the catalog table
868    * Result.
869    * @param data a Result object from the catalog table scan
870    * @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split
871    * parent
872    */
873   public static PairOfSameType<HRegionInfo> getMergeRegions(Result data) {
874     HRegionInfo mergeA = getHRegionInfo(data, HConstants.MERGEA_QUALIFIER);
875     HRegionInfo mergeB = getHRegionInfo(data, HConstants.MERGEB_QUALIFIER);
876 
877     return new PairOfSameType<HRegionInfo>(mergeA, mergeB);
878   }
879 
880   /**
881    * Implementations 'visit' a catalog table row.
882    */
883   public interface Visitor {
884     /**
885      * Visit the catalog table row.
886      * @param r A row from catalog table
887      * @return True if we are to proceed scanning the table, else false if
888      * we are to stop now.
889      */
890     boolean visit(final Result r) throws IOException;
891   }
892 
893   /**
894    * A {@link Visitor} that collects content out of passed {@link Result}.
895    */
896   static abstract class CollectingVisitor<T> implements Visitor {
897     final List<T> results = new ArrayList<T>();
898     @Override
899     public boolean visit(Result r) throws IOException {
900       if (r ==  null || r.isEmpty()) return true;
901       add(r);
902       return true;
903     }
904 
905     abstract void add(Result r);
906 
907     /**
908      * @return Collected results; wait till visits complete to collect all
909      * possible results
910      */
911     List<T> getResults() {
912       return this.results;
913     }
914   }
915 
916   /**
917    * Collects all returned.
918    */
919   static class CollectAllVisitor extends CollectingVisitor<Result> {
920     @Override
921     void add(Result r) {
922       this.results.add(r);
923     }
924   }
925 
926   /**
927    * Count regions in <code>hbase:meta</code> for passed table.
928    * @param c Configuration object
929    * @param tableName table name to count regions for
930    * @return Count or regions in table <code>tableName</code>
931    * @throws IOException
932    */
933   @Deprecated
934   public static int getRegionCount(final Configuration c, final String tableName)
935       throws IOException {
936     return getRegionCount(c, TableName.valueOf(tableName));
937   }
938 
939   /**
940    * Count regions in <code>hbase:meta</code> for passed table.
941    * @param c Configuration object
942    * @param tableName table name to count regions for
943    * @return Count or regions in table <code>tableName</code>
944    * @throws IOException
945    */
946   public static int getRegionCount(final Configuration c, final TableName tableName)
947   throws IOException {
948     try (Connection connection = ConnectionFactory.createConnection(c)) {
949       return getRegionCount(connection, tableName);
950     }
951   }
952 
953   /**
954    * Count regions in <code>hbase:meta</code> for passed table.
955    * @param connection Connection object
956    * @param tableName table name to count regions for
957    * @return Count or regions in table <code>tableName</code>
958    * @throws IOException
959    */
960   public static int getRegionCount(final Connection connection, final TableName tableName)
961   throws IOException {
962     try (RegionLocator locator = connection.getRegionLocator(tableName)) {
963       List<HRegionLocation> locations = locator.getAllRegionLocations();
964       return locations == null? 0: locations.size();
965     }
966   }
967 
968   ////////////////////////
969   // Editing operations //
970   ////////////////////////
971 
972   /**
973    * Generates and returns a Put containing the region into for the catalog table
974    */
975   public static Put makePutFromRegionInfo(HRegionInfo regionInfo)
976     throws IOException {
977     return makePutFromRegionInfo(regionInfo, HConstants.LATEST_TIMESTAMP);
978   }
979   /**
980    * Generates and returns a Put containing the region into for the catalog table
981    */
982   public static Put makePutFromRegionInfo(HRegionInfo regionInfo, long ts)
983     throws IOException {
984     Put put = new Put(regionInfo.getRegionName(), ts);
985     addRegionInfo(put, regionInfo);
986     return put;
987   }
988 
989   /**
990    * Generates and returns a Delete containing the region info for the catalog
991    * table
992    */
993   public static Delete makeDeleteFromRegionInfo(HRegionInfo regionInfo) {
994     return makeDeleteFromRegionInfo(regionInfo, HConstants.LATEST_TIMESTAMP);
995   }
996 
997   /**
998    * Generates and returns a Delete containing the region info for the catalog
999    * table
1000    */
1001   public static Delete makeDeleteFromRegionInfo(HRegionInfo regionInfo, long ts) {
1002     if (regionInfo == null) {
1003       throw new IllegalArgumentException("Can't make a delete for null region");
1004     }
1005     Delete delete = new Delete(regionInfo.getRegionName(), ts);
1006     return delete;
1007   }
1008 
1009   /**
1010    * Adds split daughters to the Put
1011    */
1012   public static Put addDaughtersToPut(Put put, HRegionInfo splitA, HRegionInfo splitB) {
1013     if (splitA != null) {
1014       put.addImmutable(
1015         HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, splitA.toByteArray());
1016     }
1017     if (splitB != null) {
1018       put.addImmutable(
1019         HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, splitB.toByteArray());
1020     }
1021     return put;
1022   }
1023 
1024   /**
1025    * Put the passed <code>p</code> to the <code>hbase:meta</code> table.
1026    * @param connection connection we're using
1027    * @param p Put to add to hbase:meta
1028    * @throws IOException
1029    */
1030   static void putToMetaTable(final Connection connection, final Put p)
1031     throws IOException {
1032     put(getMetaHTable(connection), p);
1033   }
1034 
1035   /**
1036    * @param t Table to use (will be closed when done).
1037    * @param p put to make
1038    * @throws IOException
1039    */
1040   private static void put(final Table t, final Put p) throws IOException {
1041     try {
1042       t.put(p);
1043     } finally {
1044       t.close();
1045     }
1046   }
1047 
1048   /**
1049    * Put the passed <code>ps</code> to the <code>hbase:meta</code> table.
1050    * @param connection connection we're using
1051    * @param ps Put to add to hbase:meta
1052    * @throws IOException
1053    */
1054   public static void putsToMetaTable(final Connection connection, final List<Put> ps)
1055     throws IOException {
1056     Table t = getMetaHTable(connection);
1057     try {
1058       t.put(ps);
1059     } finally {
1060       t.close();
1061     }
1062   }
1063 
1064   /**
1065    * Delete the passed <code>d</code> from the <code>hbase:meta</code> table.
1066    * @param connection connection we're using
1067    * @param d Delete to add to hbase:meta
1068    * @throws IOException
1069    */
1070   static void deleteFromMetaTable(final Connection connection, final Delete d)
1071     throws IOException {
1072     List<Delete> dels = new ArrayList<Delete>(1);
1073     dels.add(d);
1074     deleteFromMetaTable(connection, dels);
1075   }
1076 
1077   /**
1078    * Delete the passed <code>deletes</code> from the <code>hbase:meta</code> table.
1079    * @param connection connection we're using
1080    * @param deletes Deletes to add to hbase:meta  This list should support #remove.
1081    * @throws IOException
1082    */
1083   public static void deleteFromMetaTable(final Connection connection, final List<Delete> deletes)
1084     throws IOException {
1085     Table t = getMetaHTable(connection);
1086     try {
1087       t.delete(deletes);
1088     } finally {
1089       t.close();
1090     }
1091   }
1092 
1093   /**
1094    * Deletes some replica columns corresponding to replicas for the passed rows
1095    * @param metaRows rows in hbase:meta
1096    * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
1097    * @param numReplicasToRemove how many replicas to remove
1098    * @param connection connection we're using to access meta table
1099    * @throws IOException
1100    */
1101   public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
1102     int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection)
1103       throws IOException {
1104     int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
1105     for (byte[] row : metaRows) {
1106       Delete deleteReplicaLocations = new Delete(row);
1107       for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
1108         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1109           getServerColumn(i));
1110         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1111           getSeqNumColumn(i));
1112         deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
1113           getStartCodeColumn(i));
1114       }
1115       deleteFromMetaTable(connection, deleteReplicaLocations);
1116     }
1117   }
1118 
1119   /**
1120    * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
1121    * @param connection connection we're using
1122    * @param mutations Puts and Deletes to execute on hbase:meta
1123    * @throws IOException
1124    */
1125   public static void mutateMetaTable(final Connection connection,
1126                                      final List<Mutation> mutations)
1127     throws IOException {
1128     Table t = getMetaHTable(connection);
1129     try {
1130       t.batch(mutations);
1131     } catch (InterruptedException e) {
1132       InterruptedIOException ie = new InterruptedIOException(e.getMessage());
1133       ie.initCause(e);
1134       throw ie;
1135     } finally {
1136       t.close();
1137     }
1138   }
1139 
1140   /**
1141    * Adds a hbase:meta row for the specified new region.
1142    * @param connection connection we're using
1143    * @param regionInfo region information
1144    * @throws IOException if problem connecting or updating meta
1145    */
1146   public static void addRegionToMeta(Connection connection,
1147                                      HRegionInfo regionInfo)
1148     throws IOException {
1149     putToMetaTable(connection, makePutFromRegionInfo(regionInfo));
1150     LOG.info("Added " + regionInfo.getRegionNameAsString());
1151   }
1152 
1153   /**
1154    * Adds a hbase:meta row for the specified new region to the given catalog table. The
1155    * Table is not flushed or closed.
1156    * @param meta the Table for META
1157    * @param regionInfo region information
1158    * @throws IOException if problem connecting or updating meta
1159    */
1160   public static void addRegionToMeta(Table meta, HRegionInfo regionInfo) throws IOException {
1161     addRegionToMeta(meta, regionInfo, null, null);
1162   }
1163 
1164   /**
1165    * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this
1166    * does not add its daughter's as different rows, but adds information about the daughters
1167    * in the same row as the parent. Use
1168    * {@link #splitRegion(Connection, HRegionInfo, HRegionInfo, HRegionInfo, ServerName, int)}
1169    * if you want to do that.
1170    * @param meta the Table for META
1171    * @param regionInfo region information
1172    * @param splitA first split daughter of the parent regionInfo
1173    * @param splitB second split daughter of the parent regionInfo
1174    * @throws IOException if problem connecting or updating meta
1175    */
1176   public static void addRegionToMeta(Table meta, HRegionInfo regionInfo,
1177                                      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
1178     Put put = makePutFromRegionInfo(regionInfo);
1179     addDaughtersToPut(put, splitA, splitB);
1180     meta.put(put);
1181     if (LOG.isDebugEnabled()) {
1182       LOG.debug("Added " + regionInfo.getRegionNameAsString());
1183     }
1184   }
1185 
1186   /**
1187    * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this
1188    * does not add its daughter's as different rows, but adds information about the daughters
1189    * in the same row as the parent. Use
1190    * {@link #splitRegion(Connection, HRegionInfo, HRegionInfo, HRegionInfo, ServerName, int)}
1191    * if you want to do that.
1192    * @param connection connection we're using
1193    * @param regionInfo region information
1194    * @param splitA first split daughter of the parent regionInfo
1195    * @param splitB second split daughter of the parent regionInfo
1196    * @throws IOException if problem connecting or updating meta
1197    */
1198   public static void addRegionToMeta(Connection connection, HRegionInfo regionInfo,
1199                                      HRegionInfo splitA, HRegionInfo splitB) throws IOException {
1200     Table meta = getMetaHTable(connection);
1201     try {
1202       addRegionToMeta(meta, regionInfo, splitA, splitB);
1203     } finally {
1204       meta.close();
1205     }
1206   }
1207 
1208   /**
1209    * Adds a hbase:meta row for each of the specified new regions.
1210    * @param connection connection we're using
1211    * @param regionInfos region information list
1212    * @throws IOException if problem connecting or updating meta
1213    */
1214   public static void addRegionsToMeta(Connection connection,
1215                                       List<HRegionInfo> regionInfos, int regionReplication)
1216     throws IOException {
1217     addRegionsToMeta(connection, regionInfos, regionReplication, HConstants.LATEST_TIMESTAMP);
1218   }
1219   /**
1220    * Adds a hbase:meta row for each of the specified new regions.
1221    * @param connection connection we're using
1222    * @param regionInfos region information list
1223    * @param regionReplication
1224    * @param ts desired timestamp
1225    * @throws IOException if problem connecting or updating meta
1226    */
1227   public static void addRegionsToMeta(Connection connection,
1228       List<HRegionInfo> regionInfos, int regionReplication, long ts)
1229           throws IOException {
1230     List<Put> puts = new ArrayList<Put>();
1231     for (HRegionInfo regionInfo : regionInfos) {
1232       if (RegionReplicaUtil.isDefaultReplica(regionInfo)) {
1233         Put put = makePutFromRegionInfo(regionInfo, ts);
1234         // Add empty locations for region replicas so that number of replicas can be cached
1235         // whenever the primary region is looked up from meta
1236         for (int i = 1; i < regionReplication; i++) {
1237           addEmptyLocation(put, i);
1238         }
1239         puts.add(put);
1240       }
1241     }
1242     putsToMetaTable(connection, puts);
1243     LOG.info("Added " + puts.size());
1244   }
1245 
1246   /**
1247    * Adds a daughter region entry to meta.
1248    * @param regionInfo the region to put
1249    * @param sn the location of the region
1250    * @param openSeqNum the latest sequence number obtained when the region was open
1251    */
1252   public static void addDaughter(final Connection connection,
1253       final HRegionInfo regionInfo, final ServerName sn, final long openSeqNum)
1254       throws NotAllMetaRegionsOnlineException, IOException {
1255     Put put = new Put(regionInfo.getRegionName());
1256     addRegionInfo(put, regionInfo);
1257     if (sn != null) {
1258       addLocation(put, sn, openSeqNum, -1, regionInfo.getReplicaId());
1259     }
1260     putToMetaTable(connection, put);
1261     LOG.info("Added daughter " + regionInfo.getEncodedName() +
1262       (sn == null? ", serverName=null": ", serverName=" + sn.toString()));
1263   }
1264 
1265   /**
1266    * Merge the two regions into one in an atomic operation. Deletes the two
1267    * merging regions in hbase:meta and adds the merged region with the information of
1268    * two merging regions.
1269    * @param connection connection we're using
1270    * @param mergedRegion the merged region
1271    * @param regionA
1272    * @param regionB
1273    * @param sn the location of the region
1274    * @param masterSystemTime
1275    * @throws IOException
1276    */
1277   public static void mergeRegions(final Connection connection, HRegionInfo mergedRegion,
1278       HRegionInfo regionA, HRegionInfo regionB, ServerName sn, int regionReplication,
1279       long masterSystemTime)
1280           throws IOException {
1281     Table meta = getMetaHTable(connection);
1282     try {
1283       HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
1284 
1285       // use the maximum of what master passed us vs local time.
1286       long time = Math.max(EnvironmentEdgeManager.currentTime(), masterSystemTime);
1287 
1288       // Put for parent
1289       Put putOfMerged = makePutFromRegionInfo(copyOfMerged, time);
1290       putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
1291         regionA.toByteArray());
1292       putOfMerged.addImmutable(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
1293         regionB.toByteArray());
1294 
1295       // Deletes for merging regions
1296       Delete deleteA = makeDeleteFromRegionInfo(regionA, time);
1297       Delete deleteB = makeDeleteFromRegionInfo(regionB, time);
1298 
1299       // The merged is a new region, openSeqNum = 1 is fine.
1300       addLocation(putOfMerged, sn, 1, -1, mergedRegion.getReplicaId());
1301 
1302       // Add empty locations for region replicas of the merged region so that number of replicas can
1303       // be cached whenever the primary region is looked up from meta
1304       for (int i = 1; i < regionReplication; i++) {
1305         addEmptyLocation(putOfMerged, i);
1306       }
1307 
1308       byte[] tableRow = Bytes.toBytes(mergedRegion.getRegionNameAsString()
1309         + HConstants.DELIMITER);
1310       multiMutate(meta, tableRow, putOfMerged, deleteA, deleteB);
1311     } finally {
1312       meta.close();
1313     }
1314   }
1315 
1316   /**
1317    * Splits the region into two in an atomic operation. Offlines the parent
1318    * region with the information that it is split into two, and also adds
1319    * the daughter regions. Does not add the location information to the daughter
1320    * regions since they are not open yet.
1321    * @param connection connection we're using
1322    * @param parent the parent region which is split
1323    * @param splitA Split daughter region A
1324    * @param splitB Split daughter region A
1325    * @param sn the location of the region
1326    */
1327   public static void splitRegion(final Connection connection,
1328                                  HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
1329                                  ServerName sn, int regionReplication) throws IOException {
1330     Table meta = getMetaHTable(connection);
1331     try {
1332       HRegionInfo copyOfParent = new HRegionInfo(parent);
1333       copyOfParent.setOffline(true);
1334       copyOfParent.setSplit(true);
1335 
1336       //Put for parent
1337       Put putParent = makePutFromRegionInfo(copyOfParent);
1338       addDaughtersToPut(putParent, splitA, splitB);
1339 
1340       //Puts for daughters
1341       Put putA = makePutFromRegionInfo(splitA);
1342       Put putB = makePutFromRegionInfo(splitB);
1343 
1344       addSequenceNum(putA, 1, -1, splitA.getReplicaId()); //new regions, openSeqNum = 1 is fine.
1345       addSequenceNum(putB, 1, -1, splitB.getReplicaId());
1346 
1347       // Add empty locations for region replicas of daughters so that number of replicas can be
1348       // cached whenever the primary region is looked up from meta
1349       for (int i = 1; i < regionReplication; i++) {
1350         addEmptyLocation(putA, i);
1351         addEmptyLocation(putB, i);
1352       }
1353 
1354       byte[] tableRow = Bytes.toBytes(parent.getRegionNameAsString() + HConstants.DELIMITER);
1355       multiMutate(meta, tableRow, putParent, putA, putB);
1356     } finally {
1357       meta.close();
1358     }
1359   }
1360 
1361   /**
1362    * Performs an atomic multi-Mutate operation against the given table.
1363    */
1364   private static void multiMutate(Table table, byte[] row, Mutation... mutations)
1365       throws IOException {
1366     CoprocessorRpcChannel channel = table.coprocessorService(row);
1367     MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder
1368       = MultiRowMutationProtos.MutateRowsRequest.newBuilder();
1369     for (Mutation mutation : mutations) {
1370       if (mutation instanceof Put) {
1371         mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
1372           ClientProtos.MutationProto.MutationType.PUT, mutation));
1373       } else if (mutation instanceof Delete) {
1374         mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
1375           ClientProtos.MutationProto.MutationType.DELETE, mutation));
1376       } else {
1377         throw new DoNotRetryIOException("multi in MetaEditor doesn't support "
1378           + mutation.getClass().getName());
1379       }
1380     }
1381 
1382     MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
1383       MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
1384     try {
1385       service.mutateRows(null, mmrBuilder.build());
1386     } catch (ServiceException ex) {
1387       ProtobufUtil.toIOException(ex);
1388     }
1389   }
1390 
1391   /**
1392    * Updates the location of the specified region in hbase:meta to be the specified
1393    * server hostname and startcode.
1394    * <p>
1395    * Uses passed catalog tracker to get a connection to the server hosting
1396    * hbase:meta and makes edits to that region.
1397    *
1398    * @param connection connection we're using
1399    * @param regionInfo region to update location of
1400    * @param openSeqNum the latest sequence number obtained when the region was open
1401    * @param sn Server name
1402    * @param masterSystemTime wall clock time from master if passed in the open region RPC or -1
1403    * @throws IOException
1404    */
1405   public static void updateRegionLocation(Connection connection,
1406                                           HRegionInfo regionInfo, ServerName sn, long openSeqNum,
1407                                           long masterSystemTime)
1408     throws IOException {
1409     updateLocation(connection, regionInfo, sn, openSeqNum, masterSystemTime);
1410   }
1411 
1412   /**
1413    * Updates the location of the specified region to be the specified server.
1414    * <p>
1415    * Connects to the specified server which should be hosting the specified
1416    * catalog region name to perform the edit.
1417    *
1418    * @param connection connection we're using
1419    * @param regionInfo region to update location of
1420    * @param sn Server name
1421    * @param openSeqNum the latest sequence number obtained when the region was open
1422    * @param masterSystemTime wall clock time from master if passed in the open region RPC or -1
1423    * @throws IOException In particular could throw {@link java.net.ConnectException}
1424    * if the server is down on other end.
1425    */
1426   private static void updateLocation(final Connection connection,
1427                                      HRegionInfo regionInfo, ServerName sn, long openSeqNum,
1428                                      long masterSystemTime)
1429     throws IOException {
1430 
1431     // use the maximum of what master passed us vs local time.
1432     long time = Math.max(EnvironmentEdgeManager.currentTime(), masterSystemTime);
1433 
1434     // region replicas are kept in the primary region's row
1435     Put put = new Put(getMetaKeyForRegion(regionInfo), time);
1436     HRegionInfo defaultRegionInfo = regionInfo;
1437     if (regionInfo.getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
1438       defaultRegionInfo = new HRegionInfo(regionInfo, HRegionInfo.DEFAULT_REPLICA_ID);
1439     }
1440     addRegionInfo(put, defaultRegionInfo);
1441     addLocation(put, sn, openSeqNum, time, regionInfo.getReplicaId());
1442     putToMetaTable(connection, put);
1443     LOG.info("Updated row " + defaultRegionInfo.getRegionNameAsString() +
1444       " with server=" + sn);
1445   }
1446 
1447   /**
1448    * Deletes the specified region from META.
1449    * @param connection connection we're using
1450    * @param regionInfo region to be deleted from META
1451    * @throws IOException
1452    */
1453   public static void deleteRegion(Connection connection,
1454                                   HRegionInfo regionInfo)
1455     throws IOException {
1456     Delete delete = new Delete(regionInfo.getRegionName());
1457     deleteFromMetaTable(connection, delete);
1458     LOG.info("Deleted " + regionInfo.getRegionNameAsString());
1459   }
1460 
1461   /**
1462    * Deletes the specified regions from META.
1463    * @param connection connection we're using
1464    * @param regionsInfo list of regions to be deleted from META
1465    * @param ts desired timestamp
1466    * @throws IOException
1467    */
1468   public static void deleteRegions(Connection connection,
1469                                    List<HRegionInfo> regionsInfo, long ts) throws IOException {
1470     List<Delete> deletes = new ArrayList<Delete>(regionsInfo.size());
1471     for (HRegionInfo hri: regionsInfo) {
1472       deletes.add(new Delete(hri.getRegionName(), ts));
1473     }
1474     deleteFromMetaTable(connection, deletes);
1475     LOG.info("Deleted " + regionsInfo);
1476   }
1477   /**
1478    * Deletes the specified regions from META.
1479    * @param connection connection we're using
1480    * @param regionsInfo list of regions to be deleted from META
1481    * @throws IOException
1482    */
1483   public static void deleteRegions(Connection connection,
1484                                    List<HRegionInfo> regionsInfo) throws IOException {
1485     deleteRegions(connection, regionsInfo, HConstants.LATEST_TIMESTAMP);
1486   }
1487 
1488   /**
1489    * Adds and Removes the specified regions from hbase:meta
1490    * @param connection connection we're using
1491    * @param regionsToRemove list of regions to be deleted from META
1492    * @param regionsToAdd list of regions to be added to META
1493    * @throws IOException
1494    */
1495   public static void mutateRegions(Connection connection,
1496                                    final List<HRegionInfo> regionsToRemove,
1497                                    final List<HRegionInfo> regionsToAdd)
1498     throws IOException {
1499     List<Mutation> mutation = new ArrayList<Mutation>();
1500     if (regionsToRemove != null) {
1501       for (HRegionInfo hri: regionsToRemove) {
1502         mutation.add(new Delete(hri.getRegionName()));
1503       }
1504     }
1505     if (regionsToAdd != null) {
1506       for (HRegionInfo hri: regionsToAdd) {
1507         mutation.add(makePutFromRegionInfo(hri));
1508       }
1509     }
1510     mutateMetaTable(connection, mutation);
1511     if (regionsToRemove != null && regionsToRemove.size() > 0) {
1512       LOG.debug("Deleted " + regionsToRemove);
1513     }
1514     if (regionsToAdd != null && regionsToAdd.size() > 0) {
1515       LOG.debug("Added " + regionsToAdd);
1516     }
1517   }
1518 
1519   /**
1520    * Overwrites the specified regions from hbase:meta
1521    * @param connection connection we're using
1522    * @param regionInfos list of regions to be added to META
1523    * @throws IOException
1524    */
1525   public static void overwriteRegions(Connection connection,
1526       List<HRegionInfo> regionInfos, int regionReplication) throws IOException {
1527     // use master time for delete marker and the Put
1528     long now = EnvironmentEdgeManager.currentTime();
1529     deleteRegions(connection, regionInfos, now);
1530     // Why sleep? This is the easiest way to ensure that the previous deletes does not
1531     // eclipse the following puts, that might happen in the same ts from the server.
1532     // See HBASE-9906, and HBASE-9879. Once either HBASE-9879, HBASE-8770 is fixed,
1533     // or HBASE-9905 is fixed and meta uses seqIds, we do not need the sleep.
1534     //
1535     // HBASE-13875 uses master timestamp for the mutations. The 20ms sleep is not needed
1536     addRegionsToMeta(connection, regionInfos, regionReplication, now+1);
1537     LOG.info("Overwritten " + regionInfos);
1538   }
1539 
1540   /**
1541    * Deletes merge qualifiers for the specified merged region.
1542    * @param connection connection we're using
1543    * @param mergedRegion
1544    * @throws IOException
1545    */
1546   public static void deleteMergeQualifiers(Connection connection,
1547                                            final HRegionInfo mergedRegion) throws IOException {
1548     Delete delete = new Delete(mergedRegion.getRegionName());
1549     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER);
1550     delete.deleteColumns(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER);
1551     deleteFromMetaTable(connection, delete);
1552     LOG.info("Deleted references in merged region "
1553       + mergedRegion.getRegionNameAsString() + ", qualifier="
1554       + Bytes.toStringBinary(HConstants.MERGEA_QUALIFIER) + " and qualifier="
1555       + Bytes.toStringBinary(HConstants.MERGEB_QUALIFIER));
1556   }
1557 
1558   private static Put addRegionInfo(final Put p, final HRegionInfo hri)
1559     throws IOException {
1560     p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
1561       hri.toByteArray());
1562     return p;
1563   }
1564 
1565   public static Put addLocation(final Put p, final ServerName sn, long openSeqNum,
1566       long time, int replicaId){
1567     if (time <= 0) {
1568       time = EnvironmentEdgeManager.currentTime();
1569     }
1570     p.addImmutable(HConstants.CATALOG_FAMILY, getServerColumn(replicaId), time,
1571       Bytes.toBytes(sn.getHostAndPort()));
1572     p.addImmutable(HConstants.CATALOG_FAMILY, getStartCodeColumn(replicaId), time,
1573       Bytes.toBytes(sn.getStartcode()));
1574     p.addImmutable(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId), time,
1575       Bytes.toBytes(openSeqNum));
1576     return p;
1577   }
1578 
1579   public static Put addEmptyLocation(final Put p, int replicaId) {
1580     long now = EnvironmentEdgeManager.currentTime();
1581     p.addImmutable(HConstants.CATALOG_FAMILY, getServerColumn(replicaId), now, null);
1582     p.addImmutable(HConstants.CATALOG_FAMILY, getStartCodeColumn(replicaId), now, null);
1583     p.addImmutable(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId), now, null);
1584     return p;
1585   }
1586 
1587   public static Put addSequenceNum(final Put p, long openSeqNum, long time, int replicaId) {
1588     if (time <= 0) {
1589       time = EnvironmentEdgeManager.currentTime();
1590     }
1591     p.addImmutable(HConstants.CATALOG_FAMILY, getSeqNumColumn(replicaId), time,
1592       Bytes.toBytes(openSeqNum));
1593     return p;
1594   }
1595 
1596   /**
1597    * Checks whether hbase:meta contains any info:server entry.
1598    * @param connection connection we're using
1599    * @return true if hbase:meta contains any info:server entry, false if not
1600    * @throws IOException
1601    */
1602   public static boolean infoServerExists(Connection connection) throws IOException {
1603     // Make a version of ResultCollectingVisitor that only collects the first
1604     CollectingVisitor<Result> visitor = new CollectingVisitor<Result>() {
1605       @Override
1606       public boolean visit(Result r) throws IOException {
1607         if (r == null || r.isEmpty()) return true;
1608         RegionLocations locations = getRegionLocations(r);
1609         if (locations == null) return true;
1610         for (HRegionLocation loc : locations.getRegionLocations()) {
1611           if (loc != null) {
1612             if (loc.getServerName() != null) {
1613               add(r);
1614               // Stop collecting results after we get one.
1615               return false;
1616             }
1617           }
1618         }
1619         return true;
1620       }
1621 
1622       @Override
1623       void add(Result r) {
1624         this.results.add(r);
1625       }
1626     };
1627     fullScan(connection, visitor);
1628     // If visitor has results >= 1 then hbase:meta has the info:server entry
1629     return visitor.getResults().size() >= 1;
1630   }
1631 }