001/** 002 * 003 * Licensed to the Apache Software Foundation (ASF) under one 004 * or more contributor license agreements. See the NOTICE file 005 * distributed with this work for additional information 006 * regarding copyright ownership. The ASF licenses this file 007 * to you under the Apache License, Version 2.0 (the 008 * "License"); you may not use this file except in compliance 009 * with the License. You may obtain a copy of the License at 010 * 011 * http://www.apache.org/licenses/LICENSE-2.0 012 * 013 * Unless required by applicable law or agreed to in writing, software 014 * distributed under the License is distributed on an "AS IS" BASIS, 015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 016 * See the License for the specific language governing permissions and 017 * limitations under the License. 018 */ 019package org.apache.hadoop.hbase.client; 020 021import java.io.IOException; 022import java.util.ArrayList; 023import java.util.Arrays; 024import java.util.Collection; 025import java.util.Collections; 026import java.util.HashMap; 027import java.util.HashSet; 028import java.util.List; 029import java.util.Map; 030import java.util.Objects; 031import java.util.Optional; 032import java.util.Set; 033import java.util.TreeMap; 034import java.util.TreeSet; 035import java.util.function.Function; 036import java.util.regex.Matcher; 037import java.util.regex.Pattern; 038import org.apache.hadoop.fs.Path; 039import org.apache.hadoop.hbase.Coprocessor; 040import org.apache.hadoop.hbase.HConstants; 041import org.apache.hadoop.hbase.TableName; 042import org.apache.hadoop.hbase.exceptions.DeserializationException; 043import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; 044import org.apache.hadoop.hbase.security.User; 045import org.apache.hadoop.hbase.util.Bytes; 046import org.apache.yetus.audience.InterfaceAudience; 047import org.slf4j.Logger; 048import org.slf4j.LoggerFactory; 049 050import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; 051import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; 052 053/** 054 * Convenience class for composing an instance of {@link TableDescriptor}. 055 * @since 2.0.0 056 */ 057@InterfaceAudience.Public 058public class TableDescriptorBuilder { 059 public static final Logger LOG = LoggerFactory.getLogger(TableDescriptorBuilder.class); 060 @InterfaceAudience.Private 061 public static final String SPLIT_POLICY = "SPLIT_POLICY"; 062 private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY)); 063 /** 064 * Used by HBase Shell interface to access this metadata 065 * attribute which denotes the maximum size of the store file after which a 066 * region split occurs. 067 */ 068 @InterfaceAudience.Private 069 public static final String MAX_FILESIZE = "MAX_FILESIZE"; 070 private static final Bytes MAX_FILESIZE_KEY 071 = new Bytes(Bytes.toBytes(MAX_FILESIZE)); 072 073 @InterfaceAudience.Private 074 public static final String OWNER = "OWNER"; 075 @InterfaceAudience.Private 076 public static final Bytes OWNER_KEY 077 = new Bytes(Bytes.toBytes(OWNER)); 078 079 /** 080 * Used by rest interface to access this metadata attribute 081 * which denotes if the table is Read Only. 082 */ 083 @InterfaceAudience.Private 084 public static final String READONLY = "READONLY"; 085 private static final Bytes READONLY_KEY 086 = new Bytes(Bytes.toBytes(READONLY)); 087 088 /** 089 * Used by HBase Shell interface to access this metadata 090 * attribute which denotes if the table is compaction enabled. 091 */ 092 @InterfaceAudience.Private 093 public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED"; 094 private static final Bytes COMPACTION_ENABLED_KEY 095 = new Bytes(Bytes.toBytes(COMPACTION_ENABLED)); 096 097 /** 098 * Used by HBase Shell interface to access this metadata 099 * attribute which denotes if the table is split enabled. 
100 */ 101 @InterfaceAudience.Private 102 public static final String SPLIT_ENABLED = "SPLIT_ENABLED"; 103 private static final Bytes SPLIT_ENABLED_KEY = new Bytes(Bytes.toBytes(SPLIT_ENABLED)); 104 105 /** 106 * Used by HBase Shell interface to access this metadata 107 * attribute which denotes if the table is merge enabled. 108 */ 109 @InterfaceAudience.Private 110 public static final String MERGE_ENABLED = "MERGE_ENABLED"; 111 private static final Bytes MERGE_ENABLED_KEY = new Bytes(Bytes.toBytes(MERGE_ENABLED)); 112 113 /** 114 * Constant that denotes whether the table is normalized by default. 115 */ 116 @InterfaceAudience.Private 117 public static final boolean DEFAULT_NORMALIZATION_ENABLED = false; 118 119 /** 120 * Used by HBase Shell interface to access this metadata 121 * attribute which represents the maximum size of the memstore after which its 122 * contents are flushed onto the disk. 123 */ 124 @InterfaceAudience.Private 125 public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE"; 126 private static final Bytes MEMSTORE_FLUSHSIZE_KEY 127 = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE)); 128 129 @InterfaceAudience.Private 130 public static final String FLUSH_POLICY = "FLUSH_POLICY"; 131 private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY)); 132 /** 133 * Used by rest interface to access this metadata attribute 134 * which denotes if it is a catalog table, either <code> hbase:meta </code>. 135 */ 136 @InterfaceAudience.Private 137 public static final String IS_META = "IS_META"; 138 private static final Bytes IS_META_KEY 139 = new Bytes(Bytes.toBytes(IS_META)); 140 141 /** 142 * {@link Durability} setting for the table. 143 */ 144 @InterfaceAudience.Private 145 public static final String DURABILITY = "DURABILITY"; 146 private static final Bytes DURABILITY_KEY 147 = new Bytes(Bytes.toBytes("DURABILITY")); 148 149 /** 150 * The number of region replicas for the table. 
151 */ 152 @InterfaceAudience.Private 153 public static final String REGION_REPLICATION = "REGION_REPLICATION"; 154 private static final Bytes REGION_REPLICATION_KEY 155 = new Bytes(Bytes.toBytes(REGION_REPLICATION)); 156 157 /** 158 * The flag to indicate whether or not the memstore should be 159 * replicated for read-replicas (CONSISTENCY => TIMELINE). 160 */ 161 @InterfaceAudience.Private 162 public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION"; 163 private static final Bytes REGION_MEMSTORE_REPLICATION_KEY 164 = new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION)); 165 166 private static final Bytes REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY 167 = new Bytes(Bytes.toBytes(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY)); 168 /** 169 * Used by shell/rest interface to access this metadata 170 * attribute which denotes if the table should be treated by region 171 * normalizer. 172 */ 173 @InterfaceAudience.Private 174 public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED"; 175 private static final Bytes NORMALIZATION_ENABLED_KEY 176 = new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED)); 177 178 @InterfaceAudience.Private 179 public static final String NORMALIZER_TARGET_REGION_COUNT = 180 "NORMALIZER_TARGET_REGION_COUNT"; 181 private static final Bytes NORMALIZER_TARGET_REGION_COUNT_KEY = 182 new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT)); 183 184 @InterfaceAudience.Private 185 public static final String NORMALIZER_TARGET_REGION_SIZE = "NORMALIZER_TARGET_REGION_SIZE"; 186 private static final Bytes NORMALIZER_TARGET_REGION_SIZE_KEY = 187 new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE)); 188 189 /** 190 * Default durability for HTD is USE_DEFAULT, which defaults to HBase-global 191 * default value 192 */ 193 private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT; 194 195 @InterfaceAudience.Private 196 public static final String PRIORITY = "PRIORITY"; 197 
private static final Bytes PRIORITY_KEY 198 = new Bytes(Bytes.toBytes(PRIORITY)); 199 200 private static final Bytes RSGROUP_KEY = 201 new Bytes(Bytes.toBytes(RSGroupInfo.TABLE_DESC_PROP_GROUP)); 202 203 /** 204 * Relative priority of the table used for rpc scheduling 205 */ 206 private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS; 207 208 /** 209 * Constant that denotes whether the table is READONLY by default and is false 210 */ 211 public static final boolean DEFAULT_READONLY = false; 212 213 /** 214 * Constant that denotes whether the table is compaction enabled by default 215 */ 216 public static final boolean DEFAULT_COMPACTION_ENABLED = true; 217 218 /** 219 * Constant that denotes whether the table is split enabled by default 220 */ 221 public static final boolean DEFAULT_SPLIT_ENABLED = true; 222 223 /** 224 * Constant that denotes whether the table is merge enabled by default 225 */ 226 public static final boolean DEFAULT_MERGE_ENABLED = true; 227 228 /** 229 * Constant that denotes the maximum default size of the memstore in bytes after which 230 * the contents are flushed to the store files. 
231 */ 232 public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L; 233 234 public static final int DEFAULT_REGION_REPLICATION = 1; 235 236 public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true; 237 238 private final static Map<String, String> DEFAULT_VALUES = new HashMap<>(); 239 private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>(); 240 241 static { 242 DEFAULT_VALUES.put(MAX_FILESIZE, 243 String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE)); 244 DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY)); 245 DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE, 246 String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE)); 247 DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name 248 DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION)); 249 DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY)); 250 DEFAULT_VALUES.keySet().stream() 251 .map(s -> new Bytes(Bytes.toBytes(s))).forEach(RESERVED_KEYWORDS::add); 252 RESERVED_KEYWORDS.add(IS_META_KEY); 253 } 254 255 @InterfaceAudience.Private 256 public final static String NAMESPACE_FAMILY_INFO = "info"; 257 @InterfaceAudience.Private 258 public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO); 259 @InterfaceAudience.Private 260 public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d"); 261 262 /** 263 * <pre> 264 * Pattern that matches a coprocessor specification. 
Form is: 265 * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]} 266 * where arguments are {@code <KEY> '=' <VALUE> [,...]} 267 * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2} 268 * </pre> 269 */ 270 private static final Pattern CP_HTD_ATTR_VALUE_PATTERN = 271 Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$"); 272 273 private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+"; 274 private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+"; 275 private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile( 276 "(" + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" + 277 CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?"); 278 private static final Pattern CP_HTD_ATTR_KEY_PATTERN = 279 Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE); 280 281 /** 282 * Table descriptor for namespace table 283 */ 284 // TODO We used to set CacheDataInL1 for NS table. When we have BucketCache in file mode, now the 285 // NS data goes to File mode BC only. Test how that affect the system. If too much, we have to 286 // rethink about adding back the setCacheDataInL1 for NS table. 287 // Note: namespace schema is hard-coded. In hbase3, namespace goes away; it is integrated into 288 // hbase:meta. 289 public static final TableDescriptor NAMESPACE_TABLEDESC 290 = TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME) 291 .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES) 292 // Ten is arbitrary number. Keep versions to help debugging. 
        .setMaxVersions(10)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
        .build())
      .build();

  // The single mutable descriptor this builder delegates to; build() returns a copy.
  private final ModifyableTableDescriptor desc;

  /**
   * @param desc The table descriptor to serialize
   * @return This instance serialized with pb with pb magic prefix
   */
  public static byte[] toByteArray(TableDescriptor desc) {
    if (desc instanceof ModifyableTableDescriptor) {
      return ((ModifyableTableDescriptor) desc).toByteArray();
    }
    return new ModifyableTableDescriptor(desc).toByteArray();
  }

  /**
   * The input should be created by {@link #toByteArray}.
   * @param pbBytes A pb serialized TableDescriptor instance with pb magic prefix
   * @return The deserialized {@link TableDescriptor}
   * @throws org.apache.hadoop.hbase.exceptions.DeserializationException if the bytes
   *         cannot be parsed
   */
  public static TableDescriptor parseFrom(byte[] pbBytes) throws DeserializationException {
    return ModifyableTableDescriptor.parseFrom(pbBytes);
  }

  /**
   * @param name the table name
   * @return a new builder for a table of the given name, with no column families yet
   */
  public static TableDescriptorBuilder newBuilder(final TableName name) {
    return new TableDescriptorBuilder(name);
  }

  /**
   * @param desc the descriptor to copy
   * @return a copy of {@code desc}
   */
  public static TableDescriptor copy(TableDescriptor desc) {
    return new ModifyableTableDescriptor(desc);
  }

  /**
   * @param name the name for the copy
   * @param desc the descriptor to copy
   * @return a copy of {@code desc} carrying the given table name
   */
  public static TableDescriptor copy(TableName name, TableDescriptor desc) {
    return new ModifyableTableDescriptor(name, desc);
  }

  /**
   * Copy all values, families, and name from the input.
   * @param desc The descriptor to copy
   * @return A clone of input
   */
  public static TableDescriptorBuilder newBuilder(final TableDescriptor desc) {
    return new TableDescriptorBuilder(desc);
  }

  private TableDescriptorBuilder(final TableName name) {
    this.desc = new ModifyableTableDescriptor(name);
  }

  private TableDescriptorBuilder(final TableDescriptor desc) {
    this.desc = new ModifyableTableDescriptor(desc);
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #setCoprocessor(String)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessor(String className) throws IOException {
    return addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null);
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #setCoprocessor(CoprocessorDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessor(String className, Path jarFilePath,
    int priority, final Map<String, String> kvs) throws IOException {
    desc.setCoprocessor(
      CoprocessorDescriptorBuilder.newBuilder(className)
        .setJarPath(jarFilePath == null ? null : jarFilePath.toString())
        .setPriority(priority)
        .setProperties(kvs == null ? Collections.emptyMap() : kvs)
        .build());
    return this;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #setCoprocessor(CoprocessorDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addCoprocessorWithSpec(final String specStr) throws IOException {
    desc.setCoprocessorWithSpec(specStr);
    return this;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #setColumnFamily(ColumnFamilyDescriptor)} instead
   */
  @Deprecated
  public TableDescriptorBuilder addColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(family);
    return this;
  }

  /** Set a coprocessor by fully-qualified class name, with default settings. */
  public TableDescriptorBuilder setCoprocessor(String className) throws IOException {
    return setCoprocessor(CoprocessorDescriptorBuilder.of(className));
  }

  /** Set a coprocessor from a full {@link CoprocessorDescriptor} specification. */
  public TableDescriptorBuilder setCoprocessor(CoprocessorDescriptor cpDesc) throws IOException {
    desc.setCoprocessor(Objects.requireNonNull(cpDesc));
    return this;
  }

  /** Set each of the given coprocessors on the descriptor, in iteration order. */
  public TableDescriptorBuilder setCoprocessors(Collection<CoprocessorDescriptor> cpDescs)
    throws IOException {
    for (CoprocessorDescriptor cpDesc : cpDescs) {
      desc.setCoprocessor(cpDesc);
    }
    return this;
  }

  /** Add (or replace) the given column family. */
  public TableDescriptorBuilder setColumnFamily(final ColumnFamilyDescriptor family) {
    desc.setColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  /** Add (or replace) each of the given column families, in iteration order. */
  public TableDescriptorBuilder setColumnFamilies(
    final Collection<ColumnFamilyDescriptor> families) {
    families.forEach(desc::setColumnFamily);
    return this;
  }

  /** Replace an existing column family; the family must already be present. */
  public TableDescriptorBuilder modifyColumnFamily(final ColumnFamilyDescriptor family) {
    desc.modifyColumnFamily(Objects.requireNonNull(family));
    return this;
  }

  /** Remove the metadata value stored under {@code key}, if any. */
  public TableDescriptorBuilder removeValue(Bytes key) {
    desc.removeValue(key);
    return this;
  }

  /** Remove the metadata value stored under {@code key}, if any. */
  public TableDescriptorBuilder removeValue(byte[] key) {
    desc.removeValue(key);
    return this;
  }

  /** Remove the column family with the given name. */
  public TableDescriptorBuilder removeColumnFamily(final byte[] name) {
    desc.removeColumnFamily(name);
    return this;
  }

  /** Remove the coprocessor with the given class name. */
  public TableDescriptorBuilder removeCoprocessor(String className) {
    desc.removeCoprocessor(className);
    return this;
  }

  /** Enable or disable compactions for the table. */
  public TableDescriptorBuilder setCompactionEnabled(final boolean isEnable) {
    desc.setCompactionEnabled(isEnable);
    return this;
  }

  /** Enable or disable region splits for the table. */
  public TableDescriptorBuilder setSplitEnabled(final boolean isEnable) {
    desc.setSplitEnabled(isEnable);
    return this;
  }

  /** Enable or disable region merges for the table. */
  public TableDescriptorBuilder setMergeEnabled(final boolean isEnable) {
    desc.setMergeEnabled(isEnable);
    return this;
  }

  /** Set the WAL {@link Durability} level for the table. */
  public TableDescriptorBuilder setDurability(Durability durability) {
    desc.setDurability(durability);
    return this;
  }

  /** Set the flush policy class name for the table. */
  public TableDescriptorBuilder setFlushPolicyClassName(String clazz) {
    desc.setFlushPolicyClassName(clazz);
    return this;
  }

  /** Set the max store file size after which a region splits. */
  public TableDescriptorBuilder setMaxFileSize(long maxFileSize) {
    desc.setMaxFileSize(maxFileSize);
    return this;
  }

  /** Set the memstore size after which contents are flushed to disk. */
  public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) {
    desc.setMemStoreFlushSize(memstoreFlushSize);
    return this;
  }

  /** Set the normalizer's target region count for this table. */
  public TableDescriptorBuilder setNormalizerTargetRegionCount(final int regionCount) {
    desc.setNormalizerTargetRegionCount(regionCount);
    return this;
  }

  /** Set the normalizer's target region size for this table. */
  public TableDescriptorBuilder setNormalizerTargetRegionSize(final long regionSize) {
    desc.setNormalizerTargetRegionSize(regionSize);
    return this;
  }

  /** Enable or disable region normalization for the table. */
  public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) {
    desc.setNormalizationEnabled(isEnable);
    return this;
  }

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  public TableDescriptorBuilder setOwner(User owner) {
    desc.setOwner(owner);
    return this;
  }

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  public TableDescriptorBuilder setOwnerString(String ownerString) {
    desc.setOwnerString(ownerString);
    return this;
  }

  /** Set the relative rpc-scheduling priority of the table. */
  public TableDescriptorBuilder setPriority(int priority) {
    desc.setPriority(priority);
    return this;
  }

  /** Mark the table read-only (or writable again). */
  public TableDescriptorBuilder setReadOnly(final boolean readOnly) {
    desc.setReadOnly(readOnly);
    return this;
  }

  /** Enable or disable memstore replication for read replicas. */
  public TableDescriptorBuilder setRegionMemStoreReplication(boolean memstoreReplication) {
    desc.setRegionMemStoreReplication(memstoreReplication);
    return this;
  }

  /** Set the number of region replicas for the table. */
  public TableDescriptorBuilder setRegionReplication(int regionReplication) {
    desc.setRegionReplication(regionReplication);
    return this;
  }

  /** Set the region split policy class name for the table. */
  public TableDescriptorBuilder setRegionSplitPolicyClassName(String clazz) {
    desc.setRegionSplitPolicyClassName(clazz);
    return this;
  }

  /** Store an arbitrary metadata (key, value) pair; a null value removes the key. */
  public TableDescriptorBuilder setValue(final String key, final String value) {
    desc.setValue(key, value);
    return this;
  }

  /** Store an arbitrary metadata (key, value) pair; a null value removes the key. */
  public TableDescriptorBuilder setValue(final Bytes key, final Bytes value) {
    desc.setValue(key, value);
    return this;
  }

  /** Store an arbitrary metadata (key, value) pair; a null value removes the key. */
  public TableDescriptorBuilder setValue(final byte[] key, final byte[] value) {
    desc.setValue(key, value);
    return this;
  }

  /**
   * Sets replication scope for all & only the columns already in the builder. Columns added
   * later won't be backfilled with replication scope.
   * @param scope replication scope
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setReplicationScope(int scope) {
    // Snapshot the families first: we mutate desc.families while iterating.
    Map<byte[], ColumnFamilyDescriptor> newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
    newFamilies.putAll(desc.families);
    newFamilies
      .forEach((cf, cfDesc) -> {
        desc.removeColumnFamily(cf);
        desc.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope)
          .build());
      });
    return this;
  }

  /**
   * Set the RSGroup for this table, specified RSGroup must exist before create or modify table.
   *
   * @param group rsgroup name
   * @return a TableDescriptorBuilder
   */
  public TableDescriptorBuilder setRegionServerGroup(String group) {
    desc.setValue(RSGROUP_KEY, group);
    return this;
  }

  /** @return an independent {@link TableDescriptor} copy of the accumulated state */
  public TableDescriptor build() {
    return new ModifyableTableDescriptor(desc);
  }

  /**
   * TODO: make this private after removing the HTableDescriptor
   */
  @InterfaceAudience.Private
  public static class ModifyableTableDescriptor
    implements TableDescriptor, Comparable<ModifyableTableDescriptor> {

    private final TableName name;

    /**
     * A map which holds the metadata information of the table. This metadata
     * includes values like IS_META, SPLIT_POLICY, MAX_FILE_SIZE,
     * READONLY, MEMSTORE_FLUSHSIZE etc...
     */
    private final Map<Bytes, Bytes> values = new HashMap<>();

    /**
     * Maps column family name to the respective FamilyDescriptors
     */
    private final Map<byte[], ColumnFamilyDescriptor> families
      = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);

    /**
     * Construct a table descriptor specifying a TableName object
     *
     * @param name Table name.
619 * TODO: make this private after removing the HTableDescriptor 620 */ 621 @InterfaceAudience.Private 622 public ModifyableTableDescriptor(final TableName name) { 623 this(name, Collections.EMPTY_LIST, Collections.EMPTY_MAP); 624 } 625 626 private ModifyableTableDescriptor(final TableDescriptor desc) { 627 this(desc.getTableName(), Arrays.asList(desc.getColumnFamilies()), desc.getValues()); 628 } 629 630 /** 631 * Construct a table descriptor by cloning the descriptor passed as a 632 * parameter. 633 * <p> 634 * Makes a deep copy of the supplied descriptor. 635 * @param name The new name 636 * @param desc The descriptor. 637 * TODO: make this private after removing the HTableDescriptor 638 */ 639 @InterfaceAudience.Private 640 @Deprecated // only used by HTableDescriptor. remove this method if HTD is removed 641 public ModifyableTableDescriptor(final TableName name, final TableDescriptor desc) { 642 this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues()); 643 } 644 645 private ModifyableTableDescriptor(final TableName name, final Collection<ColumnFamilyDescriptor> families, 646 Map<Bytes, Bytes> values) { 647 this.name = name; 648 families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c))); 649 this.values.putAll(values); 650 this.values.put(IS_META_KEY, 651 new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME))))); 652 } 653 654 /** 655 * Checks if this table is <code> hbase:meta </code> region. 656 * 657 * @return true if this table is <code> hbase:meta </code> region 658 */ 659 @Override 660 public boolean isMetaRegion() { 661 return getOrDefault(IS_META_KEY, Boolean::valueOf, false); 662 } 663 664 /** 665 * Checks if the table is a <code>hbase:meta</code> table 666 * 667 * @return true if table is <code> hbase:meta </code> region. 
     */
    @Override
    public boolean isMetaTable() {
      return isMetaRegion();
    }

    /** Returns a defensive copy of the stored value, or null when the key is unset. */
    @Override
    public Bytes getValue(Bytes key) {
      Bytes rval = values.get(key);
      return rval == null ? null : new Bytes(rval.copyBytes());
    }

    /** Returns the stored value decoded as a String, or null when the key is unset. */
    @Override
    public String getValue(String key) {
      Bytes rval = values.get(new Bytes(Bytes.toBytes(key)));
      return rval == null ? null : Bytes.toString(rval.get(), rval.getOffset(), rval.getLength());
    }

    /** Returns a defensive byte[] copy of the stored value, or null when the key is unset. */
    @Override
    public byte[] getValue(byte[] key) {
      Bytes value = values.get(new Bytes(key));
      return value == null ? null : value.copyBytes();
    }

    /**
     * Looks up {@code key} and converts its String form via {@code function};
     * returns {@code defaultValue} when the key is unset.
     */
    private <T> T getOrDefault(Bytes key, Function<String, T> function, T defaultValue) {
      Bytes value = values.get(key);
      if (value == null) {
        return defaultValue;
      } else {
        return function.apply(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      }
    }

    /**
     * Getter for fetching an unmodifiable {@link #values} map.
     *
     * @return unmodifiable map {@link #values}.
     * @see #values
     */
    @Override
    public Map<Bytes, Bytes> getValues() {
      // shallow pointer copy
      return Collections.unmodifiableMap(values);
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     *
     * @param key The key.
     * @param value The value. If null, removes the setting.
     * @return the modifyable TD
     * @see #values
     */
    public ModifyableTableDescriptor setValue(byte[] key, byte[] value) {
      return setValue(toBytesOrNull(key, v -> v),
        toBytesOrNull(value, v -> v));
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     *
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    public ModifyableTableDescriptor setValue(String key, String value) {
      return setValue(toBytesOrNull(key, Bytes::toBytes),
        toBytesOrNull(value, Bytes::toBytes));
    }

    /**
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    private ModifyableTableDescriptor setValue(final Bytes key,
      final String value) {
      return setValue(key, toBytesOrNull(value, Bytes::toBytes));
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     *
     * @param key The key.
     * @param value The value. If null (or zero-length), removes the setting.
     */
    public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) {
      // An empty value is treated the same as null: the key is cleared.
      if (value == null || value.getLength() == 0) {
        values.remove(key);
      } else {
        values.put(key, value);
      }
      return this;
    }

    /** Wraps {@code f.apply(t)} in a {@link Bytes}, propagating null for a null input. */
    private static <T> Bytes toBytesOrNull(T t, Function<T, byte[]> f) {
      if (t == null) {
        return null;
      } else {
        return new Bytes(f.apply(t));
      }
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key Key whose key and value we're to remove from TableDescriptor
     * parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(Bytes key) {
      return setValue(key, (Bytes) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key Key whose key and value we're to remove from TableDescriptor
     * parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(final byte[] key) {
      return removeValue(new Bytes(key));
    }

    /**
     * Check if the readOnly flag of the table is set. If the readOnly flag is
     * set then the contents of the table can only be read from but not
     * modified.
     *
     * @return true if all columns in the table should be read only
     */
    @Override
    public boolean isReadOnly() {
      return getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY);
    }

    /**
     * Setting the table as read only sets all the columns in the table as read
     * only. By default all tables are modifiable, but if the readOnly flag is
     * set to true then the contents of the table can only be read but not
     * modified.
     *
     * @param readOnly True if all of the columns in the table should be read
     * only.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setReadOnly(final boolean readOnly) {
      return setValue(READONLY_KEY, Boolean.toString(readOnly));
    }

    /**
     * Check if the compaction enable flag of the table is true. If flag is
     * false then no minor/major compactions will be done in real.
     *
     * @return true if table compaction enabled
     */
    @Override
    public boolean isCompactionEnabled() {
      return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED);
    }

    /**
     * Setting the table compaction enable flag.
     *
     * @param isEnable True if enable compaction.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) {
      return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the split enable flag of the table is true. If flag is false then no split will be
     * done.
     *
     * @return true if table region split enabled
     */
    @Override
    public boolean isSplitEnabled() {
      return getOrDefault(SPLIT_ENABLED_KEY, Boolean::valueOf, DEFAULT_SPLIT_ENABLED);
    }

    /**
     * Setting the table region split enable flag.
     * @param isEnable True if enable region split.
     *
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) {
      return setValue(SPLIT_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the region merge enable flag of the table is true. If flag is false then no merge
     * will be done.
     *
     * @return true if table region merge enabled
     */
    @Override
    public boolean isMergeEnabled() {
      return getOrDefault(MERGE_ENABLED_KEY, Boolean::valueOf, DEFAULT_MERGE_ENABLED);
    }

    /**
     * Setting the table region merge enable flag.
     * @param isEnable True if enable region merge.
     *
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) {
      return setValue(MERGE_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if normalization enable flag of the table is true. If flag is false
     * then the region normalizer won't attempt to normalize this table.
     * @return true if region normalization is enabled for this table
     **/
    @Override
    public boolean isNormalizationEnabled() {
      return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, DEFAULT_NORMALIZATION_ENABLED);
    }

    /**
     * Check if there is the target region count. If so, the normalize plan will be calculated based
     * on the target region count.
     * @return target region count after normalize done, or -1 when unset
     */
    @Override
    public int getNormalizerTargetRegionCount() {
      return getOrDefault(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer::valueOf,
        Integer.valueOf(-1));
    }

    /**
     * Check if there is the target region size. If so, the normalize plan will be calculated based
     * on the target region size.
     * @return target region size after normalize done, or -1 when unset
     */
    @Override
    public long getNormalizerTargetRegionSize() {
      return getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1));
    }

    /**
     * Setting the table normalization enable flag.
     *
     * @param isEnable True if enable normalization.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) {
      return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Setting the target region count of table normalization .
     * @param regionCount the target region count.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionCount(final int regionCount) {
      return setValue(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer.toString(regionCount));
    }

    /**
     * Setting the target region size of table normalization.
     * @param regionSize the target region size.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionSize(final long regionSize) {
      return setValue(NORMALIZER_TARGET_REGION_SIZE_KEY, Long.toString(regionSize));
    }

    /**
     * Sets the {@link Durability} setting for the table. This defaults to
     * Durability.USE_DEFAULT.
     *
     * @param durability enum value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setDurability(Durability durability) {
      return setValue(DURABILITY_KEY, durability.name());
    }

    /**
     * Returns the durability setting for the table.
     *
     * @return durability setting for the table.
     */
    @Override
    public Durability getDurability() {
      return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABLITY);
    }

    /**
     * Get the name of the table
     *
     * @return TableName
     */
    @Override
    public TableName getTableName() {
      return name;
    }

    /**
     * This sets the class associated with the region split policy which
     * determines when a region split should occur. The class used by default is
     * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     *
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) {
      return setValue(SPLIT_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the region split policy which
     * determines when a region split should occur. The class used by default is
     * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     *
     * @return the class name of the region split policy for this table. If this
     * returns null, the default split policy is used.
     */
    @Override
    public String getRegionSplitPolicyClassName() {
      return getOrDefault(SPLIT_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Returns the maximum size upto which a region can grow to after which a
     * region split is triggered. The region size is represented by the size of
     * the biggest store file in that region.
     *
     * @return max hregion size for table, -1 if not set.
     *
     * @see #setMaxFileSize(long)
     */
    @Override
    public long getMaxFileSize() {
      return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Sets the maximum size upto which a region can grow to after which a
     * region split is triggered. The region size is represented by the size of
     * the biggest store file in that region, i.e. If the biggest store file
     * grows beyond the maxFileSize, then the region split is triggered. This
     * defaults to a value of 256 MB.
     * <p>
     * This is not an absolute value and might vary. Assume that a single row
     * exceeds the maxFileSize then the storeFileSize will be greater than
     * maxFileSize since a single row cannot be split across multiple regions
     * </p>
     *
     * @param maxFileSize The maximum file size that a store file can grow to
     * before a split is triggered.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) {
      return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    }

    /**
     * Returns the size of the memstore after which a flush to filesystem is
     * triggered.
     *
     * @return memory cache flush size for each hregion, -1 if not set.
     *
     * @see #setMemStoreFlushSize(long)
     */
    @Override
    public long getMemStoreFlushSize() {
      return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Represents the maximum size of the memstore after which the contents of
     * the memstore are flushed to the filesystem. This defaults to a size of 64
     * MB.
     *
     * @param memstoreFlushSize memory cache flush size for each hregion
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
      return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    }

    /**
     * This sets the class associated with the flush policy which determines
     * the stores need to be flushed when flushing a region. The
     * class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.FlushPolicy.
     *
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) {
      return setValue(FLUSH_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the flush policy which determines the
     * stores need to be flushed when flushing a region.
The class used by 1064 * default is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy. 1065 * 1066 * @return the class name of the flush policy for this table. If this 1067 * returns null, the default flush policy is used. 1068 */ 1069 @Override 1070 public String getFlushPolicyClassName() { 1071 return getOrDefault(FLUSH_POLICY_KEY, Function.identity(), null); 1072 } 1073 1074 /** 1075 * Adds a column family. For the updating purpose please use 1076 * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead. 1077 * 1078 * @param family to add. 1079 * @return the modifyable TD 1080 */ 1081 public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor family) { 1082 if (family.getName() == null || family.getName().length <= 0) { 1083 throw new IllegalArgumentException("Family name cannot be null or empty"); 1084 } 1085 int flength = family.getName() == null ? 0 : family.getName().length; 1086 if (flength > Byte.MAX_VALUE) { 1087 throw new IllegalArgumentException("The length of family name is bigger than " + Byte.MAX_VALUE); 1088 } 1089 if (hasColumnFamily(family.getName())) { 1090 throw new IllegalArgumentException("Family '" 1091 + family.getNameAsString() + "' already exists so cannot be added"); 1092 } 1093 return putColumnFamily(family); 1094 } 1095 1096 /** 1097 * Modifies the existing column family. 
1098 * 1099 * @param family to update 1100 * @return this (for chained invocation) 1101 */ 1102 public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) { 1103 if (family.getName() == null || family.getName().length <= 0) { 1104 throw new IllegalArgumentException("Family name cannot be null or empty"); 1105 } 1106 if (!hasColumnFamily(family.getName())) { 1107 throw new IllegalArgumentException("Column family '" + family.getNameAsString() 1108 + "' does not exist"); 1109 } 1110 return putColumnFamily(family); 1111 } 1112 1113 private ModifyableTableDescriptor putColumnFamily(ColumnFamilyDescriptor family) { 1114 families.put(family.getName(), family); 1115 return this; 1116 } 1117 1118 /** 1119 * Checks to see if this table contains the given column family 1120 * 1121 * @param familyName Family name or column name. 1122 * @return true if the table contains the specified family name 1123 */ 1124 @Override 1125 public boolean hasColumnFamily(final byte[] familyName) { 1126 return families.containsKey(familyName); 1127 } 1128 1129 /** 1130 * @return Name of this table and then a map of all of the column family descriptors. 
1131 */ 1132 @Override 1133 public String toString() { 1134 StringBuilder s = new StringBuilder(); 1135 s.append('\'').append(Bytes.toString(name.getName())).append('\''); 1136 s.append(getValues(true)); 1137 families.values().forEach(f -> s.append(", ").append(f)); 1138 return s.toString(); 1139 } 1140 1141 /** 1142 * @return Name of this table and then a map of all of the column family 1143 * descriptors (with only the non-default column family attributes) 1144 */ 1145 @Override 1146 public String toStringCustomizedValues() { 1147 StringBuilder s = new StringBuilder(); 1148 s.append('\'').append(Bytes.toString(name.getName())).append('\''); 1149 s.append(getValues(false)); 1150 families.values().forEach(hcd -> s.append(", ").append(hcd.toStringCustomizedValues())); 1151 return s.toString(); 1152 } 1153 1154 /** 1155 * @return map of all table attributes formatted into string. 1156 */ 1157 public String toStringTableAttributes() { 1158 return getValues(true).toString(); 1159 } 1160 1161 private StringBuilder getValues(boolean printDefaults) { 1162 StringBuilder s = new StringBuilder(); 1163 1164 // step 1: set partitioning and pruning 1165 Set<Bytes> reservedKeys = new TreeSet<>(); 1166 Set<Bytes> userKeys = new TreeSet<>(); 1167 for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) { 1168 if (entry.getKey() == null || entry.getKey().get() == null) { 1169 continue; 1170 } 1171 String key = Bytes.toString(entry.getKey().get()); 1172 // in this section, print out reserved keywords + coprocessor info 1173 if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) { 1174 userKeys.add(entry.getKey()); 1175 continue; 1176 } 1177 // only print out IS_META if true 1178 String value = Bytes.toString(entry.getValue().get()); 1179 if (key.equalsIgnoreCase(IS_META)) { 1180 if (Boolean.valueOf(value) == false) { 1181 continue; 1182 } 1183 } 1184 // see if a reserved key is a default value. 
may not want to print it out 1185 if (printDefaults 1186 || !DEFAULT_VALUES.containsKey(key) 1187 || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { 1188 reservedKeys.add(entry.getKey()); 1189 } 1190 } 1191 1192 // early exit optimization 1193 boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty(); 1194 if (!hasAttributes) { 1195 return s; 1196 } 1197 1198 s.append(", {"); 1199 // step 2: printing attributes 1200 if (hasAttributes) { 1201 s.append("TABLE_ATTRIBUTES => {"); 1202 1203 // print all reserved keys first 1204 boolean printCommaForAttr = false; 1205 for (Bytes k : reservedKeys) { 1206 String key = Bytes.toString(k.get()); 1207 String value = Bytes.toStringBinary(values.get(k).get()); 1208 if (printCommaForAttr) { 1209 s.append(", "); 1210 } 1211 printCommaForAttr = true; 1212 s.append(key); 1213 s.append(" => "); 1214 s.append('\'').append(value).append('\''); 1215 } 1216 1217 if (!userKeys.isEmpty()) { 1218 // print all non-reserved as a separate subset 1219 if (printCommaForAttr) { 1220 s.append(", "); 1221 } 1222 s.append(HConstants.METADATA).append(" => "); 1223 s.append("{"); 1224 boolean printCommaForCfg = false; 1225 for (Bytes k : userKeys) { 1226 String key = Bytes.toString(k.get()); 1227 String value = Bytes.toStringBinary(values.get(k).get()); 1228 if (printCommaForCfg) { 1229 s.append(", "); 1230 } 1231 printCommaForCfg = true; 1232 s.append('\'').append(key).append('\''); 1233 s.append(" => "); 1234 s.append('\'').append(value).append('\''); 1235 } 1236 s.append("}"); 1237 } 1238 1239 s.append("}"); 1240 } 1241 1242 s.append("}"); // end METHOD 1243 return s; 1244 } 1245 1246 /** 1247 * Compare the contents of the descriptor with another one passed as a 1248 * parameter. Checks if the obj passed is an instance of ModifyableTableDescriptor, 1249 * if yes then the contents of the descriptors are compared. 
1250 * 1251 * @param obj The object to compare 1252 * @return true if the contents of the the two descriptors exactly match 1253 * 1254 * @see java.lang.Object#equals(java.lang.Object) 1255 */ 1256 @Override 1257 public boolean equals(Object obj) { 1258 if (this == obj) { 1259 return true; 1260 } 1261 if (obj instanceof ModifyableTableDescriptor) { 1262 return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0; 1263 } 1264 return false; 1265 } 1266 1267 /** 1268 * @return hash code 1269 */ 1270 @Override 1271 public int hashCode() { 1272 int result = this.name.hashCode(); 1273 if (this.families.size() > 0) { 1274 for (ColumnFamilyDescriptor e : this.families.values()) { 1275 result ^= e.hashCode(); 1276 } 1277 } 1278 result ^= values.hashCode(); 1279 return result; 1280 } 1281 1282 // Comparable 1283 /** 1284 * Compares the descriptor with another descriptor which is passed as a 1285 * parameter. This compares the content of the two descriptors and not the 1286 * reference. 1287 * 1288 * @param other The MTD to compare 1289 * @return 0 if the contents of the descriptors are exactly matching, 1 if 1290 * there is a mismatch in the contents 1291 */ 1292 @Override 1293 public int compareTo(final ModifyableTableDescriptor other) { 1294 return TableDescriptor.COMPARATOR.compare(this, other); 1295 } 1296 1297 @Override 1298 public ColumnFamilyDescriptor[] getColumnFamilies() { 1299 return families.values().toArray(new ColumnFamilyDescriptor[families.size()]); 1300 } 1301 1302 /** 1303 * Returns the configured replicas per region 1304 */ 1305 @Override 1306 public int getRegionReplication() { 1307 return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION); 1308 } 1309 1310 /** 1311 * Sets the number of replicas per region. 
1312 * 1313 * @param regionReplication the replication factor per region 1314 * @return the modifyable TD 1315 */ 1316 public ModifyableTableDescriptor setRegionReplication(int regionReplication) { 1317 return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication)); 1318 } 1319 1320 /** 1321 * @return true if the read-replicas memstore replication is enabled. 1322 */ 1323 @Override 1324 public boolean hasRegionMemStoreReplication() { 1325 return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf, DEFAULT_REGION_MEMSTORE_REPLICATION); 1326 } 1327 1328 /** 1329 * Enable or Disable the memstore replication from the primary region to the 1330 * replicas. The replication will be used only for meta operations (e.g. 1331 * flush, compaction, ...) 1332 * 1333 * @param memstoreReplication true if the new data written to the primary 1334 * region should be replicated. false if the secondaries can tollerate to 1335 * have new data only when the primary flushes the memstore. 1336 * @return the modifyable TD 1337 */ 1338 public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) { 1339 setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication)); 1340 // If the memstore replication is setup, we do not have to wait for observing a flush event 1341 // from primary before starting to serve reads, because gaps from replication is not applicable 1342 return setValue(REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, 1343 Boolean.toString(memstoreReplication)); 1344 } 1345 1346 public ModifyableTableDescriptor setPriority(int priority) { 1347 return setValue(PRIORITY_KEY, Integer.toString(priority)); 1348 } 1349 1350 @Override 1351 public int getPriority() { 1352 return getOrDefault(PRIORITY_KEY, Integer::valueOf, DEFAULT_PRIORITY); 1353 } 1354 1355 /** 1356 * Returns all the column family names of the current table. The map of 1357 * TableDescriptor contains mapping of family name to ColumnFamilyDescriptor. 
1358 * This returns all the keys of the family map which represents the column 1359 * family names of the table. 1360 * 1361 * @return Immutable sorted set of the keys of the families. 1362 */ 1363 @Override 1364 public Set<byte[]> getColumnFamilyNames() { 1365 return Collections.unmodifiableSet(this.families.keySet()); 1366 } 1367 1368 /** 1369 * Returns the ColumnFamilyDescriptor for a specific column family with name as 1370 * specified by the parameter column. 1371 * 1372 * @param column Column family name 1373 * @return Column descriptor for the passed family name or the family on 1374 * passed in column. 1375 */ 1376 @Override 1377 public ColumnFamilyDescriptor getColumnFamily(final byte[] column) { 1378 return this.families.get(column); 1379 } 1380 1381 /** 1382 * Removes the ColumnFamilyDescriptor with name specified by the parameter column 1383 * from the table descriptor 1384 * 1385 * @param column Name of the column family to be removed. 1386 * @return Column descriptor for the passed family name or the family on 1387 * passed in column. 1388 */ 1389 public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) { 1390 return this.families.remove(column); 1391 } 1392 1393 /** 1394 * Add a table coprocessor to this table. The coprocessor type must be 1395 * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't 1396 * check if the class can be loaded or not. Whether a coprocessor is 1397 * loadable or not will be determined when a region is opened. 1398 * 1399 * @param className Full class name. 1400 * @throws IOException 1401 * @return the modifyable TD 1402 */ 1403 public ModifyableTableDescriptor setCoprocessor(String className) throws IOException { 1404 return setCoprocessor( 1405 CoprocessorDescriptorBuilder.newBuilder(className).setPriority(Coprocessor.PRIORITY_USER) 1406 .build()); 1407 } 1408 1409 /** 1410 * Add a table coprocessor to this table. 
The coprocessor type must be 1411 * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't 1412 * check if the class can be loaded or not. Whether a coprocessor is 1413 * loadable or not will be determined when a region is opened. 1414 * 1415 * @throws IOException any illegal parameter key/value 1416 * @return the modifyable TD 1417 */ 1418 public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) 1419 throws IOException { 1420 checkHasCoprocessor(cp.getClassName()); 1421 if (cp.getPriority() < 0) { 1422 throw new IOException("Priority must be bigger than or equal with zero, current:" 1423 + cp.getPriority()); 1424 } 1425 // Validate parameter kvs and then add key/values to kvString. 1426 StringBuilder kvString = new StringBuilder(); 1427 for (Map.Entry<String, String> e : cp.getProperties().entrySet()) { 1428 if (!e.getKey().matches(CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) { 1429 throw new IOException("Illegal parameter key = " + e.getKey()); 1430 } 1431 if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) { 1432 throw new IOException("Illegal parameter (" + e.getKey() 1433 + ") value = " + e.getValue()); 1434 } 1435 if (kvString.length() != 0) { 1436 kvString.append(','); 1437 } 1438 kvString.append(e.getKey()); 1439 kvString.append('='); 1440 kvString.append(e.getValue()); 1441 } 1442 1443 String value = cp.getJarPath().orElse("") 1444 + "|" + cp.getClassName() + "|" + Integer.toString(cp.getPriority()) + "|" 1445 + kvString.toString(); 1446 return setCoprocessorToMap(value); 1447 } 1448 1449 /** 1450 * Add a table coprocessor to this table. The coprocessor type must be 1451 * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't 1452 * check if the class can be loaded or not. Whether a coprocessor is 1453 * loadable or not will be determined when a region is opened. 
1454 * 1455 * @param specStr The Coprocessor specification all in in one String 1456 * @throws IOException 1457 * @return the modifyable TD 1458 * @deprecated used by HTableDescriptor and admin.rb. 1459 * As of release 2.0.0, this will be removed in HBase 3.0.0. 1460 */ 1461 @Deprecated 1462 public ModifyableTableDescriptor setCoprocessorWithSpec(final String specStr) 1463 throws IOException { 1464 CoprocessorDescriptor cpDesc = toCoprocessorDescriptor(specStr).orElseThrow( 1465 () -> new IllegalArgumentException( 1466 "Format does not match " + CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr)); 1467 checkHasCoprocessor(cpDesc.getClassName()); 1468 return setCoprocessorToMap(specStr); 1469 } 1470 1471 private void checkHasCoprocessor(final String className) throws IOException { 1472 if (hasCoprocessor(className)) { 1473 throw new IOException("Coprocessor " + className + " already exists."); 1474 } 1475 } 1476 1477 /** 1478 * Add coprocessor to values Map 1479 * @param specStr The Coprocessor specification all in in one String 1480 * @return Returns <code>this</code> 1481 */ 1482 private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) { 1483 if (specStr == null) { 1484 return this; 1485 } 1486 // generate a coprocessor key 1487 int maxCoprocessorNumber = 0; 1488 Matcher keyMatcher; 1489 for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) { 1490 keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get())); 1491 if (!keyMatcher.matches()) { 1492 continue; 1493 } 1494 maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)), maxCoprocessorNumber); 1495 } 1496 maxCoprocessorNumber++; 1497 String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber); 1498 return setValue(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr))); 1499 } 1500 1501 /** 1502 * Check if the table has an attached co-processor represented by the name 1503 * className 1504 * 1505 * @param classNameToMatch - Class name of the 
co-processor 1506 * @return true of the table has a co-processor className 1507 */ 1508 @Override 1509 public boolean hasCoprocessor(String classNameToMatch) { 1510 return getCoprocessorDescriptors().stream().anyMatch(cp -> cp.getClassName() 1511 .equals(classNameToMatch)); 1512 } 1513 1514 /** 1515 * Return the list of attached co-processor represented by their name 1516 * className 1517 * 1518 * @return The list of co-processors classNames 1519 */ 1520 @Override 1521 public List<CoprocessorDescriptor> getCoprocessorDescriptors() { 1522 List<CoprocessorDescriptor> result = new ArrayList<>(); 1523 for (Map.Entry<Bytes, Bytes> e: getValues().entrySet()) { 1524 String key = Bytes.toString(e.getKey().get()).trim(); 1525 if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) { 1526 toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim()) 1527 .ifPresent(result::add); 1528 } 1529 } 1530 return result; 1531 } 1532 1533 /** 1534 * Remove a coprocessor from those set on the table 1535 * 1536 * @param className Class name of the co-processor 1537 */ 1538 public void removeCoprocessor(String className) { 1539 Bytes match = null; 1540 Matcher keyMatcher; 1541 Matcher valueMatcher; 1542 for (Map.Entry<Bytes, Bytes> e : this.values 1543 .entrySet()) { 1544 keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e 1545 .getKey().get())); 1546 if (!keyMatcher.matches()) { 1547 continue; 1548 } 1549 valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes 1550 .toString(e.getValue().get())); 1551 if (!valueMatcher.matches()) { 1552 continue; 1553 } 1554 // get className and compare 1555 String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field 1556 // remove the CP if it is present 1557 if (clazz.equals(className.trim())) { 1558 match = e.getKey(); 1559 break; 1560 } 1561 } 1562 // if we found a match, remove it 1563 if (match != null) { 1564 ModifyableTableDescriptor.this.removeValue(match); 1565 } 1566 } 1567 1568 /** 1569 * @deprecated since 2.0.0 and 
will be removed in 3.0.0. 1570 * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a> 1571 */ 1572 @Deprecated 1573 public ModifyableTableDescriptor setOwner(User owner) { 1574 return setOwnerString(owner != null ? owner.getShortName() : null); 1575 } 1576 1577 /** 1578 * @deprecated since 2.0.0 and will be removed in 3.0.0. 1579 * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a> 1580 */ 1581 // used by admin.rb:alter(table_name,*args) to update owner. 1582 @Deprecated 1583 public ModifyableTableDescriptor setOwnerString(String ownerString) { 1584 return setValue(OWNER_KEY, ownerString); 1585 } 1586 1587 /** 1588 * @deprecated since 2.0.0 and will be removed in 3.0.0. 1589 * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a> 1590 */ 1591 @Override 1592 @Deprecated 1593 public String getOwnerString() { 1594 // Note that every table should have an owner (i.e. should have OWNER_KEY set). 1595 // hbase:meta should return system user as owner, not null (see 1596 // MasterFileSystem.java:bootstrap()). 
      return getOrDefault(OWNER_KEY, Function.identity(), null);
    }

    /**
     * Serializes this descriptor as a protobuf TableSchema message with the
     * standard PB magic prefix prepended.
     * @return the bytes in pb format
     */
    private byte[] toByteArray() {
      return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
    }

    /**
     * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance
     * with pb magic prefix
     * @return An instance of {@link ModifyableTableDescriptor} made from
     * <code>bytes</code>
     * @throws DeserializationException if the magic prefix is missing or the
     *           payload cannot be parsed as a TableSchema message
     * @see #toByteArray()
     */
    private static TableDescriptor parseFrom(final byte[] bytes)
        throws DeserializationException {
      if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
        throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor");
      }
      // Skip the magic prefix, then parse the remainder as a TableSchema message.
      int pblen = ProtobufUtil.lengthOfPBMagic();
      HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
      try {
        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
        return ProtobufUtil.toTableDescriptor(builder.build());
      } catch (IOException e) {
        // Preserve the parse failure as the cause.
        throw new DeserializationException(e);
      }
    }

    @Override
    public int getColumnFamilyCount() {
      return families.size();
    }

    @Override
    public Optional<String> getRegionServerGroup() {
      // RSGROUP_KEY is optional; absent means the table belongs to no explicit group.
      Bytes value = values.get(RSGROUP_KEY);
      if (value != null) {
        return Optional.of(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      } else {
        return Optional.empty();
      }
    }
  }

  /**
   * Parses a coprocessor spec string into a {@link CoprocessorDescriptor}.
   * Based on the group usage below, the spec fields are: group 1 = jar path
   * (may be empty), group 2 = class name, group 3 = priority, group 4 = the
   * optional "|k=v,k=v" configuration tail. Returns empty when the spec does
   * not match CP_HTD_ATTR_VALUE_PATTERN or the class name is blank.
   */
  private static Optional<CoprocessorDescriptor> toCoprocessorDescriptor(String spec) {
    Matcher matcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    if (matcher.matches()) {
      // jar file path can be empty if the cp class can be loaded
      // from class loader.
      String path = matcher.group(1).trim().isEmpty() ?
        null : matcher.group(1).trim();
      String className = matcher.group(2).trim();
      if (className.isEmpty()) {
        return Optional.empty();
      }
      // Missing priority defaults to PRIORITY_USER.
      String priorityStr = matcher.group(3).trim();
      int priority = priorityStr.isEmpty() ?
        Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr);
      // group(4) may not exist for specs without a config section; treat that as "no config".
      String cfgSpec = null;
      try {
        cfgSpec = matcher.group(4);
      } catch (IndexOutOfBoundsException ex) {
        // ignore
      }
      Map<String, String> ourConf = new TreeMap<>();
      if (cfgSpec != null && !cfgSpec.trim().equals("|")) {
        // Drop the leading '|' separator, then collect each key=value pair.
        cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1);
        Matcher m = CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec);
        while (m.find()) {
          ourConf.put(m.group(1), m.group(2));
        }
      }
      return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className)
        .setJarPath(path)
        .setPriority(priority)
        .setProperties(ourConf)
        .build());
    }
    return Optional.empty();
  }
}