001/** 002 * 003 * Licensed to the Apache Software Foundation (ASF) under one 004 * or more contributor license agreements. See the NOTICE file 005 * distributed with this work for additional information 006 * regarding copyright ownership. The ASF licenses this file 007 * to you under the Apache License, Version 2.0 (the 008 * "License"); you may not use this file except in compliance 009 * with the License. You may obtain a copy of the License at 010 * 011 * http://www.apache.org/licenses/LICENSE-2.0 012 * 013 * Unless required by applicable law or agreed to in writing, software 014 * distributed under the License is distributed on an "AS IS" BASIS, 015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 016 * See the License for the specific language governing permissions and 017 * limitations under the License. 018 */ 019package org.apache.hadoop.hbase.client; 020 021import java.io.IOException; 022import java.util.ArrayList; 023import java.util.Arrays; 024import java.util.Collection; 025import java.util.Collections; 026import java.util.HashMap; 027import java.util.HashSet; 028import java.util.List; 029import java.util.Map; 030import java.util.Objects; 031import java.util.Optional; 032import java.util.Set; 033import java.util.TreeMap; 034import java.util.TreeSet; 035import java.util.function.Function; 036import java.util.regex.Matcher; 037import java.util.regex.Pattern; 038import org.apache.hadoop.fs.Path; 039import org.apache.hadoop.hbase.Coprocessor; 040import org.apache.hadoop.hbase.HConstants; 041import org.apache.hadoop.hbase.TableName; 042import org.apache.hadoop.hbase.exceptions.DeserializationException; 043import org.apache.hadoop.hbase.security.User; 044import org.apache.hadoop.hbase.util.Bytes; 045import org.apache.yetus.audience.InterfaceAudience; 046import org.slf4j.Logger; 047import org.slf4j.LoggerFactory; 048 049import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; 050import 
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; 051 052/** 053 * Convenience class for composing an instance of {@link TableDescriptor}. 054 * @since 2.0.0 055 */ 056@InterfaceAudience.Public 057public class TableDescriptorBuilder { 058 public static final Logger LOG = LoggerFactory.getLogger(TableDescriptorBuilder.class); 059 @InterfaceAudience.Private 060 public static final String SPLIT_POLICY = "SPLIT_POLICY"; 061 private static final Bytes SPLIT_POLICY_KEY = new Bytes(Bytes.toBytes(SPLIT_POLICY)); 062 /** 063 * Used by HBase Shell interface to access this metadata 064 * attribute which denotes the maximum size of the store file after which a 065 * region split occurs. 066 */ 067 @InterfaceAudience.Private 068 public static final String MAX_FILESIZE = "MAX_FILESIZE"; 069 private static final Bytes MAX_FILESIZE_KEY 070 = new Bytes(Bytes.toBytes(MAX_FILESIZE)); 071 072 @InterfaceAudience.Private 073 public static final String OWNER = "OWNER"; 074 @InterfaceAudience.Private 075 public static final Bytes OWNER_KEY 076 = new Bytes(Bytes.toBytes(OWNER)); 077 078 /** 079 * Used by rest interface to access this metadata attribute 080 * which denotes if the table is Read Only. 081 */ 082 @InterfaceAudience.Private 083 public static final String READONLY = "READONLY"; 084 private static final Bytes READONLY_KEY 085 = new Bytes(Bytes.toBytes(READONLY)); 086 087 /** 088 * Used by HBase Shell interface to access this metadata 089 * attribute which denotes if the table is compaction enabled. 090 */ 091 @InterfaceAudience.Private 092 public static final String COMPACTION_ENABLED = "COMPACTION_ENABLED"; 093 private static final Bytes COMPACTION_ENABLED_KEY 094 = new Bytes(Bytes.toBytes(COMPACTION_ENABLED)); 095 096 /** 097 * Used by HBase Shell interface to access this metadata 098 * attribute which denotes if the table is split enabled. 
099 */ 100 @InterfaceAudience.Private 101 public static final String SPLIT_ENABLED = "SPLIT_ENABLED"; 102 private static final Bytes SPLIT_ENABLED_KEY = new Bytes(Bytes.toBytes(SPLIT_ENABLED)); 103 104 /** 105 * Used by HBase Shell interface to access this metadata 106 * attribute which denotes if the table is merge enabled. 107 */ 108 @InterfaceAudience.Private 109 public static final String MERGE_ENABLED = "MERGE_ENABLED"; 110 private static final Bytes MERGE_ENABLED_KEY = new Bytes(Bytes.toBytes(MERGE_ENABLED)); 111 112 /** 113 * Used by HBase Shell interface to access this metadata 114 * attribute which represents the maximum size of the memstore after which its 115 * contents are flushed onto the disk. 116 */ 117 @InterfaceAudience.Private 118 public static final String MEMSTORE_FLUSHSIZE = "MEMSTORE_FLUSHSIZE"; 119 private static final Bytes MEMSTORE_FLUSHSIZE_KEY 120 = new Bytes(Bytes.toBytes(MEMSTORE_FLUSHSIZE)); 121 122 @InterfaceAudience.Private 123 public static final String FLUSH_POLICY = "FLUSH_POLICY"; 124 private static final Bytes FLUSH_POLICY_KEY = new Bytes(Bytes.toBytes(FLUSH_POLICY)); 125 /** 126 * Used by rest interface to access this metadata attribute 127 * which denotes if it is a catalog table, either <code> hbase:meta </code>. 128 */ 129 @InterfaceAudience.Private 130 public static final String IS_META = "IS_META"; 131 private static final Bytes IS_META_KEY 132 = new Bytes(Bytes.toBytes(IS_META)); 133 134 /** 135 * {@link Durability} setting for the table. 136 */ 137 @InterfaceAudience.Private 138 public static final String DURABILITY = "DURABILITY"; 139 private static final Bytes DURABILITY_KEY 140 = new Bytes(Bytes.toBytes("DURABILITY")); 141 142 /** 143 * The number of region replicas for the table. 
144 */ 145 @InterfaceAudience.Private 146 public static final String REGION_REPLICATION = "REGION_REPLICATION"; 147 private static final Bytes REGION_REPLICATION_KEY 148 = new Bytes(Bytes.toBytes(REGION_REPLICATION)); 149 150 /** 151 * The flag to indicate whether or not the memstore should be 152 * replicated for read-replicas (CONSISTENCY => TIMELINE). 153 */ 154 @InterfaceAudience.Private 155 public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION"; 156 private static final Bytes REGION_MEMSTORE_REPLICATION_KEY 157 = new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION)); 158 159 private static final Bytes REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY 160 = new Bytes(Bytes.toBytes(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY)); 161 /** 162 * Used by shell/rest interface to access this metadata 163 * attribute which denotes if the table should be treated by region 164 * normalizer. 165 */ 166 @InterfaceAudience.Private 167 public static final String NORMALIZATION_ENABLED = "NORMALIZATION_ENABLED"; 168 private static final Bytes NORMALIZATION_ENABLED_KEY 169 = new Bytes(Bytes.toBytes(NORMALIZATION_ENABLED)); 170 171 @InterfaceAudience.Private 172 public static final String NORMALIZER_TARGET_REGION_COUNT = 173 "NORMALIZER_TARGET_REGION_COUNT"; 174 private static final Bytes NORMALIZER_TARGET_REGION_COUNT_KEY = 175 new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT)); 176 177 @InterfaceAudience.Private 178 public static final String NORMALIZER_TARGET_REGION_SIZE = "NORMALIZER_TARGET_REGION_SIZE"; 179 private static final Bytes NORMALIZER_TARGET_REGION_SIZE_KEY = 180 new Bytes(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE)); 181 182 /** 183 * Default durability for HTD is USE_DEFAULT, which defaults to HBase-global 184 * default value 185 */ 186 private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT; 187 188 @InterfaceAudience.Private 189 public static final String PRIORITY = "PRIORITY"; 190 
private static final Bytes PRIORITY_KEY 191 = new Bytes(Bytes.toBytes(PRIORITY)); 192 193 /** 194 * Relative priority of the table used for rpc scheduling 195 */ 196 private static final int DEFAULT_PRIORITY = HConstants.NORMAL_QOS; 197 198 /** 199 * Constant that denotes whether the table is READONLY by default and is false 200 */ 201 public static final boolean DEFAULT_READONLY = false; 202 203 /** 204 * Constant that denotes whether the table is compaction enabled by default 205 */ 206 public static final boolean DEFAULT_COMPACTION_ENABLED = true; 207 208 /** 209 * Constant that denotes whether the table is split enabled by default 210 */ 211 public static final boolean DEFAULT_SPLIT_ENABLED = true; 212 213 /** 214 * Constant that denotes whether the table is merge enabled by default 215 */ 216 public static final boolean DEFAULT_MERGE_ENABLED = true; 217 218 /** 219 * Constant that denotes whether the table is normalized by default. 220 */ 221 public static final boolean DEFAULT_NORMALIZATION_ENABLED = false; 222 223 /** 224 * Constant that denotes the maximum default size of the memstore in bytes after which 225 * the contents are flushed to the store files. 
226 */ 227 public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024 * 1024 * 128L; 228 229 public static final int DEFAULT_REGION_REPLICATION = 1; 230 231 public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true; 232 233 private final static Map<String, String> DEFAULT_VALUES = new HashMap<>(); 234 private final static Set<Bytes> RESERVED_KEYWORDS = new HashSet<>(); 235 236 static { 237 DEFAULT_VALUES.put(MAX_FILESIZE, 238 String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE)); 239 DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY)); 240 DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE, 241 String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE)); 242 DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name 243 DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION)); 244 DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED)); 245 DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY)); 246 DEFAULT_VALUES.keySet().stream() 247 .map(s -> new Bytes(Bytes.toBytes(s))).forEach(RESERVED_KEYWORDS::add); 248 RESERVED_KEYWORDS.add(IS_META_KEY); 249 } 250 251 @InterfaceAudience.Private 252 public final static String NAMESPACE_FAMILY_INFO = "info"; 253 @InterfaceAudience.Private 254 public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO); 255 @InterfaceAudience.Private 256 public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d"); 257 258 /** 259 * <pre> 260 * Pattern that matches a coprocessor specification. 
Form is: 261 * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]} 262 * where arguments are {@code <KEY> '=' <VALUE> [,...]} 263 * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2} 264 * </pre> 265 */ 266 private static final Pattern CP_HTD_ATTR_VALUE_PATTERN = 267 Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$"); 268 269 private static final String CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN = "[^=,]+"; 270 private static final String CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN = "[^,]+"; 271 private static final Pattern CP_HTD_ATTR_VALUE_PARAM_PATTERN = Pattern.compile( 272 "(" + CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN + ")=(" + 273 CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?"); 274 private static final Pattern CP_HTD_ATTR_KEY_PATTERN = 275 Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE); 276 277 /** 278 * Table descriptor for namespace table 279 */ 280 // TODO We used to set CacheDataInL1 for NS table. When we have BucketCache in file mode, now the 281 // NS data goes to File mode BC only. Test how that affect the system. If too much, we have to 282 // rethink about adding back the setCacheDataInL1 for NS table. 283 // Note: namespace schema is hard-coded. In hbase3, namespace goes away; it is integrated into 284 // hbase:meta. 285 public static final TableDescriptor NAMESPACE_TABLEDESC 286 = TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME) 287 .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES) 288 // Ten is arbitrary number. Keep versions to help debugging. 
289 .setMaxVersions(10) 290 .setInMemory(true) 291 .setBlocksize(8 * 1024) 292 .setScope(HConstants.REPLICATION_SCOPE_LOCAL) 293 .build()) 294 .build(); 295 296 private final ModifyableTableDescriptor desc; 297 298 /** 299 * @param desc The table descriptor to serialize 300 * @return This instance serialized with pb with pb magic prefix 301 */ 302 public static byte[] toByteArray(TableDescriptor desc) { 303 if (desc instanceof ModifyableTableDescriptor) { 304 return ((ModifyableTableDescriptor) desc).toByteArray(); 305 } 306 return new ModifyableTableDescriptor(desc).toByteArray(); 307 } 308 309 /** 310 * The input should be created by {@link #toByteArray}. 311 * @param pbBytes A pb serialized TableDescriptor instance with pb magic prefix 312 * @return This instance serialized with pb with pb magic prefix 313 * @throws org.apache.hadoop.hbase.exceptions.DeserializationException 314 */ 315 public static TableDescriptor parseFrom(byte[] pbBytes) throws DeserializationException { 316 return ModifyableTableDescriptor.parseFrom(pbBytes); 317 } 318 319 public static TableDescriptorBuilder newBuilder(final TableName name) { 320 return new TableDescriptorBuilder(name); 321 } 322 323 public static TableDescriptor copy(TableDescriptor desc) { 324 return new ModifyableTableDescriptor(desc); 325 } 326 327 public static TableDescriptor copy(TableName name, TableDescriptor desc) { 328 return new ModifyableTableDescriptor(name, desc); 329 } 330 331 /** 332 * Copy all values, families, and name from the input. 
333 * @param desc The desciptor to copy 334 * @return A clone of input 335 */ 336 public static TableDescriptorBuilder newBuilder(final TableDescriptor desc) { 337 return new TableDescriptorBuilder(desc); 338 } 339 340 private TableDescriptorBuilder(final TableName name) { 341 this.desc = new ModifyableTableDescriptor(name); 342 } 343 344 private TableDescriptorBuilder(final TableDescriptor desc) { 345 this.desc = new ModifyableTableDescriptor(desc); 346 } 347 348 /** 349 * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. 350 * Use {@link #setCoprocessor(String)} instead 351 */ 352 @Deprecated 353 public TableDescriptorBuilder addCoprocessor(String className) throws IOException { 354 return addCoprocessor(className, null, Coprocessor.PRIORITY_USER, null); 355 } 356 357 /** 358 * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. 359 * Use {@link #setCoprocessor(CoprocessorDescriptor)} instead 360 */ 361 @Deprecated 362 public TableDescriptorBuilder addCoprocessor(String className, Path jarFilePath, 363 int priority, final Map<String, String> kvs) throws IOException { 364 desc.setCoprocessor( 365 CoprocessorDescriptorBuilder.newBuilder(className) 366 .setJarPath(jarFilePath == null ? null : jarFilePath.toString()) 367 .setPriority(priority) 368 .setProperties(kvs == null ? Collections.emptyMap() : kvs) 369 .build()); 370 return this; 371 } 372 373 /** 374 * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. 375 * Use {@link #setCoprocessor(CoprocessorDescriptor)} instead 376 */ 377 @Deprecated 378 public TableDescriptorBuilder addCoprocessorWithSpec(final String specStr) throws IOException { 379 desc.setCoprocessorWithSpec(specStr); 380 return this; 381 } 382 383 /** 384 * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. 
385 * Use {@link #setColumnFamily(ColumnFamilyDescriptor)} instead 386 */ 387 @Deprecated 388 public TableDescriptorBuilder addColumnFamily(final ColumnFamilyDescriptor family) { 389 desc.setColumnFamily(family); 390 return this; 391 } 392 393 public TableDescriptorBuilder setCoprocessor(String className) throws IOException { 394 return setCoprocessor(CoprocessorDescriptorBuilder.of(className)); 395 } 396 397 public TableDescriptorBuilder setCoprocessor(CoprocessorDescriptor cpDesc) throws IOException { 398 desc.setCoprocessor(Objects.requireNonNull(cpDesc)); 399 return this; 400 } 401 402 public TableDescriptorBuilder setCoprocessors(Collection<CoprocessorDescriptor> cpDescs) 403 throws IOException { 404 for (CoprocessorDescriptor cpDesc : cpDescs) { 405 desc.setCoprocessor(cpDesc); 406 } 407 return this; 408 } 409 410 public TableDescriptorBuilder setColumnFamily(final ColumnFamilyDescriptor family) { 411 desc.setColumnFamily(Objects.requireNonNull(family)); 412 return this; 413 } 414 415 public TableDescriptorBuilder setColumnFamilies( 416 final Collection<ColumnFamilyDescriptor> families) { 417 families.forEach(desc::setColumnFamily); 418 return this; 419 } 420 421 public TableDescriptorBuilder modifyColumnFamily(final ColumnFamilyDescriptor family) { 422 desc.modifyColumnFamily(Objects.requireNonNull(family)); 423 return this; 424 } 425 426 public TableDescriptorBuilder removeValue(Bytes key) { 427 desc.removeValue(key); 428 return this; 429 } 430 431 public TableDescriptorBuilder removeValue(byte[] key) { 432 desc.removeValue(key); 433 return this; 434 } 435 436 public TableDescriptorBuilder removeColumnFamily(final byte[] name) { 437 desc.removeColumnFamily(name); 438 return this; 439 } 440 441 public TableDescriptorBuilder removeCoprocessor(String className) { 442 desc.removeCoprocessor(className); 443 return this; 444 } 445 446 public TableDescriptorBuilder setCompactionEnabled(final boolean isEnable) { 447 desc.setCompactionEnabled(isEnable); 448 return 
this; 449 } 450 451 public TableDescriptorBuilder setSplitEnabled(final boolean isEnable) { 452 desc.setSplitEnabled(isEnable); 453 return this; 454 } 455 456 public TableDescriptorBuilder setMergeEnabled(final boolean isEnable) { 457 desc.setMergeEnabled(isEnable); 458 return this; 459 } 460 461 public TableDescriptorBuilder setDurability(Durability durability) { 462 desc.setDurability(durability); 463 return this; 464 } 465 466 public TableDescriptorBuilder setFlushPolicyClassName(String clazz) { 467 desc.setFlushPolicyClassName(clazz); 468 return this; 469 } 470 471 public TableDescriptorBuilder setMaxFileSize(long maxFileSize) { 472 desc.setMaxFileSize(maxFileSize); 473 return this; 474 } 475 476 public TableDescriptorBuilder setMemStoreFlushSize(long memstoreFlushSize) { 477 desc.setMemStoreFlushSize(memstoreFlushSize); 478 return this; 479 } 480 481 public TableDescriptorBuilder setNormalizerTargetRegionCount(final int regionCount) { 482 desc.setNormalizerTargetRegionCount(regionCount); 483 return this; 484 } 485 486 public TableDescriptorBuilder setNormalizerTargetRegionSize(final long regionSize) { 487 desc.setNormalizerTargetRegionSize(regionSize); 488 return this; 489 } 490 491 public TableDescriptorBuilder setNormalizationEnabled(final boolean isEnable) { 492 desc.setNormalizationEnabled(isEnable); 493 return this; 494 } 495 496 /** 497 * @deprecated since 2.0.0 and will be removed in 3.0.0. 498 * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a> 499 */ 500 @Deprecated 501 public TableDescriptorBuilder setOwner(User owner) { 502 desc.setOwner(owner); 503 return this; 504 } 505 506 /** 507 * @deprecated since 2.0.0 and will be removed in 3.0.0. 
508 * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a> 509 */ 510 @Deprecated 511 public TableDescriptorBuilder setOwnerString(String ownerString) { 512 desc.setOwnerString(ownerString); 513 return this; 514 } 515 516 public TableDescriptorBuilder setPriority(int priority) { 517 desc.setPriority(priority); 518 return this; 519 } 520 521 public TableDescriptorBuilder setReadOnly(final boolean readOnly) { 522 desc.setReadOnly(readOnly); 523 return this; 524 } 525 526 public TableDescriptorBuilder setRegionMemStoreReplication(boolean memstoreReplication) { 527 desc.setRegionMemStoreReplication(memstoreReplication); 528 return this; 529 } 530 531 public TableDescriptorBuilder setRegionReplication(int regionReplication) { 532 desc.setRegionReplication(regionReplication); 533 return this; 534 } 535 536 public TableDescriptorBuilder setRegionSplitPolicyClassName(String clazz) { 537 desc.setRegionSplitPolicyClassName(clazz); 538 return this; 539 } 540 541 public TableDescriptorBuilder setValue(final String key, final String value) { 542 desc.setValue(key, value); 543 return this; 544 } 545 546 public TableDescriptorBuilder setValue(final Bytes key, final Bytes value) { 547 desc.setValue(key, value); 548 return this; 549 } 550 551 public TableDescriptorBuilder setValue(final byte[] key, final byte[] value) { 552 desc.setValue(key, value); 553 return this; 554 } 555 556 /** 557 * Sets replication scope all & only the columns already in the builder. Columns added later won't 558 * be backfilled with replication scope. 
559 * @param scope replication scope 560 * @return a TableDescriptorBuilder 561 */ 562 public TableDescriptorBuilder setReplicationScope(int scope) { 563 Map<byte[], ColumnFamilyDescriptor> newFamilies = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR); 564 newFamilies.putAll(desc.families); 565 newFamilies 566 .forEach((cf, cfDesc) -> { 567 desc.removeColumnFamily(cf); 568 desc.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfDesc).setScope(scope) 569 .build()); 570 }); 571 return this; 572 } 573 574 public TableDescriptor build() { 575 return new ModifyableTableDescriptor(desc); 576 } 577 578 /** 579 * TODO: make this private after removing the HTableDescriptor 580 */ 581 @InterfaceAudience.Private 582 public static class ModifyableTableDescriptor 583 implements TableDescriptor, Comparable<ModifyableTableDescriptor> { 584 585 private final TableName name; 586 587 /** 588 * A map which holds the metadata information of the table. This metadata 589 * includes values like IS_META, SPLIT_POLICY, MAX_FILE_SIZE, 590 * READONLY, MEMSTORE_FLUSHSIZE etc... 591 */ 592 private final Map<Bytes, Bytes> values = new HashMap<>(); 593 594 /** 595 * Maps column family name to the respective FamilyDescriptors 596 */ 597 private final Map<byte[], ColumnFamilyDescriptor> families 598 = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR); 599 600 /** 601 * Construct a table descriptor specifying a TableName object 602 * 603 * @param name Table name. 604 * TODO: make this private after removing the HTableDescriptor 605 */ 606 @InterfaceAudience.Private 607 public ModifyableTableDescriptor(final TableName name) { 608 this(name, Collections.EMPTY_LIST, Collections.EMPTY_MAP); 609 } 610 611 private ModifyableTableDescriptor(final TableDescriptor desc) { 612 this(desc.getTableName(), Arrays.asList(desc.getColumnFamilies()), desc.getValues()); 613 } 614 615 /** 616 * Construct a table descriptor by cloning the descriptor passed as a 617 * parameter. 
     * <p>
     * Makes a deep copy of the supplied descriptor.
     * @param name The new name
     * @param desc The descriptor.
     * TODO: make this private after removing the HTableDescriptor
     */
    @InterfaceAudience.Private
    @Deprecated // only used by HTableDescriptor. remove this method if HTD is removed
    public ModifyableTableDescriptor(final TableName name, final TableDescriptor desc) {
      this(name, Arrays.asList(desc.getColumnFamilies()), desc.getValues());
    }

    // Shared delegate for all constructors: deep-copies each family via
    // ColumnFamilyDescriptorBuilder.copy, copies the values map, then stamps IS_META
    // from whether the table name equals hbase:meta.
    private ModifyableTableDescriptor(final TableName name, final Collection<ColumnFamilyDescriptor> families,
        Map<Bytes, Bytes> values) {
      this.name = name;
      families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c)));
      this.values.putAll(values);
      this.values.put(IS_META_KEY,
        new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME)))));
    }

    /**
     * Checks if this table is <code> hbase:meta </code> region.
     *
     * @return true if this table is <code> hbase:meta </code> region
     */
    @Override
    public boolean isMetaRegion() {
      return getOrDefault(IS_META_KEY, Boolean::valueOf, false);
    }

    /**
     * Checks if the table is a <code>hbase:meta</code> table
     *
     * @return true if table is <code> hbase:meta </code> region.
     */
    @Override
    public boolean isMetaTable() {
      return isMetaRegion();
    }

    @Override
    public Bytes getValue(Bytes key) {
      // Return a defensive copy so callers cannot mutate the stored value.
      Bytes rval = values.get(key);
      return rval == null ? null : new Bytes(rval.copyBytes());
    }

    @Override
    public String getValue(String key) {
      Bytes rval = values.get(new Bytes(Bytes.toBytes(key)));
      return rval == null ? null : Bytes.toString(rval.get(), rval.getOffset(), rval.getLength());
    }

    @Override
    public byte[] getValue(byte[] key) {
      // copyBytes() keeps the internal buffer from escaping.
      Bytes value = values.get(new Bytes(key));
      return value == null ? null : value.copyBytes();
    }

    // Looks up a metadata value and converts its string form with the supplied function;
    // returns defaultValue when the key is absent.
    private <T> T getOrDefault(Bytes key, Function<String, T> function, T defaultValue) {
      Bytes value = values.get(key);
      if (value == null) {
        return defaultValue;
      } else {
        return function.apply(Bytes.toString(value.get(), value.getOffset(), value.getLength()));
      }
    }

    /**
     * Getter for fetching an unmodifiable {@link #values} map.
     *
     * @return unmodifiable map {@link #values}.
     * @see #values
     */
    @Override
    public Map<Bytes, Bytes> getValues() {
      // shallow pointer copy
      return Collections.unmodifiableMap(values);
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     *
     * @param key The key.
     * @param value The value. If null, removes the setting.
     * @return the modifyable TD
     * @see #values
     */
    public ModifyableTableDescriptor setValue(byte[] key, byte[] value) {
      return setValue(toBytesOrNull(key, v -> v),
        toBytesOrNull(value, v -> v));
    }

    public ModifyableTableDescriptor setValue(String key, String value) {
      return setValue(toBytesOrNull(key, Bytes::toBytes),
        toBytesOrNull(value, Bytes::toBytes));
    }

    /**
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    private ModifyableTableDescriptor setValue(final Bytes key,
        final String value) {
      return setValue(key, toBytesOrNull(value, Bytes::toBytes));
    }

    /**
     * Setter for storing metadata as a (key, value) pair in {@link #values} map
     *
     * @param key The key.
     * @param value The value. If null, removes the setting.
     */
    public ModifyableTableDescriptor setValue(final Bytes key, final Bytes value) {
      // An empty value is treated like null: the setting is removed entirely.
      if (value == null || value.getLength() == 0) {
        values.remove(key);
      } else {
        values.put(key, value);
      }
      return this;
    }

    // Wraps t in a Bytes using converter f, or returns null when t is null.
    private static <T> Bytes toBytesOrNull(T t, Function<T, byte[]> f) {
      if (t == null) {
        return null;
      } else {
        return new Bytes(f.apply(t));
      }
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key Key whose key and value we're to remove from TableDescriptor
     * parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(Bytes key) {
      return setValue(key, (Bytes) null);
    }

    /**
     * Remove metadata represented by the key from the {@link #values} map
     *
     * @param key Key whose key and value we're to remove from TableDescriptor
     * parameters.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor removeValue(final byte[] key) {
      return removeValue(new Bytes(key));
    }

    /**
     * Check if the readOnly flag of the table is set. If the readOnly flag is
     * set then the contents of the table can only be read from but not
     * modified.
     *
     * @return true if all columns in the table should be read only
     */
    @Override
    public boolean isReadOnly() {
      return getOrDefault(READONLY_KEY, Boolean::valueOf, DEFAULT_READONLY);
    }

    /**
     * Setting the table as read only sets all the columns in the table as read
     * only. By default all tables are modifiable, but if the readOnly flag is
     * set to true then the contents of the table can only be read but not
     * modified.
     *
     * @param readOnly True if all of the columns in the table should be read
     * only.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setReadOnly(final boolean readOnly) {
      return setValue(READONLY_KEY, Boolean.toString(readOnly));
    }

    /**
     * Check if the compaction enable flag of the table is true. If flag is
     * false then no minor/major compactions will be done in real.
     *
     * @return true if table compaction enabled
     */
    @Override
    public boolean isCompactionEnabled() {
      return getOrDefault(COMPACTION_ENABLED_KEY, Boolean::valueOf, DEFAULT_COMPACTION_ENABLED);
    }

    /**
     * Setting the table compaction enable flag.
     *
     * @param isEnable True if enable compaction.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setCompactionEnabled(final boolean isEnable) {
      return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the split enable flag of the table is true. If flag is false then no split will be
     * done.
     *
     * @return true if table region split enabled
     */
    @Override
    public boolean isSplitEnabled() {
      return getOrDefault(SPLIT_ENABLED_KEY, Boolean::valueOf, DEFAULT_SPLIT_ENABLED);
    }

    /**
     * Setting the table region split enable flag.
     * @param isEnable True if enable region split.
     *
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) {
      return setValue(SPLIT_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if the region merge enable flag of the table is true. If flag is false then no merge
     * will be done.
     *
     * @return true if table region merge enabled
     */
    @Override
    public boolean isMergeEnabled() {
      return getOrDefault(MERGE_ENABLED_KEY, Boolean::valueOf, DEFAULT_MERGE_ENABLED);
    }

    /**
     * Setting the table region merge enable flag.
     * @param isEnable True if enable region merge.
     *
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) {
      return setValue(MERGE_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Check if normalization enable flag of the table is true. If flag is false
     * then the region normalizer won't attempt to normalize this table.
     *
     * @return true if region normalization is enabled for this table
     */
    @Override
    public boolean isNormalizationEnabled() {
      return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, DEFAULT_NORMALIZATION_ENABLED);
    }

    /**
     * Check if there is the target region count. If so, the normalize plan will be calculated based
     * on the target region count.
     * @return target region count after normalize done
     */
    @Override
    public int getNormalizerTargetRegionCount() {
      // -1 means "not configured".
      return getOrDefault(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer::valueOf,
        Integer.valueOf(-1));
    }

    /**
     * Check if there is the target region size. If so, the normalize plan will be calculated based
     * on the target region size.
     * @return target region size after normalize done
     */
    @Override
    public long getNormalizerTargetRegionSize() {
      // -1 means "not configured".
      return getOrDefault(NORMALIZER_TARGET_REGION_SIZE_KEY, Long::valueOf, Long.valueOf(-1));
    }

    /**
     * Setting the table normalization enable flag.
     *
     * @param isEnable True if enable normalization.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizationEnabled(final boolean isEnable) {
      return setValue(NORMALIZATION_ENABLED_KEY, Boolean.toString(isEnable));
    }

    /**
     * Setting the target region count of table normalization.
     * @param regionCount the target region count.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionCount(final int regionCount) {
      return setValue(NORMALIZER_TARGET_REGION_COUNT_KEY, Integer.toString(regionCount));
    }

    /**
     * Setting the target region size of table normalization.
     * @param regionSize the target region size.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setNormalizerTargetRegionSize(final long regionSize) {
      return setValue(NORMALIZER_TARGET_REGION_SIZE_KEY, Long.toString(regionSize));
    }

    /**
     * Sets the {@link Durability} setting for the table. This defaults to
     * Durability.USE_DEFAULT.
     *
     * @param durability enum value
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setDurability(Durability durability) {
      // Stored as the enum name; getDurability parses it back with Durability::valueOf.
      return setValue(DURABILITY_KEY, durability.name());
    }

    /**
     * Returns the durability setting for the table.
     *
     * @return durability setting for the table.
     */
    @Override
    public Durability getDurability() {
      return getOrDefault(DURABILITY_KEY, Durability::valueOf, DEFAULT_DURABLITY);
    }

    /**
     * Get the name of the table
     *
     * @return TableName
     */
    @Override
    public TableName getTableName() {
      return name;
    }

    /**
     * This sets the class associated with the region split policy which
     * determines when a region split should occur. The class used by default is
     * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     *
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setRegionSplitPolicyClassName(String clazz) {
      return setValue(SPLIT_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the region split policy which
     * determines when a region split should occur. The class used by default is
     * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
     *
     * @return the class name of the region split policy for this table. If this
     * returns null, the default split policy is used.
     */
    @Override
    public String getRegionSplitPolicyClassName() {
      return getOrDefault(SPLIT_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Returns the maximum size upto which a region can grow to after which a
     * region split is triggered. The region size is represented by the size of
     * the biggest store file in that region.
     *
     * @return max hregion size for table, -1 if not set.
     *
     * @see #setMaxFileSize(long)
     */
    @Override
    public long getMaxFileSize() {
      return getOrDefault(MAX_FILESIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Sets the maximum size upto which a region can grow to after which a
     * region split is triggered. The region size is represented by the size of
     * the biggest store file in that region, i.e. If the biggest store file
     * grows beyond the maxFileSize, then the region split is triggered. This
     * defaults to a value of 256 MB.
     * <p>
     * This is not an absolute value and might vary. Assume that a single row
     * exceeds the maxFileSize then the storeFileSize will be greater than
     * maxFileSize since a single row cannot be split across multiple regions
     * </p>
     *
     * @param maxFileSize The maximum file size that a store file can grow to
     * before a split is triggered.
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMaxFileSize(long maxFileSize) {
      return setValue(MAX_FILESIZE_KEY, Long.toString(maxFileSize));
    }

    /**
     * Returns the size of the memstore after which a flush to filesystem is
     * triggered.
     *
     * @return memory cache flush size for each hregion, -1 if not set.
     *
     * @see #setMemStoreFlushSize(long)
     */
    @Override
    public long getMemStoreFlushSize() {
      return getOrDefault(MEMSTORE_FLUSHSIZE_KEY, Long::valueOf, (long) -1);
    }

    /**
     * Represents the maximum size of the memstore after which the contents of
     * the memstore are flushed to the filesystem. This defaults to a size of 64
     * MB.
     *
     * @param memstoreFlushSize memory cache flush size for each hregion
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) {
      return setValue(MEMSTORE_FLUSHSIZE_KEY, Long.toString(memstoreFlushSize));
    }

    /**
     * This sets the class associated with the flush policy which determines
     * the stores need to be flushed when flushing a region. The
     * class used by default is defined in
     * org.apache.hadoop.hbase.regionserver.FlushPolicy.
     *
     * @param clazz the class name
     * @return the modifyable TD
     */
    public ModifyableTableDescriptor setFlushPolicyClassName(String clazz) {
      return setValue(FLUSH_POLICY_KEY, clazz);
    }

    /**
     * This gets the class associated with the flush policy which determines the
     * stores need to be flushed when flushing a region. The class used by
     * default is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy.
     *
     * @return the class name of the flush policy for this table. If this
     * returns null, the default flush policy is used.
     */
    @Override
    public String getFlushPolicyClassName() {
      return getOrDefault(FLUSH_POLICY_KEY, Function.identity(), null);
    }

    /**
     * Adds a column family. For the updating purpose please use
     * {@link #modifyColumnFamily(ColumnFamilyDescriptor)} instead.
     *
     * @param family to add.
1065 * @return the modifyable TD 1066 */ 1067 public ModifyableTableDescriptor setColumnFamily(final ColumnFamilyDescriptor family) { 1068 if (family.getName() == null || family.getName().length <= 0) { 1069 throw new IllegalArgumentException("Family name cannot be null or empty"); 1070 } 1071 int flength = family.getName() == null ? 0 : family.getName().length; 1072 if (flength > Byte.MAX_VALUE) { 1073 throw new IllegalArgumentException("The length of family name is bigger than " + Byte.MAX_VALUE); 1074 } 1075 if (hasColumnFamily(family.getName())) { 1076 throw new IllegalArgumentException("Family '" 1077 + family.getNameAsString() + "' already exists so cannot be added"); 1078 } 1079 return putColumnFamily(family); 1080 } 1081 1082 /** 1083 * Modifies the existing column family. 1084 * 1085 * @param family to update 1086 * @return this (for chained invocation) 1087 */ 1088 public ModifyableTableDescriptor modifyColumnFamily(final ColumnFamilyDescriptor family) { 1089 if (family.getName() == null || family.getName().length <= 0) { 1090 throw new IllegalArgumentException("Family name cannot be null or empty"); 1091 } 1092 if (!hasColumnFamily(family.getName())) { 1093 throw new IllegalArgumentException("Column family '" + family.getNameAsString() 1094 + "' does not exist"); 1095 } 1096 return putColumnFamily(family); 1097 } 1098 1099 private ModifyableTableDescriptor putColumnFamily(ColumnFamilyDescriptor family) { 1100 families.put(family.getName(), family); 1101 return this; 1102 } 1103 1104 /** 1105 * Checks to see if this table contains the given column family 1106 * 1107 * @param familyName Family name or column name. 1108 * @return true if the table contains the specified family name 1109 */ 1110 @Override 1111 public boolean hasColumnFamily(final byte[] familyName) { 1112 return families.containsKey(familyName); 1113 } 1114 1115 /** 1116 * @return Name of this table and then a map of all of the column family descriptors. 
1117 */ 1118 @Override 1119 public String toString() { 1120 StringBuilder s = new StringBuilder(); 1121 s.append('\'').append(Bytes.toString(name.getName())).append('\''); 1122 s.append(getValues(true)); 1123 families.values().forEach(f -> s.append(", ").append(f)); 1124 return s.toString(); 1125 } 1126 1127 /** 1128 * @return Name of this table and then a map of all of the column family 1129 * descriptors (with only the non-default column family attributes) 1130 */ 1131 @Override 1132 public String toStringCustomizedValues() { 1133 StringBuilder s = new StringBuilder(); 1134 s.append('\'').append(Bytes.toString(name.getName())).append('\''); 1135 s.append(getValues(false)); 1136 families.values().forEach(hcd -> s.append(", ").append(hcd.toStringCustomizedValues())); 1137 return s.toString(); 1138 } 1139 1140 /** 1141 * @return map of all table attributes formatted into string. 1142 */ 1143 public String toStringTableAttributes() { 1144 return getValues(true).toString(); 1145 } 1146 1147 private StringBuilder getValues(boolean printDefaults) { 1148 StringBuilder s = new StringBuilder(); 1149 1150 // step 1: set partitioning and pruning 1151 Set<Bytes> reservedKeys = new TreeSet<>(); 1152 Set<Bytes> userKeys = new TreeSet<>(); 1153 for (Map.Entry<Bytes, Bytes> entry : values.entrySet()) { 1154 if (entry.getKey() == null || entry.getKey().get() == null) { 1155 continue; 1156 } 1157 String key = Bytes.toString(entry.getKey().get()); 1158 // in this section, print out reserved keywords + coprocessor info 1159 if (!RESERVED_KEYWORDS.contains(entry.getKey()) && !key.startsWith("coprocessor$")) { 1160 userKeys.add(entry.getKey()); 1161 continue; 1162 } 1163 // only print out IS_META if true 1164 String value = Bytes.toString(entry.getValue().get()); 1165 if (key.equalsIgnoreCase(IS_META)) { 1166 if (Boolean.valueOf(value) == false) { 1167 continue; 1168 } 1169 } 1170 // see if a reserved key is a default value. 
may not want to print it out 1171 if (printDefaults 1172 || !DEFAULT_VALUES.containsKey(key) 1173 || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { 1174 reservedKeys.add(entry.getKey()); 1175 } 1176 } 1177 1178 // early exit optimization 1179 boolean hasAttributes = !reservedKeys.isEmpty() || !userKeys.isEmpty(); 1180 if (!hasAttributes) { 1181 return s; 1182 } 1183 1184 s.append(", {"); 1185 // step 2: printing attributes 1186 if (hasAttributes) { 1187 s.append("TABLE_ATTRIBUTES => {"); 1188 1189 // print all reserved keys first 1190 boolean printCommaForAttr = false; 1191 for (Bytes k : reservedKeys) { 1192 String key = Bytes.toString(k.get()); 1193 String value = Bytes.toStringBinary(values.get(k).get()); 1194 if (printCommaForAttr) { 1195 s.append(", "); 1196 } 1197 printCommaForAttr = true; 1198 s.append(key); 1199 s.append(" => "); 1200 s.append('\'').append(value).append('\''); 1201 } 1202 1203 if (!userKeys.isEmpty()) { 1204 // print all non-reserved as a separate subset 1205 if (printCommaForAttr) { 1206 s.append(", "); 1207 } 1208 s.append(HConstants.METADATA).append(" => "); 1209 s.append("{"); 1210 boolean printCommaForCfg = false; 1211 for (Bytes k : userKeys) { 1212 String key = Bytes.toString(k.get()); 1213 String value = Bytes.toStringBinary(values.get(k).get()); 1214 if (printCommaForCfg) { 1215 s.append(", "); 1216 } 1217 printCommaForCfg = true; 1218 s.append('\'').append(key).append('\''); 1219 s.append(" => "); 1220 s.append('\'').append(value).append('\''); 1221 } 1222 s.append("}"); 1223 } 1224 1225 s.append("}"); 1226 } 1227 1228 s.append("}"); // end METHOD 1229 return s; 1230 } 1231 1232 /** 1233 * Compare the contents of the descriptor with another one passed as a 1234 * parameter. Checks if the obj passed is an instance of ModifyableTableDescriptor, 1235 * if yes then the contents of the descriptors are compared. 
1236 * 1237 * @param obj The object to compare 1238 * @return true if the contents of the the two descriptors exactly match 1239 * 1240 * @see java.lang.Object#equals(java.lang.Object) 1241 */ 1242 @Override 1243 public boolean equals(Object obj) { 1244 if (this == obj) { 1245 return true; 1246 } 1247 if (obj instanceof ModifyableTableDescriptor) { 1248 return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0; 1249 } 1250 return false; 1251 } 1252 1253 /** 1254 * @return hash code 1255 */ 1256 @Override 1257 public int hashCode() { 1258 int result = this.name.hashCode(); 1259 if (this.families.size() > 0) { 1260 for (ColumnFamilyDescriptor e : this.families.values()) { 1261 result ^= e.hashCode(); 1262 } 1263 } 1264 result ^= values.hashCode(); 1265 return result; 1266 } 1267 1268 // Comparable 1269 /** 1270 * Compares the descriptor with another descriptor which is passed as a 1271 * parameter. This compares the content of the two descriptors and not the 1272 * reference. 1273 * 1274 * @param other The MTD to compare 1275 * @return 0 if the contents of the descriptors are exactly matching, 1 if 1276 * there is a mismatch in the contents 1277 */ 1278 @Override 1279 public int compareTo(final ModifyableTableDescriptor other) { 1280 return TableDescriptor.COMPARATOR.compare(this, other); 1281 } 1282 1283 @Override 1284 public ColumnFamilyDescriptor[] getColumnFamilies() { 1285 return families.values().toArray(new ColumnFamilyDescriptor[families.size()]); 1286 } 1287 1288 /** 1289 * Returns the configured replicas per region 1290 */ 1291 @Override 1292 public int getRegionReplication() { 1293 return getOrDefault(REGION_REPLICATION_KEY, Integer::valueOf, DEFAULT_REGION_REPLICATION); 1294 } 1295 1296 /** 1297 * Sets the number of replicas per region. 
1298 * 1299 * @param regionReplication the replication factor per region 1300 * @return the modifyable TD 1301 */ 1302 public ModifyableTableDescriptor setRegionReplication(int regionReplication) { 1303 return setValue(REGION_REPLICATION_KEY, Integer.toString(regionReplication)); 1304 } 1305 1306 /** 1307 * @return true if the read-replicas memstore replication is enabled. 1308 */ 1309 @Override 1310 public boolean hasRegionMemStoreReplication() { 1311 return getOrDefault(REGION_MEMSTORE_REPLICATION_KEY, Boolean::valueOf, DEFAULT_REGION_MEMSTORE_REPLICATION); 1312 } 1313 1314 /** 1315 * Enable or Disable the memstore replication from the primary region to the 1316 * replicas. The replication will be used only for meta operations (e.g. 1317 * flush, compaction, ...) 1318 * 1319 * @param memstoreReplication true if the new data written to the primary 1320 * region should be replicated. false if the secondaries can tollerate to 1321 * have new data only when the primary flushes the memstore. 1322 * @return the modifyable TD 1323 */ 1324 public ModifyableTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) { 1325 setValue(REGION_MEMSTORE_REPLICATION_KEY, Boolean.toString(memstoreReplication)); 1326 // If the memstore replication is setup, we do not have to wait for observing a flush event 1327 // from primary before starting to serve reads, because gaps from replication is not applicable 1328 return setValue(REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, 1329 Boolean.toString(memstoreReplication)); 1330 } 1331 1332 public ModifyableTableDescriptor setPriority(int priority) { 1333 return setValue(PRIORITY_KEY, Integer.toString(priority)); 1334 } 1335 1336 @Override 1337 public int getPriority() { 1338 return getOrDefault(PRIORITY_KEY, Integer::valueOf, DEFAULT_PRIORITY); 1339 } 1340 1341 /** 1342 * Returns all the column family names of the current table. The map of 1343 * TableDescriptor contains mapping of family name to ColumnFamilyDescriptor. 
1344 * This returns all the keys of the family map which represents the column 1345 * family names of the table. 1346 * 1347 * @return Immutable sorted set of the keys of the families. 1348 */ 1349 @Override 1350 public Set<byte[]> getColumnFamilyNames() { 1351 return Collections.unmodifiableSet(this.families.keySet()); 1352 } 1353 1354 /** 1355 * Returns the ColumnFamilyDescriptor for a specific column family with name as 1356 * specified by the parameter column. 1357 * 1358 * @param column Column family name 1359 * @return Column descriptor for the passed family name or the family on 1360 * passed in column. 1361 */ 1362 @Override 1363 public ColumnFamilyDescriptor getColumnFamily(final byte[] column) { 1364 return this.families.get(column); 1365 } 1366 1367 /** 1368 * Removes the ColumnFamilyDescriptor with name specified by the parameter column 1369 * from the table descriptor 1370 * 1371 * @param column Name of the column family to be removed. 1372 * @return Column descriptor for the passed family name or the family on 1373 * passed in column. 1374 */ 1375 public ColumnFamilyDescriptor removeColumnFamily(final byte[] column) { 1376 return this.families.remove(column); 1377 } 1378 1379 /** 1380 * Add a table coprocessor to this table. The coprocessor type must be 1381 * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't 1382 * check if the class can be loaded or not. Whether a coprocessor is 1383 * loadable or not will be determined when a region is opened. 1384 * 1385 * @param className Full class name. 1386 * @throws IOException 1387 * @return the modifyable TD 1388 */ 1389 public ModifyableTableDescriptor setCoprocessor(String className) throws IOException { 1390 return setCoprocessor( 1391 CoprocessorDescriptorBuilder.newBuilder(className).setPriority(Coprocessor.PRIORITY_USER) 1392 .build()); 1393 } 1394 1395 /** 1396 * Add a table coprocessor to this table. 
The coprocessor type must be 1397 * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't 1398 * check if the class can be loaded or not. Whether a coprocessor is 1399 * loadable or not will be determined when a region is opened. 1400 * 1401 * @throws IOException any illegal parameter key/value 1402 * @return the modifyable TD 1403 */ 1404 public ModifyableTableDescriptor setCoprocessor(CoprocessorDescriptor cp) 1405 throws IOException { 1406 checkHasCoprocessor(cp.getClassName()); 1407 if (cp.getPriority() < 0) { 1408 throw new IOException("Priority must be bigger than or equal with zero, current:" 1409 + cp.getPriority()); 1410 } 1411 // Validate parameter kvs and then add key/values to kvString. 1412 StringBuilder kvString = new StringBuilder(); 1413 for (Map.Entry<String, String> e : cp.getProperties().entrySet()) { 1414 if (!e.getKey().matches(CP_HTD_ATTR_VALUE_PARAM_KEY_PATTERN)) { 1415 throw new IOException("Illegal parameter key = " + e.getKey()); 1416 } 1417 if (!e.getValue().matches(CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN)) { 1418 throw new IOException("Illegal parameter (" + e.getKey() 1419 + ") value = " + e.getValue()); 1420 } 1421 if (kvString.length() != 0) { 1422 kvString.append(','); 1423 } 1424 kvString.append(e.getKey()); 1425 kvString.append('='); 1426 kvString.append(e.getValue()); 1427 } 1428 1429 String value = cp.getJarPath().orElse("") 1430 + "|" + cp.getClassName() + "|" + Integer.toString(cp.getPriority()) + "|" 1431 + kvString.toString(); 1432 return setCoprocessorToMap(value); 1433 } 1434 1435 /** 1436 * Add a table coprocessor to this table. The coprocessor type must be 1437 * org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't 1438 * check if the class can be loaded or not. Whether a coprocessor is 1439 * loadable or not will be determined when a region is opened. 
     *
     * @param specStr The Coprocessor specification all in one String
     * @throws IOException if the spec cannot be parsed or the coprocessor already exists
     * @return the modifyable TD
     * @deprecated used by HTableDescriptor and admin.rb.
     *             As of release 2.0.0, this will be removed in HBase 3.0.0.
     */
    @Deprecated
    public ModifyableTableDescriptor setCoprocessorWithSpec(final String specStr)
      throws IOException {
      CoprocessorDescriptor cpDesc = toCoprocessorDescriptor(specStr).orElseThrow(
        () -> new IllegalArgumentException(
          "Format does not match " + CP_HTD_ATTR_VALUE_PATTERN + ": " + specStr));
      checkHasCoprocessor(cpDesc.getClassName());
      // store the raw spec string, not the parsed descriptor
      return setCoprocessorToMap(specStr);
    }

    // Throws if a coprocessor with this class name is already attached to the table.
    private void checkHasCoprocessor(final String className) throws IOException {
      if (hasCoprocessor(className)) {
        throw new IOException("Coprocessor " + className + " already exists.");
      }
    }

    /**
     * Add coprocessor to values Map
     * @param specStr The Coprocessor specification all in one String
     * @return Returns <code>this</code>
     */
    private ModifyableTableDescriptor setCoprocessorToMap(final String specStr) {
      if (specStr == null) {
        return this;
      }
      // generate a coprocessor key: one past the highest existing "coprocessor$N" index
      int maxCoprocessorNumber = 0;
      Matcher keyMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values
        .entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
          .getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        maxCoprocessorNumber = Math.max(Integer.parseInt(keyMatcher.group(1)),
          maxCoprocessorNumber);
      }
      maxCoprocessorNumber++;
      String key = "coprocessor$" + Integer.toString(maxCoprocessorNumber);
      return setValue(new Bytes(Bytes.toBytes(key)), new Bytes(Bytes.toBytes(specStr)));
    }

    /**
     * Check if the table has an attached co-processor represented by the name
     * className
     *
     * @param classNameToMatch - Class name of the co-processor
     * @return true if the table has a co-processor className
     */
    @Override
    public boolean hasCoprocessor(String classNameToMatch) {
      return getCoprocessorDescriptors().stream().anyMatch(cp -> cp.getClassName()
        .equals(classNameToMatch));
    }

    /**
     * Return the list of attached co-processor represented by their name
     * className
     *
     * @return The list of co-processors classNames
     */
    @Override
    public List<CoprocessorDescriptor> getCoprocessorDescriptors() {
      List<CoprocessorDescriptor> result = new ArrayList<>();
      for (Map.Entry<Bytes, Bytes> e: getValues().entrySet()) {
        String key = Bytes.toString(e.getKey().get()).trim();
        if (CP_HTD_ATTR_KEY_PATTERN.matcher(key).matches()) {
          // the value holds the "path|class|priority|kvs" spec; unparseable specs are skipped
          toCoprocessorDescriptor(Bytes.toString(e.getValue().get()).trim())
            .ifPresent(result::add);
        }
      }
      return result;
    }

    /**
     * Remove a coprocessor from those set on the table
     *
     * @param className Class name of the co-processor
     */
    public void removeCoprocessor(String className) {
      Bytes match = null;
      Matcher keyMatcher;
      Matcher valueMatcher;
      for (Map.Entry<Bytes, Bytes> e : this.values
        .entrySet()) {
        keyMatcher = CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e
          .getKey().get()));
        if (!keyMatcher.matches()) {
          continue;
        }
        valueMatcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(Bytes
          .toString(e.getValue().get()));
        if (!valueMatcher.matches()) {
          continue;
        }
        // get className and compare
        String clazz = valueMatcher.group(2).trim(); // classname is the 2nd field
        // remove the CP if it is present
        if (clazz.equals(className.trim())) {
          match = e.getKey();
          break;
        }
      }
      // if we found a match, remove it
      if (match != null) {
        ModifyableTableDescriptor.this.removeValue(match);
      }
    }

    /**
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    @Deprecated
    public ModifyableTableDescriptor setOwner(User owner) {
      return setOwnerString(owner != null ? owner.getShortName() : null);
    }

    /**
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    // used by admin.rb:alter(table_name,*args) to update owner.
    @Deprecated
    public ModifyableTableDescriptor setOwnerString(String ownerString) {
      return setValue(OWNER_KEY, ownerString);
    }

    /**
     * @deprecated since 2.0.0 and will be removed in 3.0.0.
     * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
     */
    @Override
    @Deprecated
    public String getOwnerString() {
      // Note that every table should have an owner (i.e. should have OWNER_KEY set).
      // hbase:meta should return system user as owner, not null (see
      // MasterFileSystem.java:bootstrap()).
      return getOrDefault(OWNER_KEY, Function.identity(), null);
    }

    /**
     * @return the bytes in pb format
     */
    private byte[] toByteArray() {
      return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
    }

    /**
     * @param bytes A pb serialized {@link ModifyableTableDescriptor} instance
     *              with pb magic prefix
     * @return An instance of {@link ModifyableTableDescriptor} made from
     *         <code>bytes</code>
     * @throws DeserializationException if the magic prefix is missing or the payload
     *           cannot be parsed
     * @see #toByteArray()
     */
    private static TableDescriptor parseFrom(final byte[] bytes)
      throws DeserializationException {
      if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
        throw new DeserializationException("Expected PB encoded ModifyableTableDescriptor");
      }
      int pblen = ProtobufUtil.lengthOfPBMagic();
      HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
      try {
        // parse only the bytes after the magic prefix
        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
        return ProtobufUtil.toTableDescriptor(builder.build());
      } catch (IOException e) {
        throw new DeserializationException(e);
      }
    }

    /**
     * @return the number of column families configured on this table
     */
    @Override
    public int getColumnFamilyCount() {
      return families.size();
    }
  }

  /**
   * Parses a coprocessor specification string of the form
   * "jar-path|class-name|priority|key1=value1,key2=value2" into a descriptor.
   * Returns empty when the spec does not match the expected pattern or the class
   * name is missing.
   */
  private static Optional<CoprocessorDescriptor> toCoprocessorDescriptor(String spec) {
    Matcher matcher = CP_HTD_ATTR_VALUE_PATTERN.matcher(spec);
    if (matcher.matches()) {
      // jar file path can be empty if the cp class can be loaded
      // from class loader.
      String path = matcher.group(1).trim().isEmpty() ?
        null : matcher.group(1).trim();
      String className = matcher.group(2).trim();
      if (className.isEmpty()) {
        return Optional.empty();
      }
      String priorityStr = matcher.group(3).trim();
      // missing priority defaults to the user-level coprocessor priority
      int priority = priorityStr.isEmpty() ?
        Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr);
      String cfgSpec = null;
      try {
        cfgSpec = matcher.group(4);
      } catch (IndexOutOfBoundsException ex) {
        // ignore: defensive guard in case the pattern has fewer capture groups
      }
      Map<String, String> ourConf = new TreeMap<>();
      if (cfgSpec != null && !cfgSpec.trim().equals("|")) {
        // strip the leading '|' separator before parsing the k=v pairs
        cfgSpec = cfgSpec.substring(cfgSpec.indexOf('|') + 1);
        Matcher m = CP_HTD_ATTR_VALUE_PARAM_PATTERN.matcher(cfgSpec);
        while (m.find()) {
          ourConf.put(m.group(1), m.group(2));
        }
      }
      return Optional.of(CoprocessorDescriptorBuilder.newBuilder(className)
        .setJarPath(path)
        .setPriority(priority)
        .setProperties(ourConf)
        .build());
    }
    return Optional.empty();
  }
}