/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.Iterator;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * TableDescriptor describes an HBase table: the descriptors of all of its
 * column families, whether it is a catalog table such as
 * <code> hbase:meta </code>, whether it is read only, memstore flush sizing,
 * region split behavior, any attached coprocessors, and so on.
 */
@InterfaceAudience.Public
public interface TableDescriptor {

  @InterfaceAudience.Private
  Comparator<TableDescriptor> COMPARATOR = getComparator(ColumnFamilyDescriptor.COMPARATOR);

  @InterfaceAudience.Private
  Comparator<TableDescriptor> COMPARATOR_IGNORE_REPLICATION =
    getComparator(ColumnFamilyDescriptor.COMPARATOR_IGNORE_REPLICATION);

  /**
   * Build a {@link TableDescriptor} comparator that orders first by table name,
   * then by column family count, then family-by-family using the supplied
   * comparator, and finally (as an arbitrary but deterministic tiebreak) by
   * the hash code of the descriptor's value map.
   *
   * @param cfComparator comparator used for the per-family comparison step
   * @return a comparator over {@link TableDescriptor}s
   */
  static Comparator<TableDescriptor>
      getComparator(Comparator<ColumnFamilyDescriptor> cfComparator) {
    return (TableDescriptor left, TableDescriptor right) -> {
      int diff = left.getTableName().compareTo(right.getTableName());
      if (diff != 0) {
        return diff;
      }
      ColumnFamilyDescriptor[] leftFamilies = left.getColumnFamilies();
      ColumnFamilyDescriptor[] rightFamilies = right.getColumnFamilies();
      diff = Integer.compare(leftFamilies.length, rightFamilies.length);
      if (diff != 0) {
        return diff;
      }

      // Same number of families: compare them pairwise in order.
      for (int i = 0; i < leftFamilies.length; i++) {
        diff = cfComparator.compare(leftFamilies[i], rightFamilies[i]);
        if (diff != 0) {
          return diff;
        }
      }
      // punt on comparison for ordering, just calculate difference
      return Integer.compare(left.getValues().hashCode(), right.getValues().hashCode());
    };
  }

  /**
   * Returns the count of the column families of the table.
   *
   * @return Count of column families of the table
   */
  int getColumnFamilyCount();

  /**
   * Return the list of attached co-processor represented
   *
   * @return The list of CoprocessorDescriptor
   */
  Collection<CoprocessorDescriptor> getCoprocessorDescriptors();

  /**
   * Return the class names of all attached co-processors.
   *
   * @return The list of co-processors classNames
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #getCoprocessorDescriptors()} instead
   */
  @Deprecated
  default Collection<String> getCoprocessors() {
    return getCoprocessorDescriptors().stream()
      .map(cp -> cp.getClassName())
      .collect(Collectors.toList());
  }

  /**
   * Returns the durability setting for the table.
   *
   * @return durability setting for the table.
   */
  Durability getDurability();

  /**
   * Returns the {@link ColumnFamilyDescriptor} of every column family of the
   * table.
   *
   * @return An array of {@link ColumnFamilyDescriptor} of all the column
   *         families.
   */
  ColumnFamilyDescriptor[] getColumnFamilies();

  /**
   * Returns all the column family names of the current table. The map of
   * TableDescriptor contains mapping of family name to ColumnDescriptor.
   * This returns all the keys of the family map which represents the column
   * family names of the table.
   *
   * @return Immutable sorted set of the keys of the families.
   */
  Set<byte[]> getColumnFamilyNames();

  /**
   * Returns the ColumnDescriptor for a specific column family with name as
   * specified by the parameter column.
   *
   * @param name Column family name
   * @return Column descriptor for the passed family name or the family on
   *         passed in column.
   */
  ColumnFamilyDescriptor getColumnFamily(final byte[] name);

  /**
   * This gets the class associated with the flush policy which determines the
   * stores need to be flushed when flushing a region. The class used by default
   * is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy.
   *
   * @return the class name of the flush policy for this table. If this returns
   *         null, the default flush policy is used.
   */
  String getFlushPolicyClassName();

  /**
   * Returns the maximum size upto which a region can grow to after which a
   * region split is triggered. The region size is represented by the size of
   * the biggest store file in that region.
   *
   * @return max hregion size for table, -1 if not set.
   */
  long getMaxFileSize();

  /**
   * Returns the size of the memstore after which a flush to filesystem is
   * triggered.
   *
   * @return memory cache flush size for each hregion, -1 if not set.
   */
  long getMemStoreFlushSize();

  // TODO: Currently this is used RPC scheduling only. Make it more generic than this; allow it
  // to also be priority when scheduling procedures that pertain to this table scheduling first
  // those tables with the highest priority (From Yi Liang over on HBASE-18109).
  int getPriority();

  /**
   * @return Returns the configured replicas per region
   */
  int getRegionReplication();

  /**
   * This gets the class associated with the region split policy which
   * determines when a region split should occur. The class used by default is
   * defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
   *
   * @return the class name of the region split policy for this table. If this
   *         returns null, the default split policy is used.
   */
  String getRegionSplitPolicyClassName();

  /**
   * Get the name of the table
   *
   * @return TableName
   */
  TableName getTableName();

  /**
   * @deprecated since 2.0.0 and will be removed in 3.0.0.
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-15583">HBASE-15583</a>
   */
  @Deprecated
  String getOwnerString();

  /**
   * Getter for accessing the metadata associated with the key.
   *
   * @param key The key.
   * @return A clone value. Null if no mapping for the key
   */
  Bytes getValue(Bytes key);

  /**
   * Getter for accessing the metadata associated with the key.
   *
   * @param key The key.
   * @return A clone value. Null if no mapping for the key
   */
  byte[] getValue(byte[] key);

  /**
   * Getter for accessing the metadata associated with the key.
   *
   * @param key The key.
   * @return Null if no mapping for the key
   */
  String getValue(String key);

  /**
   * @return Getter for fetching an unmodifiable map.
   */
  Map<Bytes, Bytes> getValues();

  /**
   * Check if the table has an attached co-processor represented by the name
   * className
   *
   * @param classNameToMatch - Class name of the co-processor
   * @return true of the table has a co-processor className
   */
  boolean hasCoprocessor(String classNameToMatch);

  /**
   * Checks to see if this table contains the given column family
   *
   * @param name Family name or column name.
   * @return true if the table contains the specified family name
   */
  boolean hasColumnFamily(final byte[] name);

  /**
   * @return true if the read-replicas memstore replication is enabled.
   */
  boolean hasRegionMemStoreReplication();

  /**
   * Check if the compaction enable flag of the table is true. If flag is false
   * then no minor/major compactions will be done in real.
   *
   * @return true if table compaction enabled
   */
  boolean isCompactionEnabled();

  /**
   * Check if the split enable flag of the table is true. If flag is false
   * then no region split will be done.
   *
   * @return true if table region split enabled
   */
  boolean isSplitEnabled();

  /**
   * Check if the merge enable flag of the table is true. If flag is false
   * then no region merge will be done.
   *
   * @return true if table region merge enabled
   */
  boolean isMergeEnabled();

  /**
   * Checks if this table is <code> hbase:meta </code> region.
   *
   * @return true if this table is <code> hbase:meta </code> region
   */
  boolean isMetaRegion();

  /**
   * Checks if the table is a <code>hbase:meta</code> table
   *
   * @return true if table is <code> hbase:meta </code> region.
   */
  boolean isMetaTable();

  /**
   * Check if normalization enable flag of the table is true. If flag is false
   * then region normalizer won't attempt to normalize this table.
   *
   * @return true if region normalization is enabled for this table
   */
  boolean isNormalizationEnabled();

  /**
   * Check if there is the target region count. If so, the normalize plan will
   * be calculated based on the target region count.
   *
   * @return target region count after normalize done
   */
  int getNormalizerTargetRegionCount();

  /**
   * Check if there is the target region size. If so, the normalize plan will
   * be calculated based on the target region size.
   *
   * @return target region size after normalize done
   */
  long getNormalizerTargetRegionSize();

  /**
   * Check if the readOnly flag of the table is set. If the readOnly flag is set
   * then the contents of the table can only be read from but not modified.
   *
   * @return true if all columns in the table should be read only
   */
  boolean isReadOnly();

  /**
   * @return Name of this table and then a map of all of the column family descriptors (with only
   *         the non-default column family attributes)
   */
  String toStringCustomizedValues();

  /**
   * Check if any of the table's cfs' replication scope are set to
   * {@link HConstants#REPLICATION_SCOPE_GLOBAL}.
   * @return {@code true} if we have, otherwise {@code false}.
   */
  default boolean hasGlobalReplicationScope() {
    for (ColumnFamilyDescriptor family : getColumnFamilies()) {
      if (family.getScope() == HConstants.REPLICATION_SCOPE_GLOBAL) {
        return true;
      }
    }
    return false;
  }

  /**
   * Check if the table's cfs' replication scope matched with the replication state
   * @param enabled replication state
   * @return true if matched, otherwise false
   */
  default boolean matchReplicationScope(boolean enabled) {
    boolean sawGlobal = false;
    boolean sawLocal = false;

    // Classify every family as globally-scoped or not.
    for (ColumnFamilyDescriptor family : getColumnFamilies()) {
      if (family.getScope() == HConstants.REPLICATION_SCOPE_GLOBAL) {
        sawGlobal = true;
      } else {
        sawLocal = true;
      }
    }

    // A mix of scopes can never match a single replication state.
    if (sawGlobal && sawLocal) {
      return false;
    }
    // All-global matches enabled; all-local (or no families) matches disabled.
    return sawGlobal ? enabled : !enabled;
  }

  /**
   * Get the region server group this table belongs to. The regions of this table will be placed
   * only on the region servers within this group. If not present, will be placed on
   * {@link org.apache.hadoop.hbase.rsgroup.RSGroupInfo#DEFAULT_GROUP}.
   */
  Optional<String> getRegionServerGroup();
}