diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java index 20884edf836e..ec5a04a63a05 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.client.Connection; @@ -169,7 +170,7 @@ private List getLogFilesForNewBackup(Map olderTimestamps, LOG.debug("currentLogFile: " + log.getPath().toString()); if (AbstractFSWALProvider.isMetaFile(log.getPath())) { if (LOG.isDebugEnabled()) { - LOG.debug("Skip hbase:meta log file: " + log.getPath().getName()); + LOG.debug("Skip {} log file: {}", MetaTableName.getInstance(), log.getPath().getName()); } continue; } diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java index 6c021bf622a5..d316075979a1 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java @@ -36,8 +36,8 @@ import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import 
org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; @@ -131,7 +131,7 @@ public static void updateMetaWithFavoredNodesInfo( puts.add(put); } } - try (Table table = connection.getTable(TableName.META_TABLE_NAME)) { + try (Table table = connection.getTable(MetaTableName.getInstance())) { table.put(puts); } LOG.info("Added " + puts.size() + " region favored nodes in META"); diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java index 02c18c73bfb5..e54ce7478e99 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.CatalogFamilyFormat; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -170,9 +171,10 @@ private void processMetaRecord(Result result) throws IOException { * Initialize the region assignment snapshot by scanning the hbase:meta table */ public void initialize() throws IOException { - LOG.info("Start to scan the hbase:meta for the current region assignment " + "snappshot"); + LOG.info("Start to scan {} for the current region assignment snapshot", + MetaTableName.getInstance()); // Scan hbase:meta to pick up user regions - try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME); + try (Table metaTable = connection.getTable(MetaTableName.getInstance()); ResultScanner scanner = metaTable.getScanner(HConstants.CATALOG_FAMILY)) { for (;;) { Result result = scanner.next(); @@ -187,7 +189,8 @@ public void initialize() throws 
IOException { } } } - LOG.info("Finished to scan the hbase:meta for the current region assignment" + "snapshot"); + LOG.info("Finished scanning {} for the current region assignment snapshot", + MetaTableName.getInstance()); } private void addRegion(RegionInfo regionInfo) { diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java index 3548571286c0..08ea314ae337 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java @@ -27,6 +27,7 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -68,7 +69,7 @@ public static void setup() { // Create regions List allRegions = new ArrayList<>(); for (int i = 0; i < NUM_REGIONS; i++) { - TableName tableName = i < 3 ? TableName.META_TABLE_NAME : NON_META_TABLE_NAME; + TableName tableName = i < 3 ? 
MetaTableName.getInstance() : NON_META_TABLE_NAME; byte[] startKey = new byte[1]; startKey[0] = (byte) i; byte[] endKey = new byte[1]; @@ -95,7 +96,7 @@ public void testMetaTableIsolation() { } private boolean isMetaTableIsolated(BalancerClusterState cluster) { - return isTableIsolated(cluster, TableName.META_TABLE_NAME, "Meta"); + return isTableIsolated(cluster, MetaTableName.getInstance(), "Meta"); } } diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java index 0ea739faf78b..9f552debf324 100644 --- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java +++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java @@ -28,6 +28,7 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -72,7 +73,7 @@ public static void setup() { for (int i = 0; i < NUM_REGIONS; i++) { TableName tableName; if (i < 1) { - tableName = TableName.META_TABLE_NAME; + tableName = MetaTableName.getInstance(); } else if (i < 10) { tableName = SYSTEM_TABLE_NAME; } else { @@ -116,7 +117,7 @@ public void testTableIsolationAndReplicaDistribution() { * Validates whether all meta table regions are isolated. 
*/ private boolean isMetaTableIsolated(BalancerClusterState cluster) { - return isTableIsolated(cluster, TableName.META_TABLE_NAME, "Meta"); + return isTableIsolated(cluster, MetaTableName.getInstance(), "Meta"); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java index 42bfd757e0d1..bf8513ee959d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java @@ -196,7 +196,7 @@ private static CompletableFuture>> getTableReg final AsyncTable metaTable, final TableName tableName, final boolean excludeOfflinedSplitParents) { CompletableFuture>> future = new CompletableFuture<>(); - if (TableName.META_TABLE_NAME.equals(tableName)) { + if (MetaTableName.getInstance().equals(tableName)) { future.completeExceptionally(new IOException( "This method can't be used to locate meta regions;" + " use MetaTableLocator instead")); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java index e26fb837b89d..2ef315d2b4ac 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java @@ -22,7 +22,6 @@ import static org.apache.hadoop.hbase.HConstants.NINES; import static org.apache.hadoop.hbase.HConstants.USE_META_REPLICAS; import static org.apache.hadoop.hbase.HConstants.ZEROES; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.createRegionLocations; import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.isGood; import static 
org.apache.hadoop.hbase.client.ConnectionConfiguration.HBASE_CLIENT_META_CACHE_INVALIDATE_INTERVAL; @@ -52,6 +51,7 @@ import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -238,14 +238,15 @@ private boolean tryComplete(LocateRequest req, CompletableFuture { + .createSelector(replicaSelectorClass, MetaTableName.getInstance(), conn, () -> { int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; try { RegionLocations metaLocations = conn.registry.getMetaRegionLocations() .get(conn.connConf.getMetaReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); numOfReplicas = metaLocations.size(); } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + LOG.error("Failed to get table {}'s region replication, ", + MetaTableName.getInstance(), e); } return numOfReplicas; }); @@ -427,7 +428,7 @@ private void locateInMeta(TableName tableName, LocateRequest req) { // do nothing } - conn.getTable(META_TABLE_NAME).scan(scan, new AdvancedScanResultConsumer() { + conn.getTable(MetaTableName.getInstance()).scan(scan, new AdvancedScanResultConsumer() { private boolean completeNormally = false; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java index 0e872a5b21da..cc10308b6327 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static 
org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.REGION_NAMES_KEY; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.SERVER_NAME_KEY; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; @@ -36,6 +35,7 @@ import java.util.function.Supplier; import java.util.stream.Collectors; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -217,7 +217,7 @@ void clearCache(TableName tableName) { new TableSpanBuilder(conn).setName("AsyncRegionLocator.clearCache").setTableName(tableName); TraceUtil.trace(() -> { LOG.debug("Clear meta cache for {}", tableName); - if (tableName.equals(META_TABLE_NAME)) { + if (tableName.equals(MetaTableName.getInstance())) { metaRegionLocator.clearCache(); } else { nonMetaRegionLocator.clearCache(tableName); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java index b7ec7fcd8725..32349a64651d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java @@ -25,6 +25,7 @@ import java.util.concurrent.CompletableFuture; import org.apache.hadoop.hbase.ClientMetaTableAccessor; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -63,7 +64,7 @@ public CompletableFuture> getAllRegionLocations() { .thenApply(locs -> Arrays.asList(locs.getRegionLocations())); } CompletableFuture> future = ClientMetaTableAccessor - .getTableHRegionLocations(conn.getTable(TableName.META_TABLE_NAME), tableName); + 
.getTableHRegionLocations(conn.getTable(MetaTableName.getInstance()), tableName); addListener(future, (locs, error) -> locs.forEach(loc -> { // the cache assumes that all locations have a serverName. only add if that's true if (loc.getServerName() != null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java index d6d8e00f7822..5e629839782f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -120,7 +121,7 @@ private static int checkReplicaId(int regionId) { this.replicaId = checkReplicaId(replicaId); this.offLine = offLine; this.regionName = RegionInfo.createRegionName(this.tableName, this.startKey, this.regionId, - this.replicaId, !this.tableName.equals(TableName.META_TABLE_NAME)); + this.replicaId, !this.tableName.equals(MetaTableName.getInstance())); this.encodedName = RegionInfo.encodeRegionName(this.regionName); this.hashCode = generateHashCode(this.tableName, this.startKey, this.endKey, this.regionId, this.replicaId, this.offLine, this.regionName); @@ -232,7 +233,7 @@ public boolean containsRow(byte[] row) { /** Returns true if this region is a meta region */ @Override public boolean isMetaRegion() { - return tableName.equals(TableName.META_TABLE_NAME); + return tableName.equals(MetaTableName.getInstance()); } /** Returns True if has been split and has daughters. 
*/ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 83780a4a1219..b9228c66394e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.HConstants.HIGH_QOS; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; import static org.apache.hadoop.hbase.util.FutureUtils.unwrapCompletionException; @@ -57,6 +56,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.RegionMetrics; @@ -403,7 +403,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { AsyncAdminBuilderBase builder) { this.connection = connection; this.retryTimer = retryTimer; - this.metaTable = connection.getTable(META_TABLE_NAME); + this.metaTable = connection.getTable(MetaTableName.getInstance()); this.rpcTimeoutNs = builder.rpcTimeoutNs; this.operationTimeoutNs = builder.operationTimeoutNs; this.pauseNs = builder.pauseNs; @@ -995,7 +995,7 @@ List> adminCall(controller, stub, @Override public CompletableFuture> getRegions(TableName tableName) { - if (tableName.equals(META_TABLE_NAME)) { + if (tableName.equals(MetaTableName.getInstance())) { return connection.registry.getMetaRegionLocations() .thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion) .collect(Collectors.toList())); @@ -1286,7 +1286,7 @@ private CompletableFuture compactRegion(byte[] regionName, byte[] columnFa * 
List all region locations for the specific table. */ private CompletableFuture> getTableHRegionLocations(TableName tableName) { - if (TableName.META_TABLE_NAME.equals(tableName)) { + if (MetaTableName.getInstance().equals(tableName)) { CompletableFuture> future = new CompletableFuture<>(); addListener(connection.registry.getMetaRegionLocations(), (metaRegions, err) -> { if (err != null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index 10c554e26f79..bc3b48a54a28 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.stream.Collectors; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.util.ByteArrayHashKey; @@ -431,7 +432,7 @@ static byte[] toByteArray(RegionInfo ri) { */ static String prettyPrint(final String encodedRegionName) { if (encodedRegionName.equals("1028785192")) { - return encodedRegionName + "/hbase:meta"; + return encodedRegionName + "/" + MetaTableName.getInstance(); } return encodedRegionName; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java index ef927fd3a55b..1c2aab455b55 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; @@ -43,7 +44,7 @@ public class RegionInfoBuilder { // TODO: How come Meta regions still do not have encoded region names? Fix. // hbase:meta,,1.1588230740 should be the hbase:meta first region name. public static final RegionInfo FIRST_META_REGIONINFO = - new MutableRegionInfo(1L, TableName.META_TABLE_NAME, RegionInfo.DEFAULT_REPLICA_ID); + new MutableRegionInfo(1L, MetaTableName.getInstance(), RegionInfo.DEFAULT_REPLICA_ID); private final TableName tableName; private byte[] startKey = HConstants.EMPTY_START_ROW; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java index 3f353b5799d4..b8288e709cb7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java @@ -20,7 +20,7 @@ import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; @@ -82,7 +82,7 @@ public static String getRegionNameAsStringForDisplay(RegionInfo ri, Configuratio */ public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) { boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true); - if (displayKey || ri.getTable().equals(TableName.META_TABLE_NAME)) { + if (displayKey || ri.getTable().equals(MetaTableName.getInstance())) { return ri.getRegionName(); } else { // create a modified regionname with the startkey replaced but preserving diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index eed1a40a2c2f..a1b766696250 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -38,6 +38,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.exceptions.HBaseException; @@ -616,7 +617,7 @@ private ModifyableTableDescriptor(final TableName name, families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c))); this.values.putAll(values); this.values.put(IS_META_KEY, - new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME))))); + new Bytes(Bytes.toBytes(Boolean.toString(name.equals(MetaTableName.getInstance()))))); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java index 40ff0373c36c..8f0c11a03f6b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java @@ -22,7 +22,7 @@ import java.security.PrivilegedExceptionAction; import java.util.concurrent.CompletableFuture; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.client.AsyncConnection; import org.apache.hadoop.hbase.client.AsyncTable; import org.apache.hadoop.hbase.client.Connection; @@ -73,7 +73,7 @@ private static void injectFault() throws ServiceException { 
future.completeExceptionally(ProtobufUtil.handleRemoteException(injectedException)); return future; } - AsyncTable table = conn.getTable(TableName.META_TABLE_NAME); + AsyncTable table = conn.getTable(MetaTableName.getInstance()); table. coprocessorService( AuthenticationProtos.AuthenticationService::newStub, @@ -102,7 +102,7 @@ static Token obtainToken(Connection conn) throws try { injectFault(); - meta = conn.getTable(TableName.META_TABLE_NAME); + meta = conn.getTable(MetaTableName.getInstance()); CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW); AuthenticationProtos.AuthenticationService.BlockingInterface service = AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 60175137ad2c..7cb4ffe285eb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -65,6 +65,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerTask; @@ -3325,7 +3326,7 @@ public static String toLockJson(List lockedRes long regionId = proto.getRegionId(); int defaultReplicaId = org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID; int replicaId = proto.hasReplicaId() ? 
proto.getReplicaId() : defaultReplicaId; - if (tableName.equals(TableName.META_TABLE_NAME) && replicaId == defaultReplicaId) { + if (tableName.equals(MetaTableName.getInstance()) && replicaId == defaultReplicaId) { return RegionInfoBuilder.FIRST_META_REGIONINFO; } byte[] startKey = null; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java index f65c7ccb6e75..2206a800767e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java @@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.HBaseRpcController; @@ -195,7 +196,7 @@ public void testCreateSystemTable() { // that we pass the correct priority @Test public void testCreateMetaTable() { - conn.getAdmin().createTable(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) + conn.getAdmin().createTable(TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()) .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()).join(); verify(masterStub, times(1)).createTable(assertPriority(SYSTEMTABLE_QOS), any(CreateTableRequest.class), any()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java index a7df92999d08..d519870080bf 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java +++ 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java @@ -45,9 +45,9 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MatcherPredicate; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; @@ -85,7 +85,7 @@ public class TestAsyncRegionLocatorTracing { @Before public void setUp() throws IOException { - RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); + RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(MetaTableName.getInstance()).build(); locs = new RegionLocations( new HRegionLocation(metaRegionInfo, ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime())), @@ -147,30 +147,30 @@ public void testClearCacheServerName() { @Test public void testClearCacheTableName() { - conn.getLocator().clearCache(TableName.META_TABLE_NAME); + conn.getLocator().clearCache(MetaTableName.getInstance()); SpanData span = waitSpan("AsyncRegionLocator.clearCache"); assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL), buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME))); + buildTableAttributesMatcher(MetaTableName.getInstance()))); } @Test public void testGetRegionLocation() { - conn.getLocator().getRegionLocation(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, + conn.getLocator().getRegionLocation(MetaTableName.getInstance(), HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT, TimeUnit.SECONDS.toNanos(1)).join(); SpanData span = waitSpan("AsyncRegionLocator.getRegionLocation"); assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), 
hasKind(SpanKind.INTERNAL), buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), + buildTableAttributesMatcher(MetaTableName.getInstance()), hasAttributes(containsEntryWithStringValuesOf("db.hbase.regions", locs.getDefaultRegionLocation().getRegion().getRegionNameAsString())))); } @Test public void testGetRegionLocations() { - conn.getLocator().getRegionLocations(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, + conn.getLocator().getRegionLocations(MetaTableName.getInstance(), HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT, false, TimeUnit.SECONDS.toNanos(1)).join(); SpanData span = waitSpan("AsyncRegionLocator.getRegionLocations"); String[] expectedRegions = @@ -178,7 +178,7 @@ public void testGetRegionLocations() { .map(RegionInfo::getRegionNameAsString).toArray(String[]::new); assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL), buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), hasAttributes( + buildTableAttributesMatcher(MetaTableName.getInstance()), hasAttributes( containsEntryWithStringValuesOf("db.hbase.regions", containsInAnyOrder(expectedRegions))))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java index cb5431c35d3e..34e9ba201838 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.HBaseRpcController; @@ 
-237,7 +238,7 @@ public void testGetSystemTable() { @Test public void testGetMetaTable() { - conn.getTable(TableName.META_TABLE_NAME).get(new Get(Bytes.toBytes(0))).join(); + conn.getTable(MetaTableName.getInstance()).get(new Get(Bytes.toBytes(0))).join(); verify(stub, times(1)).get(assertPriority(SYSTEMTABLE_QOS), any(GetRequest.class), any()); } @@ -268,7 +269,7 @@ public void testPutSystemTable() { @Test public void testPutMetaTable() { - conn.getTable(TableName.META_TABLE_NAME).put(new Put(Bytes.toBytes(0)) + conn.getTable(MetaTableName.getInstance()).put(new Put(Bytes.toBytes(0)) .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @@ -296,7 +297,7 @@ public void testDeleteSystemTable() { @Test public void testDeleteMetaTable() { - conn.getTable(TableName.META_TABLE_NAME).delete(new Delete(Bytes.toBytes(0))).join(); + conn.getTable(MetaTableName.getInstance()).delete(new Delete(Bytes.toBytes(0))).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @@ -327,7 +328,7 @@ public void testAppendSystemTable() { @Test public void testAppendMetaTable() { - conn.getTable(TableName.META_TABLE_NAME).append(new Append(Bytes.toBytes(0)) + conn.getTable(MetaTableName.getInstance()).append(new Append(Bytes.toBytes(0)) .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @@ -355,7 +356,7 @@ public void testIncrementSystemTable() { @Test public void testIncrementMetaTable() { - conn.getTable(TableName.META_TABLE_NAME) + conn.getTable(MetaTableName.getInstance()) .incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @@ -393,7 +394,7 @@ 
public void testCheckAndPutSystemTable() { @Test public void testCheckAndPutMetaTable() { - conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) + conn.getTable(MetaTableName.getInstance()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) .qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0)) .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) .join(); @@ -426,7 +427,7 @@ public void testCheckAndDeleteSystemTable() { @Test public void testCheckAndDeleteMetaTable() { - conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) + conn.getTable(MetaTableName.getInstance()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) .qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0)) .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) .join(); @@ -467,7 +468,7 @@ public void testCheckAndMutateSystemTable() throws IOException { @Test public void testCheckAndMutateMetaTable() throws IOException { - conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) + conn.getTable(MetaTableName.getInstance()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) .qualifier(Bytes.toBytes("cq")).ifEquals(Bytes.toBytes("v")) .thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0)))) .join(); @@ -555,7 +556,7 @@ public void testScanSystemTable() throws Exception { @Test public void testScanMetaTable() throws Exception { CompletableFuture renewFuture = mockScanReturnRenewFuture(SYSTEMTABLE_QOS); - testForTable(TableName.META_TABLE_NAME, renewFuture, Optional.empty()); + testForTable(MetaTableName.getInstance(), renewFuture, Optional.empty()); } private void testForTable(TableName tableName, CompletableFuture renewFuture, @@ -598,7 +599,7 @@ public void testBatchSystemTable() { @Test public void testBatchMetaTable() { - 
conn.getTable(TableName.META_TABLE_NAME).batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))) + conn.getTable(MetaTableName.getInstance()).batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))) .join(); verify(stub, times(1)).multi(assertPriority(SYSTEMTABLE_QOS), any(ClientProtos.MultiRequest.class), any()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java index 40617d78950a..c8e6b2158ce1 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -34,6 +33,7 @@ import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.hbase.ClientMetaTableAccessor; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -94,7 +94,7 @@ public void testCompactTableWithNullLocations() throws Exception { mockedMeta.when(() -> ClientMetaTableAccessor.getTableHRegionLocations(any(AsyncTable.class), any(TableName.class))).thenReturn(nullLocationsFuture); AsyncTable metaTable = mock(AsyncTable.class); - when(connection.getTable(META_TABLE_NAME)).thenReturn(metaTable); + when(connection.getTable(MetaTableName.getInstance())).thenReturn(metaTable); HashedWheelTimer hashedWheelTimer = mock(HashedWheelTimer.class); AsyncAdminBuilderBase asyncAdminBuilderBase = mock(AsyncAdminBuilderBase.class); diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java index f74b79a0672e..e01b3b741dcc 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java @@ -27,6 +27,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.exceptions.DeserializationException; @@ -141,7 +142,7 @@ public void testContainsRange() { @Test public void testContainsRangeForMetaTable() { TableDescriptor tableDesc = - TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build(); + TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).build(); RegionInfo hri = RegionInfoBuilder.newBuilder(tableDesc.getTableName()).build(); byte[] startRow = HConstants.EMPTY_START_ROW; byte[] row1 = Bytes.toBytes("a,a,0"); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index 53f33845ef7d..d09f7a225a6e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.util.regex.Pattern; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.exceptions.HBaseException; @@ -59,7 +60,7 @@ public class 
TestTableDescriptorBuilder { @Test(expected = IOException.class) public void testAddCoprocessorTwice() throws IOException { String cpName = "a.b.c.d"; - TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).setCoprocessor(cpName) + TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).setCoprocessor(cpName) .setCoprocessor(cpName).build(); } @@ -67,7 +68,7 @@ public void testAddCoprocessorTwice() throws IOException { public void testPb() throws DeserializationException, IOException { final int v = 123; TableDescriptor htd = - TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).setMaxFileSize(v) + TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).setMaxFileSize(v) .setDurability(Durability.ASYNC_WAL).setReadOnly(true).setRegionReplication(2).build(); byte[] bytes = TableDescriptorBuilder.toByteArray(htd); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java index 0e6a53ca7c47..49eb3b9cce62 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java @@ -787,7 +787,7 @@ public static CellComparator getCellComparator(TableName tableName) { */ public static CellComparator getCellComparator(byte[] tableName) { // FYI, TableName.toBytes does not create an array; just returns existing array pointer. - return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes()) + return Bytes.equals(tableName, MetaTableName.getInstance().toBytes()) ? 
MetaCellComparator.META_COMPARATOR : CellComparatorImpl.COMPARATOR; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 1051686d32e8..2c0fcafabfca 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1209,7 +1209,7 @@ public enum OperationStatusCode { @Deprecated public static final List HBASE_NON_USER_TABLE_DIRS = Collections.unmodifiableList(Arrays.asList( - (String[]) ArrayUtils.addAll(new String[] { TableName.META_TABLE_NAME.getNameAsString() }, + (String[]) ArrayUtils.addAll(new String[] { MetaTableName.getInstance().getNameAsString() }, HBASE_NON_TABLE_DIRS.toArray()))); /** Health script related settings. */ diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java index 7f6e87ebf911..e2fd632be19f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java @@ -75,7 +75,7 @@ public static CellComparator getInnerStoreCellComparator(TableName tableName) { * @return CellComparator to use going off the {@code tableName} passed. */ public static CellComparator getInnerStoreCellComparator(byte[] tableName) { - return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes()) + return Bytes.equals(tableName, MetaTableName.getInstance().toBytes()) ? 
MetaCellComparator.META_COMPARATOR : InnerStoreCellComparator.INNER_STORE_COMPARATOR; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java new file mode 100644 index 000000000000..bbacedc28390 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.conf.Configuration; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Public +public class MetaTableName { + private static final Logger LOG = LoggerFactory.getLogger(MetaTableName.class); + private static volatile TableName instance; + + private MetaTableName() { + } + + /** + * Get the singleton instance of the meta table name. 
+ * @return The meta table name instance + */ + public static TableName getInstance() { + if (instance == null) { + synchronized (MetaTableName.class) { + if (instance == null) { + instance = initializeHbaseMetaTableName(HBaseConfiguration.create()); + LOG.info("Meta table name initialized: {}", instance.getName()); + } + } + } + return instance; + } + + /** + * Initialize the meta table name from the given configuration. + * @param conf The configuration to use + * @return The initialized meta table name + */ + private static TableName initializeHbaseMetaTableName(Configuration conf) { + TableName metaTableName = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta"); + LOG.info("Meta table suffix value: {}", metaTableName); + return metaTableName; + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java index b6d854c13784..442dc10d8721 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java @@ -25,6 +25,9 @@ import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; @@ -43,8 +46,11 @@ *

*/ @InterfaceAudience.Public +@InterfaceStability.Stable public final class TableName implements Comparable { + private static final Logger LOG = LoggerFactory.getLogger(TableName.class); + /** See {@link #createTableNameIfNecessary(ByteBuffer, ByteBuffer)} */ private static final Set tableCache = new CopyOnWriteArraySet<>(); @@ -66,7 +72,8 @@ public final class TableName implements Comparable { + NAMESPACE_DELIM + ")?)" + "(?:" + VALID_TABLE_QUALIFIER_REGEX + "))"; /** The hbase:meta table's name. */ - public static final TableName META_TABLE_NAME = + @Deprecated + public static TableName META_TABLE_NAME = valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta"); /** @@ -87,7 +94,7 @@ public final class TableName implements Comparable { /** Returns True if tn is the hbase:meta table name. */ public static boolean isMetaTableName(final TableName tn) { - return tn.equals(TableName.META_TABLE_NAME); + return tn.equals(MetaTableName.getInstance()); } /** @@ -288,8 +295,8 @@ private TableName(ByteBuffer namespace, ByteBuffer qualifier) throws IllegalArgu throw new IllegalArgumentException(OLD_ROOT_STR + " has been deprecated."); } if (qualifierAsString.equals(OLD_META_STR)) { - throw new IllegalArgumentException( - OLD_META_STR + " no longer exists. The table has been " + "renamed to " + META_TABLE_NAME); + throw new IllegalArgumentException(OLD_META_STR + " no longer exists. 
The table has been " + + "renamed to " + MetaTableName.getInstance()); } if (Bytes.equals(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME, namespace)) { diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java index 553b39311369..5c143d8ee065 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java @@ -196,39 +196,37 @@ public void testMetaComparisons2() { long now = EnvironmentEdgeManager.currentTime(); CellComparator c = MetaCellComparator.META_COMPARATOR; assertTrue(c.compare( - createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now)), - createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now))) - == 0); + createByteBufferKeyValueFromKeyValue(new KeyValue( + Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now)), + createByteBufferKeyValueFromKeyValue(new KeyValue( + Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now))) == 0); Cell a = createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now)); + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now)); Cell b = createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now)); + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now)); assertTrue(c.compare(a, b) < 0); assertTrue(c.compare( - createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now)), - createByteBufferKeyValueFromKeyValue( - new 
KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now))) - > 0); + createByteBufferKeyValueFromKeyValue(new KeyValue( + Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now)), + createByteBufferKeyValueFromKeyValue(new KeyValue( + Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now))) > 0); assertTrue(c.compare( createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)), + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)), createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now))) + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now))) == 0); assertTrue(c.compare( createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)), + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)), createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now))) + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now))) < 0); assertTrue(c.compare( createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now)), + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now)), createByteBufferKeyValueFromKeyValue( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now))) + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now))) > 0); } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java index 1644a6f1fce7..1e65b75a9777 100644 --- 
a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java @@ -198,31 +198,32 @@ public void testKeyValueBorderCases() { private void metacomparisons(final CellComparatorImpl c) { long now = EnvironmentEdgeManager.currentTime(); assertTrue(c.compare( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now), - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now)) + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now), + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now)) == 0); KeyValue a = - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now); + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now); KeyValue b = - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now); + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now); assertTrue(c.compare(a, b) < 0); assertTrue(c.compare( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now), - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now)) + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now), + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now)) > 0); } private void comparisons(final CellComparatorImpl c) { long now = EnvironmentEdgeManager.currentTime(); assertTrue(c.compare( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now), - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)) == 0); + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now), + new 
KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)) + == 0); assertTrue(c.compare( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now), - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now)) < 0); + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now), + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now)) < 0); assertTrue(c.compare( - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now), - new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)) > 0); + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now), + new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)) > 0); } @Test diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java index a459074ba27d..54ceeecfec21 100644 --- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java +++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java @@ -222,16 +222,16 @@ public void testRegionStatesCount() throws Exception { ClusterMetrics metrics = ADMIN.getClusterMetrics(); Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2); Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(), + metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getRegionsInTransition(), 0); Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1); + metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getOpenRegions(), 1); Assert.assertEquals( - 
metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1); + metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getTotalRegions(), 1); Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getClosedRegions(), 0); + metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getClosedRegions(), 0); Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getSplitRegions(), 0); + metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getSplitRegions(), 0); Assert.assertEquals( metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0); Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 1); @@ -253,12 +253,12 @@ public void testRegionStatesWithSplit() throws Exception { ClusterMetrics metrics = ADMIN.getClusterMetrics(); Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2); Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(), + metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getRegionsInTransition(), 0); Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1); + metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getOpenRegions(), 1); Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1); + metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getTotalRegions(), 1); Assert.assertEquals( metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0); Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 1); @@ -273,12 +273,12 @@ public void testRegionStatesWithSplit() throws Exception { metrics = ADMIN.getClusterMetrics(); 
Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2); Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(), + metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getRegionsInTransition(), 0); Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1); + metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getOpenRegions(), 1); Assert.assertEquals( - metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1); + metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getTotalRegions(), 1); Assert.assertEquals( metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0); Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 2); diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java index 5e8447c2ad81..267b78dade13 100644 --- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java +++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java @@ -42,8 +42,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.slf4j.Logger; @@ -165,7 +165,7 @@ public void startHBase() throws IOException { int attemptsLeft = 10; while (attemptsLeft-- > 0) { try { - testUtil.getConnection().getTable(TableName.META_TABLE_NAME); + 
testUtil.getConnection().getTable(MetaTableName.getInstance()); } catch (Exception e) { LOG.info("Waiting for HBase to startup. Retries left: " + attemptsLeft, e); Threads.sleep(1000); diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java index 144ea6503b06..dc7d025796bd 100644 --- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java +++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java @@ -20,6 +20,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -124,7 +125,7 @@ protected int doWork() throws Exception { LOG.debug("Trying to scan meta"); - Table metaTable = connection.getTable(TableName.META_TABLE_NAME); + Table metaTable = connection.getTable(MetaTableName.getInstance()); ResultScanner scanner = metaTable.getScanner(new Scan()); Result result; while ((result = scanner.next()) != null) { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java index 53ed8a25ed0e..7aea9b356259 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.master; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; @@ -49,7 +50,7 @@ public interface 
MetricsMasterFileSystemSource extends BaseSource { String SPLIT_SIZE_NAME = "hlogSplitSize"; String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()"; - String META_SPLIT_SIZE_DESC = "Size of hbase:meta WAL files being split"; + String META_SPLIT_SIZE_DESC = "Size of " + MetaTableName.getInstance() + " WAL files being split"; String SPLIT_TIME_DESC = "Time it takes to finish WAL.splitLog()"; String SPLIT_SIZE_DESC = "Size of WAL files being split"; diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java index a8c3a16d13dc..fd07d7e1dc6a 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java @@ -56,7 +56,7 @@ public static void setUp() throws Exception { 1000); // Make sure there are three servers. util.initializeCluster(3); - HBaseTestingUtil.setReplicas(util.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(util.getAdmin(), MetaTableName.getInstance(), 3); ZKWatcher zkw = util.getZooKeeperWatcher(); Configuration conf = util.getConfiguration(); String baseZNode = diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java index 2bb87ca8f2f6..fbc98f006393 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.IntegrationTestingUtility; import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.NamespaceExistException; import org.apache.hadoop.hbase.NamespaceNotFoundException; import 
org.apache.hadoop.hbase.TableExistsException; @@ -184,7 +185,7 @@ private static void setupActions() throws IOException { // Set up the action that will move the regions of meta. moveMetaRegionsAction = new MoveRegionsOfTableAction(sleepTime, - MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, TableName.META_TABLE_NAME); + MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, MetaTableName.getInstance()); // Set up the action that will move the regions of our table. moveRegionAction = new MoveRegionsOfTableAction(sleepTime, diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index d4ccac901436..b861c29a9bcc 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; @@ -256,7 +257,7 @@ public void testSimpleCase() throws Throwable { @Test public void testMetaExport() throws Throwable { String[] args = - new String[] { TableName.META_TABLE_NAME.getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" }; + new String[] { MetaTableName.getInstance().getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" }; assertTrue(runExport(args)); } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java index a115fd17af3f..5ddc23d36554 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java @@ 
-29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.rest.client.Client; @@ -55,7 +56,7 @@ public class TestStatusResource { private static final Logger LOG = LoggerFactory.getLogger(TestStatusResource.class); - private static final byte[] META_REGION_NAME = Bytes.toBytes(TableName.META_TABLE_NAME + ",,1"); + private static final byte[] META_REGION_NAME = Bytes.toBytes(MetaTableName.getInstance() + ",,1"); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility(); diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java index 8310232890dd..1db8c371593c 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java @@ -23,7 +23,7 @@ import java.util.Iterator; import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.testclassification.RestTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -89,8 +89,8 @@ protected StorageClusterStatusModel buildTestModel() { model.addLiveNode("test1", 1245219839331L, 128, 1024).addRegion(Bytes.toBytes("hbase:root,,0"), 1, 1, 0, 0, 0, 1, 1, 2, 1, 1, 1, 1, 1); model.addLiveNode("test2", 1245239331198L, 512, 1024).addRegion( - Bytes.toBytes(TableName.META_TABLE_NAME + ",,1246000043724"), 1, 1, 0, 0, 0, 1, 
1, 2, 1, 1, 1, - 1, 1); + Bytes.toBytes(MetaTableName.getInstance() + ",,1246000043724"), 1, 1, 0, 0, 0, 1, 1, 2, 1, 1, + 1, 1, 1); return model; } @@ -128,7 +128,7 @@ protected void checkModel(StorageClusterStatusModel model) { assertEquals(1024, node.getMaxHeapSizeMB()); regions = node.getRegions().iterator(); region = regions.next(); - assertEquals(Bytes.toString(region.getName()), TableName.META_TABLE_NAME + ",,1246000043724"); + assertEquals(Bytes.toString(region.getName()), MetaTableName.getInstance() + ",,1246000043724"); assertEquals(1, region.getStores()); assertEquals(1, region.getStorefiles()); assertEquals(0, region.getStorefileSizeMB()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 05b049e27dbc..24bbfb5079f7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -151,7 +151,7 @@ public static Table getMetaHTable(final Connection connection) throws IOExceptio if (connection.isClosed()) { throw new IOException("connection is closed"); } - return connection.getTable(TableName.META_TABLE_NAME); + return connection.getTable(MetaTableName.getInstance()); } /** @@ -366,7 +366,7 @@ private static Scan getMetaScan(Configuration conf, int rowUpperLimit) { public static List> getTableRegionsAndLocations( Connection connection, @Nullable final TableName tableName, final boolean excludeOfflinedSplitParents) throws IOException { - if (tableName != null && tableName.equals(TableName.META_TABLE_NAME)) { + if (tableName != null && tableName.equals(MetaTableName.getInstance())) { throw new IOException( "This method can't be used to locate meta regions; use MetaTableLocator instead"); } @@ -592,7 +592,7 @@ public static PairOfSameType getDaughterRegions(Result data) { */ @Nullable public static TableState getTableState(Connection conn, 
TableName tableName) throws IOException { - if (tableName.equals(TableName.META_TABLE_NAME)) { + if (tableName.equals(MetaTableName.getInstance())) { return new TableState(tableName, TableState.State.ENABLED); } Table metaHTable = getMetaHTable(conn); @@ -859,7 +859,7 @@ public static void addRegionsToMeta(Connection connection, List regi private static void updateTableState(Connection connection, TableState state) throws IOException { Put put = makePutFromTableState(state, EnvironmentEdgeManager.currentTime()); putToMetaTable(connection, put); - LOG.info("Updated {} in hbase:meta", state); + LOG.info("Updated {} in {}", state, MetaTableName.getInstance()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java index 3cac1f319dae..aa934cc6d462 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java @@ -26,7 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CoprocessorEnvironment; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; @@ -133,7 +133,7 @@ private String getRegionIdFromOp(Row op) { } private boolean isMetaTableOp(ObserverContext e) { - return TableName.META_TABLE_NAME.equals(e.getEnvironment().getRegionInfo().getTable()); + return MetaTableName.getInstance().equals(e.getEnvironment().getRegionInfo().getTable()); } private void clientMetricRegisterAndMark() { @@ -268,7 +268,7 @@ public void start(CoprocessorEnvironment env) throws IOException { env instanceof RegionCoprocessorEnvironment && ((RegionCoprocessorEnvironment) 
env).getRegionInfo().getTable() != null && ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() - .equals(TableName.META_TABLE_NAME) + .equals(MetaTableName.getInstance()) ) { RegionCoprocessorEnvironment regionCoprocessorEnv = (RegionCoprocessorEnvironment) env; registry = regionCoprocessorEnv.getMetricRegistryForRegionServer(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 22d3ab69b51c..d11b244448b4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.PleaseRestartMasterException; @@ -1092,7 +1093,9 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE startupTaskGroup.addTask("Initializing meta table if this is a new deploy"); InitMetaProcedure initMetaProc = null; // Print out state of hbase:meta on startup; helps debugging. 
- if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) { + if ( + !this.assignmentManager.getRegionStates().hasTableRegionStates(MetaTableName.getInstance()) + ) { Optional optProc = procedureExecutor.getProcedures().stream() .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny(); initMetaProc = optProc.orElseGet(() -> { @@ -1156,7 +1159,7 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE return; } - TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME); + TableDescriptor metaDescriptor = tableDescriptors.get(MetaTableName.getInstance()); final ColumnFamilyDescriptor tableFamilyDesc = metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY); final ColumnFamilyDescriptor replBarrierFamilyDesc = @@ -1174,16 +1177,17 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE if (conf.get(HConstants.META_REPLICAS_NUM) != null) { int replicasNumInConf = conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM); - TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME); + TableDescriptor metaDesc = tableDescriptors.get(MetaTableName.getInstance()); if (metaDesc.getRegionReplication() != replicasNumInConf) { // it is possible that we already have some replicas before upgrading, so we must set the // region replication number in meta TableDescriptor directly first, without creating a // ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas. 
int existingReplicasCount = - assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size(); + assignmentManager.getRegionStates().getRegionsOfTable(MetaTableName.getInstance()).size(); if (existingReplicasCount > metaDesc.getRegionReplication()) { - LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)" - + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount); + LOG.info( + "Update replica count of {} from {}(in TableDescriptor)" + " to {}(existing ZNodes)", + MetaTableName.getInstance(), metaDesc.getRegionReplication(), existingReplicasCount); metaDesc = TableDescriptorBuilder.newBuilder(metaDesc) .setRegionReplication(existingReplicasCount).build(); tableDescriptors.update(metaDesc); @@ -1193,7 +1197,8 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE LOG.info( - "The {} config is {} while the replica count in TableDescriptor is {}" - + " for hbase:meta, altering...", - HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication()); + "The {} config is {} while the replica count in TableDescriptor is {}" + + " for {}, altering...", + HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication(), + MetaTableName.getInstance()); procedureExecutor.submitProcedure(new ModifyTableProcedure( procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc) .setRegionReplication(replicasNumInConf).build(), @@ -1423,7 +1428,7 @@ private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor) TableDescriptor newMetaDesc = TableDescriptorBuilder.newBuilder(metaDescriptor) .setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf)) .setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()).build(); - long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, 0, 0, false); + long pid = this.modifyTable(MetaTableName.getInstance(), () -> newMetaDesc, 0, 0, false); int tries = 30; while ( !(getMasterProcedureExecutor().isFinished(pid)) &&
getMasterProcedureExecutor().isRunning() @@ -2586,7 +2591,7 @@ private void startActiveMasterManager(int infoPort) throws KeeperException { } private static boolean isCatalogTable(final TableName tableName) { - return tableName.equals(TableName.META_TABLE_NAME); + return tableName.equals(MetaTableName.getInstance()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java index 464dfaca7035..c77d2c0c1a85 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.AnnotationReadingPriorityFunction; import org.apache.yetus.audience.InterfaceAudience; @@ -84,7 +85,7 @@ protected int getBasePriority(RequestHeader header, Message param) { if (rst.getRegionInfoList() != null) { for (HBaseProtos.RegionInfo info : rst.getRegionInfoList()) { TableName tn = ProtobufUtil.toTableName(info.getTableName()); - if (TableName.META_TABLE_NAME.equals(tn)) { + if (MetaTableName.getInstance().equals(tn)) { return META_TRANSITION_QOS; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java index 854c21da2bc7..3edfc1eb67a0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileSystem; import 
org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.AsyncClusterConnection; @@ -605,7 +606,7 @@ public static void printAssignmentPlan(FavoredNodesPlan plan) { */ public void updateAssignmentPlanToMeta(FavoredNodesPlan plan) throws IOException { try { - LOG.info("Start to update the hbase:meta with the new assignment plan"); + LOG.info("Started updating {} with the new assignment plan", MetaTableName.getInstance()); Map> assignmentMap = plan.getAssignmentMap(); Map> planToUpdate = new HashMap<>(assignmentMap.size()); Map regionToRegionInfoMap = @@ -619,6 +620,7 @@ public void updateAssignmentPlanToMeta(FavoredNodesPlan plan) throws IOException } catch (Exception e) { LOG.error( "Failed to update hbase:meta with the new assignment" + "plan because " + e.getMessage()); } + LOG.info("Updated {} with the new assignment plan", MetaTableName.getInstance()); } @@ -690,14 +692,14 @@ private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws I } public void updateAssignmentPlan(FavoredNodesPlan plan) throws IOException { - LOG.info("Start to update the new assignment plan for the hbase:meta table and" - + " the region servers"); + LOG.info("Started updating the new assignment plan for {} and the region servers", + MetaTableName.getInstance()); // Update the new assignment plan to META updateAssignmentPlanToMeta(plan); // Update the new assignment plan to Region Servers updateAssignmentPlanToRegionServers(plan); - LOG.info("Finish to update the new assignment plan for the hbase:meta table and" - + " the region servers"); + LOG.info("Finished updating the new assignment plan for {} and the region servers", + MetaTableName.getInstance()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index 4d18b2ad8f4e..9903c84785e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -79,7 +80,7 @@ private void tryMigrateNamespaceTable() throws IOException, InterruptedException if (!opt.isPresent()) { // the procedure is not present, check whether have the ns family in meta table TableDescriptor metaTableDesc = - masterServices.getTableDescriptors().get(TableName.META_TABLE_NAME); + masterServices.getTableDescriptors().get(MetaTableName.getInstance()); if (metaTableDesc.hasColumnFamily(HConstants.NAMESPACE_FAMILY)) { // normal case, upgrading is done or the cluster is created with 3.x code migrationDone = true; @@ -106,7 +107,7 @@ private void addToCache(Result result, byte[] family, byte[] qualifier) throws I } private void loadFromMeta() throws IOException { - try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME); + try (Table table = masterServices.getConnection().getTable(MetaTableName.getInstance()); ResultScanner scanner = table.getScanner(HConstants.NAMESPACE_FAMILY)) { for (Result result;;) { result = scanner.next(); @@ -204,7 +205,7 @@ public static void insertNamespaceToMeta(Connection conn, NamespaceDescriptor ns Put put = new Put(row, true).addColumn(HConstants.NAMESPACE_FAMILY, HConstants.NAMESPACE_COL_DESC_QUALIFIER, ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray()); - try (Table table = conn.getTable(TableName.META_TABLE_NAME)) { + try 
(Table table = conn.getTable(MetaTableName.getInstance())) { table.put(put); } } @@ -212,7 +213,7 @@ public static void insertNamespaceToMeta(Connection conn, NamespaceDescriptor ns public void deleteNamespace(String namespaceName) throws IOException { checkMigrationDone(); Delete d = new Delete(Bytes.toBytes(namespaceName)); - try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = masterServices.getConnection().getTable(MetaTableName.getInstance())) { table.delete(d); } cache.remove(namespaceName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java index 6ad32623be1a..cdd54d616bee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.CatalogFamilyFormat; import org.apache.hadoop.hbase.ClientMetaTableAccessor; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Result; @@ -86,7 +87,7 @@ public boolean isTableState(TableName tableName, TableState.State... states) { } public void setDeletedTable(TableName tableName) throws IOException { - if (tableName.equals(TableName.META_TABLE_NAME)) { + if (tableName.equals(MetaTableName.getInstance())) { // Can't delete the hbase:meta table. 
return; } @@ -147,7 +148,7 @@ public TableState getTableState(TableName tableName) throws IOException { } private void updateMetaState(TableName tableName, TableState.State newState) throws IOException { - if (tableName.equals(TableName.META_TABLE_NAME)) { + if (tableName.equals(MetaTableName.getInstance())) { if ( TableState.State.DISABLING.equals(newState) || TableState.State.DISABLED.equals(newState) ) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java index 1b64ddea23e3..f352bbc8b28f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -354,7 +355,7 @@ public void start() throws IOException, KeeperException { if (RegionReplicaUtil.isDefaultReplica(regionInfo.getReplicaId())) { setMetaAssigned(regionInfo, state == State.OPEN); } - LOG.debug("Loaded hbase:meta {}", regionNode); + LOG.debug("Loaded {} {}", MetaTableName.getInstance(), regionNode); }, result); } } @@ -1948,8 +1949,8 @@ private void checkMetaLoaded(RegionInfo hri) throws PleaseHoldException { boolean meta = isMetaRegion(hri); boolean metaLoaded = isMetaLoaded(); if (!meta && !metaLoaded) { - throw new PleaseHoldException( - "Master not fully online; hbase:meta=" + meta + ", metaLoaded=" + metaLoaded); + throw new PleaseHoldException("Master not fully online; " + MetaTableName.getInstance() + "=" + + meta + ", metaLoaded=" + metaLoaded); } } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java index c370fed9d9c0..e6891d08f075 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.MetaMutationAnnotation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; @@ -718,8 +719,10 @@ private void preMergeRegionsCommit(final MasterProcedureEnv env) throws IOExcept RegionInfo.parseRegionName(p.getRow()); } } catch (IOException e) { - LOG.error("Row key of mutation from coprocessor is not parsable as region name. " - + "Mutations from coprocessor should only be for hbase:meta table.", e); + LOG.error( + "Row key of mutation from coprocessor is not parsable as region name. 
" + + "Mutations from coprocessor should only be for {} table.", + MetaTableName.getInstance(), e); throw e; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java index 5987fc7537b4..4d42ad619255 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -169,9 +170,10 @@ public static void visitMetaEntry(final RegionStateVisitor visitor, final Result final long openSeqNum = hrl.getSeqNum(); LOG.debug( - "Load hbase:meta entry region={}, regionState={}, lastHost={}, " + "Load {} entry region={}, regionState={}, lastHost={}, " + "regionLocation={}, openSeqNum={}", - regionInfo.getEncodedName(), state, lastHost, regionLocation, openSeqNum); + MetaTableName.getInstance(), regionInfo.getEncodedName(), state, lastHost, regionLocation, + openSeqNum); visitor.visitRegionState(result, regionInfo, state, regionLocation, lastHost, openSeqNum); } } @@ -190,8 +192,8 @@ private Put generateUpdateRegionLocationPut(RegionStateNode regionStateNode) thr final Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), time); MetaTableAccessor.addRegionInfo(put, regionInfo); final StringBuilder info = - new StringBuilder("pid=").append(pid).append(" updating hbase:meta row=") - .append(regionInfo.getEncodedName()).append(", regionState=").append(state); + new StringBuilder("pid=").append(pid).append(" updating ").append(MetaTableName.getInstance()) + 
.append(" row=").append(regionInfo.getEncodedName()).append(", regionState=").append(state); if (openSeqNum >= 0) { Preconditions.checkArgument(state == State.OPEN && regionLocation != null, "Open region should be on a server"); @@ -283,7 +285,7 @@ private CompletableFuture updateRegionLocation(RegionInfo regionInfo, Stat future = FutureUtils.failedFuture(e); } } else { - AsyncTable table = master.getAsyncConnection().getTable(TableName.META_TABLE_NAME); + AsyncTable table = master.getAsyncConnection().getTable(MetaTableName.getInstance()); future = table.put(put); } FutureUtils.addListener(future, (r, e) -> { @@ -330,7 +332,7 @@ private void multiMutate(RegionInfo ri, List mutations) throws IOExcep } MutateRowsRequest request = builder.build(); AsyncTable table = - master.getConnection().toAsyncConnection().getTable(TableName.META_TABLE_NAME); + master.getConnection().toAsyncConnection().getTable(MetaTableName.getInstance()); CompletableFuture future = table. coprocessorService(MultiRowMutationService::newStub, (stub, controller, done) -> stub.mutateRows(controller, request, done), row); @@ -338,7 +340,7 @@ MutateRowsResponse> coprocessorService(MultiRowMutationService::newStub, } private Table getMetaTable() throws IOException { - return master.getConnection().getTable(TableName.META_TABLE_NAME); + return master.getConnection().getTable(MetaTableName.getInstance()); } private Result getRegionCatalogResult(RegionInfo region) throws IOException { @@ -504,7 +506,7 @@ public void deleteMergeQualifiers(RegionInfo mergeRegion) throws IOException { + " in meta table, they are cleaned up already, Skip."); return; } - try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = master.getConnection().getTable(MetaTableName.getInstance())) { table.delete(delete); } LOG.info( @@ -694,7 +696,7 @@ public static State getRegionState(final Result r, RegionInfo regionInfo) { return State.valueOf(state); } catch (IllegalArgumentException 
e) { LOG.warn( - "BAD value {} in hbase:meta info:state column for region {} , " + "BAD value {} in " + MetaTableName.getInstance() + " info:state column for region {} , " + "Consider using HBCK2 setRegionState ENCODED_REGION_NAME STATE", state, regionInfo.getEncodedName()); return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index 3d3d3d18de23..55ac3fa2a0bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; @@ -903,8 +904,10 @@ private void preSplitRegionBeforeMETA(final MasterProcedureEnv env) RegionInfo.parseRegionName(p.getRow()); } } catch (IOException e) { - LOG.error("pid=" + getProcId() + " row key of mutation from coprocessor not parsable as " - + "region name." + "Mutations from coprocessor should only for hbase:meta table."); + LOG.error( + "pid={} row key of mutation from coprocessor not parsable as region name. 
" + + "Mutations from coprocessor should only be for {} table.", + getProcId(), MetaTableName.getInstance()); throw e; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java index 77b1082d0f03..419f8fb15312 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java @@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; @@ -80,7 +81,7 @@ public synchronized void chore() { long deletedLastPushedSeqIds = 0; TableName tableName = null; List peerIds = null; - try (Table metaTable = conn.getTable(TableName.META_TABLE_NAME); + try (Table metaTable = conn.getTable(MetaTableName.getInstance()); ResultScanner scanner = metaTable.getScanner( new Scan().addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions())) { for (;;) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java index 9f5ff857d4d8..f9fa67da83ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java @@ -33,6 +33,7 @@ import org.apache.commons.lang3.builder.ToStringStyle; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.client.AdvancedScanResultConsumer; import org.apache.hadoop.hbase.client.AsyncConnection; @@ -156,7 +157,7 @@ public TableName getScanTable() { public Results getResults() { final AsyncTable asyncTable = - connection.getTable(TableName.META_TABLE_NAME); + connection.getTable(MetaTableName.getInstance()); return new Results(asyncTable.getScanner(buildScan())); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java index 0d3ddb43abd4..a6cec33c3efb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -105,7 +106,7 @@ protected boolean initialChore() { scan(); } } catch (IOException e) { - LOG.warn("Failed initial janitorial scan of hbase:meta table", e); + LOG.warn("Failed initial janitorial scan of {} table", MetaTableName.getInstance(), e); return false; } return true; @@ -145,7 +146,7 @@ protected void chore() { + this.services.getServerManager().isClusterShutdown()); } } catch (IOException e) { - LOG.warn("Failed janitorial scan of hbase:meta table", e); + LOG.warn("Failed janitorial scan of {} table", MetaTableName.getInstance(), e); } } @@ -484,7 +485,7 @@ public static void main(String[] args) throws IOException { */ Get g = new Get(Bytes.toBytes("t2,40,1564119846424.1db8c57d64e0733e0f027aaeae7a0bf0.")); g.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); - try (Table t = 
connection.getTable(TableName.META_TABLE_NAME)) { + try (Table t = connection.getTable(MetaTableName.getInstance())) { Result r = t.get(g); byte[] row = g.getRow(); row[row.length - 2] <<= row[row.length - 2]; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java index 1244d5bf3525..9b30d5198510 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; @@ -203,19 +204,19 @@ private static List createMetaEntries(final MasterServices masterSer .flatMap(List::stream).collect(Collectors.toList()); final List createMetaEntriesFailures = addMetaEntriesResults.stream() .filter(Either::hasRight).map(Either::getRight).collect(Collectors.toList()); - LOG.debug("Added {}/{} entries to hbase:meta", createMetaEntriesSuccesses.size(), - newRegionInfos.size()); + LOG.debug("Added {}/{} entries to {}", createMetaEntriesSuccesses.size(), newRegionInfos.size(), + MetaTableName.getInstance()); if (!createMetaEntriesFailures.isEmpty()) { LOG.warn( - "Failed to create entries in hbase:meta for {}/{} RegionInfo descriptors. First" + "Failed to create entries in {} for {}/{} RegionInfo descriptors. First" + " failure message included; full list of failures with accompanying stack traces is" + " available at log level DEBUG.
message={}", - createMetaEntriesFailures.size(), addMetaEntriesResults.size(), + MetaTableName.getInstance(), createMetaEntriesFailures.size(), addMetaEntriesResults.size(), createMetaEntriesFailures.get(0).getMessage()); if (LOG.isDebugEnabled()) { - createMetaEntriesFailures - .forEach(ioe -> LOG.debug("Attempt to fix region hole in hbase:meta failed.", ioe)); + createMetaEntriesFailures.forEach(ioe -> LOG + .debug("Attempt to fix region hole in {} failed.", MetaTableName.getInstance(), ioe)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java index c712f1cba672..c74be9de50d9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hbase.ClientMetaTableAccessor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -137,8 +138,9 @@ private RegionInfo metaTableConsistencyCheck(Result metaTableRow) { if (!Bytes.equals(metaTableRow.getRow(), ri.getRegionName())) { LOG.warn( "INCONSISTENCY: Row name is not equal to serialized info:regioninfo content; " - + "row={} {}; See if RegionInfo is referenced in another hbase:meta row? Delete?", - Bytes.toStringBinary(metaTableRow.getRow()), ri.getRegionNameAsString()); + + "row={} {}; See if RegionInfo is referenced in another {} row? 
Delete?", + Bytes.toStringBinary(metaTableRow.getRow()), ri.getRegionNameAsString(), + MetaTableName.getInstance()); return null; } // Skip split parent region diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java index e199f6d5971d..98f43871238e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -24,6 +24,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; @@ -394,7 +395,8 @@ private static void cleanRegionsInMeta(final MasterProcedureEnv env, final Table long now = EnvironmentEdgeManager.currentTime(); List deletes = new ArrayList<>(); try ( - Table metaTable = env.getMasterServices().getConnection().getTable(TableName.META_TABLE_NAME); + Table metaTable = + env.getMasterServices().getConnection().getTable(MetaTableName.getInstance()); ResultScanner scanner = metaTable.getScanner(tableScan)) { for (;;) { Result result = scanner.next(); @@ -405,7 +407,7 @@ private static void cleanRegionsInMeta(final MasterProcedureEnv env, final Table } if (!deletes.isEmpty()) { LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName + " from " - + TableName.META_TABLE_NAME); + + MetaTableName.getInstance()); metaTable.delete(deletes); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java index e8999b886afd..8ce33c1574ca 100644 
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java @@ -20,6 +20,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; @@ -111,7 +112,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, final DisableTable ) { MasterFileSystem fs = env.getMasterFileSystem(); try (BufferedMutator mutator = env.getMasterServices().getConnection() - .getBufferedMutator(TableName.META_TABLE_NAME)) { + .getBufferedMutator(MetaTableName.getInstance())) { for (RegionInfo region : env.getAssignmentManager().getRegionStates() .getRegionsOfTable(tableName)) { long maxSequenceId = WALSplitUtil.getMaxRegionSequenceId( @@ -230,7 +231,7 @@ public TableOperationType getTableOperationType() { */ private boolean prepareDisable(final MasterProcedureEnv env) throws IOException { boolean canTableBeDisabled = true; - if (tableName.equals(TableName.META_TABLE_NAME)) { + if (tableName.equals(MetaTableName.getInstance())) { setFailure("master-disable-table", new ConstraintException("Cannot disable " + this.tableName)); canTableBeDisabled = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java index 43d69361c2d2..d7a4ce95c4ff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.ClientMetaTableAccessor; import 
org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Connection; @@ -102,14 +103,14 @@ List getRegionsOnCrashedServer(MasterProcedureEnv env) { MetaTableAccessor.scanMetaForTableRegions(env.getMasterServices().getConnection(), visitor, null); } catch (IOException ioe) { - LOG.warn("Failed scan of hbase:meta for 'Unknown Servers'", ioe); + LOG.warn("Failed scan of {} for 'Unknown Servers'", MetaTableName.getInstance(), ioe); return ris; } // create the server state node too env.getAssignmentManager().getRegionStates().createServer(getServerName()); - LOG.info("Found {} mentions of {} in hbase:meta of OPEN/OPENING Regions: {}", - visitor.getReassigns().size(), getServerName(), visitor.getReassigns().stream() - .map(RegionInfo::getEncodedName).collect(Collectors.joining(","))); + LOG.info("Found {} mentions of {} in {} of OPEN/OPENING Regions: {}", + visitor.getReassigns().size(), getServerName(), MetaTableName.getInstance(), visitor + .getReassigns().stream().map(RegionInfo::getEncodedName).collect(Collectors.joining(","))); return visitor.getReassigns(); } @@ -150,8 +151,8 @@ public boolean visit(Result result) throws IOException { RegionState rs = new RegionState(hrl.getRegion(), state, hrl.getServerName()); if (rs.isClosing()) { // Move region to CLOSED in hbase:meta. 
- LOG.info("Moving {} from CLOSING to CLOSED in hbase:meta", - hrl.getRegion().getRegionNameAsString()); + LOG.info("Moving {} from CLOSING to CLOSED in {}", + hrl.getRegion().getRegionNameAsString(), MetaTableName.getInstance()); try { MetaTableAccessor.updateRegionState(this.connection, hrl.getRegion(), RegionState.State.CLOSED); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java index 8b4901e90e85..2dfc652fc542 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -67,7 +68,7 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure proc) { // Meta Queue Lookup Helpers // ============================================================================ private MetaQueue getMetaQueue() { - MetaQueue node = AvlTree.get(metaMap, TableName.META_TABLE_NAME, META_QUEUE_KEY_COMPARATOR); + MetaQueue node = AvlTree.get(metaMap, MetaTableName.getInstance(), META_QUEUE_KEY_COMPARATOR); if (node != null) { return node; } @@ -1079,7 +1080,7 @@ public boolean waitMetaExclusiveLock(Procedure procedure) { return false; } waitProcedure(lock, procedure); - logLockedResource(LockedResourceType.META, TableName.META_TABLE_NAME.getNameAsString()); + logLockedResource(LockedResourceType.META, MetaTableName.getInstance().getNameAsString()); return true; } finally { schedUnlock(); diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java index 3d313c9ac3ab..5915971bd4c1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java @@ -19,6 +19,7 @@ import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.procedure2.LockStatus; import org.apache.hadoop.hbase.procedure2.Procedure; @@ -33,7 +34,7 @@ class MetaQueue extends Queue { protected MetaQueue(LockStatus lockStatus) { - super(TableName.META_TABLE_NAME, 1, lockStatus); + super(MetaTableName.getInstance(), 1, lockStatus); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java index dc9eac4c879d..30a120143ade 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java @@ -20,6 +20,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.Connection; @@ -64,7 +65,7 @@ private void migrate(MasterProcedureEnv env) throws IOException { try (Table nsTable = conn.getTable(TableName.NAMESPACE_TABLE_NAME); ResultScanner scanner = nsTable.getScanner( new 
Scan().addFamily(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES).readAllVersions()); - BufferedMutator mutator = conn.getBufferedMutator(TableName.META_TABLE_NAME)) { + BufferedMutator mutator = conn.getBufferedMutator(MetaTableName.getInstance())) { for (Result result;;) { result = scanner.next(); if (result == null) { @@ -88,7 +89,7 @@ protected Flow executeFromState(MasterProcedureEnv env, MigrateNamespaceTablePro switch (state) { case MIGRATE_NAMESPACE_TABLE_ADD_FAMILY: TableDescriptor metaTableDesc = - env.getMasterServices().getTableDescriptors().get(TableName.META_TABLE_NAME); + env.getMasterServices().getTableDescriptors().get(MetaTableName.getInstance()); if (!metaTableDesc.hasColumnFamily(HConstants.NAMESPACE_FAMILY)) { TableDescriptor newMetaTableDesc = TableDescriptorBuilder.newBuilder(metaTableDesc) .setColumnFamily( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java index 0d8981891e54..6951a35db098 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.RegionInfo; @@ -107,8 +108,8 @@ protected void preflightChecks(MasterProcedureEnv env, Boolean enabled) throws H Set cfs = this.modifiedTableDescriptor.getColumnFamilyNames(); for (byte[] family : UNDELETABLE_META_COLUMNFAMILIES) { if (!cfs.contains(family)) { - throw new HBaseIOException( - "Delete of hbase:meta column family " + Bytes.toString(family)); + 
throw new HBaseIOException("Delete of " + MetaTableName.getInstance() + " column family " + + Bytes.toString(family)); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java index 642df36d535f..1b9b95889e05 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java @@ -25,6 +25,7 @@ import java.util.stream.Collectors; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.locking.LockProcedure; @@ -174,7 +175,7 @@ List getLocks() { addToLockedResources(lockedResources, regionLocks, Function.identity(), LockedResourceType.REGION); addToLockedResources(lockedResources, peerLocks, Function.identity(), LockedResourceType.PEER); - addToLockedResources(lockedResources, ImmutableMap.of(TableName.META_TABLE_NAME, metaLock), + addToLockedResources(lockedResources, ImmutableMap.of(MetaTableName.getInstance(), metaLock), tn -> tn.getNameAsString(), LockedResourceType.META); addToLockedResources(lockedResources, globalLocks, Function.identity(), LockedResourceType.GLOBAL); @@ -236,7 +237,7 @@ public String toString() { .append("tableLocks", filterUnlocked(tableLocks)) .append("regionLocks", filterUnlocked(regionLocks)) .append("peerLocks", filterUnlocked(peerLocks)) - .append("metaLocks", filterUnlocked(ImmutableMap.of(TableName.META_TABLE_NAME, metaLock))) + .append("metaLocks", filterUnlocked(ImmutableMap.of(MetaTableName.getInstance(), metaLock))) .append("globalLocks", filterUnlocked(globalLocks)).build(); } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java index ef11e68217a5..4adf36efc7f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java @@ -20,7 +20,7 @@ import java.io.IOException; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseIOException; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; @@ -231,7 +231,7 @@ public void toStringClassDetails(StringBuilder sb) { } private boolean prepareTruncate() throws IOException { - if (getTableName().equals(TableName.META_TABLE_NAME)) { + if (getTableName().equals(MetaTableName.getInstance())) { throw new IOException("Can't truncate region in catalog tables"); } return true; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index fdfea375e096..00f15239bc1c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HBaseRpcServicesBase; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.MultiActionResultTooLarge; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -1925,7 +1926,7 @@ public OpenRegionResponse openRegion(final 
RpcController controller, tableName = ProtobufUtil.toTableName(ri.getTableName()); } } - if (!TableName.META_TABLE_NAME.equals(tableName)) { + if (!MetaTableName.getInstance().equals(tableName)) { throw new ServiceException(ie); } // We are assigning meta, wait a little for regionserver to finish initialization. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java index 8bf32baada22..ebdd54a081d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.ClientMetaTableAccessor.QueryType; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Get; @@ -192,7 +193,7 @@ public static ReplicationBarrierResult getReplicationBarrierResult(Connection co .addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER) .addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions().setReversed(true) .setCaching(10); - try (Table table = conn.getTable(TableName.META_TABLE_NAME); + try (Table table = conn.getTable(MetaTableName.getInstance()); ResultScanner scanner = table.getScanner(scan)) { for (Result result;;) { result = scanner.next(); @@ -215,7 +216,7 @@ public static ReplicationBarrierResult getReplicationBarrierResult(Connection co public static long[] getReplicationBarriers(Connection conn, byte[] regionName) throws IOException { - try (Table table = conn.getTable(TableName.META_TABLE_NAME)) { + try (Table table = conn.getTable(MetaTableName.getInstance())) { Result result = table.get(new 
Get(regionName) .addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER) .readAllVersions()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java index 57d156ab1c2e..d90d671feffc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -544,7 +545,7 @@ public AuthResult permissionGranted(String request, User user, Action permReques TableName tableName, Map> families) { // 1. All users need read access to hbase:meta table. // this is a very common operation, so deal with it quickly. 
- if (TableName.META_TABLE_NAME.equals(tableName)) { + if (MetaTableName.getInstance().equals(tableName)) { if (permRequest == Action.READ) { return AuthResult.allow(request, "All users allowed", user, permRequest, tableName, families); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java index 4d6f57e22edc..7c0dbffb1330 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java @@ -65,6 +65,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.AsyncAdmin; @@ -660,21 +661,21 @@ private int getRegionIndex(List> startEndKeys, byte[] key) private void checkRegionIndexValid(int idx, List> startEndKeys, TableName tableName) throws IOException { if (idx < 0) { - throw new IOException("The first region info for table " + tableName - + " can't be found in hbase:meta.Please use hbck tool to fix it first."); + throw new IOException("The first region info for table " + tableName + " can't be found in " + + MetaTableName.getInstance() + ". Please use hbck tool to fix it" + " first."); } else if ( (idx == startEndKeys.size() - 1) && !Bytes.equals(startEndKeys.get(idx).getSecond(), HConstants.EMPTY_BYTE_ARRAY) ) { - throw new IOException("The last region info for table " + tableName - + " can't be found in hbase:meta.Please use hbck tool to fix it first."); + throw new IOException("The last region info for table " + tableName + " can't be found in " + + MetaTableName.getInstance() + ". 
Please use hbck tool to fix it" + " first."); } else if ( idx + 1 < startEndKeys.size() && !(Bytes.compareTo(startEndKeys.get(idx).getSecond(), startEndKeys.get(idx + 1).getFirst()) == 0) ) { throw new IOException("The endkey of one region for table " + tableName - + " is not equal to the startkey of the next region in hbase:meta." - + "Please use hbck tool to fix it first."); + + " is not equal to the startkey of the next region in " + MetaTableName.getInstance() + "." + + " Please use hbck tool to fix it first."); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 75bf721ef41e..e3c13f92a017 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -46,6 +46,7 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -147,20 +148,20 @@ public static TableDescriptor tryUpdateAndGetMetaTableDescriptor(Configuration c FileSystem fs, Path rootdir) throws IOException { // see if we already have meta descriptor on fs. Write one if not. 
Optional> opt = getTableDescriptorFromFs(fs, - CommonFSUtils.getTableDir(rootdir, TableName.META_TABLE_NAME), false); + CommonFSUtils.getTableDir(rootdir, MetaTableName.getInstance()), false); if (opt.isPresent()) { return opt.get().getSecond(); } TableDescriptorBuilder builder = createMetaTableDescriptorBuilder(conf); TableDescriptor td = StoreFileTrackerFactory.updateWithTrackerConfigs(conf, builder.build()); - LOG.info("Creating new hbase:meta table descriptor {}", td); + LOG.info("Creating new {} table descriptor {}", MetaTableName.getInstance(), td); TableName tableName = td.getTableName(); Path tableDir = CommonFSUtils.getTableDir(rootdir, tableName); Path p = writeTableDescriptor(fs, td, tableDir, null); if (p == null) { - throw new IOException("Failed update hbase:meta table descriptor"); + throw new IOException("Failed update " + MetaTableName.getInstance() + " table descriptor"); } - LOG.info("Updated hbase:meta table descriptor to {}", p); + LOG.info("Updated {} table descriptor to {}", MetaTableName.getInstance(), p); return td; } @@ -198,7 +199,7 @@ private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Con // TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now // the META table data goes to File mode BC only. Test how that affect the system. If too much, // we have to rethink about adding back the setCacheDataInL1 for META table CFs. 
- return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) + return TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()) .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY) .setMaxVersions( conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS)) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index c3eafa7c11d1..9eff10a0b160 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -81,6 +81,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -540,7 +541,7 @@ public void run() { connection = ConnectionFactory.createConnection(getConf()); admin = connection.getAdmin(); - meta = connection.getTable(TableName.META_TABLE_NAME); + meta = connection.getTable(MetaTableName.getInstance()); status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS, Option.MASTER, Option.BACKUP_MASTERS, Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION)); } @@ -660,17 +661,19 @@ public int onlineConsistencyRepair() throws IOException, KeeperException, Interr reportUnknownServers(); // Check if hbase:meta is found only once and in the right place if (!checkMetaRegion()) { - String errorMsg = "hbase:meta table is not consistent. "; + String errorMsg = MetaTableName.getInstance() + " table is not consistent. "; if (shouldFixAssignments()) { - errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state."; + errorMsg += "HBCK will try fixing it. 
Rerun once " + MetaTableName.getInstance() + + " is back " + "to consistent state."; } else { - errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency."; + errorMsg += "Run HBCK with proper fix options to fix " + MetaTableName.getInstance() + + " inconsistency."; } errors.reportError(errorMsg + " Exiting..."); return -2; } // Not going with further consistency check for tables when hbase:meta itself is not consistent. - LOG.info("Loading regionsinfo from the hbase:meta table"); + LOG.info("Loading regionsinfo from the {} table", MetaTableName.getInstance()); boolean success = loadMetaEntries(); if (!success) return -1; @@ -1219,8 +1222,8 @@ private boolean sidelineFile(FileSystem fs, Path hbaseRoot, Path path) throws IO * TODO -- need to add tests for this. */ private void reportEmptyMetaCells() { - errors.print("Number of empty REGIONINFO_QUALIFIER rows in hbase:meta: " - + emptyRegionInfoQualifiers.size()); + errors.print("Number of empty REGIONINFO_QUALIFIER rows in " + MetaTableName.getInstance() + + ": " + emptyRegionInfoQualifiers.size()); if (details) { for (Result r : emptyRegionInfoQualifiers) { errors.print(" " + r); @@ -1371,7 +1374,7 @@ private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName, */ public void fixEmptyMetaCells() throws IOException { if (shouldFixEmptyMetaCells() && !emptyRegionInfoQualifiers.isEmpty()) { - LOG.info("Trying to fix empty REGIONINFO_QUALIFIER hbase:meta rows."); + LOG.info("Trying to fix empty REGIONINFO_QUALIFIER {} rows.", MetaTableName.getInstance()); for (Result region : emptyRegionInfoQualifiers) { deleteMetaRegion(region.getRow()); errors.getErrorList().remove(ERROR_CODE.EMPTY_META_CELL); @@ -1574,8 +1577,8 @@ private void loadTableStates() throws IOException { // Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it // has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in // meantime. 
- this.tableStates.put(TableName.META_TABLE_NAME, - new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED)); + this.tableStates.put(MetaTableName.getInstance(), + new TableState(MetaTableName.getInstance(), TableState.State.ENABLED)); } /** @@ -1604,7 +1607,7 @@ public void loadHdfsRegionDirs() throws IOException, InterruptedException { TableName tableName = CommonFSUtils.getTableName(path); if ( (!checkMetaOnly && isTableIncluded(tableName)) - || tableName.equals(TableName.META_TABLE_NAME) + || tableName.equals(MetaTableName.getInstance()) ) { tableDirs.add(fs.getFileStatus(path)); } @@ -1649,7 +1652,7 @@ public void loadHdfsRegionDirs() throws IOException, InterruptedException { */ private boolean recordMetaRegion() throws IOException { List locs; - try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) { + try (RegionLocator locator = connection.getRegionLocator(MetaTableName.getInstance())) { locs = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true); } if (locs == null || locs.isEmpty()) { @@ -2019,9 +2022,11 @@ void closeRegion(HbckRegionInfo hi) throws IOException, InterruptedException { } RegionInfo hri = h.getRegion(); if (hri == null) { - LOG.warn("Unable to close region " + hi.getRegionNameAsString() - + " because hbase:meta had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":" - + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier value."); + LOG.warn( + "Unable to close region " + hi.getRegionNameAsString() + + " because {} had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":" + + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier value.", + MetaTableName.getInstance()); continue; } // close the region -- close files and remove assignment @@ -2140,8 +2145,9 @@ else if (!inMeta && !inHdfs && !isDeployed) { assert false : "Entry for region with no data"; } else if (!inMeta && !inHdfs && isDeployed) { errors.reportError(ERROR_CODE.NOT_IN_META_HDFS, - "Region " + 
descriptiveName + ", key=" + key + ", not on HDFS or in hbase:meta but " - + "deployed on " + Joiner.on(", ").join(hbi.getDeployedOn())); + "Region " + descriptiveName + ", key=" + key + ", not on HDFS or in " + + MetaTableName.getInstance() + " but " + "deployed on " + + Joiner.on(", ").join(hbi.getDeployedOn())); if (shouldFixAssignments()) { undeployRegions(hbi); } @@ -2155,8 +2161,9 @@ else if (!inMeta && !inHdfs && !isDeployed) { + " got merge recently, its file(s) will be cleaned by CatalogJanitor later"); return; } - errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED, "Region " + descriptiveName - + " on HDFS, but not listed in hbase:meta " + "or deployed on any region server"); + errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + "Region " + descriptiveName + " on HDFS, but not listed in " + MetaTableName.getInstance() + + " or deployed on any region server"); // restore region consistency of an adopted orphan if (shouldFixMeta()) { if (!hbi.isHdfsRegioninfoPresent()) { @@ -2196,7 +2203,7 @@ else if (!inMeta && !inHdfs && !isDeployed) { } } } - LOG.info("Patching hbase:meta with .regioninfo: " + hbi.getHdfsHRI()); + LOG.info("Patching {} with .regioninfo: " + hbi.getHdfsHRI(), MetaTableName.getInstance()); int numReplicas = admin.getDescriptor(hbi.getTableName()).getRegionReplication(); HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(), admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(), @@ -2224,7 +2231,8 @@ else if (!inMeta && !inHdfs && !isDeployed) { return; } - LOG.info("Patching hbase:meta with with .regioninfo: " + hbi.getHdfsHRI()); + LOG.info("Patching {} with with .regioninfo: " + hbi.getHdfsHRI(), + MetaTableName.getInstance()); int numReplicas = admin.getDescriptor(hbi.getTableName()).getRegionReplication(); HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(), admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(), 
@@ -2301,9 +2309,9 @@ else if (!inMeta && !inHdfs && !isDeployed) { } } else if (inMeta && inHdfs && isMultiplyDeployed) { errors.reportError(ERROR_CODE.MULTI_DEPLOYED, - "Region " + descriptiveName + " is listed in hbase:meta on region server " - + hbi.getMetaEntry().regionServer + " but is multiply assigned to region servers " - + Joiner.on(", ").join(hbi.getDeployedOn())); + "Region " + descriptiveName + " is listed in " + MetaTableName.getInstance() + + " on region server " + hbi.getMetaEntry().regionServer + " but is multiply assigned" + + " to region servers " + Joiner.on(", ").join(hbi.getDeployedOn())); // If we are trying to fix the errors if (shouldFixAssignments()) { errors.print("Trying to fix assignment error..."); @@ -2313,8 +2321,8 @@ else if (!inMeta && !inHdfs && !isDeployed) { } } else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) { errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, - "Region " + descriptiveName + " listed in hbase:meta on region server " - + hbi.getMetaEntry().regionServer + " but found on region server " + "Region " + descriptiveName + " listed in " + MetaTableName.getInstance() + + " on region server " + hbi.getMetaEntry().regionServer + " but found on region server " + hbi.getDeployedOn().get(0)); // If we are trying to fix the errors if (shouldFixAssignments()) { @@ -2599,7 +2607,7 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept metaRegions.put(value.getReplicaId(), value); } } - int metaReplication = admin.getDescriptor(TableName.META_TABLE_NAME).getRegionReplication(); + int metaReplication = admin.getDescriptor(MetaTableName.getInstance()).getRegionReplication(); boolean noProblem = true; // There will be always entries in regionInfoMap corresponding to hbase:meta & its replicas // Check the deployed servers. It should be exactly one server for each replica. 
@@ -2614,11 +2622,12 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept if (servers.isEmpty()) { assignMetaReplica(i); } else if (servers.size() > 1) { - errors.reportError(ERROR_CODE.MULTI_META_REGION, "hbase:meta, replicaId " - + metaHbckRegionInfo.getReplicaId() + " is found on more than one region."); + errors.reportError(ERROR_CODE.MULTI_META_REGION, + MetaTableName.getInstance() + ", replicaId " + metaHbckRegionInfo.getReplicaId() + + " is found on more than one region."); if (shouldFixAssignments()) { - errors.print("Trying to fix a problem with hbase:meta, replicaId " - + metaHbckRegionInfo.getReplicaId() + ".."); + errors.print("Trying to fix a problem with " + MetaTableName.getInstance() + + ", replicaId " + metaHbckRegionInfo.getReplicaId() + ".."); setShouldRerun(); // try fix it (treat is a dupe assignment) HBaseFsckRepair.fixMultiAssignment(connection, @@ -2631,11 +2640,11 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept for (Map.Entry entry : metaRegions.entrySet()) { noProblem = false; errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED, - "hbase:meta replicas are deployed in excess. Configured " + metaReplication + ", deployed " - + metaRegions.size()); + MetaTableName.getInstance() + " replicas are deployed in excess. 
Configured " + + metaReplication + ", deployed " + metaRegions.size()); if (shouldFixAssignments()) { - errors.print( - "Trying to undeploy excess replica, replicaId: " + entry.getKey() + " of hbase:meta.."); + errors.print("Trying to undeploy excess replica, replicaId: " + entry.getKey() + " of " + + MetaTableName.getInstance() + ".."); setShouldRerun(); unassignMetaReplica(entry.getValue()); } @@ -2655,9 +2664,9 @@ private void unassignMetaReplica(HbckRegionInfo hi) private void assignMetaReplica(int replicaId) throws IOException, KeeperException, InterruptedException { errors.reportError(ERROR_CODE.NO_META_REGION, - "hbase:meta, replicaId " + replicaId + " is not found on any region."); + MetaTableName.getInstance() + ", replicaId " + replicaId + " is not found on any region."); if (shouldFixAssignments()) { - errors.print("Trying to fix a problem with hbase:meta.."); + errors.print("Trying to fix a problem with " + MetaTableName.getInstance() + ".."); setShouldRerun(); // try to fix it (treat it as unassigned region) RegionInfo h = RegionReplicaUtil @@ -2693,7 +2702,7 @@ public boolean visit(Result result) throws IOException { if (rl == null) { emptyRegionInfoQualifiers.add(result); errors.reportError(ERROR_CODE.EMPTY_META_CELL, - "Empty REGIONINFO_QUALIFIER found in hbase:meta"); + "Empty REGIONINFO_QUALIFIER found in " + MetaTableName.getInstance()); return true; } ServerName sn = null; @@ -2703,7 +2712,7 @@ public boolean visit(Result result) throws IOException { ) { emptyRegionInfoQualifiers.add(result); errors.reportError(ERROR_CODE.EMPTY_META_CELL, - "Empty REGIONINFO_QUALIFIER found in hbase:meta"); + "Empty REGIONINFO_QUALIFIER found in " + MetaTableName.getInstance()); return true; } RegionInfo hri = rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegion(); @@ -2731,7 +2740,8 @@ public boolean visit(Result result) throws IOException { } else if (previous.getMetaEntry() == null) { previous.setMetaEntry(m); } else { - throw new IOException("Two 
entries in hbase:meta are same " + previous); + throw new IOException( + "Two entries in " + MetaTableName.getInstance() + " are same " + previous); } } List mergeParents = CatalogFamilyFormat.getMergeRegions(result.rawCells()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java index 6ead66c16d9e..e3516ed855b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java @@ -27,8 +27,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.AsyncClusterConnection; @@ -149,7 +149,7 @@ public static void closeRegionSilentlyAndWait(Connection connection, ServerName public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, RegionInfo hri, Collection servers, int numReplicas) throws IOException { Connection conn = ConnectionFactory.createConnection(conf); - Table meta = conn.getTable(TableName.META_TABLE_NAME); + Table meta = conn.getTable(MetaTableName.getInstance()); Put put = MetaTableAccessor.makePutFromRegionInfo(hri); if (numReplicas > 1) { Random rand = ThreadLocalRandom.current(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java index c1f98edd75ab..7321d7da178d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java @@ -56,6 +56,7 @@ import 
org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.client.Admin; @@ -586,13 +587,13 @@ private void unloadRegions(ServerName server, List regionServers, // For isolating hbase:meta, it should move explicitly in Ack mode, // hence the forceMoveRegionByAck = true. if (!metaSeverName.equals(server)) { - LOG.info("Region of hbase:meta " + metaRegionInfo.getEncodedName() + " is on server " - + metaSeverName + " moving to " + server); + LOG.info("Region of {} {} is on server {} moving to {}", MetaTableName.getInstance(), + metaRegionInfo.getEncodedName(), metaSeverName, server); submitRegionMovesWhileUnloading(metaSeverName, Collections.singletonList(server), movedRegions, Collections.singletonList(metaRegionInfo), true); } else { - LOG.info("Region of hbase:meta " + metaRegionInfo.getEncodedName() + " already exists" - + " on server : " + server); + LOG.info("Region of {} {} already exists on server: {}", MetaTableName.getInstance(), + metaRegionInfo.getEncodedName(), server); } isolateRegionInfoList.add(RegionInfoBuilder.FIRST_META_REGIONINFO); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java index b8f095eb03df..36ecfbd1a247 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hbase.wal; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; - import java.io.IOException; import java.io.InterruptedIOException; import java.util.HashMap; @@ -35,6 +33,7 @@ 
import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.ExtendedCell; import org.apache.hadoop.hbase.MetaCellComparator; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -78,7 +77,7 @@ public BoundedRecoveredHFilesOutputSink(WALSplitter walSplitter, void append(RegionEntryBuffer buffer) throws IOException { Map> familyCells = new HashMap<>(); Map familySeqIds = new HashMap<>(); - boolean isMetaTable = buffer.tableName.equals(META_TABLE_NAME); + boolean isMetaTable = buffer.tableName.equals(MetaTableName.getInstance()); // First iterate all Cells to find which column families are present and to stamp Cell with // sequence id. for (WAL.Entry entry : buffer.entryBuffer) { diff --git a/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp b/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp index b965241afe2a..baaf6b68b153 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp @@ -20,6 +20,7 @@ <%@ page contentType="text/html;charset=UTF-8" import="java.util.*" + import="org.apache.hadoop.hbase.MetaTableName" import="org.apache.hadoop.hbase.NamespaceDescriptor" import="org.apache.hadoop.hbase.TableName" import="org.apache.hadoop.hbase.master.HMaster" @@ -56,7 +57,7 @@ <%= frags.get(tableName.getNameAsString()) != null ? 
frags.get(tableName.getNameAsString()) + "%" : "n/a" %> <% } %> <% String description = null; - if (tableName.equals(TableName.META_TABLE_NAME)){ + if (tableName.equals(MetaTableName.getInstance())){ description = "The hbase:meta table holds references to all User Table regions."; } else if (tableName.equals(CanaryTool.DEFAULT_WRITE_TABLE_NAME)){ description = "The hbase:canary table is used to sniff the write availability of" diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index d88d968e199f..d838cd3c50bd 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -41,6 +41,7 @@ import="org.apache.hadoop.hbase.ServerMetrics" import="org.apache.hadoop.hbase.ServerName" import="org.apache.hadoop.hbase.Size" + import="org.apache.hadoop.hbase.MetaTableName" import="org.apache.hadoop.hbase.TableName" import="org.apache.hadoop.hbase.client.AsyncAdmin" import="org.apache.hadoop.hbase.client.AsyncConnection" @@ -196,7 +197,7 @@ boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); boolean readOnly = !InfoServer.canUserModifyUI(request, getServletContext(), conf); int numMetaReplicas = - master.getTableDescriptors().get(TableName.META_TABLE_NAME).getRegionReplication(); + master.getTableDescriptors().get(MetaTableName.getInstance()).getRegionReplication(); Map frags = null; if (showFragmentation) { frags = FSUtils.getTableFragmentation(master); @@ -317,7 +318,7 @@
<% //Meta table. - if(fqtn.equals(TableName.META_TABLE_NAME.getNameAsString())) { %> + if(fqtn.equals(MetaTableName.getInstance().getNameAsString())) { %>

Table Regions

@@ -653,7 +654,7 @@
- +
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java index f56fc57dd2d9..018d4e1182f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseClusterInterface.java @@ -388,7 +388,7 @@ public boolean restoreClusterMetrics(ClusterMetrics desiredStatus) throws IOExce * Get the ServerName of region server serving the first hbase:meta region */ public ServerName getServerHoldingMeta() throws IOException { - return getServerHoldingRegion(TableName.META_TABLE_NAME, + return getServerHoldingRegion(MetaTableName.getInstance(), RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java index c885d9868844..00570b60b693 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java @@ -902,7 +902,7 @@ public SingleProcessHBaseCluster startMiniHBaseCluster(StartTestingClusterOption // Populate the master address configuration from mini cluster configuration. 
conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c)); // Don't leave here till we've done a successful scan of the hbase:meta - try (Table t = getConnection().getTable(TableName.META_TABLE_NAME); + try (Table t = getConnection().getTable(MetaTableName.getInstance()); ResultScanner s = t.getScanner(new Scan())) { for (;;) { if (s.next() == null) { @@ -1025,7 +1025,7 @@ public void restartHBaseCluster(StartTestingClusterOption option) option.getMasterClass(), option.getRsClass()); // Don't leave here till we've done a successful scan of the hbase:meta Connection conn = ConnectionFactory.createConnection(this.conf); - Table t = conn.getTable(TableName.META_TABLE_NAME); + Table t = conn.getTable(MetaTableName.getInstance()); ResultScanner s = t.getScanner(new Scan()); while (s.next() != null) { // do nothing @@ -2169,7 +2169,7 @@ public String checksumRows(final Table table) throws Exception { */ public List createMultiRegionsInMeta(final Configuration conf, final TableDescriptor htd, byte[][] startKeys) throws IOException { - try (Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table meta = getConnection().getTable(MetaTableName.getInstance())) { Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR); List newRegions = new ArrayList<>(startKeys.length); MetaTableAccessor.updateTableState(getConnection(), htd.getTableName(), @@ -2517,7 +2517,7 @@ public void process(WatchedEvent watchedEvent) { monitor.close(); if (checkStatus) { - getConnection().getTable(TableName.META_TABLE_NAME).close(); + getConnection().getTable(MetaTableName.getInstance()).close(); } } @@ -3042,7 +3042,7 @@ public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOExce * Waith until all system table's regions get assigned */ public void waitUntilAllSystemRegionsAssigned() throws IOException { - waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); + waitUntilAllRegionsAssigned(MetaTableName.getInstance()); } /** @@ -3055,7 +3055,7 @@ 
public void waitUntilAllSystemRegionsAssigned() throws IOException { public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout) throws IOException { if (!TableName.isMetaTableName(tableName)) { - try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) { + try (final Table meta = getConnection().getTable(MetaTableName.getInstance())) { LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = " + timeout + "ms"); waitFor(timeout, 200, true, new ExplainingPredicate() { @@ -3275,7 +3275,7 @@ public Table createRandomTable(TableName tableName, final Collection fam Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions); if (hbaseCluster != null) { - getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME); + getMiniHBaseCluster().flushcache(MetaTableName.getInstance()); } BufferedMutator mutator = getConnection().getBufferedMutator(tableName); @@ -3356,7 +3356,7 @@ public static void waitForHostPort(String host, int port) throws IOException { } public static int getMetaRSPort(Connection connection) throws IOException { - try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) { + try (RegionLocator locator = connection.getRegionLocator(MetaTableName.getInstance())) { return locator.getRegionLocation(Bytes.toBytes("")).getPort(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java index 2a438461b4e7..b90831e32c9f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseMetaEdit.java @@ -68,11 +68,11 @@ public void after() throws Exception { // make sure that with every possible way, we get the same meta table descriptor. 
private TableDescriptor getMetaDescriptor() throws TableNotFoundException, IOException { Admin admin = UTIL.getAdmin(); - TableDescriptor get = admin.getDescriptor(TableName.META_TABLE_NAME); + TableDescriptor get = admin.getDescriptor(MetaTableName.getInstance()); TableDescriptor list = admin.listTableDescriptors(true).stream().filter(td -> td.isMetaTable()).findAny().get(); TableDescriptor listByName = - admin.listTableDescriptors(Collections.singletonList(TableName.META_TABLE_NAME)).get(0); + admin.listTableDescriptors(Collections.singletonList(MetaTableName.getInstance())).get(0); TableDescriptor listByNs = admin.listTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME).stream() .filter(td -> td.isMetaTable()).findAny().get(); @@ -91,7 +91,7 @@ private TableDescriptor getMetaDescriptor() throws TableNotFoundException, IOExc @Test public void testEditMeta() throws IOException { Admin admin = UTIL.getAdmin(); - admin.tableExists(TableName.META_TABLE_NAME); + admin.tableExists(MetaTableName.getInstance()); TableDescriptor originalDescriptor = getMetaDescriptor(); ColumnFamilyDescriptor cfd = originalDescriptor.getColumnFamily(HConstants.CATALOG_FAMILY); int oldVersions = cfd.getMaxVersions(); @@ -100,11 +100,11 @@ public void testEditMeta() throws IOException { .setConfiguration(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING, DataBlockEncoding.ROW_INDEX_V1.toString()) .build(); - admin.modifyColumnFamily(TableName.META_TABLE_NAME, cfd); + admin.modifyColumnFamily(MetaTableName.getInstance(), cfd); byte[] extraColumnFamilyName = Bytes.toBytes("xtra"); ColumnFamilyDescriptor newCfd = ColumnFamilyDescriptorBuilder.newBuilder(extraColumnFamilyName).build(); - admin.addColumnFamily(TableName.META_TABLE_NAME, newCfd); + admin.addColumnFamily(MetaTableName.getInstance(), newCfd); TableDescriptor descriptor = getMetaDescriptor(); // Assert new max versions is == old versions plus 1. 
assertEquals(oldVersions + 1, @@ -126,11 +126,11 @@ public void testEditMeta() throws IOException { assertEquals(encoding, DataBlockEncoding.ROW_INDEX_V1.toString()); assertTrue(r.getStore(extraColumnFamilyName) != null); // Assert we can't drop critical hbase:meta column family but we can drop any other. - admin.deleteColumnFamily(TableName.META_TABLE_NAME, newCfd.getName()); + admin.deleteColumnFamily(MetaTableName.getInstance(), newCfd.getName()); descriptor = getMetaDescriptor(); assertTrue(descriptor.getColumnFamily(newCfd.getName()) == null); try { - admin.deleteColumnFamily(TableName.META_TABLE_NAME, HConstants.CATALOG_FAMILY); + admin.deleteColumnFamily(MetaTableName.getInstance(), HConstants.CATALOG_FAMILY); fail("Should not reach here"); } catch (HBaseIOException hioe) { assertTrue(hioe.getMessage().contains("Delete of hbase:meta")); @@ -144,7 +144,7 @@ public void testEditMeta() throws IOException { @Test public void testAlterMetaWithReadOnly() throws IOException { Admin admin = UTIL.getAdmin(); - TableDescriptor origMetaTableDesc = admin.getDescriptor(TableName.META_TABLE_NAME); + TableDescriptor origMetaTableDesc = admin.getDescriptor(MetaTableName.getInstance()); assertFalse(origMetaTableDesc.isReadOnly()); TableDescriptor newTD = TableDescriptorBuilder.newBuilder(origMetaTableDesc).setReadOnly(true).build(); @@ -152,7 +152,7 @@ public void testAlterMetaWithReadOnly() throws IOException { admin.modifyTable(newTD); fail("Meta table can't be set as read only"); } catch (Exception e) { - assertFalse(admin.getDescriptor(TableName.META_TABLE_NAME).isReadOnly()); + assertFalse(admin.getDescriptor(MetaTableName.getInstance()).isReadOnly()); } // Create a table to check region assignment & meta operation diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java index a894bbcd0aeb..cf0ae4f7eb80 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java @@ -91,7 +91,7 @@ public void testGetMasterInfoPort() throws Exception { @Test public void testInfoServersRedirect() throws Exception { // give the cluster time to start up - UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close(); + UTIL.getConnection().getTable(MetaTableName.getInstance()).close(); int port = UTIL.getHBaseCluster().getMaster().getInfoServer().getPort(); assertContainsContent(new URL("http://localhost:" + port + "/index.html"), "master.jsp"); assertContainsContent(new URL("http://localhost:" + port + "/master-status"), "master.jsp"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java index 91e040f1db13..e1c76567be7f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java @@ -87,7 +87,7 @@ private void multiMutate(byte[] row, List mutations) throws IOExceptio } } MutateRowsRequest request = builder.build(); - AsyncTable table = UTIL.getAsyncConnection().getTable(TableName.META_TABLE_NAME); + AsyncTable table = UTIL.getAsyncConnection().getTable(MetaTableName.getInstance()); CompletableFuture future = table. 
coprocessorService(MultiRowMutationService::newStub, (stub, controller, done) -> stub.mutateRows(controller, request, done), row); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java index 0005a2becde7..67110aca8181 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java @@ -117,7 +117,7 @@ public void verifyReservedNS() throws IOException { assertEquals(2, admin.listNamespaceDescriptors().length); // verify existence of system tables - Set systemTables = Sets.newHashSet(TableName.META_TABLE_NAME); + Set systemTables = Sets.newHashSet(MetaTableName.getInstance()); List descs = admin.listTableDescriptorsByNamespace( Bytes.toBytes(NamespaceDescriptor.SYSTEM_NAMESPACE.getName())); assertEquals(systemTables.size(), descs.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerInternalsTracing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerInternalsTracing.java index 69e6e4ac83df..16ede9b98c60 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerInternalsTracing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerInternalsTracing.java @@ -77,7 +77,7 @@ public Setup(final Supplier testingUtilSupplier) { @Override protected void before() throws Throwable { final HBaseTestingUtil testingUtil = testingUtilSupplier.get(); - testingUtil.waitTableAvailable(TableName.META_TABLE_NAME); + testingUtil.waitTableAvailable(MetaTableName.getInstance()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java index 031dff736c84..77fc747178fa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.regionserver.Region; @@ -49,7 +50,7 @@ public abstract class AbstractTestRegionLocator { protected static void startClusterAndCreateTable() throws Exception { UTIL.startMiniCluster(3); - HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, REGION_REPLICATION); + HBaseTestingUtil.setReplicas(UTIL.getAdmin(), MetaTableName.getInstance(), REGION_REPLICATION); TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(REGION_REPLICATION) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build(); @@ -69,7 +70,7 @@ protected static void startClusterAndCreateTable() throws Exception { @After public void tearDownAfterTest() throws IOException { clearCache(TABLE_NAME); - clearCache(TableName.META_TABLE_NAME); + clearCache(MetaTableName.getInstance()); } private byte[] getStartKey(int index) { @@ -171,7 +172,7 @@ private void assertMetaRegionLocation(HRegionLocation loc, int replicaId) { assertArrayEquals(HConstants.EMPTY_END_ROW, region.getEndKey()); assertEquals(replicaId, region.getReplicaId()); ServerName expected = - findRegionLocation(TableName.META_TABLE_NAME, region.getStartKey(), replicaId); + findRegionLocation(MetaTableName.getInstance(), region.getStartKey(), replicaId); assertEquals(expected, loc.getServerName()); } @@ -184,19 +185,19 @@ private void assertMetaRegionLocations(List locs) { @Test public void testMeta() throws IOException { - assertMetaStartOrEndKeys(getStartKeys(TableName.META_TABLE_NAME)); - assertMetaStartOrEndKeys(getEndKeys(TableName.META_TABLE_NAME)); - Pair startEndKeys = 
getStartEndKeys(TableName.META_TABLE_NAME); + assertMetaStartOrEndKeys(getStartKeys(MetaTableName.getInstance())); + assertMetaStartOrEndKeys(getEndKeys(MetaTableName.getInstance())); + Pair startEndKeys = getStartEndKeys(MetaTableName.getInstance()); assertMetaStartOrEndKeys(startEndKeys.getFirst()); assertMetaStartOrEndKeys(startEndKeys.getSecond()); for (int replicaId = 0; replicaId < REGION_REPLICATION; replicaId++) { assertMetaRegionLocation( - getRegionLocation(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, replicaId), + getRegionLocation(MetaTableName.getInstance(), HConstants.EMPTY_START_ROW, replicaId), replicaId); } assertMetaRegionLocations( - getRegionLocations(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW)); - assertMetaRegionLocations(getAllRegionLocations(TableName.META_TABLE_NAME)); + getRegionLocations(MetaTableName.getInstance(), HConstants.EMPTY_START_ROW)); + assertMetaRegionLocations(getAllRegionLocations(MetaTableName.getInstance())); } protected abstract byte[][] getStartKeys(TableName tableName) throws IOException; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java index 2b6b3d017fcb..7b2aa41a2202 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java @@ -24,9 +24,9 @@ import java.util.HashSet; import java.util.Set; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartTestingClusterOption; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil; @@ -58,7 
+58,7 @@ protected static void startCluster() throws Exception { .numAlwaysStandByMasters(1).numMasters(1).numRegionServers(REGIONSERVERS_COUNT).build(); TEST_UTIL.startMiniCluster(option); Admin admin = TEST_UTIL.getAdmin(); - HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), 3); AssignmentManager am = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager(); Set sns = new HashSet(); ServerName hbaseMetaServerName = am.getRegionStates() diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java index a98ae217e3c2..25953227d8df 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java @@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; @@ -47,7 +48,7 @@ static void waitUntilAllMetaReplicasAreReady(HBaseTestingUtil util, ConnectionRe throws IOException { Configuration conf = util.getConfiguration(); int regionReplicaCount = - util.getAdmin().getDescriptor(TableName.META_TABLE_NAME).getRegionReplication(); + util.getAdmin().getDescriptor(MetaTableName.getInstance()).getRegionReplication(); Waiter.waitFor(conf, conf.getLong("hbase.client.sync.wait.timeout.msec", 60000), 200, true, new ExplainingPredicate() { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index 68a841b7d671..986701050453 100644 
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; @@ -64,7 +65,7 @@ public class TestAdmin extends TestAdminBase { @Test public void testListTableDescriptors() throws IOException { TableDescriptor metaTableDescriptor = - TEST_UTIL.getAdmin().getDescriptor(TableName.META_TABLE_NAME); + TEST_UTIL.getAdmin().getDescriptor(MetaTableName.getInstance()); List tableDescriptors = TEST_UTIL.getAdmin().listTableDescriptors(true); assertTrue(tableDescriptors.contains(metaTableDescriptor)); tableDescriptors = TEST_UTIL.getAdmin().listTableDescriptors(false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index 2cf088fa6a82..a6dd60781bd0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.ClusterMetrics.Option; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.TableExistsException; @@ -84,13 +85,13 @@ public class TestAdmin2 extends TestAdminBase { public void testCreateBadTables() throws IOException { String msg = null; try { - ADMIN.createTable(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build()); + 
ADMIN.createTable(TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).build()); } catch (TableExistsException e) { msg = e.toString(); } assertTrue("Unexcepted exception message " + msg, msg != null && msg.startsWith(TableExistsException.class.getName()) - && msg.contains(TableName.META_TABLE_NAME.getNameAsString())); + && msg.contains(MetaTableName.getInstance().getNameAsString())); // Now try and do concurrent creation with a bunch of threads. TableDescriptor tableDescriptor = @@ -456,7 +457,7 @@ private void setUpforLogRolling() { private HRegionServer startAndWriteData(TableName tableName, byte[] value) throws IOException, InterruptedException { // When the hbase:meta table can be opened, the region servers are running - TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close(); + TEST_UTIL.getConnection().getTable(MetaTableName.getInstance()).close(); // Create the test table and open it TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName) @@ -486,7 +487,7 @@ private HRegionServer startAndWriteData(TableName tableName, byte[] value) @Test public void testDisableCatalogTable() throws Exception { try { - ADMIN.disableTable(TableName.META_TABLE_NAME); + ADMIN.disableTable(MetaTableName.getInstance()); fail("Expected to throw ConstraintException"); } catch (ConstraintException e) { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java index bb0eb31d2549..66ac07cdf88f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import 
org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -54,7 +55,7 @@ public class TestAsyncAdminWithRegionReplicas extends TestAsyncAdminBase { @BeforeClass public static void setUpBeforeClass() throws Exception { TestAsyncAdminBase.setUpBeforeClass(); - HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), MetaTableName.getInstance(), 3); try (ConnectionRegistry registry = ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent())) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, registry); @@ -80,7 +81,7 @@ public void testMoveNonDefaultReplica() throws InterruptedException, ExecutionException, IOException { createTableWithDefaultConf(tableName, 3); testMoveNonDefaultReplica(tableName); - testMoveNonDefaultReplica(TableName.META_TABLE_NAME); + testMoveNonDefaultReplica(MetaTableName.getInstance()); } @Test @@ -138,11 +139,11 @@ public void testCloneTableSchema() throws IOException, InterruptedException, Exe @Test public void testGetTableRegions() throws InterruptedException, ExecutionException, IOException { - List metaRegions = admin.getRegions(TableName.META_TABLE_NAME).get(); + List metaRegions = admin.getRegions(MetaTableName.getInstance()).get(); assertEquals(3, metaRegions.size()); for (int i = 0; i < 3; i++) { RegionInfo metaRegion = metaRegions.get(i); - assertEquals(TableName.META_TABLE_NAME, metaRegion.getTable()); + assertEquals(MetaTableName.getInstance(), metaRegion.getTable()); assertEquals(i, metaRegion.getReplicaId()); } createTableWithDefaultConf(tableName, 3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java index e14cd32a3889..871590d732e7 
100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MatcherPredicate; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.MiniClusterRule; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.StartTestingClusterOption; @@ -105,7 +106,7 @@ private void assertInitialized() { protected void before() throws Throwable { final AsyncAdmin admin = connectionRule.getAsyncConnection().getAdmin(); testUtil = miniClusterRule.getTestingUtility(); - HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), 3); testUtil.waitUntilNoRegionsInTransition(); registry = ConnectionRegistryFactory.create(testUtil.getConfiguration(), User.getCurrent()); RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(testUtil, registry); @@ -163,7 +164,7 @@ public void test() throws Exception { TraceUtil.trace(() -> { try { - testLocator(miniClusterRule.getTestingUtility(), TableName.META_TABLE_NAME, + testLocator(miniClusterRule.getTestingUtility(), MetaTableName.getInstance(), new Locator() { @Override public void updateCachedLocationOnError(HRegionLocation loc, Throwable error) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java index 0bfbd18eb32f..4da4c87daba3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import 
org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; @@ -106,9 +107,9 @@ public static void setUp() throws Exception { admin.balancerSwitch(false, true); // Enable hbase:meta replication. - HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, NUM_OF_META_REPLICA); + HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), NUM_OF_META_REPLICA); TEST_UTIL.waitFor(30000, - () -> TEST_UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() + () -> TEST_UTIL.getMiniHBaseCluster().getRegions(MetaTableName.getInstance()).size() >= NUM_OF_META_REPLICA); SPLIT_KEYS = new byte[8][]; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java index 61dd87007c11..4a51b6d26465 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; @@ -37,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil; @@ -86,7 +86,8 @@ public void testSplitSwitch() throws Exception { final int rows = 
10000; TestAsyncRegionAdminApi.loadData(tableName, families, rows); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = + ASYNC_CONN.getTable(MetaTableName.getInstance()); List regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); int originalCount = regionLocations.size(); @@ -117,7 +118,8 @@ public void testMergeSwitch() throws Exception { byte[][] families = { FAMILY }; TestAsyncRegionAdminApi.loadData(tableName, families, 1000); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = + ASYNC_CONN.getTable(MetaTableName.getInstance()); List regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); int originalCount = regionLocations.size(); @@ -162,7 +164,8 @@ public void testMergeRegions() throws Exception { byte[][] splitRows = new byte[][] { Bytes.toBytes("3"), Bytes.toBytes("6") }; createTableWithDefaultConf(tableName, splitRows); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = + ASYNC_CONN.getTable(MetaTableName.getInstance()); List regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); RegionInfo regionA; @@ -242,7 +245,8 @@ private void splitTest(TableName tableName, int rowCount, boolean isSplitRegion, // create table createTableWithDefaultConf(tableName); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = + ASYNC_CONN.getTable(MetaTableName.getInstance()); List regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); assertEquals(1, regionLocations.size()); @@ -299,7 +303,8 @@ public void testTruncateRegion() throws Exception { final byte[][] bFamilies = new byte[][] { Bytes.toBytes(family1), Bytes.toBytes(family2) }; createTableWithDefaultConf(tableName, splitKeys, bFamilies); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable 
metaTable = + ASYNC_CONN.getTable(MetaTableName.getInstance()); List regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); RegionInfo regionToBeTruncated = regionLocations.get(0).getRegion(); @@ -333,7 +338,8 @@ public void testTruncateReplicaRegionNotAllowed() throws Exception { final byte[][] bFamilies = new byte[][] { Bytes.toBytes(family1), Bytes.toBytes(family2) }; createTableWithDefaultConf(tableName, 2, splitKeys, bFamilies); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = + ASYNC_CONN.getTable(MetaTableName.getInstance()); List regionLocations = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); RegionInfo primaryRegion = regionLocations.get(0).getRegion(); @@ -354,7 +360,7 @@ public void testTruncateReplicaRegionNotAllowed() throws Exception { @Test public void testTruncateRegionsMetaTableRegionsNotAllowed() throws Exception { - AsyncTableRegionLocator locator = ASYNC_CONN.getRegionLocator(META_TABLE_NAME); + AsyncTableRegionLocator locator = ASYNC_CONN.getRegionLocator(MetaTableName.getInstance()); List regionLocations = locator.getAllRegionLocations().get(); HRegionLocation regionToBeTruncated = regionLocations.get(0); // 1 diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java index 33fbc906f19f..f6732c611194 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ 
-32,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; @@ -73,14 +73,15 @@ public void testCreateTable() throws Exception { static TableState.State getStateFromMeta(TableName table) throws Exception { Optional state = ClientMetaTableAccessor - .getTableState(ASYNC_CONN.getTable(TableName.META_TABLE_NAME), table).get(); + .getTableState(ASYNC_CONN.getTable(MetaTableName.getInstance()), table).get(); assertTrue(state.isPresent()); return state.get().getState(); } @Test public void testCreateTableNumberOfRegions() throws Exception { - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = + ASYNC_CONN.getTable(MetaTableName.getInstance()); createTableWithDefaultConf(tableName); List regionLocations = @@ -128,7 +129,8 @@ public void testCreateTableWithRegions() throws Exception { boolean tableAvailable = admin.isTableAvailable(tableName).get(); assertTrue("Table should be created with splitKyes + 1 rows in META", tableAvailable); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = + ASYNC_CONN.getTable(MetaTableName.getInstance()); List regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); Iterator hris = regions.iterator(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java index 9db82a3bcd82..1b2ef352515c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java @@ -26,6 +26,7 @@ import java.util.Set; 
import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -57,7 +58,7 @@ public class TestAsyncTableAdminApi2 extends TestAsyncAdminBase { @Test public void testDisableCatalogTable() throws Exception { try { - this.admin.disableTable(TableName.META_TABLE_NAME).join(); + this.admin.disableTable(MetaTableName.getInstance()).join(); fail("Expected to throw ConstraintException"); } catch (Exception e) { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java index d9007f748308..93e684453f51 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.MatcherAssert.assertThat; @@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -66,10 +66,10 @@ public void testTableExist() throws Exception { TEST_UTIL.createTable(tableName, FAMILY); exist = admin.tableExists(tableName).get(); assertTrue(exist); - exist = 
admin.tableExists(TableName.META_TABLE_NAME).get(); + exist = admin.tableExists(MetaTableName.getInstance()).get(); assertTrue(exist); // meta table already exists - exist = admin.tableExists(TableName.META_TABLE_NAME).get(); + exist = admin.tableExists(MetaTableName.getInstance()).get(); assertTrue(exist); } @@ -118,7 +118,7 @@ public void testListTables() throws Exception { assertEquals(0, size); Collections.addAll(tableNames, tables); - tableNames.add(TableName.META_TABLE_NAME); + tableNames.add(MetaTableName.getInstance()); tableDescs = admin.listTableDescriptors(tableNames).get(); size = tableDescs.size(); assertEquals(tables.length + 1, size); @@ -126,7 +126,7 @@ public void testListTables() throws Exception { assertTrue("tableName should be equal in order", tableDescs.get(j).getTableName().equals(tables[i])); } - assertTrue(tableDescs.get(size - 1).getTableName().equals(TableName.META_TABLE_NAME)); + assertTrue(tableDescs.get(size - 1).getTableName().equals(MetaTableName.getInstance())); for (int i = 0; i < tables.length; i++) { admin.disableTable(tables[i]).join(); @@ -205,7 +205,7 @@ public void testDisableAndEnableTable() throws Exception { assertTrue(ok); // meta table can not be disabled. 
try { - admin.disableTable(TableName.META_TABLE_NAME).get(); + admin.disableTable(MetaTableName.getInstance()).get(); fail("meta table can not be disabled"); } catch (ExecutionException e) { Throwable cause = e.getCause(); @@ -285,7 +285,8 @@ public void testEnableTableRetainAssignment() throws Exception { int expectedRegions = splitKeys.length + 1; createTableWithDefaultConf(tableName, splitKeys); - AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME); + AsyncTable metaTable = + ASYNC_CONN.getTable(MetaTableName.getInstance()); List regions = ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get(); assertEquals( @@ -314,8 +315,8 @@ public void testIsTableEnabledAndDisabled() throws Exception { assertTrue(admin.isTableDisabled(tableName).get()); // meta table is always enabled - assertTrue(admin.isTableEnabled(TableName.META_TABLE_NAME).get()); - assertFalse(admin.isTableDisabled(TableName.META_TABLE_NAME).get()); + assertTrue(admin.isTableEnabled(MetaTableName.getInstance()).get()); + assertFalse(admin.isTableDisabled(MetaTableName.getInstance()).get()); } @Test @@ -323,6 +324,6 @@ public void testIsTableAvailable() throws Exception { createTableWithDefaultConf(tableName); TEST_UTIL.waitTableAvailable(tableName); assertTrue(admin.isTableAvailable(tableName).get()); - assertTrue(admin.isTableAvailable(TableName.META_TABLE_NAME).get()); + assertTrue(admin.isTableAvailable(MetaTableName.getInstance()).get()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java index 18c53a49de7b..db794ff014ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import 
org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; @@ -94,14 +95,14 @@ public static void setUp() throws Exception { conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, FailPrimaryMetaScanCp.class.getName()); UTIL.startMiniCluster(3); - HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(UTIL.getAdmin(), MetaTableName.getInstance(), 3); try (ConnectionRegistry registry = ConnectionRegistryFactory.create(conf, User.getCurrent())) { RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL, registry); } try (Table table = UTIL.createTable(TABLE_NAME, FAMILY)) { table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE)); } - UTIL.flush(TableName.META_TABLE_NAME); + UTIL.flush(MetaTableName.getInstance()); // wait for the store file refresh so we can read the region location from secondary meta // replicas Thread.sleep(2000); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java index 184b4ba0d3cc..a392ff5c9a69 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW; -import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; @@ -28,6 +27,7 @@ import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.security.User; @@ -72,9 +72,9 @@ public static void setUp() throws Exception { admin.balancerSwitch(false, true); // Enable hbase:meta replication. - HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, numOfMetaReplica); + HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), numOfMetaReplica); TEST_UTIL.waitFor(30000, - () -> TEST_UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() + () -> TEST_UTIL.getMiniHBaseCluster().getRegions(MetaTableName.getInstance()).size() >= numOfMetaReplica); registry = ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent()); @@ -95,14 +95,15 @@ public void testMetaChangeFromReplicaNoReplica() throws IOException, Interrupted CatalogReplicaLoadBalanceSimpleSelector.class.getName()); CatalogReplicaLoadBalanceSelector metaSelector = CatalogReplicaLoadBalanceSelectorFactory - .createSelector(replicaSelectorClass, META_TABLE_NAME, CONN, () -> { + .createSelector(replicaSelectorClass, MetaTableName.getInstance(), CONN, () -> { int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; try { RegionLocations metaLocations = CONN.registry.getMetaRegionLocations() .get(CONN.connConf.getMetaReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); numOfReplicas = metaLocations.size(); } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + LOG.error("Failed to get table {}'s region replication, ", MetaTableName.getInstance(), + e); } return numOfReplicas; }); @@ -116,20 +117,21 @@ public void testMetaChangeFromReplicaNoReplica() throws IOException, Interrupted IntStream.range(0, numOfMetaReplica).forEach(i -> assertNotEquals(replicaIdCount[i], 0)); // Change to 
No meta replica - HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 1); + HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), 1); TEST_UTIL.waitFor(30000, - () -> TEST_UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() == 1); + () -> TEST_UTIL.getMiniHBaseCluster().getRegions(MetaTableName.getInstance()).size() == 1); CatalogReplicaLoadBalanceSelector metaSelectorWithNoReplica = - CatalogReplicaLoadBalanceSelectorFactory.createSelector(replicaSelectorClass, META_TABLE_NAME, - CONN, () -> { + CatalogReplicaLoadBalanceSelectorFactory.createSelector(replicaSelectorClass, + MetaTableName.getInstance(), CONN, () -> { int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; try { RegionLocations metaLocations = CONN.registry.getMetaRegionLocations() .get(CONN.connConf.getMetaReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); numOfReplicas = metaLocations.size(); } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + LOG.error("Failed to get table {}'s region replication, ", MetaTableName.getInstance(), + e); } return numOfReplicas; }); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java index 912ded0a27bb..3b5a1496e403 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java @@ -22,7 +22,7 @@ import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -48,7 +48,7 
@@ public void testReplicaCleanup() throws Exception { ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); List metaReplicaZnodes = zkw.getMetaReplicaNodes(); assertEquals(3, metaReplicaZnodes.size()); - HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 1); + HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), MetaTableName.getInstance(), 1); metaReplicaZnodes = zkw.getMetaReplicaNodes(); assertEquals(1, metaReplicaZnodes.size()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerTimeouts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerTimeouts.java index 583dc02763d0..55bf792f568f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerTimeouts.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerTimeouts.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.SingleProcessHBaseCluster.MiniHBaseClusterRegionServer; import org.apache.hadoop.hbase.TableName; @@ -453,7 +454,7 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque } else { ScanResponse scanRes = super.scan(controller, request); String regionName = Bytes.toString(request.getRegion().getValue().toByteArray()); - if (!regionName.contains(TableName.META_TABLE_NAME.getNameAsString())) { + if (!regionName.contains(MetaTableName.getInstance().getNameAsString())) { tableScannerId = scanRes.getScannerId(); if (sleepOnOpen) { try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java index 253e61f995cf..e40208aa3b90 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.client.metrics.ScanMetricsRegionInfo; @@ -89,8 +90,8 @@ public void setup() throws IOException { conf = TEST_UTIL.getConfiguration(); rootDir = TEST_UTIL.getDefaultRootDirPath(); fs = TEST_UTIL.getTestFileSystem(); - htd = TEST_UTIL.getAdmin().getDescriptor(TableName.META_TABLE_NAME); - hri = TEST_UTIL.getAdmin().getRegions(TableName.META_TABLE_NAME).get(0); + htd = TEST_UTIL.getAdmin().getDescriptor(MetaTableName.getInstance()); + hri = TEST_UTIL.getAdmin().getRegions(MetaTableName.getInstance()).get(0); scan = new Scan(); } @@ -200,7 +201,7 @@ private void testScanMetricsWithScanMetricsByRegionDisabled(ScanMetrics scanMetr Configuration copyConf = new Configuration(conf); Scan scan = new Scan(); scan.setScanMetricsEnabled(true); - TEST_UTIL.getAdmin().flush(TableName.META_TABLE_NAME); + TEST_UTIL.getAdmin().flush(MetaTableName.getInstance()); try (ClientSideRegionScanner clientSideRegionScanner = new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, scanMetrics)) { clientSideRegionScanner.next(); @@ -229,7 +230,7 @@ private void testScanMetricByRegion(ScanMetrics scanMetrics) throws IOException Configuration copyConf = new Configuration(conf); Scan scan = new Scan(); scan.setEnableScanMetricsByRegion(true); - TEST_UTIL.getAdmin().flush(TableName.META_TABLE_NAME); + TEST_UTIL.getAdmin().flush(MetaTableName.getInstance()); try (ClientSideRegionScanner clientSideRegionScanner = new ClientSideRegionScanner(copyConf, fs, rootDir, htd, 
hri, scan, scanMetrics)) { clientSideRegionScanner.next(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java index 2384e02955da..225c30046fde 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; @@ -92,7 +93,7 @@ public void testDeleteForSureClearsAllTableRowsFromMeta() } // Now I have a nice table, mangle it by removing the HConstants.REGIONINFO_QUALIFIER_STR // content from a few of the rows. 
- try (Table metaTable = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table metaTable = TEST_UTIL.getConnection().getTable(MetaTableName.getInstance())) { try (ResultScanner scanner = metaTable.getScanner( MetaTableAccessor.getScanForTableName(TEST_UTIL.getConfiguration(), tableName))) { for (Result result : scanner) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java index 8f820158e460..d224ad41a277 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -2532,7 +2533,7 @@ public void testFilterAllRecords() throws IOException { scan.setCaching(1); // Filter out any records scan.setFilter(new FilterList(new FirstKeyOnlyFilter(), new InclusiveStopFilter(new byte[0]))); - try (Table table = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = TEST_UTIL.getConnection().getTable(MetaTableName.getInstance())) { try (ResultScanner s = table.getScanner(scan)) { assertNull(s.next()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java index f93fc9d5bf5d..e14944a14e03 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java @@ -21,8 +21,8 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; @@ -50,7 +50,7 @@ public static void setUp() throws Exception { public void testUpgradeAndIncreaseReplicaCount() throws Exception { HMaster oldMaster = TEST_UTIL.getMiniHBaseCluster().getMaster(); TableDescriptors oldTds = oldMaster.getTableDescriptors(); - TableDescriptor oldMetaTd = oldTds.get(TableName.META_TABLE_NAME); + TableDescriptor oldMetaTd = oldTds.get(MetaTableName.getInstance()); assertEquals(3, oldMetaTd.getRegionReplication()); // force update the replica count to 1 and then kill the master, to simulate that hen upgrading, // we have no region replication in meta table descriptor but we actually have meta region diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java index d79603cea3cc..365e371bf13d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java @@ -32,9 +32,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartTestingClusterOption; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.master.HMaster; import 
org.apache.hadoop.hbase.security.User; @@ -61,7 +61,7 @@ public static void setUp() throws Exception { StartTestingClusterOption.Builder builder = StartTestingClusterOption.builder(); builder.numMasters(3).numRegionServers(3); TEST_UTIL.startMiniCluster(builder.build()); - HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), MetaTableName.getInstance(), 3); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java index ce52918bfe42..267593311fdc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.MultiActionResultTooLarge; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionTooBusyException; @@ -87,7 +88,7 @@ public static void setUpBeforeClass() throws Exception { conf.setStrings(HConstants.REGION_SERVER_IMPL, RegionServerWithFakeRpcServices.class.getName()); TEST_UTIL.startMiniCluster(1); TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster(); - TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); + TEST_UTIL.waitUntilAllRegionsAssigned(MetaTableName.getInstance()); badRS = TEST_UTIL.getHBaseCluster().getRegionServer(0); assertTrue(badRS.getRSRpcServices() instanceof FakeRSRpcServices); TableDescriptor desc = TableDescriptorBuilder.newBuilder(TABLE_NAME) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java 
index 29223dea5dbe..cfe193f60fb8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java @@ -30,9 +30,9 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaRegionLocationCache; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.MultithreadedTestUtil; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.security.User; @@ -63,7 +63,7 @@ public class TestMetaRegionLocationCache { @BeforeClass public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(3); - HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), MetaTableName.getInstance(), 3); REGISTRY = ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent()); RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY); TEST_UTIL.getAdmin().balancerSwitch(false, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java index eae7078639d1..1fe06bcd3c56 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java @@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableNotFoundException; 
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; @@ -54,7 +54,7 @@ public static void setUp() throws Exception { @Test public void testMetaHTDReplicaCount() throws Exception { assertEquals(3, - TEST_UTIL.getAdmin().getDescriptor(TableName.META_TABLE_NAME).getRegionReplication()); + TEST_UTIL.getAdmin().getDescriptor(MetaTableName.getInstance()).getRegionReplication()); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java index 267d618d03d1..240e60099edb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore; @@ -97,7 +98,7 @@ public static void shutdownMetaAndDoValidations(HBaseTestingUtil util) throws Ex ServerName master = null; try (Connection c = ConnectionFactory.createConnection(util.getConfiguration())) { try (Table htable = util.createTable(TABLE, FAMILIES)) { - util.getAdmin().flush(TableName.META_TABLE_NAME); + util.getAdmin().flush(MetaTableName.getInstance()); Thread.sleep( conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 30000) * 6); List regions = MetaTableAccessor.getTableRegions(c, TABLE); @@ -114,7 +115,7 @@ public static void shutdownMetaAndDoValidations(HBaseTestingUtil util) throws Ex Thread.sleep(10); hrl = MetaTableAccessor.getRegionLocation(c, regions.get(0)); } 
while (primary.equals(hrl.getServerName())); - util.getAdmin().flush(TableName.META_TABLE_NAME); + util.getAdmin().flush(MetaTableName.getInstance()); Thread.sleep( conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 30000) * 3); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java index 55646c35e435..5e2a11a13dc8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java @@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -48,7 +49,7 @@ public class TestMultiActionMetricsFromClient { public static void setUpBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(1); TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster(); - TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); + TEST_UTIL.waitUntilAllRegionsAssigned(MetaTableName.getInstance()); TEST_UTIL.createTable(TABLE_NAME, FAMILY); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java index c38be19a238e..a453646e4247 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import 
org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor; @@ -261,7 +262,7 @@ public static void beforeClass() throws Exception { HTU.startMiniCluster(NB_SERVERS); // Enable meta replica at server side - HBaseTestingUtil.setReplicas(HTU.getAdmin(), TableName.META_TABLE_NAME, 2); + HBaseTestingUtil.setReplicas(HTU.getAdmin(), MetaTableName.getInstance(), 2); HTU.getHBaseCluster().startMaster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java index d33cc943355c..4c24dcc10949 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java @@ -34,8 +34,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.regionserver.BootstrapNodeManager; import org.apache.hadoop.hbase.regionserver.RSRpcServices; @@ -72,7 +72,7 @@ public static void setUpBeforeClass() throws Exception { UTIL.getConfiguration().setLong(RpcConnectionRegistry.MIN_SECS_BETWEEN_REFRESHES, 0); UTIL.getConfiguration().setLong(BootstrapNodeManager.REQUEST_MASTER_MIN_INTERVAL_SECS, 1); UTIL.startMiniCluster(3); - HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(UTIL.getAdmin(), MetaTableName.getInstance(), 3); } @AfterClass diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java index 52ccd5d8b7da..c196dded6618 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.StartTestingClusterOption; @@ -280,13 +281,13 @@ public void testAsyncTable() throws Exception { public void testChangeMetaReplicaCount() throws Exception { Admin admin = TEST_UTIL.getAdmin(); try (RegionLocator locator = - TEST_UTIL.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) { + TEST_UTIL.getConnection().getRegionLocator(MetaTableName.getInstance())) { assertEquals(1, locator.getAllRegionLocations().size()); - HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), 3); TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 3); - HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 2); + HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), 2); TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 2); - HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 1); + HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), 1); TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 1); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java index c39fc076ef2b..f24a894085bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java @@ -20,8 +20,8 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.junit.BeforeClass; @@ -50,7 +50,7 @@ public void testShutdownOfReplicaHolder() throws Exception { // checks that the when the server holding meta replica is shut down, the meta replica // can be recovered try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) { + RegionLocator locator = conn.getRegionLocator(MetaTableName.getInstance())) { HRegionLocation hrl = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true).get(1); ServerName oldServer = hrl.getServerName(); TEST_UTIL.getHBaseClusterInterface().killRegionServer(oldServer); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java index 8abb4d754a7a..693713c572d5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import 
org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.TableNotFoundException; @@ -163,7 +164,7 @@ public static void cleanupTest() throws Exception { */ @Test(expected = IllegalArgumentException.class) public void testMetaTablesSnapshot() throws Exception { - UTIL.getAdmin().snapshot("metaSnapshot", TableName.META_TABLE_NAME); + UTIL.getAdmin().snapshot("metaSnapshot", MetaTableName.getInstance()); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java index 6d585245e959..474f8153ca3d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java @@ -33,8 +33,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; @@ -64,7 +64,7 @@ public class TestZKConnectionRegistry { @BeforeClass public static void setUp() throws Exception { TEST_UTIL.startMiniCluster(3); - HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), MetaTableName.getInstance(), 3); REGISTRY = new ZKConnectionRegistry(TEST_UTIL.getConfiguration(), null); } @@ -89,7 +89,7 @@ public void test() throws InterruptedException, ExecutionException, IOException IntStream.range(0, 3).forEach(i -> { HRegionLocation loc = locs.getRegionLocation(i); 
assertNotNull("Replica " + i + " doesn't have location", loc); - assertEquals(TableName.META_TABLE_NAME, loc.getRegion().getTable()); + assertEquals(MetaTableName.getInstance(), loc.getRegion().getTable()); assertEquals(i, loc.getRegion().getReplicaId()); }); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java index b613e2a824c6..e42af8bbed93 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/TestInfoServersACL.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.LocalHBaseCluster; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; @@ -200,7 +201,7 @@ public void testTableActionsAvailableForAdmins() throws Exception { @Override public Void run() throws Exception { // Check the expected content is present in the http response - Pair pair = getTablePage(TableName.META_TABLE_NAME); + Pair pair = getTablePage(MetaTableName.getInstance()); assertEquals(HttpURLConnection.HTTP_OK, pair.getFirst().intValue()); assertTrue("expected=" + expectedAuthorizedContent + ", content=" + pair.getSecond(), pair.getSecond().contains(expectedAuthorizedContent)); @@ -213,7 +214,7 @@ public Void run() throws Exception { nonAdmin.doAs(new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - Pair pair = getTablePage(TableName.META_TABLE_NAME); + Pair pair = getTablePage(MetaTableName.getInstance()); assertEquals(HttpURLConnection.HTTP_OK, pair.getFirst().intValue()); assertFalse( "should not find=" + expectedAuthorizedContent + ", content=" + pair.getSecond(), diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java index 41848a58b784..d09da4fefafd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; @@ -243,7 +244,7 @@ public void testFlushedSequenceIdPersistLoad() throws Exception { .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))).build(); Table table = TEST_UTIL.createTable(tableDescriptor, null); // flush META region - TEST_UTIL.flush(TableName.META_TABLE_NAME); + TEST_UTIL.flush(MetaTableName.getInstance()); // wait for regionserver report Threads.sleep(msgInterval * 2); // record flush seqid before cluster shutdown diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java index b3fadc7ed27a..6abc5e7d680d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.testclassification.MasterTests; import 
org.apache.hadoop.hbase.testclassification.MediumTests; @@ -64,7 +64,7 @@ public static void teardownTest() throws Exception { @Test public void tesMetaDescriptorHasSFTConfig() throws Exception { - TableDescriptor descriptor = UTIL.getAdmin().getDescriptor(TableName.META_TABLE_NAME); + TableDescriptor descriptor = UTIL.getAdmin().getDescriptor(MetaTableName.getInstance()); assertEquals(FILE.name(), descriptor.getValue(TRACKER_IMPL)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java index f640c3084cb8..86cb2ba4af3d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartTestingClusterOption; @@ -307,7 +308,7 @@ public void testIncompleteMetaTableReplicaInformation() throws Exception { ADMIN.disableTable(tableName); // now delete one replica info from all the rows // this is to make the meta appear to be only partially updated - Table metaTable = ADMIN.getConnection().getTable(TableName.META_TABLE_NAME); + Table metaTable = ADMIN.getConnection().getTable(MetaTableName.getInstance()); for (byte[] row : tableRows) { Delete deleteOneReplicaLocation = new Delete(row); deleteOneReplicaLocation.addColumns(HConstants.CATALOG_FAMILY, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java 
index 910692d93c30..46ff1a582235 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.AsyncTable; @@ -93,7 +94,7 @@ public void testNewCluster() throws Exception { Connection conn = TEST_UTIL.getConnection(); assertTrue(conn.getAdmin().isMasterInMaintenanceMode()); - try (Table table = conn.getTable(TableName.META_TABLE_NAME); + try (Table table = conn.getTable(MetaTableName.getInstance()); ResultScanner scanner = table.getScanner(new Scan())) { assertNotNull("Could not read meta.", scanner.next()); } @@ -120,7 +121,7 @@ public void testExistingCluster() throws Exception { Connection conn = TEST_UTIL.getConnection(); assertTrue(conn.getAdmin().isMasterInMaintenanceMode()); - try (Table table = conn.getTable(TableName.META_TABLE_NAME); + try (Table table = conn.getTable(MetaTableName.getInstance()); ResultScanner scanner = table.getScanner(HConstants.TABLE_FAMILY); Stream results = StreamSupport.stream(scanner.spliterator(), false)) { assertTrue("Did not find user table records while reading hbase:meta", diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java index e59ef4919126..253c5899c426 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import 
org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; @@ -301,7 +302,7 @@ public void testKillRSWithOpeningRegion2482() throws Exception { */ private static int addToEachStartKey(final int expected) throws IOException { Table t = TEST_UTIL.getConnection().getTable(TABLENAME); - Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); + Table meta = TEST_UTIL.getConnection().getTable(MetaTableName.getInstance()); int rows = 0; Scan scan = new Scan(); scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java index b6bce31eed9c..df8dca55a09e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java @@ -22,9 +22,9 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartTestingClusterOption; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; @@ -66,7 +66,7 @@ public static void tearDownAfterClass() throws Exception { @Test public void testStopActiveMaster() throws Exception { try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); - RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) { + RegionLocator 
locator = conn.getRegionLocator(MetaTableName.getInstance())) { ServerName oldMetaServer = locator.getAllRegionLocations().get(0).getServerName(); ServerName oldMaster = UTIL.getMiniHBaseCluster().getMaster().getServerName(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java index cdb243b06cdb..bd3c9398a13b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java @@ -30,10 +30,10 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartTestingClusterOption; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; @@ -68,7 +68,7 @@ public class TestMigrateAndMirrorMetaLocations { @BeforeClass public static void setUp() throws Exception { UTIL.startMiniCluster(3); - HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 2); + HBaseTestingUtil.setReplicas(UTIL.getAdmin(), MetaTableName.getInstance(), 2); } @AfterClass @@ -143,20 +143,20 @@ public void test() throws Exception { } // wait until all meta regions have been assigned UTIL.waitFor(30000, - () -> UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() == 2); + () -> UTIL.getMiniHBaseCluster().getRegions(MetaTableName.getInstance()).size() == 2); // make sure all the SCPs are finished waitUntilNoSCP(); checkMirrorLocation(2); // increase replica count to 3 - 
HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3); + HBaseTestingUtil.setReplicas(UTIL.getAdmin(), MetaTableName.getInstance(), 3); checkMirrorLocation(3); byte[] replica2Data = ZKUtil.getData(UTIL.getZooKeeperWatcher(), UTIL.getZooKeeperWatcher().getZNodePaths().getZNodeForReplica(2)); // decrease replica count to 1 - HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 1); + HBaseTestingUtil.setReplicas(UTIL.getAdmin(), MetaTableName.getInstance(), 1); checkMirrorLocation(1); // restart the whole cluster, put an extra replica znode on zookeeper, to see if we will remove diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java index 30dd308c28f3..6e37bf1be5c6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.TableName; @@ -77,7 +78,7 @@ public static final class SuspendProcedure extends Procedure @Override public TableName getTableName() { - return TableName.META_TABLE_NAME; + return MetaTableName.getInstance(); } @Override @@ -154,7 +155,7 @@ public static void tearDown() throws Exception { private void removeNamespaceFamily() throws IOException { FileSystem fs = UTIL.getTestFileSystem(); Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); - Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME); + Path tableDir = CommonFSUtils.getTableDir(rootDir, 
MetaTableName.getInstance()); TableDescriptor metaTableDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); TableDescriptor noNsMetaTableDesc = TableDescriptorBuilder.newBuilder(metaTableDesc) .removeColumnFamily(HConstants.NAMESPACE_FAMILY).build(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRecreateCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRecreateCluster.java index 42f54e5c8758..19e05e26f551 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRecreateCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRecreateCluster.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.StartTestingClusterOption; @@ -126,7 +127,7 @@ private void validateRecreateClusterWithUserTableEnabled(boolean cleanupWALs, private void restartHBaseCluster(boolean cleanUpWALs, boolean cleanUpZnodes) throws Exception { // flush cache so that everything is on disk - TEST_UTIL.getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME); + TEST_UTIL.getMiniHBaseCluster().flushcache(MetaTableName.getInstance()); TEST_UTIL.getMiniHBaseCluster().flushcache(); List oldServers = @@ -177,7 +178,7 @@ private void prepareDataBeforeRecreate(HBaseTestingUtil testUtil, TableName tabl put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("c"), Bytes.toBytes("v")); table.put(put); - ensureTableNotColocatedWithSystemTable(tableName, TableName.META_TABLE_NAME); + ensureTableNotColocatedWithSystemTable(tableName, MetaTableName.getInstance()); } private void ensureTableNotColocatedWithSystemTable(TableName userTable, TableName systemTable) diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java index 866f74b73191..a6b11a569875 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; @@ -80,7 +81,7 @@ public void testRestart() throws IOException, InterruptedException { table.put(new Put(row).addColumn(FAMILY, QUALIFIER, row)); } // flush all in memory data - UTIL.flush(TableName.META_TABLE_NAME); + UTIL.flush(MetaTableName.getInstance()); UTIL.flush(NAME); // stop master first, so when stopping region server, we will not schedule a SCP. 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java index 8263298a8e4f..f20b06c7b003 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java @@ -21,7 +21,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.client.AsyncAdmin; import org.apache.hadoop.hbase.client.AsyncConnection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -64,13 +64,13 @@ public static void tearDown() throws Exception { public void test() throws Exception { RegionServerThread rsThread = null; for (RegionServerThread t : UTIL.getMiniHBaseCluster().getRegionServerThreads()) { - if (!t.getRegionServer().getRegions(TableName.META_TABLE_NAME).isEmpty()) { + if (!t.getRegionServer().getRegions(MetaTableName.getInstance()).isEmpty()) { rsThread = t; break; } } HRegionServer rs = rsThread.getRegionServer(); - RegionInfo hri = rs.getRegions(TableName.META_TABLE_NAME).get(0).getRegionInfo(); + RegionInfo hri = rs.getRegions(MetaTableName.getInstance()).get(0).getRegionInfo(); HMaster master = UTIL.getMiniHBaseCluster().getMaster(); ProcedureExecutor executor = master.getMasterProcedureExecutor(); DummyRegionProcedure proc = new DummyRegionProcedure(executor.getEnvironment(), hri); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java index b86493287e52..ce23d2342154 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java @@ -21,6 +21,7 @@ import java.util.concurrent.CountDownLatch; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ProcedureTestUtil; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -150,11 +151,11 @@ public static void setUp() throws Exception { UTIL.createTable(TABLE_NAME, CF); UTIL.getAdmin().balancerSwitch(false, true); HRegionServer srcRs = UTIL.getRSForFirstRegionInTable(TABLE_NAME); - if (!srcRs.getRegions(TableName.META_TABLE_NAME).isEmpty()) { - RegionInfo metaRegion = srcRs.getRegions(TableName.META_TABLE_NAME).get(0).getRegionInfo(); + if (!srcRs.getRegions(MetaTableName.getInstance()).isEmpty()) { + RegionInfo metaRegion = srcRs.getRegions(MetaTableName.getInstance()).get(0).getRegionInfo(); HRegionServer dstRs = UTIL.getOtherRegionServer(srcRs); UTIL.getAdmin().move(metaRegion.getEncodedNameAsBytes(), dstRs.getServerName()); - UTIL.waitFor(30000, () -> !dstRs.getRegions(TableName.META_TABLE_NAME).isEmpty()); + UTIL.waitFor(30000, () -> !dstRs.getRegions(MetaTableName.getInstance()).isEmpty()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java index 2f88f6087dd4..6941fe667e39 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import 
org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ProcedureTestUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.AsyncAdmin; @@ -94,7 +95,7 @@ public static void setUp() throws Exception { Configuration conf = UTIL.getConfiguration(); conf.setClass(HConstants.MASTER_IMPL, HMasterForTest.class, HMaster.class); UTIL.startMiniCluster(1); - UTIL.waitTableAvailable(TableName.META_TABLE_NAME); + UTIL.waitTableAvailable(MetaTableName.getInstance()); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java index 0e00006251ac..76935cea8968 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNameTestRule; @@ -121,7 +122,7 @@ public void testVisitMetaForBadRegionState() throws Exception { put.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER, Bytes.toBytes("BAD_STATE")); - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) { table.put(put); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java index cd73e09af6db..e8a2f5202b25 100644 
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.TableName; @@ -167,7 +168,7 @@ public void describeTo(Description description) { @Test public void testFailAndRollback() throws Exception { - HRegionServer rsWithMeta = UTIL.getRSForFirstRegionInTable(TableName.META_TABLE_NAME); + HRegionServer rsWithMeta = UTIL.getRSForFirstRegionInTable(MetaTableName.getInstance()); UTIL.getMiniHBaseCluster().killRegionServer(rsWithMeta.getServerName()); UTIL.waitFor(15000, () -> getSCPForServer(rsWithMeta.getServerName()) != null); ServerCrashProcedure scp = getSCPForServer(rsWithMeta.getServerName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerConditionalsTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerConditionalsTestUtil.java index 8a7169b09309..8a1f7b5601cb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerConditionalsTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerConditionalsTestUtil.java @@ -29,6 +29,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -146,7 +147,7 @@ static void validateReplicaDistribution(Connection connection, TableName tableNa static void 
validateRegionLocations(Map> tableToServers, TableName productTableName, boolean shouldBeBalanced) { ServerName metaServer = - tableToServers.get(TableName.META_TABLE_NAME).stream().findFirst().orElseThrow(); + tableToServers.get(MetaTableName.getInstance()).stream().findFirst().orElseThrow(); ServerName quotaServer = tableToServers.get(QuotaUtil.QUOTA_TABLE_NAME).stream().findFirst().orElseThrow(); Set productServers = tableToServers.get(productTableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java index dcaebbd84356..068b15fbd662 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.TableName; @@ -161,7 +162,7 @@ public void testRoundRobinAssignment() throws Exception { LoadBalancer balancer = master.getLoadBalancer(); List regions = admin.getRegions(tableName); - regions.addAll(admin.getRegions(TableName.META_TABLE_NAME)); + regions.addAll(admin.getRegions(MetaTableName.getInstance())); List servers = Lists.newArrayList( admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet()); Map> map = balancer.roundRobinAssignment(regions, servers); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestMetaTableIsolationBalancerConditional.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestMetaTableIsolationBalancerConditional.java index 80f9728651e3..d179916cc6b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestMetaTableIsolationBalancerConditional.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestMetaTableIsolationBalancerConditional.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -92,8 +93,9 @@ public void testTableIsolation() throws Exception { admin.createTable(productTableDescriptor, BalancerConditionalsTestUtil.generateSplits(2 * NUM_SERVERS)); - Set tablesToBeSeparated = ImmutableSet. builder() - .add(TableName.META_TABLE_NAME).add(QuotaUtil.QUOTA_TABLE_NAME).add(productTableName).build(); + Set tablesToBeSeparated = + ImmutableSet. 
builder().add(MetaTableName.getInstance()) + .add(QuotaUtil.QUOTA_TABLE_NAME).add(productTableName).build(); // Pause the balancer admin.balancerSwitch(false, true); @@ -147,7 +149,7 @@ private static void validateRegionLocations(Map> tabl TableName productTableName, boolean shouldBeBalanced) { // Validate that the region assignments ServerName metaServer = - tableToServers.get(TableName.META_TABLE_NAME).stream().findFirst().orElseThrow(); + tableToServers.get(MetaTableName.getInstance()).stream().findFirst().orElseThrow(); ServerName quotaServer = tableToServers.get(QuotaUtil.QUOTA_TABLE_NAME).stream().findFirst().orElseThrow(); Set productServers = tableToServers.get(productTableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java index 88d1a298aa48..825fba807562 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; @@ -94,7 +95,7 @@ public static void tearDownAfterClass() throws Exception { @After public void tearDown() throws IOException { - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME); + try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance()); ResultScanner scanner = table.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY) .addFamily(HConstants.REPLICATION_BARRIER_FAMILY).setFilter(new FirstKeyOnlyFilter()))) { for (;;) { @@ -148,20 
+149,20 @@ private void addBarrier(RegionInfo region, long... barriers) throws IOException put.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER, put.getTimestamp() - barriers.length + i, Bytes.toBytes(barriers[i])); } - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) { table.put(put); } } private void fillCatalogFamily(RegionInfo region) throws IOException { - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) { table.put(new Put(region.getRegionName()).addColumn(HConstants.CATALOG_FAMILY, Bytes.toBytes("whatever"), Bytes.toBytes("whatever"))); } } private void clearCatalogFamily(RegionInfo region) throws IOException { - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) { table.delete(new Delete(region.getRegionName()).addFamily(HConstants.CATALOG_FAMILY)); } } @@ -281,7 +282,7 @@ public void testDeleteRowForDeletedRegion() throws IOException, ReplicationExcep // No catalog family, then we should remove the whole row clearCatalogFamily(region); cleaner.chore(); - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) { assertFalse(table .exists(new Get(region.getRegionName()).addFamily(HConstants.REPLICATION_BARRIER_FAMILY))); } @@ -303,7 +304,7 @@ public void testDeleteRowForDeletedRegionNoPeers() throws IOException { // There are no peers, and no catalog family for this region either, so we should remove the // barriers. And since there is no catalog family, after we delete the barrier family, the whole // row is deleted. 
- try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) { assertFalse(table.exists(new Get(region.getRegionName()))); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java index cf118260b401..b610c2750041 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaMockingUtil; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.Waiter; @@ -182,7 +183,7 @@ private PairOfSameType waitOnDaughters(final RegionInfo r) throws IO long start = EnvironmentEdgeManager.currentTime(); PairOfSameType pair = null; try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) { + Table metaTable = conn.getTable(MetaTableName.getInstance())) { Result result = null; RegionInfo region = null; while ((EnvironmentEdgeManager.currentTime() - start) < 60000) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java index 614385ec04d6..880a3ffbd812 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java @@ 
-25,7 +25,7 @@ import java.util.List; import java.util.SortedSet; import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -50,21 +50,21 @@ public class TestMetaFixerNoCluster { private static byte[] D = Bytes.toBytes("d"); private static RegionInfo ALL = RegionInfoBuilder.FIRST_META_REGIONINFO; private static RegionInfo _ARI = - RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(A).build(); + RegionInfoBuilder.newBuilder(MetaTableName.getInstance()).setEndKey(A).build(); private static RegionInfo _BRI = - RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(B).build(); + RegionInfoBuilder.newBuilder(MetaTableName.getInstance()).setEndKey(B).build(); private static RegionInfo ABRI = - RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(B).build(); + RegionInfoBuilder.newBuilder(MetaTableName.getInstance()).setStartKey(A).setEndKey(B).build(); private static RegionInfo ACRI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(C).build(); + .newBuilder(MetaTableName.getInstance()).setStartKey(A).setEndKey(C).build(); private static RegionInfo CDRI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(C).setEndKey(D).build(); + .newBuilder(MetaTableName.getInstance()).setStartKey(C).setEndKey(D).build(); private static RegionInfo ADRI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(D).build(); + .newBuilder(MetaTableName.getInstance()).setStartKey(A).setEndKey(D).build(); private static RegionInfo D_RI = org.apache.hadoop.hbase.client.RegionInfoBuilder - 
.newBuilder(TableName.META_TABLE_NAME).setStartKey(D).build(); + .newBuilder(MetaTableName.getInstance()).setStartKey(D).build(); private static RegionInfo C_RI = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(C).build(); + .newBuilder(MetaTableName.getInstance()).setStartKey(C).build(); @Test public void testGetRegionInfoWithLargestEndKey() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java index e931716e77ed..62aea9177513 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java @@ -56,6 +56,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.RegionMetrics; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Size; @@ -104,7 +105,7 @@ public void before() { @Test public void testNoNormalizationForMetaTable() { - TableName testTable = TableName.META_TABLE_NAME; + TableName testTable = MetaTableName.getInstance(); TableDescriptor testMetaTd = TableDescriptorBuilder.newBuilder(testTable).build(); List RegionInfo = new ArrayList<>(); Map regionSizes = new HashMap<>(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java index a878af785783..a9ebb47a6d84 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java @@ -34,9 +34,9 @@ import 
org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNameTestRule; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; @@ -262,7 +262,7 @@ public String toString() { private static class PrimaryNotMetaRegionSelector extends RegionSelector { @Override boolean regionFilter(final RegionInfo info) { - return !Objects.equals(TableName.META_TABLE_NAME, info.getTable()) + return !Objects.equals(MetaTableName.getInstance(), info.getTable()) && Objects.equals(RegionInfo.DEFAULT_REPLICA_ID, info.getReplicaId()); } @@ -278,7 +278,7 @@ Exception regionFilterFailure() { private static class ReplicaNonMetaRegionSelector extends RegionSelector { @Override boolean regionFilter(RegionInfo info) { - return !Objects.equals(TableName.META_TABLE_NAME, info.getTable()) + return !Objects.equals(MetaTableName.getInstance(), info.getTable()) && !Objects.equals(RegionInfo.DEFAULT_REPLICA_ID, info.getReplicaId()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java index d2f04c674c97..c5e99471a7f6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import 
org.apache.hadoop.hbase.Waiter.ExplainingPredicate; import org.apache.hadoop.hbase.client.AsyncAdmin; @@ -147,7 +148,7 @@ public static void tearDown() throws Exception { @Test public void test() throws Exception { RegionServerThread rsWithMetaThread = UTIL.getMiniHBaseCluster().getRegionServerThreads() - .stream().filter(t -> !t.getRegionServer().getRegions(TableName.META_TABLE_NAME).isEmpty()) + .stream().filter(t -> !t.getRegionServer().getRegions(MetaTableName.getInstance()).isEmpty()) .findAny().get(); HRegionServer rsNoMeta = UTIL.getOtherRegionServer(rsWithMetaThread.getRegionServer()); FAIL = true; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableProcedureWaitingQueueCleanup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableProcedureWaitingQueueCleanup.java index 386356124f5b..cf58f01b7843 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableProcedureWaitingQueueCleanup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableProcedureWaitingQueueCleanup.java @@ -22,6 +22,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.RegionInfo; @@ -122,7 +123,7 @@ public static class MetaTableProcedure extends Procedure @Override public TableName getTableName() { - return TableName.META_TABLE_NAME; + return MetaTableName.getInstance(); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java index 64454ab268fa..1fcf695da810 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; import org.apache.hadoop.hbase.YouAreDeadException; @@ -131,7 +132,7 @@ public void test() throws Exception { HRegionServer regionSvr = UTIL.getRSForFirstRegionInTable(TABLE_NAME); HRegion region = regionSvr.getRegions(TABLE_NAME).get(0); String regName = region.getRegionInfo().getEncodedName(); - List metaRegs = regionSvr.getRegions(TableName.META_TABLE_NAME); + List metaRegs = regionSvr.getRegions(MetaTableName.getInstance()); if (metaRegs != null && !metaRegs.isEmpty()) { LOG.info("meta is on the same server: " + regionSvr); // when region is on same server as hbase:meta, reassigning meta would abort the server diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index af3902c9aa1e..801652080107 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueTestUtil; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -1002,7 +1003,8 @@ public void testShouldFlushMeta() throws Exception 
{ TableDescriptors tds = new FSTableDescriptors(conf); FSTableDescriptors.tryUpdateMetaTableDescriptor(conf); HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir, conf, - tds.get(TableName.META_TABLE_NAME), wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO)); + tds.get(MetaTableName.getInstance()), + wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO)); // parameterized tests add [#] suffix get rid of [ and ]. TableDescriptor desc = TableDescriptorBuilder .newBuilder(TableName.valueOf(name.getMethodName().replaceAll("[\\[\\]]", "_"))) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index 897152f8b6dd..ddcb9cec5446 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; @@ -429,7 +430,7 @@ public static void blockUntilRegionSplit(Configuration conf, long timeout, log("blocking until region is split:" + Bytes.toStringBinary(regionName)); RegionInfo daughterA = null, daughterB = null; try (Connection conn = ConnectionFactory.createConnection(conf); - Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) { + Table metaTable = conn.getTable(MetaTableName.getInstance())) { Result result = null; RegionInfo region = null; while ((EnvironmentEdgeManager.currentTime() - start) < timeout) { diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index a435b9d9b239..900adcc38757 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; @@ -92,7 +93,7 @@ public void testUsingMetaAndBinary() throws IOException { // Up flush size else we bind up when we use default catalog flush of 16k. TableDescriptors tds = new FSTableDescriptors(UTIL.getConfiguration()); FSTableDescriptors.tryUpdateMetaTableDescriptor(UTIL.getConfiguration()); - TableDescriptor td = tds.get(TableName.META_TABLE_NAME); + TableDescriptor td = tds.get(MetaTableName.getInstance()); td = TableDescriptorBuilder.newBuilder(td).setMemStoreFlushSize(64 * 1024 * 1024).build(); HRegion mr = HBaseTestingUtil.createRegionAndWAL(RegionInfoBuilder.FIRST_META_REGIONINFO, rootdir, conf, td); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java index 2869be090f42..ebc7c4d13d8d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; import 
org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.testclassification.RegionServerTests; @@ -73,12 +73,12 @@ public void testReadAndWriteRegionInfoFile() throws IOException, InterruptedExce FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(FS, ROOT_DIR); FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(CONF, FS, ROOT_DIR); HRegion r = HBaseTestingUtil.createRegionAndWAL(ri, ROOT_DIR, CONF, - fsTableDescriptors.get(TableName.META_TABLE_NAME)); + fsTableDescriptors.get(MetaTableName.getInstance())); // Get modtime on the file. long modtime = getModTime(r); HBaseTestingUtil.closeRegionAndWAL(r); Thread.sleep(1001); - r = HRegion.openHRegion(ROOT_DIR, ri, fsTableDescriptors.get(TableName.META_TABLE_NAME), null, + r = HRegion.openHRegion(ROOT_DIR, ri, fsTableDescriptors.get(MetaTableName.getInstance()), null, CONF); // Ensure the file is not written for a second time. 
long modtime2 = getModTime(r); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java index 60fe39ecc77f..fc0a2dd8a470 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; @@ -71,7 +72,7 @@ public class TestRegionInfo { public void testIsStart() { assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isFirst()); org.apache.hadoop.hbase.client.RegionInfo ri = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(Bytes.toBytes("not_start")).build(); + .newBuilder(MetaTableName.getInstance()).setStartKey(Bytes.toBytes("not_start")).build(); assertFalse(ri.isFirst()); } @@ -79,7 +80,7 @@ public void testIsStart() { public void testIsEnd() { assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isFirst()); org.apache.hadoop.hbase.client.RegionInfo ri = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setEndKey(Bytes.toBytes("not_end")).build(); + .newBuilder(MetaTableName.getInstance()).setEndKey(Bytes.toBytes("not_end")).build(); assertFalse(ri.isLast()); } @@ -87,9 +88,9 @@ public void testIsEnd() { public void testIsNext() { byte[] bytes = Bytes.toBytes("row"); org.apache.hadoop.hbase.client.RegionInfo ri = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setEndKey(bytes).build(); + 
.newBuilder(MetaTableName.getInstance()).setEndKey(bytes).build(); org.apache.hadoop.hbase.client.RegionInfo ri2 = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(bytes).build(); + .newBuilder(MetaTableName.getInstance()).setStartKey(bytes).build(); assertFalse(ri.isNext(RegionInfoBuilder.FIRST_META_REGIONINFO)); assertTrue(ri.isNext(ri2)); } @@ -102,18 +103,18 @@ public void testIsOverlap() { byte[] d = Bytes.toBytes("d"); org.apache.hadoop.hbase.client.RegionInfo all = RegionInfoBuilder.FIRST_META_REGIONINFO; org.apache.hadoop.hbase.client.RegionInfo ari = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setEndKey(a).build(); + .newBuilder(MetaTableName.getInstance()).setEndKey(a).build(); org.apache.hadoop.hbase.client.RegionInfo abri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(MetaTableName.getInstance()) .setStartKey(a).setEndKey(b).build(); org.apache.hadoop.hbase.client.RegionInfo adri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(MetaTableName.getInstance()) .setStartKey(a).setEndKey(d).build(); org.apache.hadoop.hbase.client.RegionInfo cdri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(MetaTableName.getInstance()) .setStartKey(c).setEndKey(d).build(); org.apache.hadoop.hbase.client.RegionInfo dri = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setStartKey(d).build(); + .newBuilder(MetaTableName.getInstance()).setStartKey(d).build(); assertTrue(all.isOverlap(all)); assertTrue(all.isOverlap(abri)); assertFalse(abri.isOverlap(cdri)); @@ -140,17 +141,17 @@ public void testIsOverlaps() { byte[] e = 
Bytes.toBytes("e"); byte[] f = Bytes.toBytes("f"); org.apache.hadoop.hbase.client.RegionInfo ari = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setEndKey(a).build(); + .newBuilder(MetaTableName.getInstance()).setEndKey(a).build(); org.apache.hadoop.hbase.client.RegionInfo abri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(MetaTableName.getInstance()) .setStartKey(a).setEndKey(b).build(); org.apache.hadoop.hbase.client.RegionInfo eri = org.apache.hadoop.hbase.client.RegionInfoBuilder - .newBuilder(TableName.META_TABLE_NAME).setEndKey(e).build(); + .newBuilder(MetaTableName.getInstance()).setEndKey(e).build(); org.apache.hadoop.hbase.client.RegionInfo cdri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(MetaTableName.getInstance()) .setStartKey(c).setEndKey(d).build(); org.apache.hadoop.hbase.client.RegionInfo efri = - org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME) + org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(MetaTableName.getInstance()) .setStartKey(e).setEndKey(f).build(); assertFalse(ari.isOverlap(abri)); assertTrue(abri.isOverlap(eri)); @@ -175,12 +176,12 @@ public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedExc FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration()); FSTableDescriptors.tryUpdateMetaTableDescriptor(htu.getConfiguration()); HRegion r = HBaseTestingUtil.createRegionAndWAL(hri, basedir, htu.getConfiguration(), - fsTableDescriptors.get(TableName.META_TABLE_NAME)); + fsTableDescriptors.get(MetaTableName.getInstance())); // Get modtime on the file. 
long modtime = getModTime(r); HBaseTestingUtil.closeRegionAndWAL(r); Thread.sleep(1001); - r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), null, + r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(MetaTableName.getInstance()), null, htu.getConfiguration()); // Ensure the file is not written for a second time. long modtime2 = getModTime(r); @@ -254,7 +255,7 @@ public void testContainsRange() { @Test public void testContainsRangeForMetaTable() { TableDescriptor tableDesc = - TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build(); + TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).build(); RegionInfo hri = RegionInfoBuilder.newBuilder(tableDesc.getTableName()).build(); byte[] startRow = HConstants.EMPTY_START_ROW; byte[] row1 = Bytes.toBytes("a,a,0"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java index 68c6b6434c4f..559a7e72832c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TestMetaTableAccessor; import org.apache.hadoop.hbase.client.Consistency; @@ -147,7 +148,7 @@ public void testRegionReplicaUpdatesMetaLocation() throws Exception { openRegion(HTU, getRS(), hriSecondary); Table meta = null; try { - meta = HTU.getConnection().getTable(TableName.META_TABLE_NAME); + meta = HTU.getConnection().getTable(MetaTableName.getInstance()); TestMetaTableAccessor.assertMetaLocation(meta, hriPrimary.getRegionName(), getRS().getServerName(), -1, 1, 
false); } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java index 3fad6e16bf76..bb2b2f0abd0b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java @@ -22,6 +22,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; @@ -65,7 +66,7 @@ public static void setUp() throws Exception { UTIL.createTable(TABLE_NAME, CF); UTIL.waitTableAvailable(TABLE_NAME); HRegionServer rs = UTIL.getRSForFirstRegionInTable(TABLE_NAME); - if (!rs.getRegions(TableName.META_TABLE_NAME).isEmpty()) { + if (!rs.getRegions(MetaTableName.getInstance()).isEmpty()) { HRegionServer rs1 = UTIL.getOtherRegionServer(rs); UTIL.moveRegionAndWait( UTIL.getMiniHBaseCluster().getRegions(TABLE_NAME).get(0).getRegionInfo(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java index 522b0ea884b3..e21606458761 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.NotServingRegionException; import 
org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -97,11 +98,12 @@ public static void stopMasterAndCacheMetaLocation(HBaseTestingUtil HTU) // cache meta location, so we will not go to master to lookup meta region location for (JVMClusterUtil.RegionServerThread t : HTU.getMiniHBaseCluster().getRegionServerThreads()) { try (RegionLocator locator = - t.getRegionServer().getConnection().getRegionLocator(TableName.META_TABLE_NAME)) { + t.getRegionServer().getConnection().getRegionLocator(MetaTableName.getInstance())) { locator.getAllRegionLocations(); } } - try (RegionLocator locator = HTU.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) { + try ( + RegionLocator locator = HTU.getConnection().getRegionLocator(MetaTableName.getInstance())) { locator.getAllRegionLocations(); } // Stop master diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java index 61da536310a8..e42209ae78b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.TableName; @@ -135,7 +136,7 @@ public void testRejectRequestsOnAbort() throws Exception { .getRegionServerThreads()) { HRegionServer regionServer = regionServerThread.getRegionServer(); if ( - regionServer.getRegions(TableName.META_TABLE_NAME).isEmpty() + regionServer.getRegions(MetaTableName.getInstance()).isEmpty() && 
!regionServer.getRegions(TABLE_NAME).isEmpty() ) { serverWithoutMeta = regionServer; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java index 0bc7deccc121..a5ad29af1ea8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; import org.apache.hadoop.hbase.YouAreDeadException; @@ -135,7 +136,7 @@ public void test() throws Exception { RegionServerThread rst1 = UTIL.getMiniHBaseCluster().getRegionServerThreads().get(1); HRegionServer liveRS; RegionServerThread toKillRSThread; - if (rst1.getRegionServer().getRegions(TableName.META_TABLE_NAME).isEmpty()) { + if (rst1.getRegionServer().getRegions(MetaTableName.getInstance()).isEmpty()) { liveRS = rst0.getRegionServer(); toKillRSThread = rst1; } else { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java index 2a5aec458828..24e393fbe716 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import 
org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.StartTestingClusterOption; @@ -338,7 +339,7 @@ void validateData(Table table, int rownum) throws IOException { public void testCompactionRecordDoesntBlockRolling() throws Exception { // When the hbase:meta table can be opened, the region servers are running - try (Table t = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); + try (Table t = TEST_UTIL.getConnection().getTable(MetaTableName.getInstance()); Table table = createTestTable(getName())) { server = TEST_UTIL.getRSForFirstRegionInTable(table.getName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java index 3c3dbe1ead9e..bc8ee0a836d6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.TableName; @@ -162,7 +163,7 @@ public void testRSAbortWithUnflushedEdits() throws Exception { LOG.info("Starting testRSAbortWithUnflushedEdits()"); // When the hbase:meta table can be opened, the region servers are running - TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close(); + TEST_UTIL.getConnection().getTable(MetaTableName.getInstance()).close(); // Create the test table and open it TableName tableName = TableName.valueOf(this.getClass().getSimpleName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java index 43477f21f7f8..3ba45bdf4915 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.Put; @@ -322,7 +323,7 @@ public void testLogRollOnPipelineRestart() throws Exception { fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) > 1); LOG.info("Replication=" + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS())); // When the hbase:meta table can be opened, the region servers are running - Table t = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); + Table t = TEST_UTIL.getConnection().getTable(MetaTableName.getInstance()); try { this.server = cluster.getRegionServer(0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java index dd4fe77c8a38..635b8abcc7ab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java @@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; import 
org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -95,7 +95,7 @@ public void testContendedLogRolling() throws Exception { CommonFSUtils.setRootDir(conf, dir); FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(TEST_UTIL.getConfiguration()); FSTableDescriptors.tryUpdateMetaTableDescriptor(TEST_UTIL.getConfiguration()); - TableDescriptor metaTableDescriptor = fsTableDescriptors.get(TableName.META_TABLE_NAME); + TableDescriptor metaTableDescriptor = fsTableDescriptors.get(MetaTableName.getInstance()); conf.set(FSHLogProvider.WRITER_IMPL, HighLatencySyncWriter.class.getName()); final WALFactory wals = new WALFactory(conf, TestLogRollingNoCluster.class.getName()); final WAL wal = wals.getWAL(null); @@ -159,7 +159,7 @@ public void run() { try { TableDescriptors tds = new FSTableDescriptors(TEST_UTIL.getConfiguration()); FSTableDescriptors.tryUpdateMetaTableDescriptor(TEST_UTIL.getConfiguration()); - TableDescriptor htd = tds.get(TableName.META_TABLE_NAME); + TableDescriptor htd = tds.get(MetaTableName.getInstance()); for (int i = 0; i < this.count; i++) { long now = EnvironmentEdgeManager.currentTime(); // Roll every ten edits @@ -176,7 +176,7 @@ public void run() { scopes.put(fam, 0); } final long txid = wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), - TableName.META_TABLE_NAME, now, mvcc, scopes), edit); + MetaTableName.getInstance(), now, mvcc, scopes), edit); Threads.sleep(ThreadLocalRandom.current().nextInt(5)); wal.sync(txid); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java index 897166a94000..522bd5ac1367 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.security.access.PermissionStorage; @@ -72,7 +73,7 @@ public void testSystemTableWALEntryFilter() { // meta WALKeyImpl key1 = new WALKeyImpl(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), - TableName.META_TABLE_NAME, EnvironmentEdgeManager.currentTime()); + MetaTableName.getInstance(), EnvironmentEdgeManager.currentTime()); Entry metaEntry = new Entry(key1, null); assertNull(filter.filter(metaEntry)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetaRegionReplicaReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetaRegionReplicaReplication.java index ab7935ddad41..c78c967563e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetaRegionReplicaReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetaRegionReplicaReplication.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; @@ -98,10 +99,11 @@ public void before() throws Exception { // conf.setInt(HConstants.META_REPLICAS_NUM, numOfMetaReplica); HTU.startMiniCluster(NB_SERVERS); // Enable hbase:meta replication. 
- HBaseTestingUtil.setReplicas(HTU.getAdmin(), TableName.META_TABLE_NAME, numOfMetaReplica); + HBaseTestingUtil.setReplicas(HTU.getAdmin(), MetaTableName.getInstance(), numOfMetaReplica); - HTU.waitFor(30000, () -> HTU.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() - >= numOfMetaReplica); + HTU.waitFor(30000, + () -> HTU.getMiniHBaseCluster().getRegions(MetaTableName.getInstance()).size() + >= numOfMetaReplica); } @After @@ -118,35 +120,37 @@ public void testHBaseMetaReplicates() throws Exception { try (Table table = HTU.createTable(TableName.valueOf(this.name.getMethodName() + "_0"), HConstants.CATALOG_FAMILY, Arrays.copyOfRange(HBaseTestingUtil.KEYS, 1, HBaseTestingUtil.KEYS.length))) { - verifyReplication(TableName.META_TABLE_NAME, numOfMetaReplica, getMetaCells(table.getName())); + verifyReplication(MetaTableName.getInstance(), numOfMetaReplica, + getMetaCells(table.getName())); } try (Table table = HTU.createTable(TableName.valueOf(this.name.getMethodName() + "_1"), HConstants.CATALOG_FAMILY, Arrays.copyOfRange(HBaseTestingUtil.KEYS, 1, HBaseTestingUtil.KEYS.length))) { - verifyReplication(TableName.META_TABLE_NAME, numOfMetaReplica, getMetaCells(table.getName())); + verifyReplication(MetaTableName.getInstance(), numOfMetaReplica, + getMetaCells(table.getName())); // Try delete. 
HTU.deleteTableIfAny(table.getName()); - verifyDeletedReplication(TableName.META_TABLE_NAME, numOfMetaReplica, table.getName()); + verifyDeletedReplication(MetaTableName.getInstance(), numOfMetaReplica, table.getName()); } } @Test public void testCatalogReplicaReplicationWithFlushAndCompaction() throws Exception { try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); - Table table = connection.getTable(TableName.META_TABLE_NAME)) { + Table table = connection.getTable(MetaTableName.getInstance())) { // load the data to the table for (int i = 0; i < 5; i++) { LOG.info("Writing data from " + i * 1000 + " to " + (i * 1000 + 1000)); HTU.loadNumericRows(table, HConstants.CATALOG_FAMILY, i * 1000, i * 1000 + 1000); LOG.info("flushing table"); - HTU.flush(TableName.META_TABLE_NAME); + HTU.flush(MetaTableName.getInstance()); LOG.info("compacting table"); if (i < 4) { - HTU.compact(TableName.META_TABLE_NAME, false); + HTU.compact(MetaTableName.getInstance(), false); } } - verifyReplication(TableName.META_TABLE_NAME, numOfMetaReplica, 0, 5000, + verifyReplication(MetaTableName.getInstance(), numOfMetaReplica, 0, 5000, HConstants.CATALOG_FAMILY); } } @@ -181,7 +185,7 @@ public void testCatalogReplicaReplicationWithReplicaMoved() throws Exception { } } try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); - Table table = connection.getTable(TableName.META_TABLE_NAME)) { + Table table = connection.getTable(MetaTableName.getInstance())) { // load the data to the table for (int i = 0; i < 5; i++) { LOG.info("Writing data from " + i * 1000 + " to " + (i * 1000 + 1000)); @@ -191,7 +195,7 @@ public void testCatalogReplicaReplicationWithReplicaMoved() throws Exception { } } - verifyReplication(TableName.META_TABLE_NAME, numOfMetaReplica, 0, 5000, + verifyReplication(MetaTableName.getInstance(), numOfMetaReplica, 0, 5000, HConstants.CATALOG_FAMILY); } } @@ -413,7 +417,7 @@ private void 
getMetaReplicaReadRequests(final Region[] metaRegions, final long[] @Test public void testHBaseMetaReplicaGets() throws Exception { TableName tn = TableName.valueOf(this.name.getMethodName()); - final Region[] metaRegions = getAllRegions(TableName.META_TABLE_NAME, numOfMetaReplica); + final Region[] metaRegions = getAllRegions(MetaTableName.getInstance(), numOfMetaReplica); long[] readReqsForMetaReplicas = new long[numOfMetaReplica]; long[] readReqsForMetaReplicasAfterGet = new long[numOfMetaReplica]; long[] readReqsForMetaReplicasAfterGetAllLocations = new long[numOfMetaReplica]; @@ -426,7 +430,8 @@ public void testHBaseMetaReplicaGets() throws Exception { try (Table table = HTU.createTable(tn, HConstants.CATALOG_FAMILY, Arrays.copyOfRange(HBaseTestingUtil.KEYS, 1, HBaseTestingUtil.KEYS.length))) { - verifyReplication(TableName.META_TABLE_NAME, numOfMetaReplica, getMetaCells(table.getName())); + verifyReplication(MetaTableName.getInstance(), numOfMetaReplica, + getMetaCells(table.getName())); // load different values HTU.loadTable(table, new byte[][] { HConstants.CATALOG_FAMILY }, VALUE); for (int i = 0; i < NB_SERVERS; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java index 37af52eb93b9..c6d01d0e7a51 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSource.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; @@ -196,7 +197,7 @@ public void 
testWALEntryFilter() throws IOException { assertTrue(wef.filter(e) == e); // Test system WAL edit. e = new WAL.Entry( - new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TableName.META_TABLE_NAME, -1, -1, uuid), we); + new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, MetaTableName.getInstance(), -1, -1, uuid), we); assertNull(wef.filter(e)); } finally { rs.terminate("Done"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationChecker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationChecker.java index 8731adbe4c2b..be93b44d8a62 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationChecker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestSerialReplicationChecker.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -162,7 +163,7 @@ private void addStateAndBarrier(RegionInfo region, RegionState.State state, long put.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER, put.getTimestamp() - barriers.length + i, Bytes.toBytes(barriers[i])); } - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) { table.put(put); } } @@ -171,7 +172,7 @@ private void setState(RegionInfo region, RegionState.State state) throws IOExcep Put put = new Put(region.getRegionName(), EnvironmentEdgeManager.currentTime()); put.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER, Bytes.toBytes(state.name())); - try (Table table = 
UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) { table.put(put); } } @@ -188,7 +189,7 @@ private void addParents(RegionInfo region, List parents) throws IOEx put.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, ReplicationBarrierFamilyFormat.REPLICATION_PARENT_QUALIFIER, ReplicationBarrierFamilyFormat.getParentsBytes(parents)); - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) { table.put(put); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsCPHookCalled.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsCPHookCalled.java index cb53f77bce56..9f6019974574 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsCPHookCalled.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsCPHookCalled.java @@ -20,9 +20,9 @@ import static org.junit.Assert.assertTrue; import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RSGroupTests; import org.junit.After; @@ -69,7 +69,7 @@ public void testGetRSGroupInfoCPHookCalled() throws Exception { @Test public void testGetRSGroupInfoOfTableCPHookCalled() throws Exception { - ADMIN.getRSGroup(TableName.META_TABLE_NAME); + ADMIN.getRSGroup(MetaTableName.getInstance()); assertTrue(OBSERVER.preGetRSGroupInfoOfTableCalled); assertTrue(OBSERVER.postGetRSGroupInfoOfTableCalled); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java index 3a596a02e0a6..5510ac230111 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsKillRS.java @@ -29,6 +29,7 @@ import java.util.Set; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -246,10 +247,10 @@ public void testLowerMetaGroupVersion() throws Exception { // move hbase:meta to meta_group Set toAddTables = new HashSet<>(); - toAddTables.add(TableName.META_TABLE_NAME); + toAddTables.add(MetaTableName.getInstance()); ADMIN.setRSGroup(toAddTables, groupName); assertTrue(ADMIN.getConfiguredNamespacesAndTablesInRSGroup(groupName).getSecond() - .contains(TableName.META_TABLE_NAME)); + .contains(MetaTableName.getInstance())); // restart the regionserver in meta_group, and lower its version String originVersion = ""; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java index dffeaa206a24..3a9ccca4be5e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -236,7 +237,7 @@ public void testRunCleanerChore() throws 
Exception { public void testExecProcedure() throws Exception { verifyAdminCheckForAction((admin) -> { // Using existing table instead of creating a new one. - admin.execProcedure("flush-table-proc", TableName.META_TABLE_NAME.getNameAsString(), + admin.execProcedure("flush-table-proc", MetaTableName.getInstance().getNameAsString(), new HashMap<>()); }); } @@ -259,8 +260,8 @@ public void testExecService() throws Exception { public void testExecProcedureWithRet() throws Exception { verifyAdminCheckForAction((admin) -> { // Using existing table instead of creating a new one. - admin.execProcedureWithReturn("flush-table-proc", TableName.META_TABLE_NAME.getNameAsString(), - new HashMap<>()); + admin.execProcedureWithReturn("flush-table-proc", + MetaTableName.getInstance().getNameAsString(), new HashMap<>()); }); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java index f132eb6964b1..e6b2775f2921 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java @@ -29,7 +29,7 @@ import java.util.Collection; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.client.AsyncConnection; import org.apache.hadoop.hbase.client.AsyncTable; import org.apache.hadoop.hbase.client.Connection; @@ -97,7 +97,7 @@ public void setUpBeforeMethod() { private void testToken() throws Exception { try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get()) { - AsyncTable table = conn.getTable(TableName.META_TABLE_NAME); + AsyncTable table = conn.getTable(MetaTableName.getInstance()); 
WhoAmIResponse response = table. coprocessorService( AuthenticationService::newStub, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java index 06d64ace3be2..8a941ee65ce4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; @@ -86,7 +87,7 @@ public static void setupBeforeClass() throws Exception { TEST_UTIL.startMiniCluster(1); TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster(); - TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); + TEST_UTIL.waitUntilAllRegionsAssigned(MetaTableName.getInstance()); rootDir = CommonFSUtils.getRootDir(conf); fs = TEST_UTIL.getTestFileSystem(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java index 0b989b8029f0..0d0bd7b81b1c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -198,7 +199,8 @@ protected void 
deleteRegion(Configuration conf, final TableDescriptor htd, byte[ } if (metaRow) { - try (Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService)) { + try ( + Table meta = connection.getTable(MetaTableName.getInstance(), tableExecutorService)) { Delete delete = new Delete(deleteRow); meta.delete(delete); } @@ -510,7 +512,7 @@ public boolean tableHasErrors(HbckTableInfo table) { protected void deleteMetaRegion(Configuration conf, boolean unassign, boolean hdfs, boolean regionInfoOnly) throws IOException, InterruptedException { - HRegionLocation metaLocation = connection.getRegionLocator(TableName.META_TABLE_NAME) + HRegionLocation metaLocation = connection.getRegionLocator(MetaTableName.getInstance()) .getRegionLocation(HConstants.EMPTY_START_ROW); ServerName hsa = metaLocation.getServerName(); RegionInfo hri = metaLocation.getRegion(); @@ -525,8 +527,8 @@ protected void deleteMetaRegion(Configuration conf, boolean unassign, boolean hd LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString()); Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); - Path p = - new Path(rootDir + "/" + TableName.META_TABLE_NAME.getNameAsString(), hri.getEncodedName()); + Path p = new Path(rootDir + "/" + MetaTableName.getInstance().getNameAsString(), + hri.getEncodedName()); Path hriPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE); fs.delete(hriPath, true); } @@ -535,8 +537,8 @@ protected void deleteMetaRegion(Configuration conf, boolean unassign, boolean hd LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString()); Path rootDir = CommonFSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); - Path p = - new Path(rootDir + "/" + TableName.META_TABLE_NAME.getNameAsString(), hri.getEncodedName()); + Path p = new Path(rootDir + "/" + MetaTableName.getInstance().getNameAsString(), + hri.getEncodedName()); HBaseFsck.debugLsr(conf, p); boolean success = fs.delete(p, true); 
LOG.info("Deleted " + p + " sucessfully? " + success); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index 5e2b4b52950f..9a25654f87de 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -279,8 +280,8 @@ public void testGetAll() throws IOException, InterruptedException { TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build()); } // add hbase:meta - htds - .createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build()); + htds.createTableDescriptor( + TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).build()); assertEquals("getAll() didn't return all TableDescriptors, expected: " + (count + 1) + " got: " + htds.getAll().size(), count + 1, htds.getAll().size()); } @@ -298,8 +299,8 @@ public void testParallelGetAll() throws IOException, InterruptedException { TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build()); } // add hbase:meta - htds - .createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build()); + htds.createTableDescriptor( + TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).build()); int getTableDescriptorSize = htds.getAll().size(); assertEquals("getAll() didn't return all TableDescriptors, expected: " + (count + 1) + " got: " @@ -326,7 +327,7 @@ public void testGetAllOrdering() throws Exception { // 
Remove hbase:meta from list. It shows up now since we made it dynamic. The schema // is written into the fs by the FSTableDescriptors constructor now where before it // didn't. - tables.remove(TableName.META_TABLE_NAME.getNameAsString()); + tables.remove(MetaTableName.getInstance().getNameAsString()); assertEquals(4, tables.size()); String[] tableNamesOrdered = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java index c8e96383492a..db5bc77a367c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckCleanReplicationBarriers.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; @@ -198,7 +199,7 @@ private void addStateAndBarrier(RegionInfo region, RegionState.State state, long put.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER, put.getTimestamp() - barriers.length + i, Bytes.toBytes(barriers[i])); } - try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { + try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) { table.put(put); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java index b24b721762d3..04f7d74b950b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.io.hfile.TestHFile; @@ -84,7 +85,7 @@ public static void setUpBeforeClass() throws Exception { admin = connection.getAdmin(); admin.balancerSwitch(false, true); - TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); + TEST_UTIL.waitUntilAllRegionsAssigned(MetaTableName.getInstance()); } @AfterClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover1.java index 503f0dbdd971..508c2e63ab25 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover1.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMover1.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.TableName; @@ -287,7 +288,7 @@ public void testRegionServerPort() throws Exception { public void testLoadMetaRegion() throws Exception { HRegionServer rsWithMeta = TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().stream() .map(t -> t.getRegionServer()) - .filter(rs -> rs.getRegions(TableName.META_TABLE_NAME).size() > 0).findFirst().get(); + .filter(rs -> rs.getRegions(MetaTableName.getInstance()).size() > 0).findFirst().get(); int onlineRegions = rsWithMeta.getNumberOfOnlineRegions(); String rsName = 
rsWithMeta.getServerName().getAddress().toString(); try (RegionMover rm = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMoverWithRSGroupEnable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMoverWithRSGroupEnable.java index 56e103aa612e..594141f7e802 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMoverWithRSGroupEnable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionMoverWithRSGroupEnable.java @@ -26,6 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtil; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -86,7 +87,7 @@ public void setUp() throws Exception { // Remove rs contains hbase:meta, otherwise test looks unstable and buggy in test env. ServerName rsContainMeta = TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads().stream() .map(t -> t.getRegionServer()) - .filter(rs -> rs.getRegions(TableName.META_TABLE_NAME).size() > 0).findFirst().get() + .filter(rs -> rs.getRegions(MetaTableName.getInstance()).size() > 0).findFirst().get() .getServerName(); LOG.info("{} contains hbase:meta", rsContainMeta); List modifiable = new ArrayList<>(allServers); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java index cc1fefc266c4..ef31c35dfca2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MetaTableName; import 
org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -456,7 +457,7 @@ public void testRecoveredEditsPathForMeta() throws IOException { @Test public void testOldRecoveredEditsFileSidelined() throws IOException { Path p = createRecoveredEditsPathForRegion(); - Path tdir = CommonFSUtils.getTableDir(HBASEDIR, TableName.META_TABLE_NAME); + Path tdir = CommonFSUtils.getTableDir(HBASEDIR, MetaTableName.getInstance()); Path regiondir = new Path(tdir, RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName()); fs.mkdirs(regiondir); Path parent = WALSplitUtil.getRegionDirRecoveredEditsDir(regiondir); @@ -469,7 +470,7 @@ public void testOldRecoveredEditsFileSidelined() throws IOException { private Path createRecoveredEditsPathForRegion() throws IOException { byte[] encoded = RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(); - Path p = WALSplitUtil.getRegionSplitEditsPath(TableName.META_TABLE_NAME, encoded, 1, + Path p = WALSplitUtil.getRegionSplitEditsPath(MetaTableName.getInstance(), encoded, 1, FILENAME_BEING_SPLIT, TMPDIRNAME, conf, ""); return p; } diff --git a/hbase-shell/src/main/ruby/hbase/table.rb b/hbase-shell/src/main/ruby/hbase/table.rb index aed5e1c26e59..ff852b357ffc 100644 --- a/hbase-shell/src/main/ruby/hbase/table.rb +++ b/hbase-shell/src/main/ruby/hbase/table.rb @@ -748,7 +748,7 @@ def get_all_columns # Checks if current table is one of the 'meta' tables def is_meta_table? 
- org.apache.hadoop.hbase.TableName::META_TABLE_NAME.equals(@table.getName) + org.apache.hadoop.hbase.MetaTableName.getInstance.equals(@table.getName) end # Given a column specification in the format FAMILY[:QUALIFIER[:CONVERTER]] diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java index 27f3dd4f43ab..eb40f4eee439 100644 --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java @@ -343,7 +343,7 @@ public boolean restoreClusterMetrics(ClusterMetrics desiredStatus) throws IOExce * Get the ServerName of region server serving the first hbase:meta region */ public ServerName getServerHoldingMeta() throws IOException { - return getServerHoldingRegion(TableName.META_TABLE_NAME, + return getServerHoldingRegion(MetaTableName.getInstance(), RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()); } diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 1dea0ba4c367..f5d80730820f 100644 --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1104,7 +1104,7 @@ public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option) // Populate the master address configuration from mini cluster configuration. 
conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c)); // Don't leave here till we've done a successful scan of the hbase:meta - try (Table t = getConnection().getTable(TableName.META_TABLE_NAME); + try (Table t = getConnection().getTable(MetaTableName.getInstance()); ResultScanner s = t.getScanner(new Scan())) { for (;;) { if (s.next() == null) { @@ -1226,7 +1226,7 @@ public void restartHBaseCluster(StartMiniClusterOption option) option.getMasterClass(), option.getRsClass()); // Don't leave here till we've done a successful scan of the hbase:meta Connection conn = ConnectionFactory.createConnection(this.conf); - Table t = conn.getTable(TableName.META_TABLE_NAME); + Table t = conn.getTable(MetaTableName.getInstance()); ResultScanner s = t.getScanner(new Scan()); while (s.next() != null) { // do nothing @@ -2394,7 +2394,7 @@ public String checksumRows(final Table table) throws Exception { */ public List createMultiRegionsInMeta(final Configuration conf, final TableDescriptor htd, byte[][] startKeys) throws IOException { - Table meta = getConnection().getTable(TableName.META_TABLE_NAME); + Table meta = getConnection().getTable(MetaTableName.getInstance()); Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR); List newRegions = new ArrayList<>(startKeys.length); MetaTableAccessor.updateTableState(getConnection(), htd.getTableName(), @@ -2476,7 +2476,7 @@ public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootD */ public List getMetaTableRows() throws IOException { // TODO: Redo using MetaTableAccessor class - Table t = getConnection().getTable(TableName.META_TABLE_NAME); + Table t = getConnection().getTable(MetaTableName.getInstance()); List rows = new ArrayList<>(); ResultScanner s = t.getScanner(new Scan()); for (Result result : s) { @@ -2494,7 +2494,7 @@ public List getMetaTableRows() throws IOException { */ public List getMetaTableRows(TableName tableName) throws IOException { // TODO: Redo using MetaTableAccessor. 
- Table t = getConnection().getTable(TableName.META_TABLE_NAME); + Table t = getConnection().getTable(MetaTableName.getInstance()); List rows = new ArrayList<>(); ResultScanner s = t.getScanner(new Scan()); for (Result result : s) { @@ -2824,7 +2824,7 @@ public void process(WatchedEvent watchedEvent) { monitor.close(); if (checkStatus) { - getConnection().getTable(TableName.META_TABLE_NAME).close(); + getConnection().getTable(MetaTableName.getInstance()).close(); } } @@ -3347,7 +3347,7 @@ public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOExce * Waith until all system table's regions get assigned */ public void waitUntilAllSystemRegionsAssigned() throws IOException { - waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); + waitUntilAllRegionsAssigned(MetaTableName.getInstance()); } /** @@ -3360,7 +3360,7 @@ public void waitUntilAllSystemRegionsAssigned() throws IOException { public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout) throws IOException { if (!TableName.isMetaTableName(tableName)) { - try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) { + try (final Table meta = getConnection().getTable(MetaTableName.getInstance())) { LOG.debug("Waiting until all regions of table " + tableName + " get assigned. 
Timeout = " + timeout + "ms"); waitFor(timeout, 200, true, new ExplainingPredicate() { @@ -3578,7 +3578,7 @@ public Table createRandomTable(TableName tableName, final Collection fam Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions); if (hbaseCluster != null) { - getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME); + getMiniHBaseCluster().flushcache(MetaTableName.getInstance()); } BufferedMutator mutator = getConnection().getBufferedMutator(tableName); @@ -3793,7 +3793,7 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableDescripto } public static int getMetaRSPort(Connection connection) throws IOException { - try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) { + try (RegionLocator locator = connection.getRegionLocator(MetaTableName.getInstance())) { return locator.getRegionLocation(Bytes.toBytes("")).getPort(); } } diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java index 1900c6c0f8da..19aa72487d5c 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; @@ -1073,11 +1074,11 @@ public TRegionInfo getRegionInfo(ByteBuffer searchRow) throws IOError { try { byte[] row = getBytes(searchRow); Result startRowResult = - getReverseScanResult(TableName.META_TABLE_NAME.getName(), row, HConstants.CATALOG_FAMILY); + getReverseScanResult(MetaTableName.getInstance().getName(), row, 
HConstants.CATALOG_FAMILY); if (startRowResult == null) { - throw new IOException( - "Cannot find row in " + TableName.META_TABLE_NAME + ", row=" + Bytes.toStringBinary(row)); + throw new IOException("Cannot find row in " + MetaTableName.getInstance() + ", row=" + + Bytes.toStringBinary(row)); } // find region start and end keys diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java index d34600bc5d3a..a9fe61d1d4b8 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java @@ -19,6 +19,7 @@ import com.google.errorprone.annotations.RestrictedApi; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.RegionInfo; @@ -165,11 +166,12 @@ public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, int replicaId, RegionState.State state) throws KeeperException { if (serverName == null) { - LOG.warn("Tried to set null ServerName in hbase:meta; skipping -- ServerName required"); + LOG.warn("Tried to set null ServerName in {}; skipping -- ServerName required", + MetaTableName.getInstance()); return; } - LOG.info("Setting hbase:meta replicaId={} location in ZooKeeper as {}, state={}", replicaId, - serverName, state); + LOG.info("Setting {} replicaId={} location in ZooKeeper as {}, state={}", + MetaTableName.getInstance(), replicaId, serverName, state); // Make the MetaRegionServer pb and then get its bytes and save this as // the znode content. 
MetaRegionServer pbrsr = @@ -180,10 +182,10 @@ public static void setMetaLocation(ZKWatcher zookeeper, ServerName serverName, i ZKUtil.setData(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId), data); } catch (KeeperException.NoNodeException nne) { if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) { - LOG.debug("hbase:meta region location doesn't exist, create it"); + LOG.debug("{} region location doesn't exist, create it", MetaTableName.getInstance()); } else { - LOG.debug( - "hbase:meta region location doesn't exist for replicaId=" + replicaId + ", create it"); + LOG.debug("{} region location doesn't exist for replicaId={}, create it", + MetaTableName.getInstance(), replicaId); } ZKUtil.createAndWatch(zookeeper, zookeeper.getZNodePaths().getZNodeForReplica(replicaId), data); @@ -233,9 +235,10 @@ public static void deleteMetaLocation(ZKWatcher zookeeper) throws KeeperExceptio public static void deleteMetaLocation(ZKWatcher zookeeper, int replicaId) throws KeeperException { if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) { - LOG.info("Deleting hbase:meta region location in ZooKeeper"); + LOG.info("Deleting {} region location in ZooKeeper", MetaTableName.getInstance()); } else { - LOG.info("Deleting hbase:meta for {} region location in ZooKeeper", replicaId); + LOG.info("Deleting {} for {} region location in ZooKeeper", MetaTableName.getInstance(), + replicaId); } try { // Just delete the node. Don't need any watches. 
diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKDump.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKDump.java index cd2b0b1a0149..f5f250ff1354 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKDump.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKDump.java @@ -32,6 +32,7 @@ import java.util.LinkedList; import java.util.List; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MetaTableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; @@ -74,7 +75,7 @@ public static String dump(final ZKWatcher zkWatcher) { sb.append("\n ").append(child); } } - sb.append("\nRegion server holding hbase:meta:"); + sb.append("\nRegion server holding ").append(MetaTableName.getInstance()).append(":"); sb.append("\n ").append(MetaTableLocator.getMetaRegionLocation(zkWatcher)); int numMetaReplicas = zkWatcher.getMetaReplicaNodes().size(); for (int i = 1; i < numMetaReplicas; i++) {