diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
index 20884edf836e..ec5a04a63a05 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Connection;
@@ -169,7 +170,7 @@ private List<String> getLogFilesForNewBackup(Map<String, Long> olderTimestamps,
LOG.debug("currentLogFile: " + log.getPath().toString());
if (AbstractFSWALProvider.isMetaFile(log.getPath())) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Skip hbase:meta log file: " + log.getPath().getName());
+ LOG.debug("Skip {} log file: {}", MetaTableName.getInstance(), log.getPath().getName());
}
continue;
}
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
index 6c021bf622a5..d316075979a1 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
@@ -36,8 +36,8 @@
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
@@ -131,7 +131,7 @@ public static void updateMetaWithFavoredNodesInfo(
puts.add(put);
}
}
- try (Table table = connection.getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = connection.getTable(MetaTableName.getInstance())) {
table.put(puts);
}
LOG.info("Added " + puts.size() + " region favored nodes in META");
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
index 02c18c73bfb5..e54ce7478e99 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -170,9 +171,10 @@ private void processMetaRecord(Result result) throws IOException {
* Initialize the region assignment snapshot by scanning the hbase:meta table
*/
public void initialize() throws IOException {
- LOG.info("Start to scan the hbase:meta for the current region assignment " + "snappshot");
+ LOG.info("Start to scan {} for the current region assignment snapshot",
+ MetaTableName.getInstance());
// Scan hbase:meta to pick up user regions
- try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME);
+ try (Table metaTable = connection.getTable(MetaTableName.getInstance());
ResultScanner scanner = metaTable.getScanner(HConstants.CATALOG_FAMILY)) {
for (;;) {
Result result = scanner.next();
@@ -187,7 +189,8 @@ public void initialize() throws IOException {
}
}
}
- LOG.info("Finished to scan the hbase:meta for the current region assignment" + "snapshot");
+ LOG.info("Finished scanning {} for the current region assignment snapshot",
+ MetaTableName.getInstance());
}
private void addRegion(RegionInfo regionInfo) {
diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java
index 3548571286c0..08ea314ae337 100644
--- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java
+++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java
@@ -27,6 +27,7 @@
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -68,7 +69,7 @@ public static void setup() {
// Create regions
List<RegionInfo> allRegions = new ArrayList<>();
for (int i = 0; i < NUM_REGIONS; i++) {
- TableName tableName = i < 3 ? TableName.META_TABLE_NAME : NON_META_TABLE_NAME;
+ TableName tableName = i < 3 ? MetaTableName.getInstance() : NON_META_TABLE_NAME;
byte[] startKey = new byte[1];
startKey[0] = (byte) i;
byte[] endKey = new byte[1];
@@ -95,7 +96,7 @@ public void testMetaTableIsolation() {
}
private boolean isMetaTableIsolated(BalancerClusterState cluster) {
- return isTableIsolated(cluster, TableName.META_TABLE_NAME, "Meta");
+ return isTableIsolated(cluster, MetaTableName.getInstance(), "Meta");
}
}
diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java
index 0ea739faf78b..9f552debf324 100644
--- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java
+++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java
@@ -28,6 +28,7 @@
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -72,7 +73,7 @@ public static void setup() {
for (int i = 0; i < NUM_REGIONS; i++) {
TableName tableName;
if (i < 1) {
- tableName = TableName.META_TABLE_NAME;
+ tableName = MetaTableName.getInstance();
} else if (i < 10) {
tableName = SYSTEM_TABLE_NAME;
} else {
@@ -116,7 +117,7 @@ public void testTableIsolationAndReplicaDistribution() {
* Validates whether all meta table regions are isolated.
*/
private boolean isMetaTableIsolated(BalancerClusterState cluster) {
- return isTableIsolated(cluster, TableName.META_TABLE_NAME, "Meta");
+ return isTableIsolated(cluster, MetaTableName.getInstance(), "Meta");
}
/**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
index 42bfd757e0d1..bf8513ee959d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
@@ -196,7 +196,7 @@ private static CompletableFuture<List<Pair<RegionInfo, ServerName>>> getTableReg
final AsyncTable<AdvancedScanResultConsumer> metaTable, final TableName tableName,
final boolean excludeOfflinedSplitParents) {
CompletableFuture<List<Pair<RegionInfo, ServerName>>> future = new CompletableFuture<>();
- if (TableName.META_TABLE_NAME.equals(tableName)) {
+ if (MetaTableName.getInstance().equals(tableName)) {
future.completeExceptionally(new IOException(
"This method can't be used to locate meta regions;" + " use MetaTableLocator instead"));
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
index e26fb837b89d..2ef315d2b4ac 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
@@ -22,7 +22,6 @@
import static org.apache.hadoop.hbase.HConstants.NINES;
import static org.apache.hadoop.hbase.HConstants.USE_META_REPLICAS;
import static org.apache.hadoop.hbase.HConstants.ZEROES;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.createRegionLocations;
import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.isGood;
import static org.apache.hadoop.hbase.client.ConnectionConfiguration.HBASE_CLIENT_META_CACHE_INVALIDATE_INTERVAL;
@@ -52,6 +51,7 @@
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -238,14 +238,15 @@ private boolean tryComplete(LocateRequest req, CompletableFuture<RegionLocations> future,
- .createSelector(replicaSelectorClass, META_TABLE_NAME, conn, () -> {
+ .createSelector(replicaSelectorClass, MetaTableName.getInstance(), conn, () -> {
int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS;
try {
RegionLocations metaLocations = conn.registry.getMetaRegionLocations()
.get(conn.connConf.getMetaReadRpcTimeoutNs(), TimeUnit.NANOSECONDS);
numOfReplicas = metaLocations.size();
} catch (Exception e) {
- LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e);
+ LOG.error("Failed to get table {}'s region replication, ",
+ MetaTableName.getInstance(), e);
}
return numOfReplicas;
});
@@ -427,7 +428,7 @@ private void locateInMeta(TableName tableName, LocateRequest req) {
// do nothing
}
- conn.getTable(META_TABLE_NAME).scan(scan, new AdvancedScanResultConsumer() {
+ conn.getTable(MetaTableName.getInstance()).scan(scan, new AdvancedScanResultConsumer() {
private boolean completeNormally = false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
index 0e872a5b21da..cc10308b6327 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.client;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.REGION_NAMES_KEY;
import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.SERVER_NAME_KEY;
import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
@@ -36,6 +35,7 @@
import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -217,7 +217,7 @@ void clearCache(TableName tableName) {
new TableSpanBuilder(conn).setName("AsyncRegionLocator.clearCache").setTableName(tableName);
TraceUtil.trace(() -> {
LOG.debug("Clear meta cache for {}", tableName);
- if (tableName.equals(META_TABLE_NAME)) {
+ if (tableName.equals(MetaTableName.getInstance())) {
metaRegionLocator.clearCache();
} else {
nonMetaRegionLocator.clearCache(tableName);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
index b7ec7fcd8725..32349a64651d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
@@ -25,6 +25,7 @@
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;
@@ -63,7 +64,7 @@ public CompletableFuture<List<HRegionLocation>> getAllRegionLocations() {
.thenApply(locs -> Arrays.asList(locs.getRegionLocations()));
}
CompletableFuture<List<HRegionLocation>> future = ClientMetaTableAccessor
- .getTableHRegionLocations(conn.getTable(TableName.META_TABLE_NAME), tableName);
+ .getTableHRegionLocations(conn.getTable(MetaTableName.getInstance()), tableName);
addListener(future, (locs, error) -> locs.forEach(loc -> {
// the cache assumes that all locations have a serverName. only add if that's true
if (loc.getServerName() != null) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
index d6d8e00f7822..5e629839782f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
@@ -21,6 +21,7 @@
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
@@ -120,7 +121,7 @@ private static int checkReplicaId(int regionId) {
this.replicaId = checkReplicaId(replicaId);
this.offLine = offLine;
this.regionName = RegionInfo.createRegionName(this.tableName, this.startKey, this.regionId,
- this.replicaId, !this.tableName.equals(TableName.META_TABLE_NAME));
+ this.replicaId, !this.tableName.equals(MetaTableName.getInstance()));
this.encodedName = RegionInfo.encodeRegionName(this.regionName);
this.hashCode = generateHashCode(this.tableName, this.startKey, this.endKey, this.regionId,
this.replicaId, this.offLine, this.regionName);
@@ -232,7 +233,7 @@ public boolean containsRow(byte[] row) {
/** Returns true if this region is a meta region */
@Override
public boolean isMetaRegion() {
- return tableName.equals(TableName.META_TABLE_NAME);
+ return tableName.equals(MetaTableName.getInstance());
}
/** Returns True if has been split and has daughters. */
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 83780a4a1219..b9228c66394e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.client;
import static org.apache.hadoop.hbase.HConstants.HIGH_QOS;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
import static org.apache.hadoop.hbase.util.FutureUtils.unwrapCompletionException;
@@ -57,6 +56,7 @@
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.RegionMetrics;
@@ -403,7 +403,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
AsyncAdminBuilderBase builder) {
this.connection = connection;
this.retryTimer = retryTimer;
- this.metaTable = connection.getTable(META_TABLE_NAME);
+ this.metaTable = connection.getTable(MetaTableName.getInstance());
this.rpcTimeoutNs = builder.rpcTimeoutNs;
this.operationTimeoutNs = builder.operationTimeoutNs;
this.pauseNs = builder.pauseNs;
@@ -995,7 +995,7 @@ List<RegionInfo>> adminCall(controller, stub,
@Override
public CompletableFuture<List<RegionInfo>> getRegions(TableName tableName) {
- if (tableName.equals(META_TABLE_NAME)) {
+ if (tableName.equals(MetaTableName.getInstance())) {
return connection.registry.getMetaRegionLocations()
.thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion)
.collect(Collectors.toList()));
@@ -1286,7 +1286,7 @@ private CompletableFuture<Void> compactRegion(byte[] regionName, byte[] columnFa
* List all region locations for the specific table.
*/
private CompletableFuture<List<HRegionLocation>> getTableHRegionLocations(TableName tableName) {
- if (TableName.META_TABLE_NAME.equals(tableName)) {
+ if (MetaTableName.getInstance().equals(tableName)) {
CompletableFuture<List<HRegionLocation>> future = new CompletableFuture<>();
addListener(connection.registry.getMetaRegionLocations(), (metaRegions, err) -> {
if (err != null) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
index 10c554e26f79..bc3b48a54a28 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
@@ -26,6 +26,7 @@
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.util.ByteArrayHashKey;
@@ -431,7 +432,7 @@ static byte[] toByteArray(RegionInfo ri) {
*/
static String prettyPrint(final String encodedRegionName) {
if (encodedRegionName.equals("1028785192")) {
- return encodedRegionName + "/hbase:meta";
+ return encodedRegionName + "/" + MetaTableName.getInstance();
}
return encodedRegionName;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
index ef927fd3a55b..1c2aab455b55 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
@@ -43,7 +44,7 @@ public class RegionInfoBuilder {
// TODO: How come Meta regions still do not have encoded region names? Fix.
// hbase:meta,,1.1588230740 should be the hbase:meta first region name.
public static final RegionInfo FIRST_META_REGIONINFO =
- new MutableRegionInfo(1L, TableName.META_TABLE_NAME, RegionInfo.DEFAULT_REPLICA_ID);
+ new MutableRegionInfo(1L, MetaTableName.getInstance(), RegionInfo.DEFAULT_REPLICA_ID);
private final TableName tableName;
private byte[] startKey = HConstants.EMPTY_START_ROW;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
index 3f353b5799d4..b8288e709cb7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
@@ -20,7 +20,7 @@
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
@@ -82,7 +82,7 @@ public static String getRegionNameAsStringForDisplay(RegionInfo ri, Configuratio
*/
public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) {
boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
- if (displayKey || ri.getTable().equals(TableName.META_TABLE_NAME)) {
+ if (displayKey || ri.getTable().equals(MetaTableName.getInstance())) {
return ri.getRegionName();
} else {
// create a modified regionname with the startkey replaced but preserving
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
index eed1a40a2c2f..a1b766696250 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
@@ -38,6 +38,7 @@
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.HBaseException;
@@ -616,7 +617,7 @@ private ModifyableTableDescriptor(final TableName name,
families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c)));
this.values.putAll(values);
this.values.put(IS_META_KEY,
- new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME)))));
+ new Bytes(Bytes.toBytes(Boolean.toString(name.equals(MetaTableName.getInstance())))));
}
/**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java
index 40ff0373c36c..8f0c11a03f6b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java
@@ -22,7 +22,7 @@
import java.security.PrivilegedExceptionAction;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Connection;
@@ -73,7 +73,7 @@ private static void injectFault() throws ServiceException {
future.completeExceptionally(ProtobufUtil.handleRemoteException(injectedException));
return future;
}
- AsyncTable<AdvancedScanResultConsumer> table = conn.getTable(TableName.META_TABLE_NAME);
+ AsyncTable<AdvancedScanResultConsumer> table = conn.getTable(MetaTableName.getInstance());
table. coprocessorService(
AuthenticationProtos.AuthenticationService::newStub,
@@ -102,7 +102,7 @@ static Token<AuthenticationTokenIdentifier> obtainToken(Connection conn) throws
try {
injectFault();
- meta = conn.getTable(TableName.META_TABLE_NAME);
+ meta = conn.getTable(MetaTableName.getInstance());
CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
AuthenticationProtos.AuthenticationService.BlockingInterface service =
AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 60175137ad2c..7cb4ffe285eb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -65,6 +65,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.ServerTask;
@@ -3325,7 +3326,7 @@ public static String toLockJson(List<LockServiceProtos.LockedResource> lockedRes
long regionId = proto.getRegionId();
int defaultReplicaId = org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID;
int replicaId = proto.hasReplicaId() ? proto.getReplicaId() : defaultReplicaId;
- if (tableName.equals(TableName.META_TABLE_NAME) && replicaId == defaultReplicaId) {
+ if (tableName.equals(MetaTableName.getInstance()) && replicaId == defaultReplicaId) {
return RegionInfoBuilder.FIRST_META_REGIONINFO;
}
byte[] startKey = null;
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
index f65c7ccb6e75..2206a800767e 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
@@ -195,7 +196,7 @@ public void testCreateSystemTable() {
// that we pass the correct priority
@Test
public void testCreateMetaTable() {
- conn.getAdmin().createTable(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
+ conn.getAdmin().createTable(TableDescriptorBuilder.newBuilder(MetaTableName.getInstance())
.setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()).join();
verify(masterStub, times(1)).createTable(assertPriority(SYSTEMTABLE_QOS),
any(CreateTableRequest.class), any());
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
index a7df92999d08..d519870080bf 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
@@ -45,9 +45,9 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MatcherPredicate;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
@@ -85,7 +85,7 @@ public class TestAsyncRegionLocatorTracing {
@Before
public void setUp() throws IOException {
- RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+ RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(MetaTableName.getInstance()).build();
locs = new RegionLocations(
new HRegionLocation(metaRegionInfo,
ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime())),
@@ -147,30 +147,30 @@ public void testClearCacheServerName() {
@Test
public void testClearCacheTableName() {
- conn.getLocator().clearCache(TableName.META_TABLE_NAME);
+ conn.getLocator().clearCache(MetaTableName.getInstance());
SpanData span = waitSpan("AsyncRegionLocator.clearCache");
assertThat(span,
allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL),
buildConnectionAttributesMatcher(conn),
- buildTableAttributesMatcher(TableName.META_TABLE_NAME)));
+ buildTableAttributesMatcher(MetaTableName.getInstance())));
}
@Test
public void testGetRegionLocation() {
- conn.getLocator().getRegionLocation(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW,
+ conn.getLocator().getRegionLocation(MetaTableName.getInstance(), HConstants.EMPTY_START_ROW,
RegionLocateType.CURRENT, TimeUnit.SECONDS.toNanos(1)).join();
SpanData span = waitSpan("AsyncRegionLocator.getRegionLocation");
assertThat(span,
allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL),
buildConnectionAttributesMatcher(conn),
- buildTableAttributesMatcher(TableName.META_TABLE_NAME),
+ buildTableAttributesMatcher(MetaTableName.getInstance()),
hasAttributes(containsEntryWithStringValuesOf("db.hbase.regions",
locs.getDefaultRegionLocation().getRegion().getRegionNameAsString()))));
}
@Test
public void testGetRegionLocations() {
- conn.getLocator().getRegionLocations(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW,
+ conn.getLocator().getRegionLocations(MetaTableName.getInstance(), HConstants.EMPTY_START_ROW,
RegionLocateType.CURRENT, false, TimeUnit.SECONDS.toNanos(1)).join();
SpanData span = waitSpan("AsyncRegionLocator.getRegionLocations");
String[] expectedRegions =
@@ -178,7 +178,7 @@ public void testGetRegionLocations() {
.map(RegionInfo::getRegionNameAsString).toArray(String[]::new);
assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL),
buildConnectionAttributesMatcher(conn),
- buildTableAttributesMatcher(TableName.META_TABLE_NAME), hasAttributes(
+ buildTableAttributesMatcher(MetaTableName.getInstance()), hasAttributes(
containsEntryWithStringValuesOf("db.hbase.regions", containsInAnyOrder(expectedRegions)))));
}
}
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
index cb5431c35d3e..34e9ba201838 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
@@ -50,6 +50,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
@@ -237,7 +238,7 @@ public void testGetSystemTable() {
@Test
public void testGetMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME).get(new Get(Bytes.toBytes(0))).join();
+ conn.getTable(MetaTableName.getInstance()).get(new Get(Bytes.toBytes(0))).join();
verify(stub, times(1)).get(assertPriority(SYSTEMTABLE_QOS), any(GetRequest.class), any());
}
@@ -268,7 +269,7 @@ public void testPutSystemTable() {
@Test
public void testPutMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME).put(new Put(Bytes.toBytes(0))
+ conn.getTable(MetaTableName.getInstance()).put(new Put(Bytes.toBytes(0))
.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join();
verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any());
}
@@ -296,7 +297,7 @@ public void testDeleteSystemTable() {
@Test
public void testDeleteMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME).delete(new Delete(Bytes.toBytes(0))).join();
+ conn.getTable(MetaTableName.getInstance()).delete(new Delete(Bytes.toBytes(0))).join();
verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any());
}
@@ -327,7 +328,7 @@ public void testAppendSystemTable() {
@Test
public void testAppendMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME).append(new Append(Bytes.toBytes(0))
+ conn.getTable(MetaTableName.getInstance()).append(new Append(Bytes.toBytes(0))
.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join();
verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any());
}
@@ -355,7 +356,7 @@ public void testIncrementSystemTable() {
@Test
public void testIncrementMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME)
+ conn.getTable(MetaTableName.getInstance())
.incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join();
verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any());
}
@@ -393,7 +394,7 @@ public void testCheckAndPutSystemTable() {
@Test
public void testCheckAndPutMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
+ conn.getTable(MetaTableName.getInstance()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
.qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0))
.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")))
.join();
@@ -426,7 +427,7 @@ public void testCheckAndDeleteSystemTable() {
@Test
public void testCheckAndDeleteMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
+ conn.getTable(MetaTableName.getInstance()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
.qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0))
.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")))
.join();
@@ -467,7 +468,7 @@ public void testCheckAndMutateSystemTable() throws IOException {
@Test
public void testCheckAndMutateMetaTable() throws IOException {
- conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
+ conn.getTable(MetaTableName.getInstance()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
.qualifier(Bytes.toBytes("cq")).ifEquals(Bytes.toBytes("v"))
.thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0))))
.join();
@@ -555,7 +556,7 @@ public void testScanSystemTable() throws Exception {
@Test
public void testScanMetaTable() throws Exception {
CompletableFuture<Void> renewFuture = mockScanReturnRenewFuture(SYSTEMTABLE_QOS);
- testForTable(TableName.META_TABLE_NAME, renewFuture, Optional.empty());
+ testForTable(MetaTableName.getInstance(), renewFuture, Optional.empty());
}
private void testForTable(TableName tableName, CompletableFuture<Void> renewFuture,
@@ -598,7 +599,7 @@ public void testBatchSystemTable() {
@Test
public void testBatchMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME).batchAll(Arrays.asList(new Delete(Bytes.toBytes(0))))
+ conn.getTable(MetaTableName.getInstance()).batchAll(Arrays.asList(new Delete(Bytes.toBytes(0))))
.join();
verify(stub, times(1)).multi(assertPriority(SYSTEMTABLE_QOS),
any(ClientProtos.MultiRequest.class), any());
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java
index 40617d78950a..c8e6b2158ce1 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.client;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -34,6 +33,7 @@
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -94,7 +94,7 @@ public void testCompactTableWithNullLocations() throws Exception {
mockedMeta.when(() -> ClientMetaTableAccessor.getTableHRegionLocations(any(AsyncTable.class),
any(TableName.class))).thenReturn(nullLocationsFuture);
AsyncTable metaTable = mock(AsyncTable.class);
- when(connection.getTable(META_TABLE_NAME)).thenReturn(metaTable);
+ when(connection.getTable(MetaTableName.getInstance())).thenReturn(metaTable);
HashedWheelTimer hashedWheelTimer = mock(HashedWheelTimer.class);
AsyncAdminBuilderBase asyncAdminBuilderBase = mock(AsyncAdminBuilderBase.class);
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
index f74b79a0672e..e01b3b741dcc 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
@@ -27,6 +27,7 @@
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -141,7 +142,7 @@ public void testContainsRange() {
@Test
public void testContainsRangeForMetaTable() {
TableDescriptor tableDesc =
- TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+ TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).build();
RegionInfo hri = RegionInfoBuilder.newBuilder(tableDesc.getTableName()).build();
byte[] startRow = HConstants.EMPTY_START_ROW;
byte[] row1 = Bytes.toBytes("a,a,0");
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
index 53f33845ef7d..d09f7a225a6e 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
@@ -26,6 +26,7 @@
import java.io.IOException;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.HBaseException;
@@ -59,7 +60,7 @@ public class TestTableDescriptorBuilder {
@Test(expected = IOException.class)
public void testAddCoprocessorTwice() throws IOException {
String cpName = "a.b.c.d";
- TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).setCoprocessor(cpName)
+ TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).setCoprocessor(cpName)
.setCoprocessor(cpName).build();
}
@@ -67,7 +68,7 @@ public void testAddCoprocessorTwice() throws IOException {
public void testPb() throws DeserializationException, IOException {
final int v = 123;
TableDescriptor htd =
- TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).setMaxFileSize(v)
+ TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).setMaxFileSize(v)
.setDurability(Durability.ASYNC_WAL).setReadOnly(true).setRegionReplication(2).build();
byte[] bytes = TableDescriptorBuilder.toByteArray(htd);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
index 0e6a53ca7c47..49eb3b9cce62 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
@@ -787,7 +787,7 @@ public static CellComparator getCellComparator(TableName tableName) {
*/
public static CellComparator getCellComparator(byte[] tableName) {
// FYI, TableName.toBytes does not create an array; just returns existing array pointer.
- return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes())
+ return Bytes.equals(tableName, MetaTableName.getInstance().toBytes())
? MetaCellComparator.META_COMPARATOR
: CellComparatorImpl.COMPARATOR;
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 1051686d32e8..2c0fcafabfca 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1209,7 +1209,7 @@ public enum OperationStatusCode {
@Deprecated
public static final List<String> HBASE_NON_USER_TABLE_DIRS =
Collections.unmodifiableList(Arrays.asList(
- (String[]) ArrayUtils.addAll(new String[] { TableName.META_TABLE_NAME.getNameAsString() },
+ (String[]) ArrayUtils.addAll(new String[] { MetaTableName.getInstance().getNameAsString() },
HBASE_NON_TABLE_DIRS.toArray())));
/** Health script related settings. */
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java
index 7f6e87ebf911..e2fd632be19f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java
@@ -75,7 +75,7 @@ public static CellComparator getInnerStoreCellComparator(TableName tableName) {
* @return CellComparator to use going off the {@code tableName} passed.
*/
public static CellComparator getInnerStoreCellComparator(byte[] tableName) {
- return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes())
+ return Bytes.equals(tableName, MetaTableName.getInstance().toBytes())
? MetaCellComparator.META_COMPARATOR
: InnerStoreCellComparator.INNER_STORE_COMPARATOR;
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java
new file mode 100644
index 000000000000..bbacedc28390
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
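+/**
+ * Provides the {@link TableName} of the hbase:meta catalog table via {@link #getInstance()},
+ * replacing direct use of the deprecated {@code TableName.META_TABLE_NAME} constant. The
+ * instance is resolved lazily, exactly once, using double-checked locking on a volatile field.
+ */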
+@InterfaceAudience.Public
+public class MetaTableName {
+ private static final Logger LOG = LoggerFactory.getLogger(MetaTableName.class);
+ private static volatile TableName instance;
+
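+ // Not instantiable; use the static getInstance() accessor.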
+ private MetaTableName() {
+ }
+
+ /**
+ * Get the singleton instance of the meta table name.
+ * @return The meta table name instance
+ */
+ public static TableName getInstance() {
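+ // Double-checked locking: the unsynchronized first read keeps the common path cheap,
+ // while the second check inside the lock guarantees a single initialization.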
+ if (instance == null) {
+ synchronized (MetaTableName.class) {
+ if (instance == null) {
+ instance = initializeHbaseMetaTableName(HBaseConfiguration.create());
+ LOG.info("Meta table name initialized: {}", instance.getName());
+ }
+ }
+ }
+ return instance;
+ }
+
+ /**
+ * Initialize the meta table name from the given configuration.
+ * @param conf The configuration to use
+ * @return The initialized meta table name
+ */
+ private static TableName initializeHbaseMetaTableName(Configuration conf) {
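+ // The Configuration parameter is not consulted yet: the name is fixed to "hbase:meta".
+ // It is accepted here so the meta table name can later be derived from configuration
+ // without changing callers.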
+ TableName metaTableName =
+ TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta");
+ LOG.info("Meta table suffix value: {}", metaTableName);
+ return metaTableName;
+ }
+}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
index b6d854c13784..442dc10d8721 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
@@ -25,6 +25,9 @@
import org.apache.commons.lang3.ArrayUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
@@ -43,8 +46,11 @@
*
*/
@InterfaceAudience.Public
+@InterfaceStability.Stable
public final class TableName implements Comparable<TableName> {
+ private static final Logger LOG = LoggerFactory.getLogger(TableName.class);
+
/** See {@link #createTableNameIfNecessary(ByteBuffer, ByteBuffer)} */
private static final Set<TableName> tableCache = new CopyOnWriteArraySet<>();
@@ -66,7 +72,9 @@ public final class TableName implements Comparable<TableName> {
+ NAMESPACE_DELIM + ")?)" + "(?:" + VALID_TABLE_QUALIFIER_REGEX + "))";
/** The hbase:meta table's name. */
- public static final TableName META_TABLE_NAME =
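+ /** @deprecated Use {@link MetaTableName#getInstance()} instead. */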
+ @Deprecated
+ public static TableName META_TABLE_NAME =
valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta");
/**
@@ -87,7 +94,7 @@ public final class TableName implements Comparable<TableName> {
/** Returns True if tn is the hbase:meta table name. */
public static boolean isMetaTableName(final TableName tn) {
- return tn.equals(TableName.META_TABLE_NAME);
+ return tn.equals(MetaTableName.getInstance());
}
/**
@@ -288,8 +295,8 @@ private TableName(ByteBuffer namespace, ByteBuffer qualifier) throws IllegalArgu
throw new IllegalArgumentException(OLD_ROOT_STR + " has been deprecated.");
}
if (qualifierAsString.equals(OLD_META_STR)) {
- throw new IllegalArgumentException(
- OLD_META_STR + " no longer exists. The table has been " + "renamed to " + META_TABLE_NAME);
+ throw new IllegalArgumentException(OLD_META_STR + " no longer exists. The table has been "
+ + "renamed to " + MetaTableName.getInstance());
}
if (Bytes.equals(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME, namespace)) {
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
index 553b39311369..5c143d8ee065 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
@@ -196,39 +196,37 @@ public void testMetaComparisons2() {
long now = EnvironmentEdgeManager.currentTime();
CellComparator c = MetaCellComparator.META_COMPARATOR;
assertTrue(c.compare(
- createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now)),
- createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now)))
- == 0);
+ createByteBufferKeyValueFromKeyValue(new KeyValue(
+ Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now)),
+ createByteBufferKeyValueFromKeyValue(new KeyValue(
+ Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now))) == 0);
Cell a = createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now));
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now));
Cell b = createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now));
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now));
assertTrue(c.compare(a, b) < 0);
assertTrue(c.compare(
- createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now)),
- createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now)))
- > 0);
+ createByteBufferKeyValueFromKeyValue(new KeyValue(
+ Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now)),
+ createByteBufferKeyValueFromKeyValue(new KeyValue(
+ Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now))) > 0);
assertTrue(c.compare(
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)),
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)))
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)))
== 0);
assertTrue(c.compare(
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)),
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now)))
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now)))
< 0);
assertTrue(c.compare(
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now)),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now)),
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)))
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)))
> 0);
}
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
index 1644a6f1fce7..1e65b75a9777 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
@@ -198,31 +198,32 @@ public void testKeyValueBorderCases() {
private void metacomparisons(final CellComparatorImpl c) {
long now = EnvironmentEdgeManager.currentTime();
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now),
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now))
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now))
== 0);
KeyValue a =
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now);
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now);
KeyValue b =
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now);
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now);
assertTrue(c.compare(a, b) < 0);
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now),
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now))
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now))
> 0);
}
private void comparisons(final CellComparatorImpl c) {
long now = EnvironmentEdgeManager.currentTime();
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now),
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)) == 0);
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now))
+ == 0);
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now),
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now)) < 0);
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now)) < 0);
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now),
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)) > 0);
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)) > 0);
}
@Test
diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
index a459074ba27d..54ceeecfec21 100644
--- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
+++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
@@ -222,16 +222,16 @@ public void testRegionStatesCount() throws Exception {
ClusterMetrics metrics = ADMIN.getClusterMetrics();
Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(),
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getRegionsInTransition(),
0);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getOpenRegions(), 1);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getTotalRegions(), 1);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getClosedRegions(), 0);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getClosedRegions(), 0);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getSplitRegions(), 0);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getSplitRegions(), 0);
Assert.assertEquals(
metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0);
Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 1);
@@ -253,12 +253,12 @@ public void testRegionStatesWithSplit() throws Exception {
ClusterMetrics metrics = ADMIN.getClusterMetrics();
Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(),
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getRegionsInTransition(),
0);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getOpenRegions(), 1);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getTotalRegions(), 1);
Assert.assertEquals(
metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0);
Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 1);
@@ -273,12 +273,12 @@ public void testRegionStatesWithSplit() throws Exception {
metrics = ADMIN.getClusterMetrics();
Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(),
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getRegionsInTransition(),
0);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getOpenRegions(), 1);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getTotalRegions(), 1);
Assert.assertEquals(
metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0);
Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 2);
diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
index 5e8447c2ad81..267b78dade13 100644
--- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
+++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
@@ -42,8 +42,8 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
-import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.slf4j.Logger;
@@ -165,7 +165,7 @@ public void startHBase() throws IOException {
int attemptsLeft = 10;
while (attemptsLeft-- > 0) {
try {
- testUtil.getConnection().getTable(TableName.META_TABLE_NAME);
+ testUtil.getConnection().getTable(MetaTableName.getInstance());
} catch (Exception e) {
LOG.info("Waiting for HBase to startup. Retries left: " + attemptsLeft, e);
Threads.sleep(1000);
diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java
index 144ea6503b06..dc7d025796bd 100644
--- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java
+++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -124,7 +125,7 @@ protected int doWork() throws Exception {
LOG.debug("Trying to scan meta");
- Table metaTable = connection.getTable(TableName.META_TABLE_NAME);
+ Table metaTable = connection.getTable(MetaTableName.getInstance());
ResultScanner scanner = metaTable.getScanner(new Scan());
Result result;
while ((result = scanner.next()) != null) {
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
index 53ed8a25ed0e..7aea9b356259 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.master;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.metrics.BaseSource;
import org.apache.yetus.audience.InterfaceAudience;
@@ -49,7 +50,7 @@ public interface MetricsMasterFileSystemSource extends BaseSource {
String SPLIT_SIZE_NAME = "hlogSplitSize";
String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()";
- String META_SPLIT_SIZE_DESC = "Size of hbase:meta WAL files being split";
+ String META_SPLIT_SIZE_DESC = "Size of " + MetaTableName.getInstance() + " WAL files being split";
String SPLIT_TIME_DESC = "Time it takes to finish WAL.splitLog()";
String SPLIT_SIZE_DESC = "Size of WAL files being split";
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
index a8c3a16d13dc..fd07d7e1dc6a 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
@@ -56,7 +56,7 @@ public static void setUp() throws Exception {
1000);
// Make sure there are three servers.
util.initializeCluster(3);
- HBaseTestingUtil.setReplicas(util.getAdmin(), TableName.META_TABLE_NAME, 3);
+ HBaseTestingUtil.setReplicas(util.getAdmin(), MetaTableName.getInstance(), 3);
ZKWatcher zkw = util.getZooKeeperWatcher();
Configuration conf = util.getConfiguration();
String baseZNode =
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
index 2bb87ca8f2f6..fbc98f006393 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.NamespaceExistException;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.TableExistsException;
@@ -184,7 +185,7 @@ private static void setupActions() throws IOException {
// Set up the action that will move the regions of meta.
moveMetaRegionsAction = new MoveRegionsOfTableAction(sleepTime,
- MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, TableName.META_TABLE_NAME);
+ MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, MetaTableName.getInstance());
// Set up the action that will move the regions of our table.
moveRegionAction = new MoveRegionsOfTableAction(sleepTime,
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index d4ccac901436..b861c29a9bcc 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -50,6 +50,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
@@ -256,7 +257,7 @@ public void testSimpleCase() throws Throwable {
@Test
public void testMetaExport() throws Throwable {
String[] args =
- new String[] { TableName.META_TABLE_NAME.getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" };
+ new String[] { MetaTableName.getInstance().getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" };
assertTrue(runExport(args));
}
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
index a115fd17af3f..5ddc23d36554 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.rest.client.Client;
@@ -55,7 +56,7 @@ public class TestStatusResource {
private static final Logger LOG = LoggerFactory.getLogger(TestStatusResource.class);
- private static final byte[] META_REGION_NAME = Bytes.toBytes(TableName.META_TABLE_NAME + ",,1");
+ private static final byte[] META_REGION_NAME = Bytes.toBytes(MetaTableName.getInstance() + ",,1");
private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility();
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
index 8310232890dd..1db8c371593c 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
@@ -23,7 +23,7 @@
import java.util.Iterator;
import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.testclassification.RestTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -89,8 +89,8 @@ protected StorageClusterStatusModel buildTestModel() {
model.addLiveNode("test1", 1245219839331L, 128, 1024).addRegion(Bytes.toBytes("hbase:root,,0"),
1, 1, 0, 0, 0, 1, 1, 2, 1, 1, 1, 1, 1);
model.addLiveNode("test2", 1245239331198L, 512, 1024).addRegion(
- Bytes.toBytes(TableName.META_TABLE_NAME + ",,1246000043724"), 1, 1, 0, 0, 0, 1, 1, 2, 1, 1, 1,
- 1, 1);
+ Bytes.toBytes(MetaTableName.getInstance() + ",,1246000043724"), 1, 1, 0, 0, 0, 1, 1, 2, 1, 1,
+ 1, 1, 1);
return model;
}
@@ -128,7 +128,7 @@ protected void checkModel(StorageClusterStatusModel model) {
assertEquals(1024, node.getMaxHeapSizeMB());
regions = node.getRegions().iterator();
region = regions.next();
- assertEquals(Bytes.toString(region.getName()), TableName.META_TABLE_NAME + ",,1246000043724");
+ assertEquals(Bytes.toString(region.getName()), MetaTableName.getInstance() + ",,1246000043724");
assertEquals(1, region.getStores());
assertEquals(1, region.getStorefiles());
assertEquals(0, region.getStorefileSizeMB());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 05b049e27dbc..24bbfb5079f7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -151,7 +151,7 @@ public static Table getMetaHTable(final Connection connection) throws IOExceptio
if (connection.isClosed()) {
throw new IOException("connection is closed");
}
- return connection.getTable(TableName.META_TABLE_NAME);
+ return connection.getTable(MetaTableName.getInstance());
}
/**
@@ -366,7 +366,7 @@ private static Scan getMetaScan(Configuration conf, int rowUpperLimit) {
public static List<Pair<RegionInfo, ServerName>> getTableRegionsAndLocations(
Connection connection, @Nullable final TableName tableName,
final boolean excludeOfflinedSplitParents) throws IOException {
- if (tableName != null && tableName.equals(TableName.META_TABLE_NAME)) {
+ if (tableName != null && tableName.equals(MetaTableName.getInstance())) {
throw new IOException(
"This method can't be used to locate meta regions; use MetaTableLocator instead");
}
@@ -592,7 +592,7 @@ public static PairOfSameType<RegionInfo> getDaughterRegions(Result data) {
*/
@Nullable
public static TableState getTableState(Connection conn, TableName tableName) throws IOException {
- if (tableName.equals(TableName.META_TABLE_NAME)) {
+ if (tableName.equals(MetaTableName.getInstance())) {
return new TableState(tableName, TableState.State.ENABLED);
}
Table metaHTable = getMetaHTable(conn);
@@ -859,7 +859,7 @@ public static void addRegionsToMeta(Connection connection, List<RegionInfo> regi
private static void updateTableState(Connection connection, TableState state) throws IOException {
Put put = makePutFromTableState(state, EnvironmentEdgeManager.currentTime());
putToMetaTable(connection, put);
- LOG.info("Updated {} in hbase:meta", state);
+ LOG.info("Updated {} in {}", state, MetaTableName.getInstance());
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java
index 3cac1f319dae..aa934cc6d462 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java
@@ -26,7 +26,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
@@ -133,7 +133,7 @@ private String getRegionIdFromOp(Row op) {
}
private boolean isMetaTableOp(ObserverContext<? extends RegionCoprocessorEnvironment> e) {
- return TableName.META_TABLE_NAME.equals(e.getEnvironment().getRegionInfo().getTable());
+ return MetaTableName.getInstance().equals(e.getEnvironment().getRegionInfo().getTable());
}
private void clientMetricRegisterAndMark() {
@@ -268,7 +268,7 @@ public void start(CoprocessorEnvironment env) throws IOException {
env instanceof RegionCoprocessorEnvironment
&& ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() != null
&& ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable()
- .equals(TableName.META_TABLE_NAME)
+ .equals(MetaTableName.getInstance())
) {
RegionCoprocessorEnvironment regionCoprocessorEnv = (RegionCoprocessorEnvironment) env;
registry = regionCoprocessorEnv.getMetricRegistryForRegionServer();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 22d3ab69b51c..d11b244448b4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -83,6 +83,7 @@
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.PleaseRestartMasterException;
@@ -1092,7 +1093,9 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE
startupTaskGroup.addTask("Initializing meta table if this is a new deploy");
InitMetaProcedure initMetaProc = null;
// Print out state of hbase:meta on startup; helps debugging.
- if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) {
+ if (
+ !this.assignmentManager.getRegionStates().hasTableRegionStates(MetaTableName.getInstance())
+ ) {
Optional<InitMetaProcedure> optProc = procedureExecutor.getProcedures().stream()
.filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny();
initMetaProc = optProc.orElseGet(() -> {
@@ -1156,7 +1159,7 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE
return;
}
- TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME);
+ TableDescriptor metaDescriptor = tableDescriptors.get(MetaTableName.getInstance());
final ColumnFamilyDescriptor tableFamilyDesc =
metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY);
final ColumnFamilyDescriptor replBarrierFamilyDesc =
@@ -1174,16 +1177,17 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE
if (conf.get(HConstants.META_REPLICAS_NUM) != null) {
int replicasNumInConf =
conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM);
- TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME);
+ TableDescriptor metaDesc = tableDescriptors.get(MetaTableName.getInstance());
if (metaDesc.getRegionReplication() != replicasNumInConf) {
// it is possible that we already have some replicas before upgrading, so we must set the
// region replication number in meta TableDescriptor directly first, without creating a
// ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas.
int existingReplicasCount =
- assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size();
+ assignmentManager.getRegionStates().getRegionsOfTable(MetaTableName.getInstance()).size();
if (existingReplicasCount > metaDesc.getRegionReplication()) {
- LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)"
- + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount);
+ LOG.info(
+ "Update replica count of {} from {}(in TableDescriptor)" + " to {}(existing ZNodes)",
+ MetaTableName.getInstance(), metaDesc.getRegionReplication(), existingReplicasCount);
metaDesc = TableDescriptorBuilder.newBuilder(metaDesc)
.setRegionReplication(existingReplicasCount).build();
tableDescriptors.update(metaDesc);
@@ -1193,7 +1197,8 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE
LOG.info(
"The {} config is {} while the replica count in TableDescriptor is {}"
+ " for hbase:meta, altering...",
- HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication());
+ HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication(),
+ MetaTableName.getInstance());
procedureExecutor.submitProcedure(new ModifyTableProcedure(
procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc)
.setRegionReplication(replicasNumInConf).build(),
@@ -1423,7 +1428,7 @@ private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor)
TableDescriptor newMetaDesc = TableDescriptorBuilder.newBuilder(metaDescriptor)
.setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf))
.setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()).build();
- long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, 0, 0, false);
+ long pid = this.modifyTable(MetaTableName.getInstance(), () -> newMetaDesc, 0, 0, false);
int tries = 30;
while (
!(getMasterProcedureExecutor().isFinished(pid)) && getMasterProcedureExecutor().isRunning()
@@ -2586,7 +2591,7 @@ private void startActiveMasterManager(int infoPort) throws KeeperException {
}
private static boolean isCatalogTable(final TableName tableName) {
- return tableName.equals(TableName.META_TABLE_NAME);
+ return tableName.equals(MetaTableName.getInstance());
}
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
index 464dfaca7035..c77d2c0c1a85 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ipc.AnnotationReadingPriorityFunction;
import org.apache.yetus.audience.InterfaceAudience;
@@ -84,7 +85,7 @@ protected int getBasePriority(RequestHeader header, Message param) {
if (rst.getRegionInfoList() != null) {
for (HBaseProtos.RegionInfo info : rst.getRegionInfoList()) {
TableName tn = ProtobufUtil.toTableName(info.getTableName());
- if (TableName.META_TABLE_NAME.equals(tn)) {
+ if (MetaTableName.getInstance().equals(tn)) {
return META_TRANSITION_QOS;
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index 854c21da2bc7..3edfc1eb67a0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
@@ -605,7 +606,7 @@ public static void printAssignmentPlan(FavoredNodesPlan plan) {
*/
public void updateAssignmentPlanToMeta(FavoredNodesPlan plan) throws IOException {
try {
- LOG.info("Start to update the hbase:meta with the new assignment plan");
+ LOG.info("Started updating {} with the new assignment plan", MetaTableName.getInstance());
Map<String, List<ServerName>> assignmentMap = plan.getAssignmentMap();
Map<String, List<ServerName>> planToUpdate = new HashMap<>(assignmentMap.size());
Map<String, RegionInfo> regionToRegionInfoMap =
@@ -619,6 +620,7 @@ public void updateAssignmentPlanToMeta(FavoredNodesPlan plan) throws IOException
+ LOG.info("Updated {} with the new assignment plan", MetaTableName.getInstance());
} catch (Exception e) {
LOG.error(
"Failed to update hbase:meta with the new assignment" + "plan because " + e.getMessage());
}
}
@@ -690,14 +692,14 @@ private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws I
}
public void updateAssignmentPlan(FavoredNodesPlan plan) throws IOException {
- LOG.info("Start to update the new assignment plan for the hbase:meta table and"
- + " the region servers");
+ LOG.info("Started updating the new assignment plan for {} and the region servers",
+ MetaTableName.getInstance());
// Update the new assignment plan to META
updateAssignmentPlanToMeta(plan);
// Update the new assignment plan to Region Servers
updateAssignmentPlanToRegionServers(plan);
- LOG.info("Finish to update the new assignment plan for the hbase:meta table and"
- + " the region servers");
+ LOG.info("Finished updating the new assignment plan for {} and the region servers",
+ MetaTableName.getInstance());
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index 4d18b2ad8f4e..9903c84785e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
@@ -79,7 +80,7 @@ private void tryMigrateNamespaceTable() throws IOException, InterruptedException
if (!opt.isPresent()) {
// the procedure is not present, check whether have the ns family in meta table
TableDescriptor metaTableDesc =
- masterServices.getTableDescriptors().get(TableName.META_TABLE_NAME);
+ masterServices.getTableDescriptors().get(MetaTableName.getInstance());
if (metaTableDesc.hasColumnFamily(HConstants.NAMESPACE_FAMILY)) {
// normal case, upgrading is done or the cluster is created with 3.x code
migrationDone = true;
@@ -106,7 +107,7 @@ private void addToCache(Result result, byte[] family, byte[] qualifier) throws I
}
private void loadFromMeta() throws IOException {
- try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME);
+ try (Table table = masterServices.getConnection().getTable(MetaTableName.getInstance());
ResultScanner scanner = table.getScanner(HConstants.NAMESPACE_FAMILY)) {
for (Result result;;) {
result = scanner.next();
@@ -204,7 +205,7 @@ public static void insertNamespaceToMeta(Connection conn, NamespaceDescriptor ns
Put put = new Put(row, true).addColumn(HConstants.NAMESPACE_FAMILY,
HConstants.NAMESPACE_COL_DESC_QUALIFIER,
ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray());
- try (Table table = conn.getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = conn.getTable(MetaTableName.getInstance())) {
table.put(put);
}
}
@@ -212,7 +213,7 @@ public static void insertNamespaceToMeta(Connection conn, NamespaceDescriptor ns
public void deleteNamespace(String namespaceName) throws IOException {
checkMigrationDone();
Delete d = new Delete(Bytes.toBytes(namespaceName));
- try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = masterServices.getConnection().getTable(MetaTableName.getInstance())) {
table.delete(d);
}
cache.remove(namespaceName);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
index 6ad32623be1a..cdd54d616bee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Result;
@@ -86,7 +87,7 @@ public boolean isTableState(TableName tableName, TableState.State... states) {
}
public void setDeletedTable(TableName tableName) throws IOException {
- if (tableName.equals(TableName.META_TABLE_NAME)) {
+ if (tableName.equals(MetaTableName.getInstance())) {
// Can't delete the hbase:meta table.
return;
}
@@ -147,7 +148,7 @@ public TableState getTableState(TableName tableName) throws IOException {
}
private void updateMetaState(TableName tableName, TableState.State newState) throws IOException {
- if (tableName.equals(TableName.META_TABLE_NAME)) {
+ if (tableName.equals(MetaTableName.getInstance())) {
if (
TableState.State.DISABLING.equals(newState) || TableState.State.DISABLED.equals(newState)
) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 1b64ddea23e3..f352bbc8b28f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -45,6 +45,7 @@
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -354,7 +355,7 @@ public void start() throws IOException, KeeperException {
if (RegionReplicaUtil.isDefaultReplica(regionInfo.getReplicaId())) {
setMetaAssigned(regionInfo, state == State.OPEN);
}
- LOG.debug("Loaded hbase:meta {}", regionNode);
+ LOG.debug("Loaded {} {}", MetaTableName.getInstance(), regionNode);
}, result);
}
}
@@ -1948,8 +1949,8 @@ private void checkMetaLoaded(RegionInfo hri) throws PleaseHoldException {
boolean meta = isMetaRegion(hri);
boolean metaLoaded = isMetaLoaded();
if (!meta && !metaLoaded) {
- throw new PleaseHoldException(
- "Master not fully online; hbase:meta=" + meta + ", metaLoaded=" + metaLoaded);
+ throw new PleaseHoldException("Master not fully online; " + MetaTableName.getInstance() + "="
+ + meta + ", metaLoaded=" + metaLoaded);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index c370fed9d9c0..e6891d08f075 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.MetaMutationAnnotation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
@@ -718,8 +719,10 @@ private void preMergeRegionsCommit(final MasterProcedureEnv env) throws IOExcept
RegionInfo.parseRegionName(p.getRow());
}
} catch (IOException e) {
- LOG.error("Row key of mutation from coprocessor is not parsable as region name. "
- + "Mutations from coprocessor should only be for hbase:meta table.", e);
+ LOG.error(
+ "Row key of mutation from coprocessor is not parsable as region name. "
+ + "Mutations from coprocessor should only be for {} table.",
+ MetaTableName.getInstance(), e);
throw e;
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 5987fc7537b4..4d42ad619255 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -169,9 +170,10 @@ public static void visitMetaEntry(final RegionStateVisitor visitor, final Result
final long openSeqNum = hrl.getSeqNum();
LOG.debug(
- "Load hbase:meta entry region={}, regionState={}, lastHost={}, "
+ "Load {} entry region={}, regionState={}, lastHost={}, "
+ "regionLocation={}, openSeqNum={}",
- regionInfo.getEncodedName(), state, lastHost, regionLocation, openSeqNum);
+ MetaTableName.getInstance(), regionInfo.getEncodedName(), state, lastHost, regionLocation,
+ openSeqNum);
visitor.visitRegionState(result, regionInfo, state, regionLocation, lastHost, openSeqNum);
}
}
@@ -190,8 +192,8 @@ private Put generateUpdateRegionLocationPut(RegionStateNode regionStateNode) thr
final Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), time);
MetaTableAccessor.addRegionInfo(put, regionInfo);
final StringBuilder info =
- new StringBuilder("pid=").append(pid).append(" updating hbase:meta row=")
- .append(regionInfo.getEncodedName()).append(", regionState=").append(state);
+ new StringBuilder("pid=").append(pid).append(" updating ").append(MetaTableName.getInstance())
+ .append(" row=").append(regionInfo.getEncodedName()).append(", regionState=").append(state);
if (openSeqNum >= 0) {
Preconditions.checkArgument(state == State.OPEN && regionLocation != null,
"Open region should be on a server");
@@ -283,7 +285,7 @@ private CompletableFuture<Void> updateRegionLocation(RegionInfo regionInfo, Stat
future = FutureUtils.failedFuture(e);
}
} else {
- AsyncTable<?> table = master.getAsyncConnection().getTable(TableName.META_TABLE_NAME);
+ AsyncTable<?> table = master.getAsyncConnection().getTable(MetaTableName.getInstance());
future = table.put(put);
}
FutureUtils.addListener(future, (r, e) -> {
@@ -330,7 +332,7 @@ private void multiMutate(RegionInfo ri, List<Mutation> mutations) throws IOExcep
}
MutateRowsRequest request = builder.build();
AsyncTable<?> table =
- master.getConnection().toAsyncConnection().getTable(TableName.META_TABLE_NAME);
+ master.getConnection().toAsyncConnection().getTable(MetaTableName.getInstance());
CompletableFuture<MutateRowsResponse> future = table.<MultiRowMutationService,
MutateRowsResponse> coprocessorService(MultiRowMutationService::newStub,
(stub, controller, done) -> stub.mutateRows(controller, request, done), row);
@@ -338,7 +340,7 @@ MutateRowsResponse> coprocessorService(MultiRowMutationService::newStub,
}
private Table getMetaTable() throws IOException {
- return master.getConnection().getTable(TableName.META_TABLE_NAME);
+ return master.getConnection().getTable(MetaTableName.getInstance());
}
private Result getRegionCatalogResult(RegionInfo region) throws IOException {
@@ -504,7 +506,7 @@ public void deleteMergeQualifiers(RegionInfo mergeRegion) throws IOException {
+ " in meta table, they are cleaned up already, Skip.");
return;
}
- try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = master.getConnection().getTable(MetaTableName.getInstance())) {
table.delete(delete);
}
LOG.info(
@@ -694,7 +696,7 @@ public static State getRegionState(final Result r, RegionInfo regionInfo) {
return State.valueOf(state);
} catch (IllegalArgumentException e) {
LOG.warn(
- "BAD value {} in hbase:meta info:state column for region {} , "
+ "BAD value {} in " + MetaTableName.getInstance() + " info:state column for region {} , "
+ "Consider using HBCK2 setRegionState ENCODED_REGION_NAME STATE",
state, regionInfo.getEncodedName());
return null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 3d3d3d18de23..55ac3fa2a0bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
@@ -903,8 +904,10 @@ private void preSplitRegionBeforeMETA(final MasterProcedureEnv env)
RegionInfo.parseRegionName(p.getRow());
}
} catch (IOException e) {
- LOG.error("pid=" + getProcId() + " row key of mutation from coprocessor not parsable as "
- + "region name." + "Mutations from coprocessor should only for hbase:meta table.");
+ LOG.error(
+ "pid={} row key of mutation from coprocessor not parsable as region name. "
+ + "Mutations from coprocessor should only be for {} table.",
+ getProcId(), MetaTableName.getInstance());
throw e;
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java
index 77b1082d0f03..419f8fb15312 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
@@ -80,7 +81,7 @@ public synchronized void chore() {
long deletedLastPushedSeqIds = 0;
TableName tableName = null;
List<String> peerIds = null;
- try (Table metaTable = conn.getTable(TableName.META_TABLE_NAME);
+ try (Table metaTable = conn.getTable(MetaTableName.getInstance());
ResultScanner scanner = metaTable.getScanner(
new Scan().addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions())) {
for (;;) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java
index 9f5ff857d4d8..f9fa67da83ae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java
@@ -33,6 +33,7 @@
import org.apache.commons.lang3.builder.ToStringStyle;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer;
import org.apache.hadoop.hbase.client.AsyncConnection;
@@ -156,7 +157,7 @@ public TableName getScanTable() {
public Results getResults() {
final AsyncTable<AdvancedScanResultConsumer> asyncTable =
- connection.getTable(TableName.META_TABLE_NAME);
+ connection.getTable(MetaTableName.getInstance());
return new Results(asyncTable.getScanner(buildScan()));
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
index 0d3ddb43abd4..a6cec33c3efb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -105,7 +106,7 @@ protected boolean initialChore() {
scan();
}
} catch (IOException e) {
- LOG.warn("Failed initial janitorial scan of hbase:meta table", e);
+ LOG.warn("Failed initial janitorial scan of {} table", MetaTableName.getInstance(), e);
return false;
}
return true;
@@ -145,7 +146,7 @@ protected void chore() {
+ this.services.getServerManager().isClusterShutdown());
}
} catch (IOException e) {
- LOG.warn("Failed janitorial scan of hbase:meta table", e);
+ LOG.warn("Failed janitorial scan of {} table", MetaTableName.getInstance(), e);
}
}
@@ -484,7 +485,7 @@ public static void main(String[] args) throws IOException {
*/
Get g = new Get(Bytes.toBytes("t2,40,1564119846424.1db8c57d64e0733e0f027aaeae7a0bf0."));
g.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
- try (Table t = connection.getTable(TableName.META_TABLE_NAME)) {
+ try (Table t = connection.getTable(MetaTableName.getInstance())) {
Result r = t.get(g);
byte[] row = g.getRow();
row[row.length - 2] <<= row[row.length - 2];
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
index 1244d5bf3525..9b30d5198510 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
@@ -203,19 +204,19 @@ private static List<RegionInfo> createMetaEntries(final MasterServices masterSer
.flatMap(List::stream).collect(Collectors.toList());
final List<IOException> createMetaEntriesFailures = addMetaEntriesResults.stream()
.filter(Either::hasRight).map(Either::getRight).collect(Collectors.toList());
- LOG.debug("Added {}/{} entries to hbase:meta", createMetaEntriesSuccesses.size(),
- newRegionInfos.size());
+ LOG.debug("Added {}/{} entries to {}", createMetaEntriesSuccesses.size(), newRegionInfos.size(),
+ MetaTableName.getInstance());
if (!createMetaEntriesFailures.isEmpty()) {
LOG.warn(
- "Failed to create entries in hbase:meta for {}/{} RegionInfo descriptors. First"
+ "Failed to create entries in {}} for {}/{} RegionInfo descriptors. First"
+ " failure message included; full list of failures with accompanying stack traces is"
+ " available at log level DEBUG. message={}",
- createMetaEntriesFailures.size(), addMetaEntriesResults.size(),
+ MetaTableName.getInstance(), createMetaEntriesFailures.size(), addMetaEntriesResults.size(),
createMetaEntriesFailures.get(0).getMessage());
if (LOG.isDebugEnabled()) {
- createMetaEntriesFailures
- .forEach(ioe -> LOG.debug("Attempt to fix region hole in hbase:meta failed.", ioe));
+ createMetaEntriesFailures.forEach(ioe -> LOG
+ .debug("Attempt to fix region hole in {} failed.", MetaTableName.getInstance(), ioe));
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
index c712f1cba672..c74be9de50d9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
@@ -22,6 +22,7 @@
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -137,8 +138,9 @@ private RegionInfo metaTableConsistencyCheck(Result metaTableRow) {
if (!Bytes.equals(metaTableRow.getRow(), ri.getRegionName())) {
LOG.warn(
"INCONSISTENCY: Row name is not equal to serialized info:regioninfo content; "
- + "row={} {}; See if RegionInfo is referenced in another hbase:meta row? Delete?",
- Bytes.toStringBinary(metaTableRow.getRow()), ri.getRegionNameAsString());
+ + "row={} {}; See if RegionInfo is referenced in another {} row? Delete?",
+ Bytes.toStringBinary(metaTableRow.getRow()), ri.getRegionNameAsString(),
+ MetaTableName.getInstance());
return null;
}
// Skip split parent region
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index e199f6d5971d..98f43871238e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
@@ -394,7 +395,8 @@ private static void cleanRegionsInMeta(final MasterProcedureEnv env, final Table
long now = EnvironmentEdgeManager.currentTime();
List<Delete> deletes = new ArrayList<>();
try (
- Table metaTable = env.getMasterServices().getConnection().getTable(TableName.META_TABLE_NAME);
+ Table metaTable =
+ env.getMasterServices().getConnection().getTable(MetaTableName.getInstance());
ResultScanner scanner = metaTable.getScanner(tableScan)) {
for (;;) {
Result result = scanner.next();
@@ -405,7 +407,7 @@ private static void cleanRegionsInMeta(final MasterProcedureEnv env, final Table
}
if (!deletes.isEmpty()) {
LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName + " from "
- + TableName.META_TABLE_NAME);
+ + MetaTableName.getInstance());
metaTable.delete(deletes);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
index e8999b886afd..8ce33c1574ca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
@@ -111,7 +112,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, final DisableTable
) {
MasterFileSystem fs = env.getMasterFileSystem();
try (BufferedMutator mutator = env.getMasterServices().getConnection()
- .getBufferedMutator(TableName.META_TABLE_NAME)) {
+ .getBufferedMutator(MetaTableName.getInstance())) {
for (RegionInfo region : env.getAssignmentManager().getRegionStates()
.getRegionsOfTable(tableName)) {
long maxSequenceId = WALSplitUtil.getMaxRegionSequenceId(
@@ -230,7 +231,7 @@ public TableOperationType getTableOperationType() {
*/
private boolean prepareDisable(final MasterProcedureEnv env) throws IOException {
boolean canTableBeDisabled = true;
- if (tableName.equals(TableName.META_TABLE_NAME)) {
+ if (tableName.equals(MetaTableName.getInstance())) {
setFailure("master-disable-table",
new ConstraintException("Cannot disable " + this.tableName));
canTableBeDisabled = false;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
index 43d69361c2d2..d7a4ce95c4ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
@@ -102,14 +103,14 @@ List<RegionInfo> getRegionsOnCrashedServer(MasterProcedureEnv env) {
MetaTableAccessor.scanMetaForTableRegions(env.getMasterServices().getConnection(), visitor,
null);
} catch (IOException ioe) {
- LOG.warn("Failed scan of hbase:meta for 'Unknown Servers'", ioe);
+ LOG.warn("Failed scan of {} for 'Unknown Servers'", MetaTableName.getInstance(), ioe);
return ris;
}
// create the server state node too
env.getAssignmentManager().getRegionStates().createServer(getServerName());
- LOG.info("Found {} mentions of {} in hbase:meta of OPEN/OPENING Regions: {}",
- visitor.getReassigns().size(), getServerName(), visitor.getReassigns().stream()
- .map(RegionInfo::getEncodedName).collect(Collectors.joining(",")));
+ LOG.info("Found {} mentions of {} in {} of OPEN/OPENING Regions: {}",
+ visitor.getReassigns().size(), getServerName(), MetaTableName.getInstance(), visitor
+ .getReassigns().stream().map(RegionInfo::getEncodedName).collect(Collectors.joining(",")));
return visitor.getReassigns();
}
@@ -150,8 +151,8 @@ public boolean visit(Result result) throws IOException {
RegionState rs = new RegionState(hrl.getRegion(), state, hrl.getServerName());
if (rs.isClosing()) {
// Move region to CLOSED in hbase:meta.
- LOG.info("Moving {} from CLOSING to CLOSED in hbase:meta",
- hrl.getRegion().getRegionNameAsString());
+ LOG.info("Moving {} from CLOSING to CLOSED in {}",
+ hrl.getRegion().getRegionNameAsString(), MetaTableName.getInstance());
try {
MetaTableAccessor.updateRegionState(this.connection, hrl.getRegion(),
RegionState.State.CLOSED);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
index 8b4901e90e85..2dfc652fc542 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -67,7 +68,7 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure<InitM
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
// Meta Queue Lookup Helpers
// ============================================================================
private MetaQueue getMetaQueue() {
- MetaQueue node = AvlTree.get(metaMap, TableName.META_TABLE_NAME, META_QUEUE_KEY_COMPARATOR);
+ MetaQueue node = AvlTree.get(metaMap, MetaTableName.getInstance(), META_QUEUE_KEY_COMPARATOR);
if (node != null) {
return node;
}
@@ -1079,7 +1080,7 @@ public boolean waitMetaExclusiveLock(Procedure<?> procedure) {
return false;
}
waitProcedure(lock, procedure);
- logLockedResource(LockedResourceType.META, TableName.META_TABLE_NAME.getNameAsString());
+ logLockedResource(LockedResourceType.META, MetaTableName.getInstance().getNameAsString());
return true;
} finally {
schedUnlock();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java
index 3d313c9ac3ab..5915971bd4c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java
@@ -19,6 +19,7 @@
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.procedure2.LockStatus;
import org.apache.hadoop.hbase.procedure2.Procedure;
@@ -33,7 +34,7 @@
class MetaQueue extends Queue<TableName> {
protected MetaQueue(LockStatus lockStatus) {
- super(TableName.META_TABLE_NAME, 1, lockStatus);
+ super(MetaTableName.getInstance(), 1, lockStatus);
}
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java
index dc9eac4c879d..30a120143ade 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
@@ -64,7 +65,7 @@ private void migrate(MasterProcedureEnv env) throws IOException {
try (Table nsTable = conn.getTable(TableName.NAMESPACE_TABLE_NAME);
ResultScanner scanner = nsTable.getScanner(
new Scan().addFamily(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES).readAllVersions());
- BufferedMutator mutator = conn.getBufferedMutator(TableName.META_TABLE_NAME)) {
+ BufferedMutator mutator = conn.getBufferedMutator(MetaTableName.getInstance())) {
for (Result result;;) {
result = scanner.next();
if (result == null) {
@@ -88,7 +89,7 @@ protected Flow executeFromState(MasterProcedureEnv env, MigrateNamespaceTablePro
switch (state) {
case MIGRATE_NAMESPACE_TABLE_ADD_FAMILY:
TableDescriptor metaTableDesc =
- env.getMasterServices().getTableDescriptors().get(TableName.META_TABLE_NAME);
+ env.getMasterServices().getTableDescriptors().get(MetaTableName.getInstance());
if (!metaTableDesc.hasColumnFamily(HConstants.NAMESPACE_FAMILY)) {
TableDescriptor newMetaTableDesc = TableDescriptorBuilder.newBuilder(metaTableDesc)
.setColumnFamily(
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index 0d8981891e54..6951a35db098 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -107,8 +108,8 @@ protected void preflightChecks(MasterProcedureEnv env, Boolean enabled) throws H
Set<byte[]> cfs = this.modifiedTableDescriptor.getColumnFamilyNames();
for (byte[] family : UNDELETABLE_META_COLUMNFAMILIES) {
if (!cfs.contains(family)) {
- throw new HBaseIOException(
- "Delete of hbase:meta column family " + Bytes.toString(family));
+ throw new HBaseIOException("Delete of " + MetaTableName.getInstance() + " column family "
+ + Bytes.toString(family));
}
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java
index 642df36d535f..1b9b95889e05 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java
@@ -25,6 +25,7 @@
import java.util.stream.Collectors;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
@@ -174,7 +175,7 @@ List<LockedResource> getLocks() {
addToLockedResources(lockedResources, regionLocks, Function.identity(),
LockedResourceType.REGION);
addToLockedResources(lockedResources, peerLocks, Function.identity(), LockedResourceType.PEER);
- addToLockedResources(lockedResources, ImmutableMap.of(TableName.META_TABLE_NAME, metaLock),
+ addToLockedResources(lockedResources, ImmutableMap.of(MetaTableName.getInstance(), metaLock),
tn -> tn.getNameAsString(), LockedResourceType.META);
addToLockedResources(lockedResources, globalLocks, Function.identity(),
LockedResourceType.GLOBAL);
@@ -236,7 +237,7 @@ public String toString() {
.append("tableLocks", filterUnlocked(tableLocks))
.append("regionLocks", filterUnlocked(regionLocks))
.append("peerLocks", filterUnlocked(peerLocks))
- .append("metaLocks", filterUnlocked(ImmutableMap.of(TableName.META_TABLE_NAME, metaLock)))
+ .append("metaLocks", filterUnlocked(ImmutableMap.of(MetaTableName.getInstance(), metaLock)))
.append("globalLocks", filterUnlocked(globalLocks)).build();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java
index ef11e68217a5..4adf36efc7f6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java
@@ -20,7 +20,7 @@
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -231,7 +231,7 @@ public void toStringClassDetails(StringBuilder sb) {
}
private boolean prepareTruncate() throws IOException {
- if (getTableName().equals(TableName.META_TABLE_NAME)) {
+ if (getTableName().equals(MetaTableName.getInstance())) {
throw new IOException("Can't truncate region in catalog tables");
}
return true;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index fdfea375e096..00f15239bc1c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -56,6 +56,7 @@
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HBaseRpcServicesBase;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.MultiActionResultTooLarge;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.PrivateCellUtil;
@@ -1925,7 +1926,7 @@ public OpenRegionResponse openRegion(final RpcController controller,
tableName = ProtobufUtil.toTableName(ri.getTableName());
}
}
- if (!TableName.META_TABLE_NAME.equals(tableName)) {
+ if (!MetaTableName.getInstance().equals(tableName)) {
throw new ServiceException(ie);
}
// We are assigning meta, wait a little for regionserver to finish initialization.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java
index 8bf32baada22..ebdd54a081d0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.ClientMetaTableAccessor.QueryType;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
@@ -192,7 +193,7 @@ public static ReplicationBarrierResult getReplicationBarrierResult(Connection co
.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER)
.addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions().setReversed(true)
.setCaching(10);
- try (Table table = conn.getTable(TableName.META_TABLE_NAME);
+ try (Table table = conn.getTable(MetaTableName.getInstance());
ResultScanner scanner = table.getScanner(scan)) {
for (Result result;;) {
result = scanner.next();
@@ -215,7 +216,7 @@ public static ReplicationBarrierResult getReplicationBarrierResult(Connection co
public static long[] getReplicationBarriers(Connection conn, byte[] regionName)
throws IOException {
- try (Table table = conn.getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = conn.getTable(MetaTableName.getInstance())) {
Result result = table.get(new Get(regionName)
.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER)
.readAllVersions());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java
index 57d156ab1c2e..d90d671feffc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -544,7 +545,7 @@ public AuthResult permissionGranted(String request, User user, Action permReques
TableName tableName, Map<byte[], ? extends Collection<?>> families) {
// 1. All users need read access to hbase:meta table.
// this is a very common operation, so deal with it quickly.
- if (TableName.META_TABLE_NAME.equals(tableName)) {
+ if (MetaTableName.getInstance().equals(tableName)) {
if (permRequest == Action.READ) {
return AuthResult.allow(request, "All users allowed", user, permRequest, tableName,
families);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
index 4d6f57e22edc..7c0dbffb1330 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
@@ -65,6 +65,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.AsyncAdmin;
@@ -660,21 +661,21 @@ private int getRegionIndex(List<Pair<byte[], byte[]>> startEndKeys, byte[] key)
private void checkRegionIndexValid(int idx, List<Pair<byte[], byte[]>> startEndKeys,
TableName tableName) throws IOException {
if (idx < 0) {
- throw new IOException("The first region info for table " + tableName
- + " can't be found in hbase:meta.Please use hbck tool to fix it first.");
+ throw new IOException("The first region info for table " + tableName + " can't be found in "
+ + MetaTableName.getInstance() + ". Please use hbck tool to fix it" + " first.");
} else if (
(idx == startEndKeys.size() - 1)
&& !Bytes.equals(startEndKeys.get(idx).getSecond(), HConstants.EMPTY_BYTE_ARRAY)
) {
- throw new IOException("The last region info for table " + tableName
- + " can't be found in hbase:meta.Please use hbck tool to fix it first.");
+ throw new IOException("The last region info for table " + tableName + " can't be found in "
+ + MetaTableName.getInstance() + ". Please use hbck tool to fix it" + " first.");
} else if (
idx + 1 < startEndKeys.size() && !(Bytes.compareTo(startEndKeys.get(idx).getSecond(),
startEndKeys.get(idx + 1).getFirst()) == 0)
) {
throw new IOException("The endkey of one region for table " + tableName
- + " is not equal to the startkey of the next region in hbase:meta."
- + "Please use hbck tool to fix it first.");
+ + " is not equal to the startkey of the next region in " + MetaTableName.getInstance() + "."
+ + " Please use hbck tool to fix it first.");
}
}
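
The three branches of checkRegionIndexValid() encode the catalog invariant that region boundaries tile the key space: the first region must be present, the last region must end at the empty byte array, and each region's end key must equal the next region's start key. A minimal illustration of that last comparison, with made-up keys:

  // Hypothetical adjacent regions; contiguity holds exactly when the end key
  // of one region byte-compares equal to the start key of the next.
  byte[] endOfRegionA = Bytes.toBytes("row-b");
  byte[] startOfRegionB = Bytes.toBytes("row-b");
  boolean contiguous = Bytes.compareTo(endOfRegionA, startOfRegionB) == 0; // true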
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 75bf721ef41e..e3c13f92a017 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -46,6 +46,7 @@
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -147,20 +148,20 @@ public static TableDescriptor tryUpdateAndGetMetaTableDescriptor(Configuration c
FileSystem fs, Path rootdir) throws IOException {
// see if we already have meta descriptor on fs. Write one if not.
Optional<Pair<FileStatus, TableDescriptor>> opt = getTableDescriptorFromFs(fs,
- CommonFSUtils.getTableDir(rootdir, TableName.META_TABLE_NAME), false);
+ CommonFSUtils.getTableDir(rootdir, MetaTableName.getInstance()), false);
if (opt.isPresent()) {
return opt.get().getSecond();
}
TableDescriptorBuilder builder = createMetaTableDescriptorBuilder(conf);
TableDescriptor td = StoreFileTrackerFactory.updateWithTrackerConfigs(conf, builder.build());
- LOG.info("Creating new hbase:meta table descriptor {}", td);
+ LOG.info("Creating new {} table descriptor {}", MetaTableName.getInstance(), td);
TableName tableName = td.getTableName();
Path tableDir = CommonFSUtils.getTableDir(rootdir, tableName);
Path p = writeTableDescriptor(fs, td, tableDir, null);
if (p == null) {
- throw new IOException("Failed update hbase:meta table descriptor");
+ throw new IOException("Failed update " + MetaTableName.getInstance() + " table descriptor");
}
- LOG.info("Updated hbase:meta table descriptor to {}", p);
+ LOG.info("Updated {} table descriptor to {}", MetaTableName.getInstance(), p);
return td;
}
@@ -198,7 +199,7 @@ private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Con
// TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now
// the META table data goes to File mode BC only. Test how that affect the system. If too much,
// we have to rethink about adding back the setCacheDataInL1 for META table CFs.
- return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
+ return TableDescriptorBuilder.newBuilder(MetaTableName.getInstance())
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY)
.setMaxVersions(
conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS))
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index c3eafa7c11d1..9eff10a0b160 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -81,6 +81,7 @@
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -540,7 +541,7 @@ public void run() {
connection = ConnectionFactory.createConnection(getConf());
admin = connection.getAdmin();
- meta = connection.getTable(TableName.META_TABLE_NAME);
+ meta = connection.getTable(MetaTableName.getInstance());
status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS,
Option.MASTER, Option.BACKUP_MASTERS, Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));
}
@@ -660,17 +661,19 @@ public int onlineConsistencyRepair() throws IOException, KeeperException, Interr
reportUnknownServers();
// Check if hbase:meta is found only once and in the right place
if (!checkMetaRegion()) {
- String errorMsg = "hbase:meta table is not consistent. ";
+ String errorMsg = MetaTableName.getInstance() + " table is not consistent. ";
if (shouldFixAssignments()) {
- errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";
+ errorMsg += "HBCK will try fixing it. Rerun once " + MetaTableName.getInstance()
+ + " is back " + "to consistent state.";
} else {
- errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";
+ errorMsg += "Run HBCK with proper fix options to fix " + MetaTableName.getInstance()
+ + " inconsistency.";
}
errors.reportError(errorMsg + " Exiting...");
return -2;
}
// Not going with further consistency check for tables when hbase:meta itself is not consistent.
- LOG.info("Loading regionsinfo from the hbase:meta table");
+ LOG.info("Loading regionsinfo from the {} table", MetaTableName.getInstance());
boolean success = loadMetaEntries();
if (!success) return -1;
@@ -1219,8 +1222,8 @@ private boolean sidelineFile(FileSystem fs, Path hbaseRoot, Path path) throws IO
* TODO -- need to add tests for this.
*/
private void reportEmptyMetaCells() {
- errors.print("Number of empty REGIONINFO_QUALIFIER rows in hbase:meta: "
- + emptyRegionInfoQualifiers.size());
+ errors.print("Number of empty REGIONINFO_QUALIFIER rows in " + MetaTableName.getInstance()
+ + ": " + emptyRegionInfoQualifiers.size());
if (details) {
for (Result r : emptyRegionInfoQualifiers) {
errors.print(" " + r);
@@ -1371,7 +1374,7 @@ private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName,
*/
public void fixEmptyMetaCells() throws IOException {
if (shouldFixEmptyMetaCells() && !emptyRegionInfoQualifiers.isEmpty()) {
- LOG.info("Trying to fix empty REGIONINFO_QUALIFIER hbase:meta rows.");
+ LOG.info("Trying to fix empty REGIONINFO_QUALIFIER {} rows.", MetaTableName.getInstance());
for (Result region : emptyRegionInfoQualifiers) {
deleteMetaRegion(region.getRow());
errors.getErrorList().remove(ERROR_CODE.EMPTY_META_CELL);
@@ -1574,8 +1577,8 @@ private void loadTableStates() throws IOException {
// Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it
// has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in
// meantime.
- this.tableStates.put(TableName.META_TABLE_NAME,
- new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED));
+ this.tableStates.put(MetaTableName.getInstance(),
+ new TableState(MetaTableName.getInstance(), TableState.State.ENABLED));
}
/**
@@ -1604,7 +1607,7 @@ public void loadHdfsRegionDirs() throws IOException, InterruptedException {
TableName tableName = CommonFSUtils.getTableName(path);
if (
(!checkMetaOnly && isTableIncluded(tableName))
- || tableName.equals(TableName.META_TABLE_NAME)
+ || tableName.equals(MetaTableName.getInstance())
) {
tableDirs.add(fs.getFileStatus(path));
}
@@ -1649,7 +1652,7 @@ public void loadHdfsRegionDirs() throws IOException, InterruptedException {
*/
private boolean recordMetaRegion() throws IOException {
List<HRegionLocation> locs;
- try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
+ try (RegionLocator locator = connection.getRegionLocator(MetaTableName.getInstance())) {
locs = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true);
}
if (locs == null || locs.isEmpty()) {
@@ -2019,9 +2022,11 @@ void closeRegion(HbckRegionInfo hi) throws IOException, InterruptedException {
}
RegionInfo hri = h.getRegion();
if (hri == null) {
- LOG.warn("Unable to close region " + hi.getRegionNameAsString()
- + " because hbase:meta had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":"
- + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier value.");
+ LOG.warn(
+ "Unable to close region {} because {} had invalid or missing {}:{} qualifier value.",
+ hi.getRegionNameAsString(), MetaTableName.getInstance(),
+ HConstants.CATALOG_FAMILY_STR,
+ Bytes.toString(HConstants.REGIONINFO_QUALIFIER));
continue;
}
// close the region -- close files and remove assignment
@@ -2140,8 +2145,9 @@ else if (!inMeta && !inHdfs && !isDeployed) {
assert false : "Entry for region with no data";
} else if (!inMeta && !inHdfs && isDeployed) {
errors.reportError(ERROR_CODE.NOT_IN_META_HDFS,
- "Region " + descriptiveName + ", key=" + key + ", not on HDFS or in hbase:meta but "
- + "deployed on " + Joiner.on(", ").join(hbi.getDeployedOn()));
+ "Region " + descriptiveName + ", key=" + key + ", not on HDFS or in "
+ + MetaTableName.getInstance() + " but " + "deployed on "
+ + Joiner.on(", ").join(hbi.getDeployedOn()));
if (shouldFixAssignments()) {
undeployRegions(hbi);
}
@@ -2155,8 +2161,9 @@ else if (!inMeta && !inHdfs && !isDeployed) {
+ " got merge recently, its file(s) will be cleaned by CatalogJanitor later");
return;
}
- errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED, "Region " + descriptiveName
- + " on HDFS, but not listed in hbase:meta " + "or deployed on any region server");
+ errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+ "Region " + descriptiveName + " on HDFS, but not listed in " + MetaTableName.getInstance()
+ + " or deployed on any region server");
// restore region consistency of an adopted orphan
if (shouldFixMeta()) {
if (!hbi.isHdfsRegioninfoPresent()) {
@@ -2196,7 +2203,7 @@ else if (!inMeta && !inHdfs && !isDeployed) {
}
}
}
- LOG.info("Patching hbase:meta with .regioninfo: " + hbi.getHdfsHRI());
+ LOG.info("Patching {} with .regioninfo: " + hbi.getHdfsHRI(), MetaTableName.getInstance());
int numReplicas = admin.getDescriptor(hbi.getTableName()).getRegionReplication();
HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(),
admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(),
@@ -2224,7 +2231,8 @@ else if (!inMeta && !inHdfs && !isDeployed) {
return;
}
- LOG.info("Patching hbase:meta with with .regioninfo: " + hbi.getHdfsHRI());
+ LOG.info("Patching {} with with .regioninfo: " + hbi.getHdfsHRI(),
+ MetaTableName.getInstance());
int numReplicas = admin.getDescriptor(hbi.getTableName()).getRegionReplication();
HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(),
admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(),
@@ -2301,9 +2309,9 @@ else if (!inMeta && !inHdfs && !isDeployed) {
}
} else if (inMeta && inHdfs && isMultiplyDeployed) {
errors.reportError(ERROR_CODE.MULTI_DEPLOYED,
- "Region " + descriptiveName + " is listed in hbase:meta on region server "
- + hbi.getMetaEntry().regionServer + " but is multiply assigned to region servers "
- + Joiner.on(", ").join(hbi.getDeployedOn()));
+ "Region " + descriptiveName + " is listed in " + MetaTableName.getInstance()
+ + " on region server " + hbi.getMetaEntry().regionServer + " but is multiply assigned"
+ + " to region servers " + Joiner.on(", ").join(hbi.getDeployedOn()));
// If we are trying to fix the errors
if (shouldFixAssignments()) {
errors.print("Trying to fix assignment error...");
@@ -2313,8 +2321,8 @@ else if (!inMeta && !inHdfs && !isDeployed) {
}
} else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META,
- "Region " + descriptiveName + " listed in hbase:meta on region server "
- + hbi.getMetaEntry().regionServer + " but found on region server "
+ "Region " + descriptiveName + " listed in " + MetaTableName.getInstance()
+ + " on region server " + hbi.getMetaEntry().regionServer + " but found on region server "
+ hbi.getDeployedOn().get(0));
// If we are trying to fix the errors
if (shouldFixAssignments()) {
@@ -2599,7 +2607,7 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept
metaRegions.put(value.getReplicaId(), value);
}
}
- int metaReplication = admin.getDescriptor(TableName.META_TABLE_NAME).getRegionReplication();
+ int metaReplication = admin.getDescriptor(MetaTableName.getInstance()).getRegionReplication();
boolean noProblem = true;
// There will be always entries in regionInfoMap corresponding to hbase:meta & its replicas
// Check the deployed servers. It should be exactly one server for each replica.
@@ -2614,11 +2622,12 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept
if (servers.isEmpty()) {
assignMetaReplica(i);
} else if (servers.size() > 1) {
- errors.reportError(ERROR_CODE.MULTI_META_REGION, "hbase:meta, replicaId "
- + metaHbckRegionInfo.getReplicaId() + " is found on more than one region.");
+ errors.reportError(ERROR_CODE.MULTI_META_REGION,
+ MetaTableName.getInstance() + ", replicaId " + metaHbckRegionInfo.getReplicaId()
+ + " is found on more than one region.");
if (shouldFixAssignments()) {
- errors.print("Trying to fix a problem with hbase:meta, replicaId "
- + metaHbckRegionInfo.getReplicaId() + "..");
+ errors.print("Trying to fix a problem with " + MetaTableName.getInstance()
+ + ", replicaId " + metaHbckRegionInfo.getReplicaId() + "..");
setShouldRerun();
// try fix it (treat is a dupe assignment)
HBaseFsckRepair.fixMultiAssignment(connection,
@@ -2631,11 +2640,11 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept
for (Map.Entry<Integer, HbckRegionInfo> entry : metaRegions.entrySet()) {
noProblem = false;
errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
- "hbase:meta replicas are deployed in excess. Configured " + metaReplication + ", deployed "
- + metaRegions.size());
+ MetaTableName.getInstance() + " replicas are deployed in excess. Configured "
+ + metaReplication + ", deployed " + metaRegions.size());
if (shouldFixAssignments()) {
- errors.print(
- "Trying to undeploy excess replica, replicaId: " + entry.getKey() + " of hbase:meta..");
+ errors.print("Trying to undeploy excess replica, replicaId: " + entry.getKey() + " of "
+ + MetaTableName.getInstance() + "..");
setShouldRerun();
unassignMetaReplica(entry.getValue());
}
@@ -2655,9 +2664,9 @@ private void unassignMetaReplica(HbckRegionInfo hi)
private void assignMetaReplica(int replicaId)
throws IOException, KeeperException, InterruptedException {
errors.reportError(ERROR_CODE.NO_META_REGION,
- "hbase:meta, replicaId " + replicaId + " is not found on any region.");
+ MetaTableName.getInstance() + ", replicaId " + replicaId + " is not found on any region.");
if (shouldFixAssignments()) {
- errors.print("Trying to fix a problem with hbase:meta..");
+ errors.print("Trying to fix a problem with " + MetaTableName.getInstance() + "..");
setShouldRerun();
// try to fix it (treat it as unassigned region)
RegionInfo h = RegionReplicaUtil
@@ -2693,7 +2702,7 @@ public boolean visit(Result result) throws IOException {
if (rl == null) {
emptyRegionInfoQualifiers.add(result);
errors.reportError(ERROR_CODE.EMPTY_META_CELL,
- "Empty REGIONINFO_QUALIFIER found in hbase:meta");
+ "Empty REGIONINFO_QUALIFIER found in " + MetaTableName.getInstance());
return true;
}
ServerName sn = null;
@@ -2703,7 +2712,7 @@ public boolean visit(Result result) throws IOException {
) {
emptyRegionInfoQualifiers.add(result);
errors.reportError(ERROR_CODE.EMPTY_META_CELL,
- "Empty REGIONINFO_QUALIFIER found in hbase:meta");
+ "Empty REGIONINFO_QUALIFIER found in " + MetaTableName.getInstance());
return true;
}
RegionInfo hri = rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegion();
@@ -2731,7 +2740,8 @@ public boolean visit(Result result) throws IOException {
} else if (previous.getMetaEntry() == null) {
previous.setMetaEntry(m);
} else {
- throw new IOException("Two entries in hbase:meta are same " + previous);
+ throw new IOException(
+ "Two entries in " + MetaTableName.getInstance() + " are same " + previous);
}
}
List<RegionInfo> mergeParents = CatalogFamilyFormat.getMergeRegions(result.rawCells());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index 6ead66c16d9e..e3516ed855b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -27,8 +27,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
@@ -149,7 +149,7 @@ public static void closeRegionSilentlyAndWait(Connection connection, ServerName
public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, RegionInfo hri,
Collection<ServerName> servers, int numReplicas) throws IOException {
Connection conn = ConnectionFactory.createConnection(conf);
- Table meta = conn.getTable(TableName.META_TABLE_NAME);
+ Table meta = conn.getTable(MetaTableName.getInstance());
Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
if (numReplicas > 1) {
Random rand = ThreadLocalRandom.current();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index c1f98edd75ab..7321d7da178d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -56,6 +56,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.Admin;
@@ -586,13 +587,13 @@ private void unloadRegions(ServerName server, List regionServers,
// For isolating hbase:meta, it should move explicitly in Ack mode,
// hence the forceMoveRegionByAck = true.
if (!metaSeverName.equals(server)) {
- LOG.info("Region of hbase:meta " + metaRegionInfo.getEncodedName() + " is on server "
- + metaSeverName + " moving to " + server);
+ LOG.info("Region of {} {} is on server {} moving to {}", MetaTableName.getInstance(),
+ metaRegionInfo.getEncodedName(), metaSeverName, server);
submitRegionMovesWhileUnloading(metaSeverName, Collections.singletonList(server),
movedRegions, Collections.singletonList(metaRegionInfo), true);
} else {
- LOG.info("Region of hbase:meta " + metaRegionInfo.getEncodedName() + " already exists"
- + " on server : " + server);
+ LOG.info("Region of {} {} already exists on server: {}", MetaTableName.getInstance(),
+ metaRegionInfo.getEncodedName(), server);
}
isolateRegionInfoList.add(RegionInfoBuilder.FIRST_META_REGIONINFO);
}
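
Besides the name substitution, the RegionMover hunks switch from string concatenation to SLF4J's parameterized logging, so argument objects are only rendered when the INFO level is actually enabled. A self-contained sketch of the pattern (class and method names here are illustrative only):

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  class LogStyleSketch {
    private static final Logger LOG = LoggerFactory.getLogger(LogStyleSketch.class);

    void example(Object metaName, String encodedName, Object server) {
      // Concatenation builds the message even when INFO is disabled:
      //   LOG.info("Region of " + metaName + " " + encodedName + " is on " + server);
      // Placeholders defer formatting until the message is known to be logged:
      LOG.info("Region of {} {} is on server {}", metaName, encodedName, server);
    }
  }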
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java
index b8f095eb03df..36ecfbd1a247 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java
@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.hbase.wal;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.HashMap;
@@ -35,6 +33,7 @@
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.MetaCellComparator;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -78,7 +77,7 @@ public BoundedRecoveredHFilesOutputSink(WALSplitter walSplitter,
void append(RegionEntryBuffer buffer) throws IOException {
Map<String, CellSet<ExtendedCell>> familyCells = new HashMap<>();
Map<String, Long> familySeqIds = new HashMap<>();
- boolean isMetaTable = buffer.tableName.equals(META_TABLE_NAME);
+ boolean isMetaTable = buffer.tableName.equals(MetaTableName.getInstance());
// First iterate all Cells to find which column families are present and to stamp Cell with
// sequence id.
for (WAL.Entry entry : buffer.entryBuffer) {
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp b/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp
index b965241afe2a..baaf6b68b153 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp
@@ -20,6 +20,7 @@
<%@ page contentType="text/html;charset=UTF-8"
import="java.util.*"
+ import="org.apache.hadoop.hbase.MetaTableName"
import="org.apache.hadoop.hbase.NamespaceDescriptor"
import="org.apache.hadoop.hbase.TableName"
import="org.apache.hadoop.hbase.master.HMaster"
@@ -56,7 +57,7 @@
<%= frags.get(tableName.getNameAsString()) != null ? frags.get(tableName.getNameAsString()) + "%" : "n/a" %> |
<% } %>
<% String description = null;
- if (tableName.equals(TableName.META_TABLE_NAME)){
+ if (tableName.equals(MetaTableName.getInstance())){
description = "The hbase:meta table holds references to all User Table regions.";
} else if (tableName.equals(CanaryTool.DEFAULT_WRITE_TABLE_NAME)){
description = "The hbase:canary table is used to sniff the write availability of"
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index d88d968e199f..d838cd3c50bd 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -41,6 +41,7 @@
import="org.apache.hadoop.hbase.ServerMetrics"
import="org.apache.hadoop.hbase.ServerName"
import="org.apache.hadoop.hbase.Size"
+ import="org.apache.hadoop.hbase.MetaTableName"
import="org.apache.hadoop.hbase.TableName"
import="org.apache.hadoop.hbase.client.AsyncAdmin"
import="org.apache.hadoop.hbase.client.AsyncConnection"
@@ -196,7 +197,7 @@
boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
boolean readOnly = !InfoServer.canUserModifyUI(request, getServletContext(), conf);
int numMetaReplicas =
- master.getTableDescriptors().get(TableName.META_TABLE_NAME).getRegionReplication();
+ master.getTableDescriptors().get(MetaTableName.getInstance()).getRegionReplication();
Map frags = null;
if (showFragmentation) {
frags = FSUtils.getTableFragmentation(master);
@@ -317,7 +318,7 @@
<% //Meta table.
- if(fqtn.equals(TableName.META_TABLE_NAME.getNameAsString())) { %>
+ if(fqtn.equals(MetaTableName.getInstance().getNameAsString())) { %>
Table Regions
@@ -653,7 +654,7 @@