From 900f4f2d9b59ee4543f0d860c988af587b2b088c Mon Sep 17 00:00:00 2001
From: Kota-SH
Date: Mon, 15 Dec 2025 18:13:59 -0500
Subject: [PATCH 1/6] HBASE-29691: Change TableName.META_TABLE_NAME from being
a global static: org.apache.hadoop.hbase
---
.../hadoop/hbase/ClientMetaTableAccessor.java | 2 +-
.../hadoop/hbase/CellComparatorImpl.java | 2 +-
.../org/apache/hadoop/hbase/HConstants.java | 2 +-
.../hbase/InnerStoreCellComparator.java | 2 +-
.../apache/hadoop/hbase/MetaTableName.java | 72 +++++++++++++++++++
.../org/apache/hadoop/hbase/TableName.java | 27 +++++--
.../hadoop/hbase/TestCellComparator.java | 24 +++----
.../org/apache/hadoop/hbase/TestKeyValue.java | 24 +++----
.../hbase/TestClientClusterMetrics.java | 22 +++---
.../util/ProcessBasedLocalHBaseCluster.java | 2 +-
.../hadoop/hbase/util/RestartMetaTest.java | 2 +-
.../hbase/IntegrationTestMetaReplicas.java | 2 +-
.../hbase/mttr/IntegrationTestMTTR.java | 2 +-
.../hadoop/hbase/MetaTableAccessor.java | 8 +--
.../apache/hadoop/hbase/HBaseTestingUtil.java | 16 ++---
.../apache/hadoop/hbase/TestInfoServers.java | 2 +-
.../org/apache/hadoop/hbase/HBaseCluster.java | 2 +-
.../hadoop/hbase/HBaseTestingUtility.java | 20 +++---
18 files changed, 161 insertions(+), 72 deletions(-)
create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
index 42bfd757e0d1..bf8513ee959d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
@@ -196,7 +196,7 @@ private static CompletableFuture<List<Pair<RegionInfo, ServerName>>> getTableReg
  final AsyncTable<AdvancedScanResultConsumer> metaTable, final TableName tableName,
  final boolean excludeOfflinedSplitParents) {
  CompletableFuture<List<Pair<RegionInfo, ServerName>>> future = new CompletableFuture<>();
- if (TableName.META_TABLE_NAME.equals(tableName)) {
+ if (MetaTableName.getInstance().equals(tableName)) {
future.completeExceptionally(new IOException(
"This method can't be used to locate meta regions;" + " use MetaTableLocator instead"));
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
index 0e6a53ca7c47..49eb3b9cce62 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparatorImpl.java
@@ -787,7 +787,7 @@ public static CellComparator getCellComparator(TableName tableName) {
*/
public static CellComparator getCellComparator(byte[] tableName) {
// FYI, TableName.toBytes does not create an array; just returns existing array pointer.
- return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes())
+ return Bytes.equals(tableName, MetaTableName.getInstance().toBytes())
? MetaCellComparator.META_COMPARATOR
: CellComparatorImpl.COMPARATOR;
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 1051686d32e8..2c0fcafabfca 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1209,7 +1209,7 @@ public enum OperationStatusCode {
@Deprecated
public static final List<String> HBASE_NON_USER_TABLE_DIRS =
Collections.unmodifiableList(Arrays.asList(
- (String[]) ArrayUtils.addAll(new String[] { TableName.META_TABLE_NAME.getNameAsString() },
+ (String[]) ArrayUtils.addAll(new String[] { MetaTableName.getInstance().getNameAsString() },
HBASE_NON_TABLE_DIRS.toArray())));
/** Health script related settings. */
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java
index 7f6e87ebf911..e2fd632be19f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/InnerStoreCellComparator.java
@@ -75,7 +75,7 @@ public static CellComparator getInnerStoreCellComparator(TableName tableName) {
* @return CellComparator to use going off the {@code tableName} passed.
*/
public static CellComparator getInnerStoreCellComparator(byte[] tableName) {
- return Bytes.equals(tableName, TableName.META_TABLE_NAME.toBytes())
+ return Bytes.equals(tableName, MetaTableName.getInstance().toBytes())
? MetaCellComparator.META_COMPARATOR
: InnerStoreCellComparator.INNER_STORE_COMPARATOR;
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java
new file mode 100644
index 000000000000..5859e0f5d305
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java
@@ -0,0 +1,72 @@
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hbase.thirdparty.com.google.common.base.Strings;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Singleton class for managing the META_TABLE_NAME instance.
+ * This allows the meta table name to be overridden for testing using reflection.
+ */
+public class MetaTableName {
+ private static final Logger LOG = LoggerFactory.getLogger(MetaTableName.class);
+
+ /**
+ * The singleton instance of the meta table name.
+ * This field can be overridden for testing using reflection.
+ */
+ private static volatile TableName instance;
+
+ private MetaTableName() {
+ // Private constructor to prevent instantiation
+ }
+
+ /**
+ * Get the singleton instance of the meta table name.
+ * Initializes lazily using the default configuration if not already set.
+ *
+ * @return The meta table name instance
+ */
+ public static TableName getInstance() {
+ if (instance == null) {
+ synchronized (MetaTableName.class) {
+ if (instance == null) {
+ instance = initializeHbaseMetaTableName(HBaseConfiguration.create());
+ LOG.info("Meta table name initialized: {}", instance);
+ }
+ }
+ }
+ return instance;
+ }
+
+ /**
+ * Initialize the meta table name from the given configuration.
+ *
+ * @param conf The configuration to use
+ * @return The initialized meta table name
+ */
+ private static TableName initializeHbaseMetaTableName(Configuration conf) {
+ String suffix_val = conf.get(HConstants.HBASE_META_TABLE_SUFFIX,
+ HConstants.HBASE_META_TABLE_SUFFIX_DEFAULT_VALUE);
+ LOG.info("Meta table suffix value: {}", suffix_val);
+ if (Strings.isNullOrEmpty(suffix_val)) {
+ return TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta");
+ } else {
+ return TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta_" + suffix_val);
+ }
+ }
+
+ /**
+ * Get the instance field for reflection-based testing.
+ * This method is package-private to allow test classes to access the field.
+ *
+ * @return The Field object for the instance field
+ */
+ static java.lang.reflect.Field getInstanceField() throws NoSuchFieldException {
+ java.lang.reflect.Field field = MetaTableName.class.getDeclaredField("instance");
+ field.setAccessible(true);
+ return field;
+ }
+}
+
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
index b6d854c13784..6fdfc1edf001 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
@@ -23,10 +23,15 @@
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.apache.commons.lang3.ArrayUtils;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
-
+import org.apache.yetus.audience.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import com.google.errorprone.annotations.RestrictedApi;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hbase.thirdparty.com.google.common.base.Strings;
/**
* Immutable POJO class for representing a table name. Which is of the form: <table
@@ -43,8 +48,11 @@
*
*/
@InterfaceAudience.Public
+@InterfaceStability.Stable
public final class TableName implements Comparable<TableName> {
+ private static final Logger LOG = LoggerFactory.getLogger(TableName.class);
+
/** See {@link #createTableNameIfNecessary(ByteBuffer, ByteBuffer)} */
private static final Set<TableName> tableCache = new CopyOnWriteArraySet<>();
@@ -65,9 +73,18 @@ public final class TableName implements Comparable<TableName> {
public static final String VALID_USER_TABLE_REGEX = "(?:(?:(?:" + VALID_NAMESPACE_REGEX + "\\"
+ NAMESPACE_DELIM + ")?)" + "(?:" + VALID_TABLE_QUALIFIER_REGEX + "))";
- /** The hbase:meta table's name. */
- public static final TableName META_TABLE_NAME =
- valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta");
+ /**
+ * The name of the hbase meta table is either 'hbase:meta' (the default) or 'hbase:meta_xxx'. The
+ * config hbase.meta.table.suffix governs whether a suffix is appended to hbase:meta.
+ *
+ * This field is initialized from the MetaTableName singleton and can be overridden for testing
+ * by modifying the singleton instance via reflection.
+ *
+ * @deprecated Use {@link MetaTableName#getInstance()} instead. This field will be removed in a
+ * future version.
+ */
+ @Deprecated
+ public static TableName META_TABLE_NAME;
/**
* The Namespace table's name.
@@ -87,7 +104,7 @@ public final class TableName implements Comparable<TableName> {
/** Returns True if tn is the hbase:meta table name. */
public static boolean isMetaTableName(final TableName tn) {
- return tn.equals(TableName.META_TABLE_NAME);
+ return tn.equals(MetaTableName.getInstance());
}
/**
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
index 553b39311369..68666e3552f0 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellComparator.java
@@ -197,38 +197,38 @@ public void testMetaComparisons2() {
CellComparator c = MetaCellComparator.META_COMPARATOR;
assertTrue(c.compare(
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now)),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now)),
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now)))
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now)))
== 0);
Cell a = createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now));
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now));
Cell b = createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now));
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now));
assertTrue(c.compare(a, b) < 0);
assertTrue(c.compare(
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now)),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now)),
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now)))
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now)))
> 0);
assertTrue(c.compare(
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)),
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)))
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)))
== 0);
assertTrue(c.compare(
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)),
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now)))
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now)))
< 0);
assertTrue(c.compare(
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now)),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now)),
createByteBufferKeyValueFromKeyValue(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)))
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)))
> 0);
}
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
index 1644a6f1fce7..c06d5569e00d 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
@@ -198,31 +198,31 @@ public void testKeyValueBorderCases() {
private void metacomparisons(final CellComparatorImpl c) {
long now = EnvironmentEdgeManager.currentTime();
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now),
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now))
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now))
== 0);
KeyValue a =
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now);
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now);
KeyValue b =
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now);
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now);
assertTrue(c.compare(a, b) < 0);
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,2"), now),
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",a,,0,1"), now))
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,2"), now),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",a,,0,1"), now))
> 0);
}
private void comparisons(final CellComparatorImpl c) {
long now = EnvironmentEdgeManager.currentTime();
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now),
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)) == 0);
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)) == 0);
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now),
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now)) < 0);
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now)) < 0);
assertTrue(c.compare(
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,2"), now),
- new KeyValue(Bytes.toBytes(TableName.META_TABLE_NAME.getNameAsString() + ",,1"), now)) > 0);
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,2"), now),
+ new KeyValue(Bytes.toBytes(MetaTableName.getInstance().getNameAsString() + ",,1"), now)) > 0);
}
@Test
diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
index a459074ba27d..54ceeecfec21 100644
--- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
+++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/TestClientClusterMetrics.java
@@ -222,16 +222,16 @@ public void testRegionStatesCount() throws Exception {
ClusterMetrics metrics = ADMIN.getClusterMetrics();
Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(),
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getRegionsInTransition(),
0);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getOpenRegions(), 1);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getTotalRegions(), 1);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getClosedRegions(), 0);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getClosedRegions(), 0);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getSplitRegions(), 0);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getSplitRegions(), 0);
Assert.assertEquals(
metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0);
Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 1);
@@ -253,12 +253,12 @@ public void testRegionStatesWithSplit() throws Exception {
ClusterMetrics metrics = ADMIN.getClusterMetrics();
Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(),
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getRegionsInTransition(),
0);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getOpenRegions(), 1);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getTotalRegions(), 1);
Assert.assertEquals(
metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0);
Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 1);
@@ -273,12 +273,12 @@ public void testRegionStatesWithSplit() throws Exception {
metrics = ADMIN.getClusterMetrics();
Assert.assertEquals(metrics.getTableRegionStatesCount().size(), 2);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getRegionsInTransition(),
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getRegionsInTransition(),
0);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getOpenRegions(), 1);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getOpenRegions(), 1);
Assert.assertEquals(
- metrics.getTableRegionStatesCount().get(TableName.META_TABLE_NAME).getTotalRegions(), 1);
+ metrics.getTableRegionStatesCount().get(MetaTableName.getInstance()).getTotalRegions(), 1);
Assert.assertEquals(
metrics.getTableRegionStatesCount().get(TABLE_NAME).getRegionsInTransition(), 0);
Assert.assertEquals(metrics.getTableRegionStatesCount().get(TABLE_NAME).getOpenRegions(), 2);
diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
index 5e8447c2ad81..3203d98df2f4 100644
--- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
+++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
@@ -165,7 +165,7 @@ public void startHBase() throws IOException {
int attemptsLeft = 10;
while (attemptsLeft-- > 0) {
try {
- testUtil.getConnection().getTable(TableName.META_TABLE_NAME);
+ testUtil.getConnection().getTable(MetaTableName.getInstance());
} catch (Exception e) {
LOG.info("Waiting for HBase to startup. Retries left: " + attemptsLeft, e);
Threads.sleep(1000);
diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java
index 144ea6503b06..8274993aa3ac 100644
--- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java
+++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java
@@ -124,7 +124,7 @@ protected int doWork() throws Exception {
LOG.debug("Trying to scan meta");
- Table metaTable = connection.getTable(TableName.META_TABLE_NAME);
+ Table metaTable = connection.getTable(MetaTableName.getInstance());
ResultScanner scanner = metaTable.getScanner(new Scan());
Result result;
while ((result = scanner.next()) != null) {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
index a8c3a16d13dc..fd07d7e1dc6a 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
@@ -56,7 +56,7 @@ public static void setUp() throws Exception {
1000);
// Make sure there are three servers.
util.initializeCluster(3);
- HBaseTestingUtil.setReplicas(util.getAdmin(), TableName.META_TABLE_NAME, 3);
+ HBaseTestingUtil.setReplicas(util.getAdmin(), MetaTableName.getInstance(), 3);
ZKWatcher zkw = util.getZooKeeperWatcher();
Configuration conf = util.getConfiguration();
String baseZNode =
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
index 2bb87ca8f2f6..fd1f6dd5ac32 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
@@ -184,7 +184,7 @@ private static void setupActions() throws IOException {
// Set up the action that will move the regions of meta.
moveMetaRegionsAction = new MoveRegionsOfTableAction(sleepTime,
- MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, TableName.META_TABLE_NAME);
+ MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, MetaTableName.getInstance());
// Set up the action that will move the regions of our table.
moveRegionAction = new MoveRegionsOfTableAction(sleepTime,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 05b049e27dbc..24bbfb5079f7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -151,7 +151,7 @@ public static Table getMetaHTable(final Connection connection) throws IOExceptio
if (connection.isClosed()) {
throw new IOException("connection is closed");
}
- return connection.getTable(TableName.META_TABLE_NAME);
+ return connection.getTable(MetaTableName.getInstance());
}
/**
@@ -366,7 +366,7 @@ private static Scan getMetaScan(Configuration conf, int rowUpperLimit) {
public static List<Pair<RegionInfo, ServerName>> getTableRegionsAndLocations(
Connection connection, @Nullable final TableName tableName,
final boolean excludeOfflinedSplitParents) throws IOException {
- if (tableName != null && tableName.equals(TableName.META_TABLE_NAME)) {
+ if (tableName != null && tableName.equals(MetaTableName.getInstance())) {
throw new IOException(
"This method can't be used to locate meta regions; use MetaTableLocator instead");
}
@@ -592,7 +592,7 @@ public static PairOfSameType getDaughterRegions(Result data) {
*/
@Nullable
public static TableState getTableState(Connection conn, TableName tableName) throws IOException {
- if (tableName.equals(TableName.META_TABLE_NAME)) {
+ if (tableName.equals(MetaTableName.getInstance())) {
return new TableState(tableName, TableState.State.ENABLED);
}
Table metaHTable = getMetaHTable(conn);
@@ -859,7 +859,7 @@ public static void addRegionsToMeta(Connection connection, List regi
private static void updateTableState(Connection connection, TableState state) throws IOException {
Put put = makePutFromTableState(state, EnvironmentEdgeManager.currentTime());
putToMetaTable(connection, put);
- LOG.info("Updated {} in hbase:meta", state);
+ LOG.info("Updated {} in {}", state, MetaTableName.getInstance());
}
/**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
index c885d9868844..00570b60b693 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
@@ -902,7 +902,7 @@ public SingleProcessHBaseCluster startMiniHBaseCluster(StartTestingClusterOption
// Populate the master address configuration from mini cluster configuration.
conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c));
// Don't leave here till we've done a successful scan of the hbase:meta
- try (Table t = getConnection().getTable(TableName.META_TABLE_NAME);
+ try (Table t = getConnection().getTable(MetaTableName.getInstance());
ResultScanner s = t.getScanner(new Scan())) {
for (;;) {
if (s.next() == null) {
@@ -1025,7 +1025,7 @@ public void restartHBaseCluster(StartTestingClusterOption option)
option.getMasterClass(), option.getRsClass());
// Don't leave here till we've done a successful scan of the hbase:meta
Connection conn = ConnectionFactory.createConnection(this.conf);
- Table t = conn.getTable(TableName.META_TABLE_NAME);
+ Table t = conn.getTable(MetaTableName.getInstance());
ResultScanner s = t.getScanner(new Scan());
while (s.next() != null) {
// do nothing
@@ -2169,7 +2169,7 @@ public String checksumRows(final Table table) throws Exception {
*/
public List<RegionInfo> createMultiRegionsInMeta(final Configuration conf,
final TableDescriptor htd, byte[][] startKeys) throws IOException {
- try (Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (Table meta = getConnection().getTable(MetaTableName.getInstance())) {
Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
List<RegionInfo> newRegions = new ArrayList<>(startKeys.length);
MetaTableAccessor.updateTableState(getConnection(), htd.getTableName(),
@@ -2517,7 +2517,7 @@ public void process(WatchedEvent watchedEvent) {
monitor.close();
if (checkStatus) {
- getConnection().getTable(TableName.META_TABLE_NAME).close();
+ getConnection().getTable(MetaTableName.getInstance()).close();
}
}
@@ -3042,7 +3042,7 @@ public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOExce
* Waith until all system table's regions get assigned
*/
public void waitUntilAllSystemRegionsAssigned() throws IOException {
- waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
+ waitUntilAllRegionsAssigned(MetaTableName.getInstance());
}
/**
@@ -3055,7 +3055,7 @@ public void waitUntilAllSystemRegionsAssigned() throws IOException {
public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
throws IOException {
if (!TableName.isMetaTableName(tableName)) {
- try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (final Table meta = getConnection().getTable(MetaTableName.getInstance())) {
LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = "
+ timeout + "ms");
waitFor(timeout, 200, true, new ExplainingPredicate<IOException>() {
@@ -3275,7 +3275,7 @@ public Table createRandomTable(TableName tableName, final Collection fam
Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions);
if (hbaseCluster != null) {
- getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
+ getMiniHBaseCluster().flushcache(MetaTableName.getInstance());
}
BufferedMutator mutator = getConnection().getBufferedMutator(tableName);
@@ -3356,7 +3356,7 @@ public static void waitForHostPort(String host, int port) throws IOException {
}
public static int getMetaRSPort(Connection connection) throws IOException {
- try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
+ try (RegionLocator locator = connection.getRegionLocator(MetaTableName.getInstance())) {
return locator.getRegionLocation(Bytes.toBytes("")).getPort();
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java
index a894bbcd0aeb..cf0ae4f7eb80 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java
@@ -91,7 +91,7 @@ public void testGetMasterInfoPort() throws Exception {
@Test
public void testInfoServersRedirect() throws Exception {
// give the cluster time to start up
- UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close();
+ UTIL.getConnection().getTable(MetaTableName.getInstance()).close();
int port = UTIL.getHBaseCluster().getMaster().getInfoServer().getPort();
assertContainsContent(new URL("http://localhost:" + port + "/index.html"), "master.jsp");
assertContainsContent(new URL("http://localhost:" + port + "/master-status"), "master.jsp");
diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java
index 27f3dd4f43ab..eb40f4eee439 100644
--- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java
+++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseCluster.java
@@ -343,7 +343,7 @@ public boolean restoreClusterMetrics(ClusterMetrics desiredStatus) throws IOExce
* Get the ServerName of region server serving the first hbase:meta region
*/
public ServerName getServerHoldingMeta() throws IOException {
- return getServerHoldingRegion(TableName.META_TABLE_NAME,
+ return getServerHoldingRegion(MetaTableName.getInstance(),
RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName());
}
diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 1dea0ba4c367..f5d80730820f 100644
--- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1104,7 +1104,7 @@ public MiniHBaseCluster startMiniHBaseCluster(StartMiniClusterOption option)
// Populate the master address configuration from mini cluster configuration.
conf.set(HConstants.MASTER_ADDRS_KEY, MasterRegistry.getMasterAddr(c));
// Don't leave here till we've done a successful scan of the hbase:meta
- try (Table t = getConnection().getTable(TableName.META_TABLE_NAME);
+ try (Table t = getConnection().getTable(MetaTableName.getInstance());
ResultScanner s = t.getScanner(new Scan())) {
for (;;) {
if (s.next() == null) {
@@ -1226,7 +1226,7 @@ public void restartHBaseCluster(StartMiniClusterOption option)
option.getMasterClass(), option.getRsClass());
// Don't leave here till we've done a successful scan of the hbase:meta
Connection conn = ConnectionFactory.createConnection(this.conf);
- Table t = conn.getTable(TableName.META_TABLE_NAME);
+ Table t = conn.getTable(MetaTableName.getInstance());
ResultScanner s = t.getScanner(new Scan());
while (s.next() != null) {
// do nothing
@@ -2394,7 +2394,7 @@ public String checksumRows(final Table table) throws Exception {
*/
public List createMultiRegionsInMeta(final Configuration conf,
final TableDescriptor htd, byte[][] startKeys) throws IOException {
- Table meta = getConnection().getTable(TableName.META_TABLE_NAME);
+ Table meta = getConnection().getTable(MetaTableName.getInstance());
Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
List newRegions = new ArrayList<>(startKeys.length);
MetaTableAccessor.updateTableState(getConnection(), htd.getTableName(),
@@ -2476,7 +2476,7 @@ public static HRegion createRegionAndWAL(final RegionInfo info, final Path rootD
*/
public List getMetaTableRows() throws IOException {
// TODO: Redo using MetaTableAccessor class
- Table t = getConnection().getTable(TableName.META_TABLE_NAME);
+ Table t = getConnection().getTable(MetaTableName.getInstance());
List rows = new ArrayList<>();
ResultScanner s = t.getScanner(new Scan());
for (Result result : s) {
@@ -2494,7 +2494,7 @@ public List getMetaTableRows() throws IOException {
*/
public List getMetaTableRows(TableName tableName) throws IOException {
// TODO: Redo using MetaTableAccessor.
- Table t = getConnection().getTable(TableName.META_TABLE_NAME);
+ Table t = getConnection().getTable(MetaTableName.getInstance());
List rows = new ArrayList<>();
ResultScanner s = t.getScanner(new Scan());
for (Result result : s) {
@@ -2824,7 +2824,7 @@ public void process(WatchedEvent watchedEvent) {
monitor.close();
if (checkStatus) {
- getConnection().getTable(TableName.META_TABLE_NAME).close();
+ getConnection().getTable(MetaTableName.getInstance()).close();
}
}
@@ -3347,7 +3347,7 @@ public void waitUntilAllRegionsAssigned(final TableName tableName) throws IOExce
* Waith until all system table's regions get assigned
*/
public void waitUntilAllSystemRegionsAssigned() throws IOException {
- waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
+ waitUntilAllRegionsAssigned(MetaTableName.getInstance());
}
/**
@@ -3360,7 +3360,7 @@ public void waitUntilAllSystemRegionsAssigned() throws IOException {
public void waitUntilAllRegionsAssigned(final TableName tableName, final long timeout)
throws IOException {
if (!TableName.isMetaTableName(tableName)) {
- try (final Table meta = getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (final Table meta = getConnection().getTable(MetaTableName.getInstance())) {
LOG.debug("Waiting until all regions of table " + tableName + " get assigned. Timeout = "
+ timeout + "ms");
waitFor(timeout, 200, true, new ExplainingPredicate() {
@@ -3578,7 +3578,7 @@ public Table createRandomTable(TableName tableName, final Collection fam
Bytes.toBytes(String.format(keyFormat, splitEndKey)), numRegions);
if (hbaseCluster != null) {
- getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
+ getMiniHBaseCluster().flushcache(MetaTableName.getInstance());
}
BufferedMutator mutator = getConnection().getBufferedMutator(tableName);
@@ -3793,7 +3793,7 @@ public static int createPreSplitLoadTestTable(Configuration conf, TableDescripto
}
public static int getMetaRSPort(Connection connection) throws IOException {
- try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
+ try (RegionLocator locator = connection.getRegionLocator(MetaTableName.getInstance())) {
return locator.getRegionLocation(Bytes.toBytes("")).getPort();
}
}
From ee328181b32364665657a644ebbfbeb09ecca786 Mon Sep 17 00:00:00 2001
From: Kota-SH
Date: Mon, 15 Dec 2025 18:43:59 -0500
Subject: [PATCH 2/6] HBASE-29691: Change TableName.META_TABLE_NAME from being
a global static: org.apache.hadoop.hbase.client
---
.../client/AsyncNonMetaRegionLocator.java | 8 +++----
.../hbase/client/AsyncRegionLocator.java | 4 ++--
.../client/AsyncTableRegionLocatorImpl.java | 3 ++-
.../hbase/client/MutableRegionInfo.java | 5 +++--
.../hbase/client/RawAsyncHBaseAdmin.java | 8 +++----
.../hadoop/hbase/client/RegionInfo.java | 3 ++-
.../hbase/client/RegionInfoBuilder.java | 3 ++-
.../hbase/client/RegionInfoDisplay.java | 3 ++-
.../hbase/client/TableDescriptorBuilder.java | 3 ++-
.../client/TestAsyncAdminRpcPriority.java | 3 ++-
.../client/TestAsyncRegionLocatorTracing.java | 15 ++++++-------
.../client/TestAsyncTableRpcPriority.java | 21 ++++++++++---------
.../hbase/client/TestRegionInfoBuilder.java | 3 ++-
.../client/TestTableDescriptorBuilder.java | 5 +++--
.../apache/hadoop/hbase/MetaTableName.java | 2 ++
.../util/ProcessBasedLocalHBaseCluster.java | 1 +
.../hadoop/hbase/util/RestartMetaTest.java | 1 +
.../hbase/mttr/IntegrationTestMTTR.java | 1 +
.../client/AbstractTestRegionLocator.java | 19 +++++++++--------
.../client/MetaWithReplicasTestBase.java | 3 ++-
.../hbase/client/RegionReplicaTestHelper.java | 3 ++-
.../apache/hadoop/hbase/client/TestAdmin.java | 3 ++-
.../hadoop/hbase/client/TestAdmin2.java | 9 ++++----
.../TestAsyncAdminWithRegionReplicas.java | 9 ++++----
.../client/TestAsyncMetaRegionLocator.java | 5 +++--
.../client/TestAsyncNonMetaRegionLocator.java | 5 +++--
.../client/TestAsyncRegionAdminApi2.java | 16 +++++++-------
.../hbase/client/TestAsyncTableAdminApi.java | 8 +++----
.../hbase/client/TestAsyncTableAdminApi2.java | 3 ++-
.../hbase/client/TestAsyncTableAdminApi3.java | 20 +++++++++---------
.../client/TestAsyncTableUseMetaReplicas.java | 5 +++--
...talogReplicaLoadBalanceSimpleSelector.java | 18 ++++++++--------
.../hbase/client/TestCleanupMetaReplica.java | 3 ++-
.../client/TestClientScannerTimeouts.java | 3 ++-
.../client/TestClientSideRegionScanner.java | 9 ++++----
.../hadoop/hbase/client/TestEnableTable.java | 3 ++-
.../hbase/client/TestFromClientSide5.java | 3 ++-
.../TestIncreaseMetaReplicaThroughConfig.java | 3 ++-
.../hbase/client/TestMasterRegistry.java | 3 ++-
.../hadoop/hbase/client/TestMetaCache.java | 3 ++-
.../client/TestMetaRegionLocationCache.java | 3 ++-
.../client/TestMetaWithReplicasBasic.java | 3 ++-
.../TestMetaWithReplicasShutdownHandling.java | 5 +++--
.../TestMultiActionMetricsFromClient.java | 3 ++-
.../hbase/client/TestReplicaWithCluster.java | 3 ++-
.../client/TestRpcConnectionRegistry.java | 3 ++-
.../client/TestSeparateClientZKCluster.java | 9 ++++----
.../TestShutdownOfMetaReplicaHolder.java | 3 ++-
.../hbase/client/TestSnapshotFromClient.java | 3 ++-
.../client/TestZKConnectionRegistry.java | 5 +++--
50 files changed, 166 insertions(+), 122 deletions(-)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
index e26fb837b89d..3afe3ae63bed 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
@@ -22,7 +22,6 @@
import static org.apache.hadoop.hbase.HConstants.NINES;
import static org.apache.hadoop.hbase.HConstants.USE_META_REPLICAS;
import static org.apache.hadoop.hbase.HConstants.ZEROES;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.createRegionLocations;
import static org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.isGood;
import static org.apache.hadoop.hbase.client.ConnectionConfiguration.HBASE_CLIENT_META_CACHE_INVALIDATE_INTERVAL;
@@ -54,6 +53,7 @@
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Scan.ReadType;
@@ -238,14 +238,14 @@ private boolean tryComplete(LocateRequest req, CompletableFuture {
+ .createSelector(replicaSelectorClass, MetaTableName.getInstance(), conn, () -> {
int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS;
try {
RegionLocations metaLocations = conn.registry.getMetaRegionLocations()
.get(conn.connConf.getMetaReadRpcTimeoutNs(), TimeUnit.NANOSECONDS);
numOfReplicas = metaLocations.size();
} catch (Exception e) {
- LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e);
+ LOG.error("Failed to get table {}'s region replication, ", MetaTableName.getInstance(), e);
}
return numOfReplicas;
});
@@ -427,7 +427,7 @@ private void locateInMeta(TableName tableName, LocateRequest req) {
// do nothing
}
- conn.getTable(META_TABLE_NAME).scan(scan, new AdvancedScanResultConsumer() {
+ conn.getTable(MetaTableName.getInstance()).scan(scan, new AdvancedScanResultConsumer() {
private boolean completeNormally = false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
index 0e872a5b21da..f77a8fcdd492 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.client;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.REGION_NAMES_KEY;
import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.SERVER_NAME_KEY;
import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
@@ -38,6 +37,7 @@
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.trace.ConnectionSpanBuilder;
import org.apache.hadoop.hbase.client.trace.TableSpanBuilder;
@@ -217,7 +217,7 @@ void clearCache(TableName tableName) {
new TableSpanBuilder(conn).setName("AsyncRegionLocator.clearCache").setTableName(tableName);
TraceUtil.trace(() -> {
LOG.debug("Clear meta cache for {}", tableName);
- if (tableName.equals(META_TABLE_NAME)) {
+ if (tableName.equals(MetaTableName.getInstance())) {
metaRegionLocator.clearCache();
} else {
nonMetaRegionLocator.clearCache(tableName);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
index b7ec7fcd8725..32349a64651d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
@@ -25,6 +25,7 @@
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;
@@ -63,7 +64,7 @@ public CompletableFuture> getAllRegionLocations() {
.thenApply(locs -> Arrays.asList(locs.getRegionLocations()));
}
CompletableFuture> future = ClientMetaTableAccessor
- .getTableHRegionLocations(conn.getTable(TableName.META_TABLE_NAME), tableName);
+ .getTableHRegionLocations(conn.getTable(MetaTableName.getInstance()), tableName);
addListener(future, (locs, error) -> locs.forEach(loc -> {
// the cache assumes that all locations have a serverName. only add if that's true
if (loc.getServerName() != null) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
index d6d8e00f7822..5e629839782f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java
@@ -21,6 +21,7 @@
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
@@ -120,7 +121,7 @@ private static int checkReplicaId(int regionId) {
this.replicaId = checkReplicaId(replicaId);
this.offLine = offLine;
this.regionName = RegionInfo.createRegionName(this.tableName, this.startKey, this.regionId,
- this.replicaId, !this.tableName.equals(TableName.META_TABLE_NAME));
+ this.replicaId, !this.tableName.equals(MetaTableName.getInstance()));
this.encodedName = RegionInfo.encodeRegionName(this.regionName);
this.hashCode = generateHashCode(this.tableName, this.startKey, this.endKey, this.regionId,
this.replicaId, this.offLine, this.regionName);
@@ -232,7 +233,7 @@ public boolean containsRow(byte[] row) {
/** Returns true if this region is a meta region */
@Override
public boolean isMetaRegion() {
- return tableName.equals(TableName.META_TABLE_NAME);
+ return tableName.equals(MetaTableName.getInstance());
}
/** Returns True if has been split and has daughters. */
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 83780a4a1219..0c27605a0646 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.client;
import static org.apache.hadoop.hbase.HConstants.HIGH_QOS;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import static org.apache.hadoop.hbase.util.FutureUtils.addListener;
import static org.apache.hadoop.hbase.util.FutureUtils.unwrapCompletionException;
@@ -63,6 +62,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionMetricsBuilder;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotEnabledException;
@@ -403,7 +403,7 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
AsyncAdminBuilderBase builder) {
this.connection = connection;
this.retryTimer = retryTimer;
- this.metaTable = connection.getTable(META_TABLE_NAME);
+ this.metaTable = connection.getTable(MetaTableName.getInstance());
this.rpcTimeoutNs = builder.rpcTimeoutNs;
this.operationTimeoutNs = builder.operationTimeoutNs;
this.pauseNs = builder.pauseNs;
@@ -995,7 +995,7 @@ List> adminCall(controller, stub,
@Override
public CompletableFuture> getRegions(TableName tableName) {
- if (tableName.equals(META_TABLE_NAME)) {
+ if (tableName.equals(MetaTableName.getInstance())) {
return connection.registry.getMetaRegionLocations()
.thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion)
.collect(Collectors.toList()));
@@ -1286,7 +1286,7 @@ private CompletableFuture compactRegion(byte[] regionName, byte[] columnFa
* List all region locations for the specific table.
*/
private CompletableFuture> getTableHRegionLocations(TableName tableName) {
- if (TableName.META_TABLE_NAME.equals(tableName)) {
+ if (MetaTableName.getInstance().equals(tableName)) {
CompletableFuture> future = new CompletableFuture<>();
addListener(connection.registry.getMetaRegionLocations(), (metaRegions, err) -> {
if (err != null) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
index 10c554e26f79..bc3b48a54a28 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
@@ -26,6 +26,7 @@
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.util.ByteArrayHashKey;
@@ -431,7 +432,7 @@ static byte[] toByteArray(RegionInfo ri) {
*/
static String prettyPrint(final String encodedRegionName) {
if (encodedRegionName.equals("1028785192")) {
- return encodedRegionName + "/hbase:meta";
+ return encodedRegionName + "/" + MetaTableName.getInstance();
}
return encodedRegionName;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
index ef927fd3a55b..1c2aab455b55 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
@@ -43,7 +44,7 @@ public class RegionInfoBuilder {
// TODO: How come Meta regions still do not have encoded region names? Fix.
// hbase:meta,,1.1588230740 should be the hbase:meta first region name.
public static final RegionInfo FIRST_META_REGIONINFO =
- new MutableRegionInfo(1L, TableName.META_TABLE_NAME, RegionInfo.DEFAULT_REPLICA_ID);
+ new MutableRegionInfo(1L, MetaTableName.getInstance(), RegionInfo.DEFAULT_REPLICA_ID);
private final TableName tableName;
private byte[] startKey = HConstants.EMPTY_START_ROW;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
index 3f353b5799d4..3b4a4ab022af 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoDisplay.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.util.Bytes;
@@ -82,7 +83,7 @@ public static String getRegionNameAsStringForDisplay(RegionInfo ri, Configuratio
*/
public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) {
boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
- if (displayKey || ri.getTable().equals(TableName.META_TABLE_NAME)) {
+ if (displayKey || ri.getTable().equals(MetaTableName.getInstance())) {
return ri.getRegionName();
} else {
// create a modified regionname with the startkey replaced but preserving
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
index eed1a40a2c2f..a1b766696250 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
@@ -38,6 +38,7 @@
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.HBaseException;
@@ -616,7 +617,7 @@ private ModifyableTableDescriptor(final TableName name,
families.forEach(c -> this.families.put(c.getName(), ColumnFamilyDescriptorBuilder.copy(c)));
this.values.putAll(values);
this.values.put(IS_META_KEY,
- new Bytes(Bytes.toBytes(Boolean.toString(name.equals(TableName.META_TABLE_NAME)))));
+ new Bytes(Bytes.toBytes(Boolean.toString(name.equals(MetaTableName.getInstance())))));
}
/**
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
index f65c7ccb6e75..2206a800767e 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
@@ -195,7 +196,7 @@ public void testCreateSystemTable() {
// that we pass the correct priority
@Test
public void testCreateMetaTable() {
- conn.getAdmin().createTable(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
+ conn.getAdmin().createTable(TableDescriptorBuilder.newBuilder(MetaTableName.getInstance())
.setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()).join();
verify(masterStub, times(1)).createTable(assertPriority(SYSTEMTABLE_QOS),
any(CreateTableRequest.class), any());
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
index a7df92999d08..a710e53bad95 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java
@@ -46,6 +46,7 @@
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MatcherPredicate;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
@@ -85,7 +86,7 @@ public class TestAsyncRegionLocatorTracing {
@Before
public void setUp() throws IOException {
- RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+ RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(MetaTableName.getInstance()).build();
locs = new RegionLocations(
new HRegionLocation(metaRegionInfo,
ServerName.valueOf("127.0.0.1", 12345, EnvironmentEdgeManager.currentTime())),
@@ -147,30 +148,30 @@ public void testClearCacheServerName() {
@Test
public void testClearCacheTableName() {
- conn.getLocator().clearCache(TableName.META_TABLE_NAME);
+ conn.getLocator().clearCache(MetaTableName.getInstance());
SpanData span = waitSpan("AsyncRegionLocator.clearCache");
assertThat(span,
allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL),
buildConnectionAttributesMatcher(conn),
- buildTableAttributesMatcher(TableName.META_TABLE_NAME)));
+ buildTableAttributesMatcher(MetaTableName.getInstance())));
}
@Test
public void testGetRegionLocation() {
- conn.getLocator().getRegionLocation(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW,
+ conn.getLocator().getRegionLocation(MetaTableName.getInstance(), HConstants.EMPTY_START_ROW,
RegionLocateType.CURRENT, TimeUnit.SECONDS.toNanos(1)).join();
SpanData span = waitSpan("AsyncRegionLocator.getRegionLocation");
assertThat(span,
allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL),
buildConnectionAttributesMatcher(conn),
- buildTableAttributesMatcher(TableName.META_TABLE_NAME),
+ buildTableAttributesMatcher(MetaTableName.getInstance()),
hasAttributes(containsEntryWithStringValuesOf("db.hbase.regions",
locs.getDefaultRegionLocation().getRegion().getRegionNameAsString()))));
}
@Test
public void testGetRegionLocations() {
- conn.getLocator().getRegionLocations(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW,
+ conn.getLocator().getRegionLocations(MetaTableName.getInstance(), HConstants.EMPTY_START_ROW,
RegionLocateType.CURRENT, false, TimeUnit.SECONDS.toNanos(1)).join();
SpanData span = waitSpan("AsyncRegionLocator.getRegionLocations");
String[] expectedRegions =
@@ -178,7 +179,7 @@ public void testGetRegionLocations() {
.map(RegionInfo::getRegionNameAsString).toArray(String[]::new);
assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.INTERNAL),
buildConnectionAttributesMatcher(conn),
- buildTableAttributesMatcher(TableName.META_TABLE_NAME), hasAttributes(
+ buildTableAttributesMatcher(MetaTableName.getInstance()), hasAttributes(
containsEntryWithStringValuesOf("db.hbase.regions", containsInAnyOrder(expectedRegions)))));
}
}
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
index cb5431c35d3e..34e9ba201838 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java
@@ -50,6 +50,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
@@ -237,7 +238,7 @@ public void testGetSystemTable() {
@Test
public void testGetMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME).get(new Get(Bytes.toBytes(0))).join();
+ conn.getTable(MetaTableName.getInstance()).get(new Get(Bytes.toBytes(0))).join();
verify(stub, times(1)).get(assertPriority(SYSTEMTABLE_QOS), any(GetRequest.class), any());
}
@@ -268,7 +269,7 @@ public void testPutSystemTable() {
@Test
public void testPutMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME).put(new Put(Bytes.toBytes(0))
+ conn.getTable(MetaTableName.getInstance()).put(new Put(Bytes.toBytes(0))
.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join();
verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any());
}
@@ -296,7 +297,7 @@ public void testDeleteSystemTable() {
@Test
public void testDeleteMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME).delete(new Delete(Bytes.toBytes(0))).join();
+ conn.getTable(MetaTableName.getInstance()).delete(new Delete(Bytes.toBytes(0))).join();
verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any());
}
@@ -327,7 +328,7 @@ public void testAppendSystemTable() {
@Test
public void testAppendMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME).append(new Append(Bytes.toBytes(0))
+ conn.getTable(MetaTableName.getInstance()).append(new Append(Bytes.toBytes(0))
.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join();
verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any());
}
@@ -355,7 +356,7 @@ public void testIncrementSystemTable() {
@Test
public void testIncrementMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME)
+ conn.getTable(MetaTableName.getInstance())
.incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join();
verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any());
}
@@ -393,7 +394,7 @@ public void testCheckAndPutSystemTable() {
@Test
public void testCheckAndPutMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
+ conn.getTable(MetaTableName.getInstance()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
.qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0))
.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")))
.join();
@@ -426,7 +427,7 @@ public void testCheckAndDeleteSystemTable() {
@Test
public void testCheckAndDeleteMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
+ conn.getTable(MetaTableName.getInstance()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
.qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0))
.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")))
.join();
@@ -467,7 +468,7 @@ public void testCheckAndMutateSystemTable() throws IOException {
@Test
public void testCheckAndMutateMetaTable() throws IOException {
- conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
+ conn.getTable(MetaTableName.getInstance()).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf"))
.qualifier(Bytes.toBytes("cq")).ifEquals(Bytes.toBytes("v"))
.thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0))))
.join();
@@ -555,7 +556,7 @@ public void testScanSystemTable() throws Exception {
@Test
public void testScanMetaTable() throws Exception {
CompletableFuture renewFuture = mockScanReturnRenewFuture(SYSTEMTABLE_QOS);
- testForTable(TableName.META_TABLE_NAME, renewFuture, Optional.empty());
+ testForTable(MetaTableName.getInstance(), renewFuture, Optional.empty());
}
private void testForTable(TableName tableName, CompletableFuture renewFuture,
@@ -598,7 +599,7 @@ public void testBatchSystemTable() {
@Test
public void testBatchMetaTable() {
- conn.getTable(TableName.META_TABLE_NAME).batchAll(Arrays.asList(new Delete(Bytes.toBytes(0))))
+ conn.getTable(MetaTableName.getInstance()).batchAll(Arrays.asList(new Delete(Bytes.toBytes(0))))
.join();
verify(stub, times(1)).multi(assertPriority(SYSTEMTABLE_QOS),
any(ClientProtos.MultiRequest.class), any());
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
index f74b79a0672e..e01b3b741dcc 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java
@@ -27,6 +27,7 @@
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -141,7 +142,7 @@ public void testContainsRange() {
@Test
public void testContainsRangeForMetaTable() {
TableDescriptor tableDesc =
- TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+ TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).build();
RegionInfo hri = RegionInfoBuilder.newBuilder(tableDesc.getTableName()).build();
byte[] startRow = HConstants.EMPTY_START_ROW;
byte[] row1 = Bytes.toBytes("a,a,0");
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
index 53f33845ef7d..d09f7a225a6e 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
@@ -26,6 +26,7 @@
import java.io.IOException;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.HBaseException;
@@ -59,7 +60,7 @@ public class TestTableDescriptorBuilder {
@Test(expected = IOException.class)
public void testAddCoprocessorTwice() throws IOException {
String cpName = "a.b.c.d";
- TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).setCoprocessor(cpName)
+ TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).setCoprocessor(cpName)
.setCoprocessor(cpName).build();
}
@@ -67,7 +68,7 @@ public void testAddCoprocessorTwice() throws IOException {
public void testPb() throws DeserializationException, IOException {
final int v = 123;
TableDescriptor htd =
- TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).setMaxFileSize(v)
+ TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).setMaxFileSize(v)
.setDurability(Durability.ASYNC_WAL).setReadOnly(true).setRegionReplication(2).build();
byte[] bytes = TableDescriptorBuilder.toByteArray(htd);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java
index 5859e0f5d305..f4afd8bbe1c7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java
@@ -2,6 +2,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hbase.thirdparty.com.google.common.base.Strings;
+import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -9,6 +10,7 @@
* Singleton class for managing the META_TABLE_NAME instance.
* This allows the meta table name to be overridden for testing using reflection.
*/
+@InterfaceAudience.Public
public class MetaTableName {
private static final Logger LOG = LoggerFactory.getLogger(MetaTableName.class);
diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
index 3203d98df2f4..6407ff13bbdb 100644
--- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
+++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java
@@ -42,6 +42,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
diff --git a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java
index 8274993aa3ac..dc7d025796bd 100644
--- a/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java
+++ b/hbase-diagnostics/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
index fd1f6dd5ac32..fbc98f006393 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.NamespaceExistException;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.TableExistsException;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java
index 031dff736c84..bfed5d2452cf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestRegionLocator.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
@@ -49,7 +50,7 @@ public abstract class AbstractTestRegionLocator {
protected static void startClusterAndCreateTable() throws Exception {
UTIL.startMiniCluster(3);
- HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, REGION_REPLICATION);
+ HBaseTestingUtil.setReplicas(UTIL.getAdmin(), MetaTableName.getInstance(), REGION_REPLICATION);
TableDescriptor td =
TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(REGION_REPLICATION)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build();
@@ -69,7 +70,7 @@ protected static void startClusterAndCreateTable() throws Exception {
@After
public void tearDownAfterTest() throws IOException {
clearCache(TABLE_NAME);
- clearCache(TableName.META_TABLE_NAME);
+ clearCache(MetaTableName.getInstance());
}
private byte[] getStartKey(int index) {
@@ -171,7 +172,7 @@ private void assertMetaRegionLocation(HRegionLocation loc, int replicaId) {
assertArrayEquals(HConstants.EMPTY_END_ROW, region.getEndKey());
assertEquals(replicaId, region.getReplicaId());
ServerName expected =
- findRegionLocation(TableName.META_TABLE_NAME, region.getStartKey(), replicaId);
+ findRegionLocation(MetaTableName.getInstance(), region.getStartKey(), replicaId);
assertEquals(expected, loc.getServerName());
}
@@ -184,19 +185,19 @@ private void assertMetaRegionLocations(List locs) {
@Test
public void testMeta() throws IOException {
- assertMetaStartOrEndKeys(getStartKeys(TableName.META_TABLE_NAME));
- assertMetaStartOrEndKeys(getEndKeys(TableName.META_TABLE_NAME));
- Pair startEndKeys = getStartEndKeys(TableName.META_TABLE_NAME);
+ assertMetaStartOrEndKeys(getStartKeys(MetaTableName.getInstance()));
+ assertMetaStartOrEndKeys(getEndKeys(MetaTableName.getInstance()));
+ Pair startEndKeys = getStartEndKeys(MetaTableName.getInstance());
assertMetaStartOrEndKeys(startEndKeys.getFirst());
assertMetaStartOrEndKeys(startEndKeys.getSecond());
for (int replicaId = 0; replicaId < REGION_REPLICATION; replicaId++) {
assertMetaRegionLocation(
- getRegionLocation(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, replicaId),
+ getRegionLocation(MetaTableName.getInstance(), HConstants.EMPTY_START_ROW, replicaId),
replicaId);
}
assertMetaRegionLocations(
- getRegionLocations(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW));
- assertMetaRegionLocations(getAllRegionLocations(TableName.META_TABLE_NAME));
+ getRegionLocations(MetaTableName.getInstance(), HConstants.EMPTY_START_ROW));
+ assertMetaRegionLocations(getAllRegionLocations(MetaTableName.getInstance()));
}
protected abstract byte[][] getStartKeys(TableName tableName) throws IOException;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java
index 2b6b3d017fcb..0fb896111a42 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/MetaWithReplicasTestBase.java
@@ -27,6 +27,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
@@ -58,7 +59,7 @@ protected static void startCluster() throws Exception {
.numAlwaysStandByMasters(1).numMasters(1).numRegionServers(REGIONSERVERS_COUNT).build();
TEST_UTIL.startMiniCluster(option);
Admin admin = TEST_UTIL.getAdmin();
- HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3);
+ HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), 3);
AssignmentManager am = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager();
Set sns = new HashSet();
ServerName hbaseMetaServerName = am.getRegionStates()
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java
index a98ae217e3c2..38b4ee29a434 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/RegionReplicaTestHelper.java
@@ -32,6 +32,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.regionserver.Region;
@@ -47,7 +48,7 @@ static void waitUntilAllMetaReplicasAreReady(HBaseTestingUtil util, ConnectionRe
throws IOException {
Configuration conf = util.getConfiguration();
int regionReplicaCount =
- util.getAdmin().getDescriptor(TableName.META_TABLE_NAME).getRegionReplication();
+ util.getAdmin().getDescriptor(MetaTableName.getInstance()).getRegionReplication();
Waiter.waitFor(conf, conf.getLong("hbase.client.sync.wait.timeout.msec", 60000), 200, true,
new ExplainingPredicate() {
@Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
index 68a841b7d671..61a390b6ba51 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
@@ -39,6 +39,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
@@ -64,7 +65,7 @@ public class TestAdmin extends TestAdminBase {
@Test
public void testListTableDescriptors() throws IOException {
TableDescriptor metaTableDescriptor =
- TEST_UTIL.getAdmin().getDescriptor(TableName.META_TABLE_NAME);
+ TEST_UTIL.getAdmin().getDescriptor(MetaTableName.getInstance());
List tableDescriptors = TEST_UTIL.getAdmin().listTableDescriptors(true);
assertTrue(tableDescriptors.contains(metaTableDescriptor));
tableDescriptors = TEST_UTIL.getAdmin().listTableDescriptors(false);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index 2cf088fa6a82..1e4f2c19cc3d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -41,6 +41,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
@@ -84,13 +85,13 @@ public class TestAdmin2 extends TestAdminBase {
public void testCreateBadTables() throws IOException {
String msg = null;
try {
- ADMIN.createTable(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build());
+ ADMIN.createTable(TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).build());
} catch (TableExistsException e) {
msg = e.toString();
}
assertTrue("Unexcepted exception message " + msg,
msg != null && msg.startsWith(TableExistsException.class.getName())
- && msg.contains(TableName.META_TABLE_NAME.getNameAsString()));
+ && msg.contains(MetaTableName.getInstance().getNameAsString()));
// Now try and do concurrent creation with a bunch of threads.
TableDescriptor tableDescriptor =
@@ -456,7 +457,7 @@ private void setUpforLogRolling() {
private HRegionServer startAndWriteData(TableName tableName, byte[] value)
throws IOException, InterruptedException {
// When the hbase:meta table can be opened, the region servers are running
- TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close();
+ TEST_UTIL.getConnection().getTable(MetaTableName.getInstance()).close();
// Create the test table and open it
TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName)
@@ -486,7 +487,7 @@ private HRegionServer startAndWriteData(TableName tableName, byte[] value)
@Test
public void testDisableCatalogTable() throws Exception {
try {
- ADMIN.disableTable(TableName.META_TABLE_NAME);
+ ADMIN.disableTable(MetaTableName.getInstance());
fail("Expected to throw ConstraintException");
} catch (ConstraintException e) {
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java
index bb0eb31d2549..08f72317ef56 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminWithRegionReplicas.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -54,7 +55,7 @@ public class TestAsyncAdminWithRegionReplicas extends TestAsyncAdminBase {
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TestAsyncAdminBase.setUpBeforeClass();
- HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3);
+ HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), MetaTableName.getInstance(), 3);
try (ConnectionRegistry registry =
ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent())) {
RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, registry);
@@ -80,7 +81,7 @@ public void testMoveNonDefaultReplica()
throws InterruptedException, ExecutionException, IOException {
createTableWithDefaultConf(tableName, 3);
testMoveNonDefaultReplica(tableName);
- testMoveNonDefaultReplica(TableName.META_TABLE_NAME);
+ testMoveNonDefaultReplica(MetaTableName.getInstance());
}
@Test
@@ -138,11 +139,11 @@ public void testCloneTableSchema() throws IOException, InterruptedException, Exe
@Test
public void testGetTableRegions() throws InterruptedException, ExecutionException, IOException {
- List metaRegions = admin.getRegions(TableName.META_TABLE_NAME).get();
+ List metaRegions = admin.getRegions(MetaTableName.getInstance()).get();
assertEquals(3, metaRegions.size());
for (int i = 0; i < 3; i++) {
RegionInfo metaRegion = metaRegions.get(i);
- assertEquals(TableName.META_TABLE_NAME, metaRegion.getTable());
+ assertEquals(MetaTableName.getInstance(), metaRegion.getTable());
assertEquals(i, metaRegion.getReplicaId());
}
createTableWithDefaultConf(tableName, 3);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java
index e14cd32a3889..7dc75ee9d935 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocator.java
@@ -43,6 +43,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.RegionReplicaTestHelper.Locator;
import org.apache.hadoop.hbase.client.trace.StringTraceRenderer;
@@ -105,7 +106,7 @@ private void assertInitialized() {
protected void before() throws Throwable {
final AsyncAdmin admin = connectionRule.getAsyncConnection().getAdmin();
testUtil = miniClusterRule.getTestingUtility();
- HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3);
+ HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), 3);
testUtil.waitUntilNoRegionsInTransition();
registry = ConnectionRegistryFactory.create(testUtil.getConfiguration(), User.getCurrent());
RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(testUtil, registry);
@@ -163,7 +164,7 @@ public void test() throws Exception {
TraceUtil.trace(() -> {
try {
- testLocator(miniClusterRule.getTestingUtility(), TableName.META_TABLE_NAME,
+ testLocator(miniClusterRule.getTestingUtility(), MetaTableName.getInstance(),
new Locator() {
@Override
public void updateCachedLocationOnError(HRegionLocation loc, Throwable error) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
index 0bfbd18eb32f..4d4c307f4593 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
@@ -47,6 +47,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.client.RegionReplicaTestHelper.Locator;
@@ -106,9 +107,9 @@ public static void setUp() throws Exception {
admin.balancerSwitch(false, true);
// Enable hbase:meta replication.
- HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, NUM_OF_META_REPLICA);
+ HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), NUM_OF_META_REPLICA);
TEST_UTIL.waitFor(30000,
- () -> TEST_UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size()
+ () -> TEST_UTIL.getMiniHBaseCluster().getRegions(MetaTableName.getInstance()).size()
>= NUM_OF_META_REPLICA);
SPLIT_KEYS = new byte[8][];
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java
index 61dd87007c11..ff869d5c9c0e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi2.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.client;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
@@ -37,6 +36,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
@@ -86,7 +86,7 @@ public void testSplitSwitch() throws Exception {
final int rows = 10000;
TestAsyncRegionAdminApi.loadData(tableName, families, rows);
- AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
+ AsyncTable metaTable = ASYNC_CONN.getTable(MetaTableName.getInstance());
List regionLocations =
ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
int originalCount = regionLocations.size();
@@ -117,7 +117,7 @@ public void testMergeSwitch() throws Exception {
byte[][] families = { FAMILY };
TestAsyncRegionAdminApi.loadData(tableName, families, 1000);
- AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
+ AsyncTable metaTable = ASYNC_CONN.getTable(MetaTableName.getInstance());
List regionLocations =
ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
int originalCount = regionLocations.size();
@@ -162,7 +162,7 @@ public void testMergeRegions() throws Exception {
byte[][] splitRows = new byte[][] { Bytes.toBytes("3"), Bytes.toBytes("6") };
createTableWithDefaultConf(tableName, splitRows);
- AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
+ AsyncTable metaTable = ASYNC_CONN.getTable(MetaTableName.getInstance());
List regionLocations =
ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
RegionInfo regionA;
@@ -242,7 +242,7 @@ private void splitTest(TableName tableName, int rowCount, boolean isSplitRegion,
// create table
createTableWithDefaultConf(tableName);
- AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
+ AsyncTable metaTable = ASYNC_CONN.getTable(MetaTableName.getInstance());
List regionLocations =
ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
assertEquals(1, regionLocations.size());
@@ -299,7 +299,7 @@ public void testTruncateRegion() throws Exception {
final byte[][] bFamilies = new byte[][] { Bytes.toBytes(family1), Bytes.toBytes(family2) };
createTableWithDefaultConf(tableName, splitKeys, bFamilies);
- AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
+ AsyncTable metaTable = ASYNC_CONN.getTable(MetaTableName.getInstance());
List regionLocations =
ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
RegionInfo regionToBeTruncated = regionLocations.get(0).getRegion();
@@ -333,7 +333,7 @@ public void testTruncateReplicaRegionNotAllowed() throws Exception {
final byte[][] bFamilies = new byte[][] { Bytes.toBytes(family1), Bytes.toBytes(family2) };
createTableWithDefaultConf(tableName, 2, splitKeys, bFamilies);
- AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
+ AsyncTable metaTable = ASYNC_CONN.getTable(MetaTableName.getInstance());
List regionLocations =
ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
RegionInfo primaryRegion = regionLocations.get(0).getRegion();
@@ -354,7 +354,7 @@ public void testTruncateReplicaRegionNotAllowed() throws Exception {
@Test
public void testTruncateRegionsMetaTableRegionsNotAllowed() throws Exception {
- AsyncTableRegionLocator locator = ASYNC_CONN.getRegionLocator(META_TABLE_NAME);
+ AsyncTableRegionLocator locator = ASYNC_CONN.getRegionLocator(MetaTableName.getInstance());
List regionLocations = locator.getAllRegionLocations().get();
HRegionLocation regionToBeTruncated = regionLocations.get(0);
// 1
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
index 33fbc906f19f..44d01cecb92a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.client;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -33,6 +32,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
@@ -73,14 +73,14 @@ public void testCreateTable() throws Exception {
static TableState.State getStateFromMeta(TableName table) throws Exception {
Optional state = ClientMetaTableAccessor
- .getTableState(ASYNC_CONN.getTable(TableName.META_TABLE_NAME), table).get();
+ .getTableState(ASYNC_CONN.getTable(MetaTableName.getInstance()), table).get();
assertTrue(state.isPresent());
return state.get().getState();
}
@Test
public void testCreateTableNumberOfRegions() throws Exception {
- AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
+ AsyncTable metaTable = ASYNC_CONN.getTable(MetaTableName.getInstance());
createTableWithDefaultConf(tableName);
List regionLocations =
@@ -128,7 +128,7 @@ public void testCreateTableWithRegions() throws Exception {
boolean tableAvailable = admin.isTableAvailable(tableName).get();
assertTrue("Table should be created with splitKyes + 1 rows in META", tableAvailable);
- AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
+ AsyncTable metaTable = ASYNC_CONN.getTable(MetaTableName.getInstance());
List regions =
ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
Iterator hris = regions.iterator();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java
index 9db82a3bcd82..f56412c5d589 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi2.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -57,7 +58,7 @@ public class TestAsyncTableAdminApi2 extends TestAsyncAdminBase {
@Test
public void testDisableCatalogTable() throws Exception {
try {
- this.admin.disableTable(TableName.META_TABLE_NAME).join();
+ this.admin.disableTable(MetaTableName.getInstance()).join();
fail("Expected to throw ConstraintException");
} catch (Exception e) {
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java
index d9007f748308..89708d40bf1e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi3.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.client;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import static org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory.TRACKER_IMPL;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.MatcherAssert.assertThat;
@@ -35,6 +34,7 @@
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -66,10 +66,10 @@ public void testTableExist() throws Exception {
TEST_UTIL.createTable(tableName, FAMILY);
exist = admin.tableExists(tableName).get();
assertTrue(exist);
- exist = admin.tableExists(TableName.META_TABLE_NAME).get();
+ exist = admin.tableExists(MetaTableName.getInstance()).get();
assertTrue(exist);
// meta table already exists
- exist = admin.tableExists(TableName.META_TABLE_NAME).get();
+ exist = admin.tableExists(MetaTableName.getInstance()).get();
assertTrue(exist);
}
@@ -118,7 +118,7 @@ public void testListTables() throws Exception {
assertEquals(0, size);
Collections.addAll(tableNames, tables);
- tableNames.add(TableName.META_TABLE_NAME);
+ tableNames.add(MetaTableName.getInstance());
tableDescs = admin.listTableDescriptors(tableNames).get();
size = tableDescs.size();
assertEquals(tables.length + 1, size);
@@ -126,7 +126,7 @@ public void testListTables() throws Exception {
assertTrue("tableName should be equal in order",
tableDescs.get(j).getTableName().equals(tables[i]));
}
- assertTrue(tableDescs.get(size - 1).getTableName().equals(TableName.META_TABLE_NAME));
+ assertTrue(tableDescs.get(size - 1).getTableName().equals(MetaTableName.getInstance()));
for (int i = 0; i < tables.length; i++) {
admin.disableTable(tables[i]).join();
@@ -205,7 +205,7 @@ public void testDisableAndEnableTable() throws Exception {
assertTrue(ok);
// meta table can not be disabled.
try {
- admin.disableTable(TableName.META_TABLE_NAME).get();
+ admin.disableTable(MetaTableName.getInstance()).get();
fail("meta table can not be disabled");
} catch (ExecutionException e) {
Throwable cause = e.getCause();
@@ -285,7 +285,7 @@ public void testEnableTableRetainAssignment() throws Exception {
int expectedRegions = splitKeys.length + 1;
createTableWithDefaultConf(tableName, splitKeys);
- AsyncTable metaTable = ASYNC_CONN.getTable(META_TABLE_NAME);
+ AsyncTable metaTable = ASYNC_CONN.getTable(MetaTableName.getInstance());
List regions =
ClientMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).get();
assertEquals(
@@ -314,8 +314,8 @@ public void testIsTableEnabledAndDisabled() throws Exception {
assertTrue(admin.isTableDisabled(tableName).get());
// meta table is always enabled
- assertTrue(admin.isTableEnabled(TableName.META_TABLE_NAME).get());
- assertFalse(admin.isTableDisabled(TableName.META_TABLE_NAME).get());
+ assertTrue(admin.isTableEnabled(MetaTableName.getInstance()).get());
+ assertFalse(admin.isTableDisabled(MetaTableName.getInstance()).get());
}
@Test
@@ -323,6 +323,6 @@ public void testIsTableAvailable() throws Exception {
createTableWithDefaultConf(tableName);
TEST_UTIL.waitTableAvailable(tableName);
assertTrue(admin.isTableAvailable(tableName).get());
- assertTrue(admin.isTableAvailable(TableName.META_TABLE_NAME).get());
+ assertTrue(admin.isTableAvailable(MetaTableName.getInstance()).get());
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java
index 18c53a49de7b..96bb7b2bd1ba 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableUseMetaReplicas.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
@@ -94,14 +95,14 @@ public static void setUp() throws Exception {
conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
FailPrimaryMetaScanCp.class.getName());
UTIL.startMiniCluster(3);
- HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3);
+ HBaseTestingUtil.setReplicas(UTIL.getAdmin(), MetaTableName.getInstance(), 3);
try (ConnectionRegistry registry = ConnectionRegistryFactory.create(conf, User.getCurrent())) {
RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(UTIL, registry);
}
try (Table table = UTIL.createTable(TABLE_NAME, FAMILY)) {
table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE));
}
- UTIL.flush(TableName.META_TABLE_NAME);
+ UTIL.flush(MetaTableName.getInstance());
// wait for the store file refresh so we can read the region location from secondary meta
// replicas
Thread.sleep(2000);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java
index 184b4ba0d3cc..e180d33df541 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCatalogReplicaLoadBalanceSimpleSelector.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.client;
import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
@@ -29,6 +28,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -72,9 +72,9 @@ public static void setUp() throws Exception {
admin.balancerSwitch(false, true);
// Enable hbase:meta replication.
- HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, numOfMetaReplica);
+ HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), numOfMetaReplica);
TEST_UTIL.waitFor(30000,
- () -> TEST_UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size()
+ () -> TEST_UTIL.getMiniHBaseCluster().getRegions(MetaTableName.getInstance()).size()
>= numOfMetaReplica);
registry = ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent());
@@ -95,14 +95,14 @@ public void testMetaChangeFromReplicaNoReplica() throws IOException, Interrupted
CatalogReplicaLoadBalanceSimpleSelector.class.getName());
CatalogReplicaLoadBalanceSelector metaSelector = CatalogReplicaLoadBalanceSelectorFactory
- .createSelector(replicaSelectorClass, META_TABLE_NAME, CONN, () -> {
+ .createSelector(replicaSelectorClass, MetaTableName.getInstance(), CONN, () -> {
int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS;
try {
RegionLocations metaLocations = CONN.registry.getMetaRegionLocations()
.get(CONN.connConf.getMetaReadRpcTimeoutNs(), TimeUnit.NANOSECONDS);
numOfReplicas = metaLocations.size();
} catch (Exception e) {
- LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e);
+ LOG.error("Failed to get table {}'s region replication, ", MetaTableName.getInstance(), e);
}
return numOfReplicas;
});
@@ -116,12 +116,12 @@ public void testMetaChangeFromReplicaNoReplica() throws IOException, Interrupted
IntStream.range(0, numOfMetaReplica).forEach(i -> assertNotEquals(replicaIdCount[i], 0));
// Change to No meta replica
- HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 1);
+ HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), 1);
TEST_UTIL.waitFor(30000,
- () -> TEST_UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() == 1);
+ () -> TEST_UTIL.getMiniHBaseCluster().getRegions(MetaTableName.getInstance()).size() == 1);
CatalogReplicaLoadBalanceSelector metaSelectorWithNoReplica =
- CatalogReplicaLoadBalanceSelectorFactory.createSelector(replicaSelectorClass, META_TABLE_NAME,
+ CatalogReplicaLoadBalanceSelectorFactory.createSelector(replicaSelectorClass, MetaTableName.getInstance(),
CONN, () -> {
int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS;
try {
@@ -129,7 +129,7 @@ public void testMetaChangeFromReplicaNoReplica() throws IOException, Interrupted
.get(CONN.connConf.getMetaReadRpcTimeoutNs(), TimeUnit.NANOSECONDS);
numOfReplicas = metaLocations.size();
} catch (Exception e) {
- LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e);
+ LOG.error("Failed to get table {}'s region replication, ", MetaTableName.getInstance(), e);
}
return numOfReplicas;
});
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java
index 912ded0a27bb..3c314b5aa968 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCleanupMetaReplica.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -48,7 +49,7 @@ public void testReplicaCleanup() throws Exception {
ZKWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
List metaReplicaZnodes = zkw.getMetaReplicaNodes();
assertEquals(3, metaReplicaZnodes.size());
- HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 1);
+ HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), MetaTableName.getInstance(), 1);
metaReplicaZnodes = zkw.getMetaReplicaNodes();
assertEquals(1, metaReplicaZnodes.size());
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerTimeouts.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerTimeouts.java
index 583dc02763d0..91beea20c73c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerTimeouts.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerTimeouts.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster.MiniHBaseClusterRegionServer;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
import org.apache.hadoop.hbase.ipc.CallTimeoutException;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -453,7 +454,7 @@ public ScanResponse scan(final RpcController controller, final ScanRequest reque
} else {
ScanResponse scanRes = super.scan(controller, request);
String regionName = Bytes.toString(request.getRegion().getValue().toByteArray());
- if (!regionName.contains(TableName.META_TABLE_NAME.getNameAsString())) {
+ if (!regionName.contains(MetaTableName.getInstance().getNameAsString())) {
tableScannerId = scanRes.getScannerId();
if (sleepOnOpen) {
try {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java
index 253e61f995cf..b9294f8f991b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientSideRegionScanner.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.client.metrics.ScanMetricsRegionInfo;
import org.apache.hadoop.hbase.filter.FilterBase;
@@ -89,8 +90,8 @@ public void setup() throws IOException {
conf = TEST_UTIL.getConfiguration();
rootDir = TEST_UTIL.getDefaultRootDirPath();
fs = TEST_UTIL.getTestFileSystem();
- htd = TEST_UTIL.getAdmin().getDescriptor(TableName.META_TABLE_NAME);
- hri = TEST_UTIL.getAdmin().getRegions(TableName.META_TABLE_NAME).get(0);
+ htd = TEST_UTIL.getAdmin().getDescriptor(MetaTableName.getInstance());
+ hri = TEST_UTIL.getAdmin().getRegions(MetaTableName.getInstance()).get(0);
scan = new Scan();
}
@@ -200,7 +201,7 @@ private void testScanMetricsWithScanMetricsByRegionDisabled(ScanMetrics scanMetr
Configuration copyConf = new Configuration(conf);
Scan scan = new Scan();
scan.setScanMetricsEnabled(true);
- TEST_UTIL.getAdmin().flush(TableName.META_TABLE_NAME);
+ TEST_UTIL.getAdmin().flush(MetaTableName.getInstance());
try (ClientSideRegionScanner clientSideRegionScanner =
new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, scanMetrics)) {
clientSideRegionScanner.next();
@@ -229,7 +230,7 @@ private void testScanMetricByRegion(ScanMetrics scanMetrics) throws IOException
Configuration copyConf = new Configuration(conf);
Scan scan = new Scan();
scan.setEnableScanMetricsByRegion(true);
- TEST_UTIL.getAdmin().flush(TableName.META_TABLE_NAME);
+ TEST_UTIL.getAdmin().flush(MetaTableName.getInstance());
try (ClientSideRegionScanner clientSideRegionScanner =
new ClientSideRegionScanner(copyConf, fs, rootDir, htd, hri, scan, scanMetrics)) {
clientSideRegionScanner.next();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java
index 2384e02955da..b5b3e652ea89 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
@@ -92,7 +93,7 @@ public void testDeleteForSureClearsAllTableRowsFromMeta()
}
// Now I have a nice table, mangle it by removing the HConstants.REGIONINFO_QUALIFIER_STR
// content from a few of the rows.
- try (Table metaTable = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (Table metaTable = TEST_UTIL.getConnection().getTable(MetaTableName.getInstance())) {
try (ResultScanner scanner = metaTable.getScanner(
MetaTableAccessor.getScanForTableName(TEST_UTIL.getConfiguration(), tableName))) {
for (Result result : scanner) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
index 8f820158e460..8a46e0f79a18 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide5.java
@@ -51,6 +51,7 @@
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Scan.ReadType;
@@ -2532,7 +2533,7 @@ public void testFilterAllRecords() throws IOException {
scan.setCaching(1);
// Filter out any records
scan.setFilter(new FilterList(new FirstKeyOnlyFilter(), new InclusiveStopFilter(new byte[0])));
- try (Table table = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = TEST_UTIL.getConnection().getTable(MetaTableName.getInstance())) {
try (ResultScanner s = table.getScanner(scan)) {
assertNull(s.next());
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java
index f93fc9d5bf5d..044be8b5aa70 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncreaseMetaReplicaThroughConfig.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -50,7 +51,7 @@ public static void setUp() throws Exception {
public void testUpgradeAndIncreaseReplicaCount() throws Exception {
HMaster oldMaster = TEST_UTIL.getMiniHBaseCluster().getMaster();
TableDescriptors oldTds = oldMaster.getTableDescriptors();
- TableDescriptor oldMetaTd = oldTds.get(TableName.META_TABLE_NAME);
+ TableDescriptor oldMetaTd = oldTds.get(MetaTableName.getInstance());
assertEquals(3, oldMetaTd.getRegionReplication());
// force update the replica count to 1 and then kill the master, to simulate that hen upgrading,
// we have no region replication in meta table descriptor but we actually have meta region
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
index d79603cea3cc..e14ccfa949e8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMasterRegistry.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.security.User;
@@ -61,7 +62,7 @@ public static void setUp() throws Exception {
StartTestingClusterOption.Builder builder = StartTestingClusterOption.builder();
builder.numMasters(3).numRegionServers(3);
TEST_UTIL.startMiniCluster(builder.build());
- HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3);
+ HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), MetaTableName.getInstance(), 3);
}
@AfterClass
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java
index ce52918bfe42..5c4d6642f311 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaCache.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.RetryImmediatelyException;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil;
import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
import org.apache.hadoop.hbase.quotas.RpcThrottlingException;
@@ -87,7 +88,7 @@ public static void setUpBeforeClass() throws Exception {
conf.setStrings(HConstants.REGION_SERVER_IMPL, RegionServerWithFakeRpcServices.class.getName());
TEST_UTIL.startMiniCluster(1);
TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster();
- TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
+ TEST_UTIL.waitUntilAllRegionsAssigned(MetaTableName.getInstance());
badRS = TEST_UTIL.getHBaseCluster().getRegionServer(0);
assertTrue(badRS.getRSRpcServices() instanceof FakeRSRpcServices);
TableDescriptor desc = TableDescriptorBuilder.newBuilder(TABLE_NAME)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
index 29223dea5dbe..977a3571f595 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaRegionLocationCache.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.MultithreadedTestUtil;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.security.User;
@@ -63,7 +64,7 @@ public class TestMetaRegionLocationCache {
@BeforeClass
public static void setUp() throws Exception {
TEST_UTIL.startMiniCluster(3);
- HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3);
+ HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), MetaTableName.getInstance(), 3);
REGISTRY = ConnectionRegistryFactory.create(TEST_UTIL.getConfiguration(), User.getCurrent());
RegionReplicaTestHelper.waitUntilAllMetaReplicasAreReady(TEST_UTIL, REGISTRY);
TEST_UTIL.getAdmin().balancerSwitch(false, true);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java
index eae7078639d1..799c90c57c3f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasBasic.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -54,7 +55,7 @@ public static void setUp() throws Exception {
@Test
public void testMetaHTDReplicaCount() throws Exception {
assertEquals(3,
- TEST_UTIL.getAdmin().getDescriptor(TableName.META_TABLE_NAME).getRegionReplication());
+ TEST_UTIL.getAdmin().getDescriptor(MetaTableName.getInstance()).getRegionReplication());
}
@Test
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java
index 267d618d03d1..aab092dda927 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicasShutdownHandling.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -97,7 +98,7 @@ public static void shutdownMetaAndDoValidations(HBaseTestingUtil util) throws Ex
ServerName master = null;
try (Connection c = ConnectionFactory.createConnection(util.getConfiguration())) {
try (Table htable = util.createTable(TABLE, FAMILIES)) {
- util.getAdmin().flush(TableName.META_TABLE_NAME);
+ util.getAdmin().flush(MetaTableName.getInstance());
Thread.sleep(
conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 30000) * 6);
List regions = MetaTableAccessor.getTableRegions(c, TABLE);
@@ -114,7 +115,7 @@ public static void shutdownMetaAndDoValidations(HBaseTestingUtil util) throws Ex
Thread.sleep(10);
hrl = MetaTableAccessor.getRegionLocation(c, regions.get(0));
} while (primary.equals(hrl.getServerName()));
- util.getAdmin().flush(TableName.META_TABLE_NAME);
+ util.getAdmin().flush(MetaTableName.getInstance());
Thread.sleep(
conf.getInt(StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 30000) * 3);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java
index 55646c35e435..4e6f9965c6d9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiActionMetricsFromClient.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -48,7 +49,7 @@ public class TestMultiActionMetricsFromClient {
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster(1);
TEST_UTIL.getHBaseCluster().waitForActiveAndReadyMaster();
- TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
+ TEST_UTIL.waitUntilAllRegionsAssigned(MetaTableName.getInstance());
TEST_UTIL.createTable(TABLE_NAME, FAMILY);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
index c38be19a238e..caa7c825601e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicaWithCluster.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -261,7 +262,7 @@ public static void beforeClass() throws Exception {
HTU.startMiniCluster(NB_SERVERS);
// Enable meta replica at server side
- HBaseTestingUtil.setReplicas(HTU.getAdmin(), TableName.META_TABLE_NAME, 2);
+ HBaseTestingUtil.setReplicas(HTU.getAdmin(), MetaTableName.getInstance(), 2);
HTU.getHBaseCluster().startMaster();
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java
index d33cc943355c..14c750398a3a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRpcConnectionRegistry.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.BootstrapNodeManager;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
@@ -72,7 +73,7 @@ public static void setUpBeforeClass() throws Exception {
UTIL.getConfiguration().setLong(RpcConnectionRegistry.MIN_SECS_BETWEEN_REFRESHES, 0);
UTIL.getConfiguration().setLong(BootstrapNodeManager.REQUEST_MASTER_MIN_INTERVAL_SECS, 1);
UTIL.startMiniCluster(3);
- HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3);
+ HBaseTestingUtil.setReplicas(UTIL.getAdmin(), MetaTableName.getInstance(), 3);
}
@AfterClass
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
index 52ccd5d8b7da..84430264474d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
@@ -280,13 +281,13 @@ public void testAsyncTable() throws Exception {
public void testChangeMetaReplicaCount() throws Exception {
Admin admin = TEST_UTIL.getAdmin();
try (RegionLocator locator =
- TEST_UTIL.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) {
+ TEST_UTIL.getConnection().getRegionLocator(MetaTableName.getInstance())) {
assertEquals(1, locator.getAllRegionLocations().size());
- HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 3);
+ HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), 3);
TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 3);
- HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 2);
+ HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), 2);
TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 2);
- HBaseTestingUtil.setReplicas(admin, TableName.META_TABLE_NAME, 1);
+ HBaseTestingUtil.setReplicas(admin, MetaTableName.getInstance(), 1);
TEST_UTIL.waitFor(30000, () -> locator.getAllRegionLocations().size() == 1);
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java
index c39fc076ef2b..32c9df8ad3bc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestShutdownOfMetaReplicaHolder.java
@@ -22,6 +22,7 @@
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.junit.BeforeClass;
@@ -50,7 +51,7 @@ public void testShutdownOfReplicaHolder() throws Exception {
// checks that the when the server holding meta replica is shut down, the meta replica
// can be recovered
try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
- RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
+ RegionLocator locator = conn.getRegionLocator(MetaTableName.getInstance())) {
HRegionLocation hrl = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true).get(1);
ServerName oldServer = hrl.getServerName();
TEST_UTIL.getHBaseClusterInterface().killRegionServer(oldServer);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
index 8abb4d754a7a..c0b6a337da26 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
@@ -163,7 +164,7 @@ public static void cleanupTest() throws Exception {
*/
@Test(expected = IllegalArgumentException.class)
public void testMetaTablesSnapshot() throws Exception {
- UTIL.getAdmin().snapshot("metaSnapshot", TableName.META_TABLE_NAME);
+ UTIL.getAdmin().snapshot("metaSnapshot", MetaTableName.getInstance());
}
/**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java
index 6d585245e959..395cddf1cd97 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestZKConnectionRegistry.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
@@ -64,7 +65,7 @@ public class TestZKConnectionRegistry {
@BeforeClass
public static void setUp() throws Exception {
TEST_UTIL.startMiniCluster(3);
- HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), TableName.META_TABLE_NAME, 3);
+ HBaseTestingUtil.setReplicas(TEST_UTIL.getAdmin(), MetaTableName.getInstance(), 3);
REGISTRY = new ZKConnectionRegistry(TEST_UTIL.getConfiguration(), null);
}
@@ -89,7 +90,7 @@ public void test() throws InterruptedException, ExecutionException, IOException
IntStream.range(0, 3).forEach(i -> {
HRegionLocation loc = locs.getRegionLocation(i);
assertNotNull("Replica " + i + " doesn't have location", loc);
- assertEquals(TableName.META_TABLE_NAME, loc.getRegion().getTable());
+ assertEquals(MetaTableName.getInstance(), loc.getRegion().getTable());
assertEquals(i, loc.getRegion().getReplicaId());
});
}
From 59c879ec1cf883410f425299eb83e7e70866c74b Mon Sep 17 00:00:00 2001
From: Kota-SH
Date: Mon, 15 Dec 2025 18:55:02 -0500
Subject: [PATCH 3/6] HBASE-29691: Change TableName.META_TABLE_NAME from being
a global static: org.apache.hadoop.hbase.master
---
.../apache/hadoop/hbase/master/HMaster.java | 21 +++++++++++--------
...sterAnnotationReadingPriorityFunction.java | 3 ++-
.../master/RegionPlacementMaintainer.java | 12 ++++++-----
.../hbase/master/TableNamespaceManager.java | 9 ++++----
.../hbase/master/TableStateManager.java | 5 +++--
.../master/assignment/AssignmentManager.java | 7 ++++---
.../MergeTableRegionsProcedure.java | 7 +++++--
.../master/assignment/RegionStateStore.java | 20 ++++++++++--------
.../assignment/SplitTableRegionProcedure.java | 7 +++++--
.../cleaner/ReplicationBarrierCleaner.java | 3 ++-
.../hadoop/hbase/master/http/MetaBrowser.java | 3 ++-
.../hbase/master/janitor/CatalogJanitor.java | 7 ++++---
.../hbase/master/janitor/MetaFixer.java | 13 ++++++------
.../master/janitor/ReportMakingVisitor.java | 6 ++++--
.../procedure/DeleteTableProcedure.java | 5 +++--
.../procedure/DisableTableProcedure.java | 5 +++--
.../procedure/HBCKServerCrashProcedure.java | 13 ++++++------
.../master/procedure/InitMetaProcedure.java | 7 ++++---
.../procedure/MasterProcedureScheduler.java | 5 +++--
.../hbase/master/procedure/MetaQueue.java | 3 ++-
.../MigrateNamespaceTableProcedure.java | 5 +++--
.../procedure/ModifyTableProcedure.java | 3 ++-
.../hbase/master/procedure/SchemaLocking.java | 5 +++--
.../procedure/TruncateRegionProcedure.java | 3 ++-
.../hadoop/hbase/master/TestMaster.java | 3 ++-
...MasterFileSystemWithStoreFileTracking.java | 3 ++-
...TestMasterOperationsForRegionReplicas.java | 3 ++-
.../hbase/master/TestMasterRepairMode.java | 5 +++--
.../hbase/master/TestMasterTransitions.java | 3 ++-
.../TestMetaAssignmentWithStopMaster.java | 3 ++-
.../TestMigrateAndMirrorMetaLocations.java | 9 ++++----
.../master/TestMigrateNamespaceTable.java | 5 +++--
.../hbase/master/TestRecreateCluster.java | 5 +++--
.../TestRestartWithEmptyWALDirectory.java | 3 ++-
...ServerCrashProcedureCarryingMetaStuck.java | 5 +++--
.../TestCloseRegionWhileRSCrash.java | 7 ++++---
.../TestOpenRegionProcedureBackoff.java | 3 ++-
.../assignment/TestRegionStateStore.java | 3 ++-
.../master/assignment/TestRollbackSCP.java | 3 ++-
.../BalancerConditionalsTestUtil.java | 3 ++-
.../TestFavoredStochasticLoadBalancer.java | 3 ++-
...MetaTableIsolationBalancerConditional.java | 5 +++--
.../TestReplicationBarrierCleaner.java | 13 ++++++------
.../TestCatalogJanitorInMemoryStates.java | 3 ++-
.../janitor/TestMetaFixerNoCluster.java | 17 ++++++++-------
.../TestSimpleRegionNormalizer.java | 3 ++-
.../hbase/master/procedure/TestHBCKSCP.java | 5 +++--
.../procedure/TestProcedurePriority.java | 3 ++-
...TestTableProcedureWaitingQueueCleanup.java | 3 ++-
49 files changed, 178 insertions(+), 120 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 22d3ab69b51c..ac79ed1eb665 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -93,6 +93,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.ServerTask;
 import org.apache.hadoop.hbase.ServerTaskBuilder;
 import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
@@ -1092,7 +1093,7 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE
startupTaskGroup.addTask("Initializing meta table if this is a new deploy");
InitMetaProcedure initMetaProc = null;
// Print out state of hbase:meta on startup; helps debugging.
- if (!this.assignmentManager.getRegionStates().hasTableRegionStates(TableName.META_TABLE_NAME)) {
+ if (!this.assignmentManager.getRegionStates().hasTableRegionStates(MetaTableName.getInstance())) {
Optional optProc = procedureExecutor.getProcedures().stream()
.filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny();
initMetaProc = optProc.orElseGet(() -> {
@@ -1156,7 +1157,7 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE
return;
}
- TableDescriptor metaDescriptor = tableDescriptors.get(TableName.META_TABLE_NAME);
+ TableDescriptor metaDescriptor = tableDescriptors.get(MetaTableName.getInstance());
final ColumnFamilyDescriptor tableFamilyDesc =
metaDescriptor.getColumnFamily(HConstants.TABLE_FAMILY);
final ColumnFamilyDescriptor replBarrierFamilyDesc =
@@ -1174,16 +1175,17 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE
if (conf.get(HConstants.META_REPLICAS_NUM) != null) {
int replicasNumInConf =
conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM);
- TableDescriptor metaDesc = tableDescriptors.get(TableName.META_TABLE_NAME);
+ TableDescriptor metaDesc = tableDescriptors.get(MetaTableName.getInstance());
if (metaDesc.getRegionReplication() != replicasNumInConf) {
// it is possible that we already have some replicas before upgrading, so we must set the
// region replication number in meta TableDescriptor directly first, without creating a
// ModifyTableProcedure, otherwise it may cause a double assign for the meta replicas.
int existingReplicasCount =
- assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size();
+ assignmentManager.getRegionStates().getRegionsOfTable(MetaTableName.getInstance()).size();
if (existingReplicasCount > metaDesc.getRegionReplication()) {
- LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)"
- + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount);
+ LOG.info(
+          "Update replica count of {} from {}(in TableDescriptor) to {}(existing ZNodes)",
+ MetaTableName.getInstance(), metaDesc.getRegionReplication(), existingReplicasCount);
metaDesc = TableDescriptorBuilder.newBuilder(metaDesc)
.setRegionReplication(existingReplicasCount).build();
tableDescriptors.update(metaDesc);
@@ -1193,7 +1195,8 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE
         LOG.info(
-          "The {} config is {} while the replica count in TableDescriptor is {}"
-            + " for hbase:meta, altering...",
-          HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication());
+          "The {} config is {} while the replica count in TableDescriptor is {}"
+            + " for {}, altering...",
+          HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication(),
+          MetaTableName.getInstance());
procedureExecutor.submitProcedure(new ModifyTableProcedure(
procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc)
.setRegionReplication(replicasNumInConf).build(),
@@ -1423,7 +1426,7 @@ private void createMissingCFsInMetaDuringUpgrade(TableDescriptor metaDescriptor)
TableDescriptor newMetaDesc = TableDescriptorBuilder.newBuilder(metaDescriptor)
.setColumnFamily(FSTableDescriptors.getTableFamilyDescForMeta(conf))
.setColumnFamily(FSTableDescriptors.getReplBarrierFamilyDescForMeta()).build();
- long pid = this.modifyTable(TableName.META_TABLE_NAME, () -> newMetaDesc, 0, 0, false);
+ long pid = this.modifyTable(MetaTableName.getInstance(), () -> newMetaDesc, 0, 0, false);
int tries = 30;
while (
!(getMasterProcedureExecutor().isFinished(pid)) && getMasterProcedureExecutor().isRunning()
@@ -2586,7 +2589,7 @@ private void startActiveMasterManager(int infoPort) throws KeeperException {
}
private static boolean isCatalogTable(final TableName tableName) {
- return tableName.equals(TableName.META_TABLE_NAME);
+ return tableName.equals(MetaTableName.getInstance());
}
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
index 464dfaca7035..c77d2c0c1a85 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterAnnotationReadingPriorityFunction.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ipc.AnnotationReadingPriorityFunction;
import org.apache.yetus.audience.InterfaceAudience;
@@ -84,7 +85,7 @@ protected int getBasePriority(RequestHeader header, Message param) {
if (rst.getRegionInfoList() != null) {
for (HBaseProtos.RegionInfo info : rst.getRegionInfoList()) {
TableName tn = ProtobufUtil.toTableName(info.getTableName());
- if (TableName.META_TABLE_NAME.equals(tn)) {
+ if (MetaTableName.getInstance().equals(tn)) {
return META_TRANSITION_QOS;
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
index 854c21da2bc7..c1195e0a90a1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionPlacementMaintainer.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
@@ -605,7 +606,7 @@ public static void printAssignmentPlan(FavoredNodesPlan plan) {
*/
public void updateAssignmentPlanToMeta(FavoredNodesPlan plan) throws IOException {
try {
- LOG.info("Start to update the hbase:meta with the new assignment plan");
+ LOG.info("Started updating {} with the new assignment plan", MetaTableName.getInstance());
Map> assignmentMap = plan.getAssignmentMap();
Map> planToUpdate = new HashMap<>(assignmentMap.size());
Map regionToRegionInfoMap =
@@ -619,6 +620,7 @@ public void updateAssignmentPlanToMeta(FavoredNodesPlan plan) throws IOException
} catch (Exception e) {
LOG.error(
"Failed to update hbase:meta with the new assignment" + "plan because " + e.getMessage());
+      LOG.error("Failed to update {} with new assignment plan", MetaTableName.getInstance(), e);
}
}
@@ -690,14 +692,14 @@ private void updateAssignmentPlanToRegionServers(FavoredNodesPlan plan) throws I
}
public void updateAssignmentPlan(FavoredNodesPlan plan) throws IOException {
- LOG.info("Start to update the new assignment plan for the hbase:meta table and"
- + " the region servers");
+ LOG.info("Started updating the new assignment plan for {} and the region servers",
+ MetaTableName.getInstance());
// Update the new assignment plan to META
updateAssignmentPlanToMeta(plan);
// Update the new assignment plan to Region Servers
updateAssignmentPlanToRegionServers(plan);
- LOG.info("Finish to update the new assignment plan for the hbase:meta table and"
- + " the region servers");
+ LOG.info("Finished updating the new assignment plan for {} and the region servers",
+ MetaTableName.getInstance());
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index 4d18b2ad8f4e..70aa46960e00 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
@@ -79,7 +80,7 @@ private void tryMigrateNamespaceTable() throws IOException, InterruptedException
if (!opt.isPresent()) {
// the procedure is not present, check whether have the ns family in meta table
TableDescriptor metaTableDesc =
- masterServices.getTableDescriptors().get(TableName.META_TABLE_NAME);
+ masterServices.getTableDescriptors().get(MetaTableName.getInstance());
if (metaTableDesc.hasColumnFamily(HConstants.NAMESPACE_FAMILY)) {
// normal case, upgrading is done or the cluster is created with 3.x code
migrationDone = true;
@@ -106,7 +107,7 @@ private void addToCache(Result result, byte[] family, byte[] qualifier) throws I
}
private void loadFromMeta() throws IOException {
- try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME);
+ try (Table table = masterServices.getConnection().getTable(MetaTableName.getInstance());
ResultScanner scanner = table.getScanner(HConstants.NAMESPACE_FAMILY)) {
for (Result result;;) {
result = scanner.next();
@@ -204,7 +205,7 @@ public static void insertNamespaceToMeta(Connection conn, NamespaceDescriptor ns
Put put = new Put(row, true).addColumn(HConstants.NAMESPACE_FAMILY,
HConstants.NAMESPACE_COL_DESC_QUALIFIER,
ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray());
- try (Table table = conn.getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = conn.getTable(MetaTableName.getInstance())) {
table.put(put);
}
}
@@ -212,7 +213,7 @@ public static void insertNamespaceToMeta(Connection conn, NamespaceDescriptor ns
public void deleteNamespace(String namespaceName) throws IOException {
checkMigrationDone();
Delete d = new Delete(Bytes.toBytes(namespaceName));
- try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = masterServices.getConnection().getTable(MetaTableName.getInstance())) {
table.delete(d);
}
cache.remove(namespaceName);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
index 6ad32623be1a..cdd54d616bee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.hbase.CatalogFamilyFormat;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Result;
@@ -86,7 +87,7 @@ public boolean isTableState(TableName tableName, TableState.State... states) {
}
public void setDeletedTable(TableName tableName) throws IOException {
- if (tableName.equals(TableName.META_TABLE_NAME)) {
+ if (tableName.equals(MetaTableName.getInstance())) {
// Can't delete the hbase:meta table.
return;
}
@@ -147,7 +148,7 @@ public TableState getTableState(TableName tableName) throws IOException {
}
private void updateMetaState(TableName tableName, TableState.State newState) throws IOException {
- if (tableName.equals(TableName.META_TABLE_NAME)) {
+ if (tableName.equals(MetaTableName.getInstance())) {
if (
TableState.State.DISABLING.equals(newState) || TableState.State.DISABLED.equals(newState)
) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 1b64ddea23e3..96f4fb0a4ce8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -47,6 +47,7 @@
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
 import org.apache.hadoop.hbase.PleaseHoldException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.DoNotRetryRegionException;
@@ -354,7 +355,7 @@ public void start() throws IOException, KeeperException {
if (RegionReplicaUtil.isDefaultReplica(regionInfo.getReplicaId())) {
setMetaAssigned(regionInfo, state == State.OPEN);
}
- LOG.debug("Loaded hbase:meta {}", regionNode);
+ LOG.debug("Loaded {} {}", MetaTableName.getInstance(), regionNode);
}, result);
}
}
@@ -1948,8 +1949,8 @@ private void checkMetaLoaded(RegionInfo hri) throws PleaseHoldException {
boolean meta = isMetaRegion(hri);
boolean metaLoaded = isMetaLoaded();
if (!meta && !metaLoaded) {
- throw new PleaseHoldException(
- "Master not fully online; hbase:meta=" + meta + ", metaLoaded=" + metaLoaded);
+ throw new PleaseHoldException("Master not fully online; " + MetaTableName.getInstance() + "="
+ + meta + ", metaLoaded=" + metaLoaded);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index c370fed9d9c0..11ae106a0b8c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.MetaMutationAnnotation;
+import org.apache.hadoop.hbase.MetaTableName;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -718,8 +719,10 @@ private void preMergeRegionsCommit(final MasterProcedureEnv env) throws IOExcept
RegionInfo.parseRegionName(p.getRow());
}
} catch (IOException e) {
- LOG.error("Row key of mutation from coprocessor is not parsable as region name. "
- + "Mutations from coprocessor should only be for hbase:meta table.", e);
+ LOG.error(
+ "Row key of mutation from coprocessor is not parsable as region name. "
+ + "Mutations from coprocessor should only be for {} table.",
+ MetaTableName.getInstance(), e);
throw e;
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 5987fc7537b4..8bcf3b5a93fb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Delete;
@@ -169,9 +170,10 @@ public static void visitMetaEntry(final RegionStateVisitor visitor, final Result
final long openSeqNum = hrl.getSeqNum();
LOG.debug(
- "Load hbase:meta entry region={}, regionState={}, lastHost={}, "
+ "Load {} entry region={}, regionState={}, lastHost={}, "
+ "regionLocation={}, openSeqNum={}",
- regionInfo.getEncodedName(), state, lastHost, regionLocation, openSeqNum);
+ MetaTableName.getInstance(), regionInfo.getEncodedName(), state, lastHost, regionLocation,
+ openSeqNum);
visitor.visitRegionState(result, regionInfo, state, regionLocation, lastHost, openSeqNum);
}
}
@@ -190,8 +192,8 @@ private Put generateUpdateRegionLocationPut(RegionStateNode regionStateNode) thr
final Put put = new Put(CatalogFamilyFormat.getMetaKeyForRegion(regionInfo), time);
MetaTableAccessor.addRegionInfo(put, regionInfo);
final StringBuilder info =
- new StringBuilder("pid=").append(pid).append(" updating hbase:meta row=")
- .append(regionInfo.getEncodedName()).append(", regionState=").append(state);
+ new StringBuilder("pid=").append(pid).append(" updating ").append(MetaTableName.getInstance())
+ .append(" row=").append(regionInfo.getEncodedName()).append(", regionState=").append(state);
if (openSeqNum >= 0) {
Preconditions.checkArgument(state == State.OPEN && regionLocation != null,
"Open region should be on a server");
@@ -283,7 +285,7 @@ private CompletableFuture updateRegionLocation(RegionInfo regionInfo, Stat
future = FutureUtils.failedFuture(e);
}
} else {
- AsyncTable> table = master.getAsyncConnection().getTable(TableName.META_TABLE_NAME);
+ AsyncTable> table = master.getAsyncConnection().getTable(MetaTableName.getInstance());
future = table.put(put);
}
FutureUtils.addListener(future, (r, e) -> {
@@ -330,7 +332,7 @@ private void multiMutate(RegionInfo ri, List mutations) throws IOExcep
}
MutateRowsRequest request = builder.build();
AsyncTable> table =
- master.getConnection().toAsyncConnection().getTable(TableName.META_TABLE_NAME);
+ master.getConnection().toAsyncConnection().getTable(MetaTableName.getInstance());
CompletableFuture future = table. coprocessorService(MultiRowMutationService::newStub,
(stub, controller, done) -> stub.mutateRows(controller, request, done), row);
@@ -338,7 +340,7 @@ MutateRowsResponse> coprocessorService(MultiRowMutationService::newStub,
}
private Table getMetaTable() throws IOException {
- return master.getConnection().getTable(TableName.META_TABLE_NAME);
+ return master.getConnection().getTable(MetaTableName.getInstance());
}
private Result getRegionCatalogResult(RegionInfo region) throws IOException {
@@ -504,7 +506,7 @@ public void deleteMergeQualifiers(RegionInfo mergeRegion) throws IOException {
+ " in meta table, they are cleaned up already, Skip.");
return;
}
- try (Table table = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = master.getConnection().getTable(MetaTableName.getInstance())) {
table.delete(delete);
}
LOG.info(
@@ -694,7 +696,7 @@ public static State getRegionState(final Result r, RegionInfo regionInfo) {
return State.valueOf(state);
} catch (IllegalArgumentException e) {
LOG.warn(
-        "BAD value {} in hbase:meta info:state column for region {} , "
-          + "Consider using HBCK2 setRegionState ENCODED_REGION_NAME STATE",
-        state, regionInfo.getEncodedName());
+        "BAD value {} in {} info:state column for region {} , "
+          + "Consider using HBCK2 setRegionState ENCODED_REGION_NAME STATE",
+        state, MetaTableName.getInstance(), regionInfo.getEncodedName());
return null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 3d3d3d18de23..1e405a001a9e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -903,8 +904,10 @@ private void preSplitRegionBeforeMETA(final MasterProcedureEnv env)
RegionInfo.parseRegionName(p.getRow());
}
} catch (IOException e) {
- LOG.error("pid=" + getProcId() + " row key of mutation from coprocessor not parsable as "
- + "region name." + "Mutations from coprocessor should only for hbase:meta table.");
+ LOG.error(
+ "pid={} row key of mutation from coprocessor not parsable as region name. "
+ + "Mutations from coprocessor should only be for {} table.",
+ getProcId(), MetaTableName.getInstance());
throw e;
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java
index 77b1082d0f03..1f141a8de06e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/ReplicationBarrierCleaner.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
@@ -80,7 +81,7 @@ public synchronized void chore() {
long deletedLastPushedSeqIds = 0;
TableName tableName = null;
List peerIds = null;
- try (Table metaTable = conn.getTable(TableName.META_TABLE_NAME);
+ try (Table metaTable = conn.getTable(MetaTableName.getInstance());
ResultScanner scanner = metaTable.getScanner(
new Scan().addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions())) {
for (;;) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java
index 9f5ff857d4d8..f9fa67da83ae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MetaBrowser.java
@@ -33,6 +33,7 @@
import org.apache.commons.lang3.builder.ToStringStyle;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer;
import org.apache.hadoop.hbase.client.AsyncConnection;
@@ -156,7 +157,7 @@ public TableName getScanTable() {
public Results getResults() {
final AsyncTable asyncTable =
- connection.getTable(TableName.META_TABLE_NAME);
+ connection.getTable(MetaTableName.getInstance());
return new Results(asyncTable.getScanner(buildScan()));
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
index 0d3ddb43abd4..4e7925e708af 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/CatalogJanitor.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -105,7 +106,7 @@ protected boolean initialChore() {
scan();
}
} catch (IOException e) {
- LOG.warn("Failed initial janitorial scan of hbase:meta table", e);
+ LOG.warn("Failed initial janitorial scan of {} table", MetaTableName.getInstance(), e);
return false;
}
return true;
@@ -145,7 +146,7 @@ protected void chore() {
+ this.services.getServerManager().isClusterShutdown());
}
} catch (IOException e) {
- LOG.warn("Failed janitorial scan of hbase:meta table", e);
+ LOG.warn("Failed janitorial scan of {} table", MetaTableName.getInstance(), e);
}
}
@@ -484,7 +485,7 @@ public static void main(String[] args) throws IOException {
*/
Get g = new Get(Bytes.toBytes("t2,40,1564119846424.1db8c57d64e0733e0f027aaeae7a0bf0."));
g.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
- try (Table t = connection.getTable(TableName.META_TABLE_NAME)) {
+ try (Table t = connection.getTable(MetaTableName.getInstance())) {
Result r = t.get(g);
byte[] row = g.getRow();
row[row.length - 2] <<= row[row.length - 2];
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
index 1244d5bf3525..9b30d5198510 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/MetaFixer.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
@@ -203,19 +204,19 @@ private static List createMetaEntries(final MasterServices masterSer
.flatMap(List::stream).collect(Collectors.toList());
final List createMetaEntriesFailures = addMetaEntriesResults.stream()
.filter(Either::hasRight).map(Either::getRight).collect(Collectors.toList());
- LOG.debug("Added {}/{} entries to hbase:meta", createMetaEntriesSuccesses.size(),
- newRegionInfos.size());
+ LOG.debug("Added {}/{} entries to {}", createMetaEntriesSuccesses.size(), newRegionInfos.size(),
+ MetaTableName.getInstance());
if (!createMetaEntriesFailures.isEmpty()) {
LOG.warn(
- "Failed to create entries in hbase:meta for {}/{} RegionInfo descriptors. First"
+        "Failed to create entries in {} for {}/{} RegionInfo descriptors. First"
+ " failure message included; full list of failures with accompanying stack traces is"
+ " available at log level DEBUG. message={}",
- createMetaEntriesFailures.size(), addMetaEntriesResults.size(),
+ MetaTableName.getInstance(), createMetaEntriesFailures.size(), addMetaEntriesResults.size(),
createMetaEntriesFailures.get(0).getMessage());
if (LOG.isDebugEnabled()) {
- createMetaEntriesFailures
- .forEach(ioe -> LOG.debug("Attempt to fix region hole in hbase:meta failed.", ioe));
+ createMetaEntriesFailures.forEach(ioe -> LOG
+ .debug("Attempt to fix region hole in {} failed.", MetaTableName.getInstance(), ioe));
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
index c712f1cba672..f370fdc2ffe8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/janitor/ReportMakingVisitor.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.Result;
@@ -137,8 +138,9 @@ private RegionInfo metaTableConsistencyCheck(Result metaTableRow) {
if (!Bytes.equals(metaTableRow.getRow(), ri.getRegionName())) {
LOG.warn(
"INCONSISTENCY: Row name is not equal to serialized info:regioninfo content; "
- + "row={} {}; See if RegionInfo is referenced in another hbase:meta row? Delete?",
- Bytes.toStringBinary(metaTableRow.getRow()), ri.getRegionNameAsString());
+ + "row={} {}; See if RegionInfo is referenced in another {} row? Delete?",
+ Bytes.toStringBinary(metaTableRow.getRow()), ri.getRegionNameAsString(),
+ MetaTableName.getInstance());
return null;
}
// Skip split parent region
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index e199f6d5971d..960fa3defe6d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
@@ -394,7 +395,7 @@ private static void cleanRegionsInMeta(final MasterProcedureEnv env, final Table
long now = EnvironmentEdgeManager.currentTime();
List deletes = new ArrayList<>();
try (
- Table metaTable = env.getMasterServices().getConnection().getTable(TableName.META_TABLE_NAME);
+ Table metaTable = env.getMasterServices().getConnection().getTable(MetaTableName.getInstance());
ResultScanner scanner = metaTable.getScanner(tableScan)) {
for (;;) {
Result result = scanner.next();
@@ -405,7 +406,7 @@ private static void cleanRegionsInMeta(final MasterProcedureEnv env, final Table
}
if (!deletes.isEmpty()) {
LOG.warn("Deleting some vestigial " + deletes.size() + " rows of " + tableName + " from "
- + TableName.META_TABLE_NAME);
+ + MetaTableName.getInstance());
metaTable.delete(deletes);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
index e8999b886afd..8ce33c1574ca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
@@ -111,7 +112,7 @@ protected Flow executeFromState(final MasterProcedureEnv env, final DisableTable
) {
MasterFileSystem fs = env.getMasterFileSystem();
try (BufferedMutator mutator = env.getMasterServices().getConnection()
- .getBufferedMutator(TableName.META_TABLE_NAME)) {
+ .getBufferedMutator(MetaTableName.getInstance())) {
for (RegionInfo region : env.getAssignmentManager().getRegionStates()
.getRegionsOfTable(tableName)) {
long maxSequenceId = WALSplitUtil.getMaxRegionSequenceId(
@@ -230,7 +231,7 @@ public TableOperationType getTableOperationType() {
*/
private boolean prepareDisable(final MasterProcedureEnv env) throws IOException {
boolean canTableBeDisabled = true;
- if (tableName.equals(TableName.META_TABLE_NAME)) {
+ if (tableName.equals(MetaTableName.getInstance())) {
setFailure("master-disable-table",
new ConstraintException("Cannot disable " + this.tableName));
canTableBeDisabled = false;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
index 43d69361c2d2..635dcc0a91b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/HBCKServerCrashProcedure.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
@@ -102,14 +103,14 @@ List getRegionsOnCrashedServer(MasterProcedureEnv env) {
MetaTableAccessor.scanMetaForTableRegions(env.getMasterServices().getConnection(), visitor,
null);
} catch (IOException ioe) {
- LOG.warn("Failed scan of hbase:meta for 'Unknown Servers'", ioe);
+ LOG.warn("Failed scan of {} for 'Unknown Servers'", MetaTableName.getInstance(), ioe);
return ris;
}
// create the server state node too
env.getAssignmentManager().getRegionStates().createServer(getServerName());
- LOG.info("Found {} mentions of {} in hbase:meta of OPEN/OPENING Regions: {}",
- visitor.getReassigns().size(), getServerName(), visitor.getReassigns().stream()
- .map(RegionInfo::getEncodedName).collect(Collectors.joining(",")));
+ LOG.info("Found {} mentions of {} in {} of OPEN/OPENING Regions: {}",
+ visitor.getReassigns().size(), getServerName(), MetaTableName.getInstance(), visitor
+ .getReassigns().stream().map(RegionInfo::getEncodedName).collect(Collectors.joining(",")));
return visitor.getReassigns();
}
@@ -150,8 +151,8 @@ public boolean visit(Result result) throws IOException {
RegionState rs = new RegionState(hrl.getRegion(), state, hrl.getServerName());
if (rs.isClosing()) {
// Move region to CLOSED in hbase:meta.
- LOG.info("Moving {} from CLOSING to CLOSED in hbase:meta",
- hrl.getRegion().getRegionNameAsString());
+ LOG.info("Moving {} from CLOSING to CLOSED in {}",
+ hrl.getRegion().getRegionNameAsString(), MetaTableName.getInstance());
try {
MetaTableAccessor.updateRegionState(this.connection, hrl.getRegion(),
RegionState.State.CLOSED);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
index 8b4901e90e85..2dfc652fc542 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -67,7 +68,7 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure proc) {
// Meta Queue Lookup Helpers
// ============================================================================
private MetaQueue getMetaQueue() {
- MetaQueue node = AvlTree.get(metaMap, TableName.META_TABLE_NAME, META_QUEUE_KEY_COMPARATOR);
+ MetaQueue node = AvlTree.get(metaMap, MetaTableName.getInstance(), META_QUEUE_KEY_COMPARATOR);
if (node != null) {
return node;
}
@@ -1079,7 +1080,7 @@ public boolean waitMetaExclusiveLock(Procedure> procedure) {
return false;
}
waitProcedure(lock, procedure);
- logLockedResource(LockedResourceType.META, TableName.META_TABLE_NAME.getNameAsString());
+ logLockedResource(LockedResourceType.META, MetaTableName.getInstance().getNameAsString());
return true;
} finally {
schedUnlock();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java
index 3d313c9ac3ab..5915971bd4c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MetaQueue.java
@@ -19,6 +19,7 @@
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.procedure2.LockStatus;
import org.apache.hadoop.hbase.procedure2.Procedure;
@@ -33,7 +34,7 @@
class MetaQueue extends Queue {
protected MetaQueue(LockStatus lockStatus) {
- super(TableName.META_TABLE_NAME, 1, lockStatus);
+ super(MetaTableName.getInstance(), 1, lockStatus);
}
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java
index dc9eac4c879d..30a120143ade 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MigrateNamespaceTableProcedure.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
@@ -64,7 +65,7 @@ private void migrate(MasterProcedureEnv env) throws IOException {
try (Table nsTable = conn.getTable(TableName.NAMESPACE_TABLE_NAME);
ResultScanner scanner = nsTable.getScanner(
new Scan().addFamily(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES).readAllVersions());
- BufferedMutator mutator = conn.getBufferedMutator(TableName.META_TABLE_NAME)) {
+ BufferedMutator mutator = conn.getBufferedMutator(MetaTableName.getInstance())) {
for (Result result;;) {
result = scanner.next();
if (result == null) {
@@ -88,7 +89,7 @@ protected Flow executeFromState(MasterProcedureEnv env, MigrateNamespaceTablePro
switch (state) {
case MIGRATE_NAMESPACE_TABLE_ADD_FAMILY:
TableDescriptor metaTableDesc =
- env.getMasterServices().getTableDescriptors().get(TableName.META_TABLE_NAME);
+ env.getMasterServices().getTableDescriptors().get(MetaTableName.getInstance());
if (!metaTableDesc.hasColumnFamily(HConstants.NAMESPACE_FAMILY)) {
TableDescriptor newMetaTableDesc = TableDescriptorBuilder.newBuilder(metaTableDesc)
.setColumnFamily(
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index 0d8981891e54..0280516f28ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -108,7 +109,7 @@ protected void preflightChecks(MasterProcedureEnv env, Boolean enabled) throws H
for (byte[] family : UNDELETABLE_META_COLUMNFAMILIES) {
if (!cfs.contains(family)) {
throw new HBaseIOException(
- "Delete of hbase:meta column family " + Bytes.toString(family));
+ "Delete of " + MetaTableName.getInstance() + " column family " + Bytes.toString(family));
}
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java
index 642df36d535f..2afaf1c20b6f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SchemaLocking.java
@@ -26,6 +26,7 @@
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.procedure2.LockAndQueue;
@@ -174,7 +175,7 @@ List getLocks() {
addToLockedResources(lockedResources, regionLocks, Function.identity(),
LockedResourceType.REGION);
addToLockedResources(lockedResources, peerLocks, Function.identity(), LockedResourceType.PEER);
- addToLockedResources(lockedResources, ImmutableMap.of(TableName.META_TABLE_NAME, metaLock),
+ addToLockedResources(lockedResources, ImmutableMap.of(MetaTableName.getInstance(), metaLock),
tn -> tn.getNameAsString(), LockedResourceType.META);
addToLockedResources(lockedResources, globalLocks, Function.identity(),
LockedResourceType.GLOBAL);
@@ -236,7 +237,7 @@ public String toString() {
.append("tableLocks", filterUnlocked(tableLocks))
.append("regionLocks", filterUnlocked(regionLocks))
.append("peerLocks", filterUnlocked(peerLocks))
- .append("metaLocks", filterUnlocked(ImmutableMap.of(TableName.META_TABLE_NAME, metaLock)))
+ .append("metaLocks", filterUnlocked(ImmutableMap.of(MetaTableName.getInstance(), metaLock)))
.append("globalLocks", filterUnlocked(globalLocks)).build();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java
index ef11e68217a5..57f4e9e4359c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateRegionProcedure.java
@@ -20,6 +20,7 @@
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -231,7 +232,7 @@ public void toStringClassDetails(StringBuilder sb) {
}
private boolean prepareTruncate() throws IOException {
- if (getTableName().equals(TableName.META_TABLE_NAME)) {
+ if (getTableName().equals(MetaTableName.getInstance())) {
throw new IOException("Can't truncate region in catalog tables");
}
return true;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
index 41848a58b784..16c3829a1a7a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
@@ -40,6 +40,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
@@ -243,7 +244,7 @@ public void testFlushedSequenceIdPersistLoad() throws Exception {
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf"))).build();
Table table = TEST_UTIL.createTable(tableDescriptor, null);
// flush META region
- TEST_UTIL.flush(TableName.META_TABLE_NAME);
+ TEST_UTIL.flush(MetaTableName.getInstance());
// wait for regionserver report
Threads.sleep(msgInterval * 2);
// record flush seqid before cluster shutdown
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java
index b3fadc7ed27a..4fda3f2b5677 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystemWithStoreFileTracking.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -64,7 +65,7 @@ public static void teardownTest() throws Exception {
@Test
public void tesMetaDescriptorHasSFTConfig() throws Exception {
- TableDescriptor descriptor = UTIL.getAdmin().getDescriptor(TableName.META_TABLE_NAME);
+ TableDescriptor descriptor = UTIL.getAdmin().getDescriptor(MetaTableName.getInstance());
assertEquals(FILE.name(), descriptor.getValue(TRACKER_IMPL));
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
index f640c3084cb8..df1e86fa4916 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
@@ -46,6 +46,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
@@ -307,7 +308,7 @@ public void testIncompleteMetaTableReplicaInformation() throws Exception {
ADMIN.disableTable(tableName);
// now delete one replica info from all the rows
// this is to make the meta appear to be only partially updated
- Table metaTable = ADMIN.getConnection().getTable(TableName.META_TABLE_NAME);
+ Table metaTable = ADMIN.getConnection().getTable(MetaTableName.getInstance());
for (byte[] row : tableRows) {
Delete deleteOneReplicaLocation = new Delete(row);
deleteOneReplicaLocation.addColumns(HConstants.CATALOG_FAMILY,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java
index 910692d93c30..14a2f83c21a3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRepairMode.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
@@ -93,7 +94,7 @@ public void testNewCluster() throws Exception {
Connection conn = TEST_UTIL.getConnection();
assertTrue(conn.getAdmin().isMasterInMaintenanceMode());
- try (Table table = conn.getTable(TableName.META_TABLE_NAME);
+ try (Table table = conn.getTable(MetaTableName.getInstance());
ResultScanner scanner = table.getScanner(new Scan())) {
assertNotNull("Could not read meta.", scanner.next());
}
@@ -120,7 +121,7 @@ public void testExistingCluster() throws Exception {
Connection conn = TEST_UTIL.getConnection();
assertTrue(conn.getAdmin().isMasterInMaintenanceMode());
- try (Table table = conn.getTable(TableName.META_TABLE_NAME);
+ try (Table table = conn.getTable(MetaTableName.getInstance());
ResultScanner scanner = table.getScanner(HConstants.TABLE_FAMILY);
Stream results = StreamSupport.stream(scanner.spliterator(), false)) {
assertTrue("Did not find user table records while reading hbase:meta",
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
index e59ef4919126..29b55a9d46a4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -301,7 +302,7 @@ public void testKillRSWithOpeningRegion2482() throws Exception {
*/
private static int addToEachStartKey(final int expected) throws IOException {
Table t = TEST_UTIL.getConnection().getTable(TABLENAME);
- Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
+ Table meta = TEST_UTIL.getConnection().getTable(MetaTableName.getInstance());
int rows = 0;
Scan scan = new Scan();
scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
index b6bce31eed9c..12121dbd064b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaAssignmentWithStopMaster.java
@@ -25,6 +25,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
@@ -66,7 +67,7 @@ public static void tearDownAfterClass() throws Exception {
@Test
public void testStopActiveMaster() throws Exception {
try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration());
- RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
+ RegionLocator locator = conn.getRegionLocator(MetaTableName.getInstance())) {
ServerName oldMetaServer = locator.getAllRegionLocations().get(0).getServerName();
ServerName oldMaster = UTIL.getMiniHBaseCluster().getMaster().getServerName();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java
index cdb243b06cdb..91b9325c2831 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateAndMirrorMetaLocations.java
@@ -34,6 +34,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
@@ -68,7 +69,7 @@ public class TestMigrateAndMirrorMetaLocations {
@BeforeClass
public static void setUp() throws Exception {
UTIL.startMiniCluster(3);
- HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 2);
+ HBaseTestingUtil.setReplicas(UTIL.getAdmin(), MetaTableName.getInstance(), 2);
}
@AfterClass
@@ -143,20 +144,20 @@ public void test() throws Exception {
}
// wait until all meta regions have been assigned
UTIL.waitFor(30000,
- () -> UTIL.getMiniHBaseCluster().getRegions(TableName.META_TABLE_NAME).size() == 2);
+ () -> UTIL.getMiniHBaseCluster().getRegions(MetaTableName.getInstance()).size() == 2);
// make sure all the SCPs are finished
waitUntilNoSCP();
checkMirrorLocation(2);
// increase replica count to 3
- HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 3);
+ HBaseTestingUtil.setReplicas(UTIL.getAdmin(), MetaTableName.getInstance(), 3);
checkMirrorLocation(3);
byte[] replica2Data = ZKUtil.getData(UTIL.getZooKeeperWatcher(),
UTIL.getZooKeeperWatcher().getZNodePaths().getZNodeForReplica(2));
// decrease replica count to 1
- HBaseTestingUtil.setReplicas(UTIL.getAdmin(), TableName.META_TABLE_NAME, 1);
+ HBaseTestingUtil.setReplicas(UTIL.getAdmin(), MetaTableName.getInstance(), 1);
checkMirrorLocation(1);
// restart the whole cluster, put an extra replica znode on zookeeper, to see if we will remove
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java
index 30dd308c28f3..4fa5761a29e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java
@@ -31,6 +31,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -77,7 +78,7 @@ public static final class SuspendProcedure extends Procedure
@Override
public TableName getTableName() {
- return TableName.META_TABLE_NAME;
+ return MetaTableName.getInstance();
}
@Override
@@ -154,7 +155,7 @@ public static void tearDown() throws Exception {
private void removeNamespaceFamily() throws IOException {
FileSystem fs = UTIL.getTestFileSystem();
Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration());
- Path tableDir = CommonFSUtils.getTableDir(rootDir, TableName.META_TABLE_NAME);
+ Path tableDir = CommonFSUtils.getTableDir(rootDir, MetaTableName.getInstance());
TableDescriptor metaTableDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
TableDescriptor noNsMetaTableDesc = TableDescriptorBuilder.newBuilder(metaTableDesc)
.removeColumnFamily(HConstants.NAMESPACE_FAMILY).build();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRecreateCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRecreateCluster.java
index 42f54e5c8758..7d2c74d7ef3c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRecreateCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRecreateCluster.java
@@ -32,6 +32,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
@@ -126,7 +127,7 @@ private void validateRecreateClusterWithUserTableEnabled(boolean cleanupWALs,
private void restartHBaseCluster(boolean cleanUpWALs, boolean cleanUpZnodes) throws Exception {
// flush cache so that everything is on disk
- TEST_UTIL.getMiniHBaseCluster().flushcache(TableName.META_TABLE_NAME);
+ TEST_UTIL.getMiniHBaseCluster().flushcache(MetaTableName.getInstance());
TEST_UTIL.getMiniHBaseCluster().flushcache();
List oldServers =
@@ -177,7 +178,7 @@ private void prepareDataBeforeRecreate(HBaseTestingUtil testUtil, TableName tabl
put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("c"), Bytes.toBytes("v"));
table.put(put);
- ensureTableNotColocatedWithSystemTable(tableName, TableName.META_TABLE_NAME);
+ ensureTableNotColocatedWithSystemTable(tableName, MetaTableName.getInstance());
}
private void ensureTableNotColocatedWithSystemTable(TableName userTable, TableName systemTable)
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java
index 866f74b73191..7f8b4d9ed1a0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartWithEmptyWALDirectory.java
@@ -25,6 +25,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
@@ -80,7 +81,7 @@ public void testRestart() throws IOException, InterruptedException {
table.put(new Put(row).addColumn(FAMILY, QUALIFIER, row));
}
// flush all in memory data
- UTIL.flush(TableName.META_TABLE_NAME);
+ UTIL.flush(MetaTableName.getInstance());
UTIL.flush(NAME);
// stop master first, so when stopping region server, we will not schedule a SCP.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java
index 8263298a8e4f..a679a36f6bb0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestServerCrashProcedureCarryingMetaStuck.java
@@ -22,6 +22,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -64,13 +65,13 @@ public static void tearDown() throws Exception {
public void test() throws Exception {
RegionServerThread rsThread = null;
for (RegionServerThread t : UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
- if (!t.getRegionServer().getRegions(TableName.META_TABLE_NAME).isEmpty()) {
+ if (!t.getRegionServer().getRegions(MetaTableName.getInstance()).isEmpty()) {
rsThread = t;
break;
}
}
HRegionServer rs = rsThread.getRegionServer();
- RegionInfo hri = rs.getRegions(TableName.META_TABLE_NAME).get(0).getRegionInfo();
+ RegionInfo hri = rs.getRegions(MetaTableName.getInstance()).get(0).getRegionInfo();
HMaster master = UTIL.getMiniHBaseCluster().getMaster();
ProcedureExecutor executor = master.getMasterProcedureExecutor();
DummyRegionProcedure proc = new DummyRegionProcedure(executor.getEnvironment(), hri);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java
index b86493287e52..e2e5a7fbe6c5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestCloseRegionWhileRSCrash.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ProcedureTestUtil;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -150,11 +151,11 @@ public static void setUp() throws Exception {
UTIL.createTable(TABLE_NAME, CF);
UTIL.getAdmin().balancerSwitch(false, true);
HRegionServer srcRs = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
- if (!srcRs.getRegions(TableName.META_TABLE_NAME).isEmpty()) {
- RegionInfo metaRegion = srcRs.getRegions(TableName.META_TABLE_NAME).get(0).getRegionInfo();
+ if (!srcRs.getRegions(MetaTableName.getInstance()).isEmpty()) {
+ RegionInfo metaRegion = srcRs.getRegions(MetaTableName.getInstance()).get(0).getRegionInfo();
HRegionServer dstRs = UTIL.getOtherRegionServer(srcRs);
UTIL.getAdmin().move(metaRegion.getEncodedNameAsBytes(), dstRs.getServerName());
- UTIL.waitFor(30000, () -> !dstRs.getRegions(TableName.META_TABLE_NAME).isEmpty());
+ UTIL.waitFor(30000, () -> !dstRs.getRegions(MetaTableName.getInstance()).isEmpty());
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java
index 2f88f6087dd4..6794ebbbc24d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestOpenRegionProcedureBackoff.java
@@ -26,6 +26,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ProcedureTestUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -94,7 +95,7 @@ public static void setUp() throws Exception {
Configuration conf = UTIL.getConfiguration();
conf.setClass(HConstants.MASTER_IMPL, HMasterForTest.class, HMaster.class);
UTIL.startMiniCluster(1);
- UTIL.waitTableAvailable(TableName.META_TABLE_NAME);
+ UTIL.waitTableAvailable(MetaTableName.getInstance());
}
@AfterClass
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
index 0e00006251ac..6458e233977e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStateStore.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.client.Admin;
@@ -121,7 +122,7 @@ public void testVisitMetaForBadRegionState() throws Exception {
put.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER,
Bytes.toBytes("BAD_STATE"));
- try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) {
table.put(put);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java
index cd73e09af6db..5e187a998f2a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRollbackSCP.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.master.HMaster;
@@ -167,7 +168,7 @@ public void describeTo(Description description) {
@Test
public void testFailAndRollback() throws Exception {
- HRegionServer rsWithMeta = UTIL.getRSForFirstRegionInTable(TableName.META_TABLE_NAME);
+ HRegionServer rsWithMeta = UTIL.getRSForFirstRegionInTable(MetaTableName.getInstance());
UTIL.getMiniHBaseCluster().killRegionServer(rsWithMeta.getServerName());
UTIL.waitFor(15000, () -> getSCPForServer(rsWithMeta.getServerName()) != null);
ServerCrashProcedure scp = getSCPForServer(rsWithMeta.getServerName());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerConditionalsTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerConditionalsTestUtil.java
index 8a7169b09309..0d5c4a7888ac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerConditionalsTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerConditionalsTestUtil.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
@@ -146,7 +147,7 @@ static void validateReplicaDistribution(Connection connection, TableName tableNa
static void validateRegionLocations(Map> tableToServers,
TableName productTableName, boolean shouldBeBalanced) {
ServerName metaServer =
- tableToServers.get(TableName.META_TABLE_NAME).stream().findFirst().orElseThrow();
+ tableToServers.get(MetaTableName.getInstance()).stream().findFirst().orElseThrow();
ServerName quotaServer =
tableToServers.get(QuotaUtil.QUOTA_TABLE_NAME).stream().findFirst().orElseThrow();
Set productServers = tableToServers.get(productTableName);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java
index dcaebbd84356..f6f8fd1a7eb5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
@@ -161,7 +162,7 @@ public void testRoundRobinAssignment() throws Exception {
LoadBalancer balancer = master.getLoadBalancer();
List regions = admin.getRegions(tableName);
- regions.addAll(admin.getRegions(TableName.META_TABLE_NAME));
+ regions.addAll(admin.getRegions(MetaTableName.getInstance()));
List servers = Lists.newArrayList(
admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet());
Map> map = balancer.roundRobinAssignment(regions, servers);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestMetaTableIsolationBalancerConditional.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestMetaTableIsolationBalancerConditional.java
index 80f9728651e3..768854c32778 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestMetaTableIsolationBalancerConditional.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestMetaTableIsolationBalancerConditional.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -93,7 +94,7 @@ public void testTableIsolation() throws Exception {
BalancerConditionalsTestUtil.generateSplits(2 * NUM_SERVERS));
Set tablesToBeSeparated = ImmutableSet. builder()
- .add(TableName.META_TABLE_NAME).add(QuotaUtil.QUOTA_TABLE_NAME).add(productTableName).build();
+ .add(MetaTableName.getInstance()).add(QuotaUtil.QUOTA_TABLE_NAME).add(productTableName).build();
// Pause the balancer
admin.balancerSwitch(false, true);
@@ -147,7 +148,7 @@ private static void validateRegionLocations(Map> tabl
TableName productTableName, boolean shouldBeBalanced) {
// Validate that the region assignments
ServerName metaServer =
- tableToServers.get(TableName.META_TABLE_NAME).stream().findFirst().orElseThrow();
+ tableToServers.get(MetaTableName.getInstance()).stream().findFirst().orElseThrow();
ServerName quotaServer =
tableToServers.get(QuotaUtil.QUOTA_TABLE_NAME).stream().findFirst().orElseThrow();
Set productServers = tableToServers.get(productTableName);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java
index 88d1a298aa48..006a63625fc5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationBarrierCleaner.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
@@ -94,7 +95,7 @@ public static void tearDownAfterClass() throws Exception {
@After
public void tearDown() throws IOException {
- try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
+ try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance());
ResultScanner scanner = table.getScanner(new Scan().addFamily(HConstants.CATALOG_FAMILY)
.addFamily(HConstants.REPLICATION_BARRIER_FAMILY).setFilter(new FirstKeyOnlyFilter()))) {
for (;;) {
@@ -148,20 +149,20 @@ private void addBarrier(RegionInfo region, long... barriers) throws IOException
put.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER,
put.getTimestamp() - barriers.length + i, Bytes.toBytes(barriers[i]));
}
- try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) {
table.put(put);
}
}
private void fillCatalogFamily(RegionInfo region) throws IOException {
- try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) {
table.put(new Put(region.getRegionName()).addColumn(HConstants.CATALOG_FAMILY,
Bytes.toBytes("whatever"), Bytes.toBytes("whatever")));
}
}
private void clearCatalogFamily(RegionInfo region) throws IOException {
- try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) {
table.delete(new Delete(region.getRegionName()).addFamily(HConstants.CATALOG_FAMILY));
}
}
@@ -281,7 +282,7 @@ public void testDeleteRowForDeletedRegion() throws IOException, ReplicationExcep
// No catalog family, then we should remove the whole row
clearCatalogFamily(region);
cleaner.chore();
- try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) {
assertFalse(table
.exists(new Get(region.getRegionName()).addFamily(HConstants.REPLICATION_BARRIER_FAMILY)));
}
@@ -303,7 +304,7 @@ public void testDeleteRowForDeletedRegionNoPeers() throws IOException {
// There are no peers, and no catalog family for this region either, so we should remove the
// barriers. And since there is no catalog family, after we delete the barrier family, the whole
// row is deleted.
- try (Table table = UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = UTIL.getConnection().getTable(MetaTableName.getInstance())) {
assertFalse(table.exists(new Get(region.getRegionName())));
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java
index cf118260b401..b610c2750041 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestCatalogJanitorInMemoryStates.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaMockingUtil;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.Waiter;
@@ -182,7 +183,7 @@ private PairOfSameType waitOnDaughters(final RegionInfo r) throws IO
long start = EnvironmentEdgeManager.currentTime();
PairOfSameType pair = null;
try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
- Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) {
+ Table metaTable = conn.getTable(MetaTableName.getInstance())) {
Result result = null;
RegionInfo region = null;
while ((EnvironmentEdgeManager.currentTime() - start) < 60000) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java
index 614385ec04d6..75940a4fd42a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/janitor/TestMetaFixerNoCluster.java
@@ -25,6 +25,7 @@
import java.util.List;
import java.util.SortedSet;
import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
@@ -50,21 +51,21 @@ public class TestMetaFixerNoCluster {
private static byte[] D = Bytes.toBytes("d");
private static RegionInfo ALL = RegionInfoBuilder.FIRST_META_REGIONINFO;
private static RegionInfo _ARI =
- RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(A).build();
+ RegionInfoBuilder.newBuilder(MetaTableName.getInstance()).setEndKey(A).build();
private static RegionInfo _BRI =
- RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setEndKey(B).build();
+ RegionInfoBuilder.newBuilder(MetaTableName.getInstance()).setEndKey(B).build();
private static RegionInfo ABRI =
- RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(B).build();
+ RegionInfoBuilder.newBuilder(MetaTableName.getInstance()).setStartKey(A).setEndKey(B).build();
private static RegionInfo ACRI = org.apache.hadoop.hbase.client.RegionInfoBuilder
- .newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(C).build();
+ .newBuilder(MetaTableName.getInstance()).setStartKey(A).setEndKey(C).build();
private static RegionInfo CDRI = org.apache.hadoop.hbase.client.RegionInfoBuilder
- .newBuilder(TableName.META_TABLE_NAME).setStartKey(C).setEndKey(D).build();
+ .newBuilder(MetaTableName.getInstance()).setStartKey(C).setEndKey(D).build();
private static RegionInfo ADRI = org.apache.hadoop.hbase.client.RegionInfoBuilder
- .newBuilder(TableName.META_TABLE_NAME).setStartKey(A).setEndKey(D).build();
+ .newBuilder(MetaTableName.getInstance()).setStartKey(A).setEndKey(D).build();
private static RegionInfo D_RI = org.apache.hadoop.hbase.client.RegionInfoBuilder
- .newBuilder(TableName.META_TABLE_NAME).setStartKey(D).build();
+ .newBuilder(MetaTableName.getInstance()).setStartKey(D).build();
private static RegionInfo C_RI = org.apache.hadoop.hbase.client.RegionInfoBuilder
- .newBuilder(TableName.META_TABLE_NAME).setStartKey(C).build();
+ .newBuilder(MetaTableName.getInstance()).setStartKey(C).build();
@Test
public void testGetRegionInfoWithLargestEndKey() {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
index e931716e77ed..67537a96def7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
@@ -59,6 +59,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -104,7 +105,7 @@ public void before() {
@Test
public void testNoNormalizationForMetaTable() {
- TableName testTable = TableName.META_TABLE_NAME;
+ TableName testTable = MetaTableName.getInstance();
TableDescriptor testMetaTd = TableDescriptorBuilder.newBuilder(testTable).build();
List RegionInfo = new ArrayList<>();
Map regionSizes = new HashMap<>();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java
index a878af785783..39c34794bb2f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestHBCKSCP.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNameTestRule;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -262,7 +263,7 @@ public String toString() {
private static class PrimaryNotMetaRegionSelector extends RegionSelector {
@Override
boolean regionFilter(final RegionInfo info) {
- return !Objects.equals(TableName.META_TABLE_NAME, info.getTable())
+ return !Objects.equals(MetaTableName.getInstance(), info.getTable())
&& Objects.equals(RegionInfo.DEFAULT_REPLICA_ID, info.getReplicaId());
}
@@ -278,7 +279,7 @@ Exception regionFilterFailure() {
private static class ReplicaNonMetaRegionSelector extends RegionSelector {
@Override
boolean regionFilter(RegionInfo info) {
- return !Objects.equals(TableName.META_TABLE_NAME, info.getTable())
+ return !Objects.equals(MetaTableName.getInstance(), info.getTable())
&& !Objects.equals(RegionInfo.DEFAULT_REPLICA_ID, info.getReplicaId());
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java
index d2f04c674c97..c5e99471a7f6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.client.AsyncAdmin;
@@ -147,7 +148,7 @@ public static void tearDown() throws Exception {
@Test
public void test() throws Exception {
RegionServerThread rsWithMetaThread = UTIL.getMiniHBaseCluster().getRegionServerThreads()
- .stream().filter(t -> !t.getRegionServer().getRegions(TableName.META_TABLE_NAME).isEmpty())
+ .stream().filter(t -> !t.getRegionServer().getRegions(MetaTableName.getInstance()).isEmpty())
.findAny().get();
HRegionServer rsNoMeta = UTIL.getOtherRegionServer(rsWithMetaThread.getRegionServer());
FAIL = true;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableProcedureWaitingQueueCleanup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableProcedureWaitingQueueCleanup.java
index 386356124f5b..cf58f01b7843 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableProcedureWaitingQueueCleanup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableProcedureWaitingQueueCleanup.java
@@ -22,6 +22,7 @@
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -122,7 +123,7 @@ public static class MetaTableProcedure extends Procedure
@Override
public TableName getTableName() {
- return TableName.META_TABLE_NAME;
+ return MetaTableName.getInstance();
}
@Override
From cc7b635fbc6e870cfde5aea1a64d0d49ad5d3fc2 Mon Sep 17 00:00:00 2001
From: Kota-SH
Date: Mon, 15 Dec 2025 18:59:21 -0500
Subject: [PATCH 4/6] HBASE-29691: Change TableName.META_TABLE_NAME from being
a global static: org.apache.hadoop.hbase.regionserver
---
.../hbase/regionserver/RSRpcServices.java | 3 +-
.../TestCompactionInDeadRegionServer.java | 3 +-
.../regionserver/TestDefaultMemStore.java | 3 +-
.../TestEndToEndSplitTransaction.java | 3 +-
.../TestGetClosestAtOrBefore.java | 3 +-
.../TestReadAndWriteRegionInfoFile.java | 5 +--
.../hbase/regionserver/TestRegionInfo.java | 35 ++++++++++---------
.../regionserver/TestRegionReplicas.java | 3 +-
.../TestRegionServerCrashDisableWAL.java | 3 +-
.../TestRegionServerNoMaster.java | 5 +--
.../TestRegionServerRejectDuringAbort.java | 3 +-
.../TestShutdownWhileWALBroken.java | 3 +-
.../wal/AbstractTestLogRolling.java | 3 +-
.../regionserver/wal/TestLogRollAbort.java | 3 +-
.../regionserver/wal/TestLogRolling.java | 3 +-
.../wal/TestLogRollingNoCluster.java | 7 ++--
16 files changed, 52 insertions(+), 36 deletions(-)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index fdfea375e096..86a09b0cbf5e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -62,6 +62,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.Append;
@@ -1925,7 +1926,7 @@ public OpenRegionResponse openRegion(final RpcController controller,
tableName = ProtobufUtil.toTableName(ri.getTableName());
}
}
- if (!TableName.META_TABLE_NAME.equals(tableName)) {
+ if (!MetaTableName.getInstance().equals(tableName)) {
throw new ServiceException(ie);
}
// We are assigning meta, wait a little for regionserver to finish initialization.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java
index 64454ab268fa..58507a063d2d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionInDeadRegionServer.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.client.Get;
@@ -131,7 +132,7 @@ public void test() throws Exception {
HRegionServer regionSvr = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
HRegion region = regionSvr.getRegions(TABLE_NAME).get(0);
String regName = region.getRegionInfo().getEncodedName();
- List metaRegs = regionSvr.getRegions(TableName.META_TABLE_NAME);
+ List metaRegs = regionSvr.getRegions(MetaTableName.getInstance());
if (metaRegs != null && !metaRegs.isEmpty()) {
LOG.info("meta is on the same server: " + regionSvr);
// when region is on same server as hbase:meta, reassigning meta would abort the server
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index af3902c9aa1e..63dbfda6f2d1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -46,6 +46,7 @@
import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -1002,7 +1003,7 @@ public void testShouldFlushMeta() throws Exception {
TableDescriptors tds = new FSTableDescriptors(conf);
FSTableDescriptors.tryUpdateMetaTableDescriptor(conf);
HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, testDir, conf,
- tds.get(TableName.META_TABLE_NAME), wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO));
+ tds.get(MetaTableName.getInstance()), wFactory.getWAL(RegionInfoBuilder.FIRST_META_REGIONINFO));
// parameterized tests add [#] suffix get rid of [ and ].
TableDescriptor desc = TableDescriptorBuilder
.newBuilder(TableName.valueOf(name.getMethodName().replaceAll("[\\[\\]]", "_")))
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index 897152f8b6dd..7916101c425e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -43,6 +43,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -429,7 +430,7 @@ public static void blockUntilRegionSplit(Configuration conf, long timeout,
log("blocking until region is split:" + Bytes.toStringBinary(regionName));
RegionInfo daughterA = null, daughterB = null;
try (Connection conn = ConnectionFactory.createConnection(conf);
- Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) {
+ Table metaTable = conn.getTable(MetaTableName.getInstance())) {
Result result = null;
RegionInfo region = null;
while ((EnvironmentEdgeManager.currentTime() - start) < timeout) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
index a435b9d9b239..8acd75c5e675 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
@@ -92,7 +93,7 @@ public void testUsingMetaAndBinary() throws IOException {
// Up flush size else we bind up when we use default catalog flush of 16k.
TableDescriptors tds = new FSTableDescriptors(UTIL.getConfiguration());
FSTableDescriptors.tryUpdateMetaTableDescriptor(UTIL.getConfiguration());
- TableDescriptor td = tds.get(TableName.META_TABLE_NAME);
+ TableDescriptor td = tds.get(MetaTableName.getInstance());
td = TableDescriptorBuilder.newBuilder(td).setMemStoreFlushSize(64 * 1024 * 1024).build();
HRegion mr = HBaseTestingUtil.createRegionAndWAL(RegionInfoBuilder.FIRST_META_REGIONINFO,
rootdir, conf, td);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java
index 2869be090f42..f3f824ee5135 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadAndWriteRegionInfoFile.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.hbase.HBaseCommonTestingUtil;
import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -73,12 +74,12 @@ public void testReadAndWriteRegionInfoFile() throws IOException, InterruptedExce
FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(FS, ROOT_DIR);
FSTableDescriptors.tryUpdateAndGetMetaTableDescriptor(CONF, FS, ROOT_DIR);
HRegion r = HBaseTestingUtil.createRegionAndWAL(ri, ROOT_DIR, CONF,
- fsTableDescriptors.get(TableName.META_TABLE_NAME));
+ fsTableDescriptors.get(MetaTableName.getInstance()));
// Get modtime on the file.
long modtime = getModTime(r);
HBaseTestingUtil.closeRegionAndWAL(r);
Thread.sleep(1001);
- r = HRegion.openHRegion(ROOT_DIR, ri, fsTableDescriptors.get(TableName.META_TABLE_NAME), null,
+ r = HRegion.openHRegion(ROOT_DIR, ri, fsTableDescriptors.get(MetaTableName.getInstance()), null,
CONF);
// Ensure the file is not written for a second time.
long modtime2 = getModTime(r);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java
index 60fe39ecc77f..3a31d1f7fd2d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionInfo.java
@@ -32,6 +32,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionInfoDisplay;
@@ -71,7 +72,7 @@ public class TestRegionInfo {
public void testIsStart() {
assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isFirst());
org.apache.hadoop.hbase.client.RegionInfo ri = org.apache.hadoop.hbase.client.RegionInfoBuilder
- .newBuilder(TableName.META_TABLE_NAME).setStartKey(Bytes.toBytes("not_start")).build();
+ .newBuilder(MetaTableName.getInstance()).setStartKey(Bytes.toBytes("not_start")).build();
assertFalse(ri.isFirst());
}
@@ -79,7 +80,7 @@ public void testIsStart() {
public void testIsEnd() {
assertTrue(RegionInfoBuilder.FIRST_META_REGIONINFO.isFirst());
org.apache.hadoop.hbase.client.RegionInfo ri = org.apache.hadoop.hbase.client.RegionInfoBuilder
- .newBuilder(TableName.META_TABLE_NAME).setEndKey(Bytes.toBytes("not_end")).build();
+ .newBuilder(MetaTableName.getInstance()).setEndKey(Bytes.toBytes("not_end")).build();
assertFalse(ri.isLast());
}
@@ -87,9 +88,9 @@ public void testIsEnd() {
public void testIsNext() {
byte[] bytes = Bytes.toBytes("row");
org.apache.hadoop.hbase.client.RegionInfo ri = org.apache.hadoop.hbase.client.RegionInfoBuilder
- .newBuilder(TableName.META_TABLE_NAME).setEndKey(bytes).build();
+ .newBuilder(MetaTableName.getInstance()).setEndKey(bytes).build();
org.apache.hadoop.hbase.client.RegionInfo ri2 = org.apache.hadoop.hbase.client.RegionInfoBuilder
- .newBuilder(TableName.META_TABLE_NAME).setStartKey(bytes).build();
+ .newBuilder(MetaTableName.getInstance()).setStartKey(bytes).build();
assertFalse(ri.isNext(RegionInfoBuilder.FIRST_META_REGIONINFO));
assertTrue(ri.isNext(ri2));
}
@@ -102,18 +103,18 @@ public void testIsOverlap() {
byte[] d = Bytes.toBytes("d");
org.apache.hadoop.hbase.client.RegionInfo all = RegionInfoBuilder.FIRST_META_REGIONINFO;
org.apache.hadoop.hbase.client.RegionInfo ari = org.apache.hadoop.hbase.client.RegionInfoBuilder
- .newBuilder(TableName.META_TABLE_NAME).setEndKey(a).build();
+ .newBuilder(MetaTableName.getInstance()).setEndKey(a).build();
org.apache.hadoop.hbase.client.RegionInfo abri =
- org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME)
+ org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(MetaTableName.getInstance())
.setStartKey(a).setEndKey(b).build();
org.apache.hadoop.hbase.client.RegionInfo adri =
- org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME)
+ org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(MetaTableName.getInstance())
.setStartKey(a).setEndKey(d).build();
org.apache.hadoop.hbase.client.RegionInfo cdri =
- org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME)
+ org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(MetaTableName.getInstance())
.setStartKey(c).setEndKey(d).build();
org.apache.hadoop.hbase.client.RegionInfo dri = org.apache.hadoop.hbase.client.RegionInfoBuilder
- .newBuilder(TableName.META_TABLE_NAME).setStartKey(d).build();
+ .newBuilder(MetaTableName.getInstance()).setStartKey(d).build();
assertTrue(all.isOverlap(all));
assertTrue(all.isOverlap(abri));
assertFalse(abri.isOverlap(cdri));
@@ -140,17 +141,17 @@ public void testIsOverlaps() {
byte[] e = Bytes.toBytes("e");
byte[] f = Bytes.toBytes("f");
org.apache.hadoop.hbase.client.RegionInfo ari = org.apache.hadoop.hbase.client.RegionInfoBuilder
- .newBuilder(TableName.META_TABLE_NAME).setEndKey(a).build();
+ .newBuilder(MetaTableName.getInstance()).setEndKey(a).build();
org.apache.hadoop.hbase.client.RegionInfo abri =
- org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME)
+ org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(MetaTableName.getInstance())
.setStartKey(a).setEndKey(b).build();
org.apache.hadoop.hbase.client.RegionInfo eri = org.apache.hadoop.hbase.client.RegionInfoBuilder
- .newBuilder(TableName.META_TABLE_NAME).setEndKey(e).build();
+ .newBuilder(MetaTableName.getInstance()).setEndKey(e).build();
org.apache.hadoop.hbase.client.RegionInfo cdri =
- org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME)
+ org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(MetaTableName.getInstance())
.setStartKey(c).setEndKey(d).build();
org.apache.hadoop.hbase.client.RegionInfo efri =
- org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME)
+ org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(MetaTableName.getInstance())
.setStartKey(e).setEndKey(f).build();
assertFalse(ari.isOverlap(abri));
assertTrue(abri.isOverlap(eri));
@@ -175,12 +176,12 @@ public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedExc
FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
FSTableDescriptors.tryUpdateMetaTableDescriptor(htu.getConfiguration());
HRegion r = HBaseTestingUtil.createRegionAndWAL(hri, basedir, htu.getConfiguration(),
- fsTableDescriptors.get(TableName.META_TABLE_NAME));
+ fsTableDescriptors.get(MetaTableName.getInstance()));
// Get modtime on the file.
long modtime = getModTime(r);
HBaseTestingUtil.closeRegionAndWAL(r);
Thread.sleep(1001);
- r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), null,
+ r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(MetaTableName.getInstance()), null,
htu.getConfiguration());
// Ensure the file is not written for a second time.
long modtime2 = getModTime(r);
@@ -254,7 +255,7 @@ public void testContainsRange() {
@Test
public void testContainsRangeForMetaTable() {
TableDescriptor tableDesc =
- TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build();
+ TableDescriptorBuilder.newBuilder(MetaTableName.getInstance()).build();
RegionInfo hri = RegionInfoBuilder.newBuilder(tableDesc.getTableName()).build();
byte[] startRow = HConstants.EMPTY_START_ROW;
byte[] row1 = Bytes.toBytes("a,a,0");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
index 68c6b6434c4f..2ef40ffeb257 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TestMetaTableAccessor;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
@@ -147,7 +148,7 @@ public void testRegionReplicaUpdatesMetaLocation() throws Exception {
openRegion(HTU, getRS(), hriSecondary);
Table meta = null;
try {
- meta = HTU.getConnection().getTable(TableName.META_TABLE_NAME);
+ meta = HTU.getConnection().getTable(MetaTableName.getInstance());
TestMetaTableAccessor.assertMetaLocation(meta, hriPrimary.getRegionName(),
getRS().getServerName(), -1, 1, false);
} finally {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java
index 3fad6e16bf76..f5b3611fad29 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerCrashDisableWAL.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
@@ -65,7 +66,7 @@ public static void setUp() throws Exception {
UTIL.createTable(TABLE_NAME, CF);
UTIL.waitTableAvailable(TABLE_NAME);
HRegionServer rs = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
- if (!rs.getRegions(TableName.META_TABLE_NAME).isEmpty()) {
+ if (!rs.getRegions(MetaTableName.getInstance()).isEmpty()) {
HRegionServer rs1 = UTIL.getOtherRegionServer(rs);
UTIL.moveRegionAndWait(
UTIL.getMiniHBaseCluster().getRegions(TABLE_NAME).get(0).getRegionInfo(),
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
index 522b0ea884b3..96a8db5cabe6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
@@ -26,6 +26,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionLocator;
@@ -97,11 +98,11 @@ public static void stopMasterAndCacheMetaLocation(HBaseTestingUtil HTU)
// cache meta location, so we will not go to master to lookup meta region location
for (JVMClusterUtil.RegionServerThread t : HTU.getMiniHBaseCluster().getRegionServerThreads()) {
try (RegionLocator locator =
- t.getRegionServer().getConnection().getRegionLocator(TableName.META_TABLE_NAME)) {
+ t.getRegionServer().getConnection().getRegionLocator(MetaTableName.getInstance())) {
locator.getAllRegionLocations();
}
}
- try (RegionLocator locator = HTU.getConnection().getRegionLocator(TableName.META_TABLE_NAME)) {
+ try (RegionLocator locator = HTU.getConnection().getRegionLocator(MetaTableName.getInstance())) {
locator.getAllRegionLocations();
}
// Stop master
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java
index 61da536310a8..177b2c7d7787 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRejectDuringAbort.java
@@ -31,6 +31,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -135,7 +136,7 @@ public void testRejectRequestsOnAbort() throws Exception {
.getRegionServerThreads()) {
HRegionServer regionServer = regionServerThread.getRegionServer();
if (
- regionServer.getRegions(TableName.META_TABLE_NAME).isEmpty()
+ regionServer.getRegions(MetaTableName.getInstance()).isEmpty()
&& !regionServer.getRegions(TABLE_NAME).isEmpty()
) {
serverWithoutMeta = regionServer;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java
index 0bc7deccc121..37d9d5954d90 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestShutdownWhileWALBroken.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.client.Table;
@@ -135,7 +136,7 @@ public void test() throws Exception {
RegionServerThread rst1 = UTIL.getMiniHBaseCluster().getRegionServerThreads().get(1);
HRegionServer liveRS;
RegionServerThread toKillRSThread;
- if (rst1.getRegionServer().getRegions(TableName.META_TABLE_NAME).isEmpty()) {
+ if (rst1.getRegionServer().getRegions(MetaTableName.getInstance()).isEmpty()) {
liveRS = rst0.getRegionServer();
toKillRSThread = rst1;
} else {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java
index 2a5aec458828..563f5f03dfcb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java
@@ -35,6 +35,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -338,7 +339,7 @@ void validateData(Table table, int rownum) throws IOException {
public void testCompactionRecordDoesntBlockRolling() throws Exception {
// When the hbase:meta table can be opened, the region servers are running
- try (Table t = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
+ try (Table t = TEST_UTIL.getConnection().getTable(MetaTableName.getInstance());
Table table = createTestTable(getName())) {
server = TEST_UTIL.getRSForFirstRegionInTable(table.getName());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
index 3c3dbe1ead9e..511b58c9afd5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollAbort.java
@@ -36,6 +36,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
@@ -162,7 +163,7 @@ public void testRSAbortWithUnflushedEdits() throws Exception {
LOG.info("Starting testRSAbortWithUnflushedEdits()");
// When the hbase:meta table can be opened, the region servers are running
- TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close();
+ TEST_UTIL.getConnection().getTable(MetaTableName.getInstance()).close();
// Create the test table and open it
TableName tableName = TableName.valueOf(this.getClass().getSimpleName());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
index 43477f21f7f8..ad72bea40c89 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -322,7 +323,7 @@ public void testLogRollOnPipelineRestart() throws Exception {
fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()) > 1);
LOG.info("Replication=" + fs.getDefaultReplication(TEST_UTIL.getDataTestDirOnTestFS()));
// When the hbase:meta table can be opened, the region servers are running
- Table t = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
+ Table t = TEST_UTIL.getConnection().getTable(MetaTableName.getInstance());
try {
this.server = cluster.getRegionServer(0);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
index dd4fe77c8a38..ac62e0395325 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
@@ -31,6 +31,7 @@
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -95,7 +96,7 @@ public void testContendedLogRolling() throws Exception {
CommonFSUtils.setRootDir(conf, dir);
FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(TEST_UTIL.getConfiguration());
FSTableDescriptors.tryUpdateMetaTableDescriptor(TEST_UTIL.getConfiguration());
- TableDescriptor metaTableDescriptor = fsTableDescriptors.get(TableName.META_TABLE_NAME);
+ TableDescriptor metaTableDescriptor = fsTableDescriptors.get(MetaTableName.getInstance());
conf.set(FSHLogProvider.WRITER_IMPL, HighLatencySyncWriter.class.getName());
final WALFactory wals = new WALFactory(conf, TestLogRollingNoCluster.class.getName());
final WAL wal = wals.getWAL(null);
@@ -159,7 +160,7 @@ public void run() {
try {
TableDescriptors tds = new FSTableDescriptors(TEST_UTIL.getConfiguration());
FSTableDescriptors.tryUpdateMetaTableDescriptor(TEST_UTIL.getConfiguration());
- TableDescriptor htd = tds.get(TableName.META_TABLE_NAME);
+ TableDescriptor htd = tds.get(MetaTableName.getInstance());
for (int i = 0; i < this.count; i++) {
long now = EnvironmentEdgeManager.currentTime();
// Roll every ten edits
@@ -176,7 +177,7 @@ public void run() {
scopes.put(fam, 0);
}
final long txid = wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(),
- TableName.META_TABLE_NAME, now, mvcc, scopes), edit);
+ MetaTableName.getInstance(), now, mvcc, scopes), edit);
Threads.sleep(ThreadLocalRandom.current().nextInt(5));
wal.sync(txid);
}
From cb144424f6e81c706bf2acacc632ba3de4aa4f64 Mon Sep 17 00:00:00 2001
From: Kota-SH
Date: Mon, 15 Dec 2025 19:10:34 -0500
Subject: [PATCH 5/6] HBASE-29691: Change TableName.META_TABLE_NAME from being
a global static: rest
---
.../backup/impl/IncrementalBackupManager.java | 3 +-
.../favored/FavoredNodeAssignmentHelper.java | 3 +-
.../SnapshotOfRegionAssignmentFromMeta.java | 9 +-
...rgeClusterBalancingMetaTableIsolation.java | 5 +-
...gTableIsolationAndReplicaDistribution.java | 5 +-
.../hbase/security/token/ClientTokenUtil.java | 5 +-
.../hbase/shaded/protobuf/ProtobufUtil.java | 3 +-
.../hbase/client/TestCompactFromClient.java | 4 +-
.../apache/hadoop/hbase/MetaTableName.java | 56 +++++-------
.../org/apache/hadoop/hbase/TableName.java | 4 +-
.../master/MetricsMasterFileSystemSource.java | 4 +-
.../hbase/mapreduce/TestImportExport.java | 3 +-
.../hadoop/hbase/rest/TestStatusResource.java | 3 +-
.../model/TestStorageClusterStatusModel.java | 5 +-
.../hbase/coprocessor/MetaTableMetrics.java | 5 +-
.../ReplicationBarrierFamilyFormat.java | 5 +-
.../hbase/security/access/AccessChecker.java | 3 +-
.../hadoop/hbase/tool/BulkLoadHFilesTool.java | 13 +--
.../hadoop/hbase/util/FSTableDescriptors.java | 11 +--
.../org/apache/hadoop/hbase/util/FSUtils.java | 1 +
.../apache/hadoop/hbase/util/HBaseFsck.java | 88 +++++++++++--------
.../hadoop/hbase/util/HBaseFsckRepair.java | 3 +-
.../apache/hadoop/hbase/util/RegionMover.java | 9 +-
.../wal/BoundedRecoveredHFilesOutputSink.java | 6 +-
.../hbase-webapps/master/catalogTables.jsp | 3 +-
.../resources/hbase-webapps/master/table.jsp | 7 +-
.../hadoop/hbase/HBaseClusterInterface.java | 2 +-
.../hadoop/hbase/TestHBaseMetaEdit.java | 18 ++--
.../TestMetaUpdatesGoToPriorityQueue.java | 2 +-
.../apache/hadoop/hbase/TestNamespace.java | 2 +-
.../hbase/TestServerInternalsTracing.java | 2 +-
.../hadoop/hbase/http/TestInfoServersACL.java | 5 +-
.../TestReplicationWALEntryFilters.java | 3 +-
.../TestMetaRegionReplicaReplication.java | 27 +++---
.../regionserver/TestReplicationSource.java | 3 +-
.../TestSerialReplicationChecker.java | 7 +-
.../rsgroup/TestRSGroupsCPHookCalled.java | 3 +-
.../hbase/rsgroup/TestRSGroupsKillRS.java | 5 +-
.../security/access/TestRpcAccessChecks.java | 5 +-
.../token/TestGenerateDelegationToken.java | 3 +-
.../snapshot/TestRegionSnapshotTask.java | 3 +-
.../hadoop/hbase/util/BaseTestHBaseFsck.java | 9 +-
.../hbase/util/TestFSTableDescriptors.java | 7 +-
...TestHBaseFsckCleanReplicationBarriers.java | 3 +-
.../hadoop/hbase/util/TestHBaseFsckMOB.java | 3 +-
.../hadoop/hbase/util/TestRegionMover1.java | 3 +-
.../TestRegionMoverWithRSGroupEnable.java | 3 +-
.../apache/hadoop/hbase/wal/TestWALSplit.java | 5 +-
hbase-shell/src/main/ruby/hbase/table.rb | 2 +-
.../thrift/ThriftHBaseServiceHandler.java | 5 +-
.../hbase/zookeeper/MetaTableLocator.java | 19 ++--
.../apache/hadoop/hbase/zookeeper/ZKDump.java | 3 +-
52 files changed, 231 insertions(+), 187 deletions(-)
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
index 20884edf836e..76599704d87e 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
@@ -169,7 +170,7 @@ private List getLogFilesForNewBackup(Map olderTimestamps,
LOG.debug("currentLogFile: " + log.getPath().toString());
if (AbstractFSWALProvider.isMetaFile(log.getPath())) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Skip hbase:meta log file: " + log.getPath().getName());
+ LOG.debug("Skip {} log file: {}", MetaTableName.getInstance(), log.getPath().getName());
}
continue;
}
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
index 6c021bf622a5..8a36477913c9 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
@@ -131,7 +132,7 @@ public static void updateMetaWithFavoredNodesInfo(
puts.add(put);
}
}
- try (Table table = connection.getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = connection.getTable(MetaTableName.getInstance())) {
table.put(puts);
}
LOG.info("Added " + puts.size() + " region favored nodes in META");
diff --git a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
index 02c18c73bfb5..783ad2cbbc23 100644
--- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
+++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java
@@ -36,6 +36,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
@@ -170,9 +171,10 @@ private void processMetaRecord(Result result) throws IOException {
* Initialize the region assignment snapshot by scanning the hbase:meta table
*/
public void initialize() throws IOException {
- LOG.info("Start to scan the hbase:meta for the current region assignment " + "snappshot");
+ LOG.info("Start to scan {} for the current region assignment snapshot",
+ MetaTableName.getInstance());
// Scan hbase:meta to pick up user regions
- try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME);
+ try (Table metaTable = connection.getTable(MetaTableName.getInstance());
ResultScanner scanner = metaTable.getScanner(HConstants.CATALOG_FAMILY)) {
for (;;) {
Result result = scanner.next();
@@ -187,7 +189,8 @@ public void initialize() throws IOException {
}
}
}
- LOG.info("Finished to scan the hbase:meta for the current region assignment" + "snapshot");
+ LOG.info("Finished scanning {} for the current region assignment snapshot",
+ MetaTableName.getInstance());
}
private void addRegion(RegionInfo regionInfo) {
diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java
index 3548571286c0..8383b10df0ce 100644
--- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java
+++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingMetaTableIsolation.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -68,7 +69,7 @@ public static void setup() {
// Create regions
List allRegions = new ArrayList<>();
for (int i = 0; i < NUM_REGIONS; i++) {
- TableName tableName = i < 3 ? TableName.META_TABLE_NAME : NON_META_TABLE_NAME;
+ TableName tableName = i < 3 ? MetaTableName.getInstance() : NON_META_TABLE_NAME;
byte[] startKey = new byte[1];
startKey[0] = (byte) i;
byte[] endKey = new byte[1];
@@ -95,7 +96,7 @@ public void testMetaTableIsolation() {
}
private boolean isMetaTableIsolated(BalancerClusterState cluster) {
- return isTableIsolated(cluster, TableName.META_TABLE_NAME, "Meta");
+ return isTableIsolated(cluster, MetaTableName.getInstance(), "Meta");
}
}
diff --git a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java
index 0ea739faf78b..aacddd20cdcb 100644
--- a/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java
+++ b/hbase-balancer/src/test/java/org/apache/hadoop/hbase/master/balancer/TestLargeClusterBalancingTableIsolationAndReplicaDistribution.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -72,7 +73,7 @@ public static void setup() {
for (int i = 0; i < NUM_REGIONS; i++) {
TableName tableName;
if (i < 1) {
- tableName = TableName.META_TABLE_NAME;
+ tableName = MetaTableName.getInstance();
} else if (i < 10) {
tableName = SYSTEM_TABLE_NAME;
} else {
@@ -116,7 +117,7 @@ public void testTableIsolationAndReplicaDistribution() {
* Validates whether all meta table regions are isolated.
*/
private boolean isMetaTableIsolated(BalancerClusterState cluster) {
- return isTableIsolated(cluster, TableName.META_TABLE_NAME, "Meta");
+ return isTableIsolated(cluster, MetaTableName.getInstance(), "Meta");
}
/**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java
index 40ff0373c36c..a4dabfb2465e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java
@@ -23,6 +23,7 @@
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Connection;
@@ -73,7 +74,7 @@ private static void injectFault() throws ServiceException {
future.completeExceptionally(ProtobufUtil.handleRemoteException(injectedException));
return future;
}
- AsyncTable> table = conn.getTable(TableName.META_TABLE_NAME);
+ AsyncTable> table = conn.getTable(MetaTableName.getInstance());
table. coprocessorService(
AuthenticationProtos.AuthenticationService::newStub,
@@ -102,7 +103,7 @@ static Token obtainToken(Connection conn) throws
try {
injectFault();
- meta = conn.getTable(TableName.META_TABLE_NAME);
+ meta = conn.getTable(MetaTableName.getInstance());
CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
AuthenticationProtos.AuthenticationService.BlockingInterface service =
AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 60175137ad2c..cd341a911ce0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -70,6 +70,7 @@
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerTask;
import org.apache.hadoop.hbase.ServerTaskBuilder;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;
@@ -3325,7 +3326,7 @@ public static String toLockJson(List lockedRes
long regionId = proto.getRegionId();
int defaultReplicaId = org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID;
int replicaId = proto.hasReplicaId() ? proto.getReplicaId() : defaultReplicaId;
- if (tableName.equals(TableName.META_TABLE_NAME) && replicaId == defaultReplicaId) {
+ if (tableName.equals(MetaTableName.getInstance()) && replicaId == defaultReplicaId) {
return RegionInfoBuilder.FIRST_META_REGIONINFO;
}
byte[] startKey = null;
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java
index 40617d78950a..c8e6b2158ce1 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCompactFromClient.java
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.client;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -34,6 +33,7 @@
import java.util.concurrent.atomic.AtomicReference;
import org.apache.hadoop.hbase.ClientMetaTableAccessor;
import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -94,7 +94,7 @@ public void testCompactTableWithNullLocations() throws Exception {
mockedMeta.when(() -> ClientMetaTableAccessor.getTableHRegionLocations(any(AsyncTable.class),
any(TableName.class))).thenReturn(nullLocationsFuture);
AsyncTable metaTable = mock(AsyncTable.class);
- when(connection.getTable(META_TABLE_NAME)).thenReturn(metaTable);
+ when(connection.getTable(MetaTableName.getInstance())).thenReturn(metaTable);
HashedWheelTimer hashedWheelTimer = mock(HashedWheelTimer.class);
AsyncAdminBuilderBase asyncAdminBuilderBase = mock(AsyncAdminBuilderBase.class);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java
index f4afd8bbe1c7..90bb4ccc0630 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/MetaTableName.java
@@ -1,33 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package org.apache.hadoop.hbase;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hbase.thirdparty.com.google.common.base.Strings;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/**
- * Singleton class for managing the META_TABLE_NAME instance.
- * This allows the meta table name to be overridden for testing using reflection.
- */
+/** Lazily resolves and holds the singleton {@code hbase:meta} table name. */
@InterfaceAudience.Public
public class MetaTableName {
private static final Logger LOG = LoggerFactory.getLogger(MetaTableName.class);
-
- /**
- * The singleton instance of the meta table name.
- * This field can be overridden for testing using reflection.
- */
private static volatile TableName instance;
private MetaTableName() {
- // Private constructor to prevent instantiation
}
/**
* Get the singleton instance of the meta table name.
- * Initializes lazily using the default configuration if not already set.
- *
* @return The meta table name instance
*/
public static TableName getInstance() {
@@ -35,7 +40,7 @@ public static TableName getInstance() {
synchronized (MetaTableName.class) {
if (instance == null) {
instance = initializeHbaseMetaTableName(HBaseConfiguration.create());
- LOG.info("Meta table name initialized: {}", instance);
+ LOG.info("Meta table name initialized: {}", instance.getNameAsString());
}
}
}
@@ -49,26 +54,9 @@ public static TableName getInstance() {
* @return The initialized meta table name
*/
private static TableName initializeHbaseMetaTableName(Configuration conf) {
- String suffix_val = conf.get(HConstants.HBASE_META_TABLE_SUFFIX,
- HConstants.HBASE_META_TABLE_SUFFIX_DEFAULT_VALUE);
- LOG.info("Meta table suffix value: {}", suffix_val);
- if (Strings.isNullOrEmpty(suffix_val)) {
- return TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta");
- } else {
- return TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta_" + suffix_val);
- }
- }
-
- /**
- * Get the instance field for reflection-based testing.
- * This method is package-private to allow test classes to access the field.
- *
- * @return The Field object for the instance field
- */
- static java.lang.reflect.Field getInstanceField() throws NoSuchFieldException {
- java.lang.reflect.Field field = MetaTableName.class.getDeclaredField("instance");
- field.setAccessible(true);
- return field;
+ TableName metaTableName = TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta");
+ LOG.info("Resolved meta table name: {}", metaTableName);
+ return metaTableName;
}
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
index 6fdfc1edf001..0b798c505819 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
@@ -84,7 +84,7 @@ public final class TableName implements Comparable {
* future version.
*/
@Deprecated
- public static TableName META_TABLE_NAME;
+ public static TableName META_TABLE_NAME = TableName.valueOf("hbase:meta");
/**
* The Namespace table's name.
@@ -306,7 +306,7 @@ private TableName(ByteBuffer namespace, ByteBuffer qualifier) throws IllegalArgu
}
if (qualifierAsString.equals(OLD_META_STR)) {
throw new IllegalArgumentException(
- OLD_META_STR + " no longer exists. The table has been " + "renamed to " + META_TABLE_NAME);
+ OLD_META_STR + " no longer exists. The table has been " + "renamed to " + MetaTableName.getInstance());
}
if (Bytes.equals(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME, namespace)) {
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
index 53ed8a25ed0e..9bc4a90c8cf5 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hbase.master;
+import org.apache.hadoop.hbase.MetaTableName;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.BaseSource;
import org.apache.yetus.audience.InterfaceAudience;
@@ -49,7 +51,7 @@ public interface MetricsMasterFileSystemSource extends BaseSource {
String SPLIT_SIZE_NAME = "hlogSplitSize";
String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()";
- String META_SPLIT_SIZE_DESC = "Size of hbase:meta WAL files being split";
+ String META_SPLIT_SIZE_DESC = "Size of " + MetaTableName.getInstance() + " WAL files being split";
String SPLIT_TIME_DESC = "Time it takes to finish WAL.splitLog()";
String SPLIT_SIZE_DESC = "Size of WAL files being split";
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index d4ccac901436..09ce98ddc172 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -52,6 +52,7 @@
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.client.ClientInternalHelper;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -256,7 +257,7 @@ public void testSimpleCase() throws Throwable {
@Test
public void testMetaExport() throws Throwable {
String[] args =
- new String[] { TableName.META_TABLE_NAME.getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" };
+ new String[] { MetaTableName.getInstance().getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" };
assertTrue(runExport(args));
}
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
index a115fd17af3f..b493be5c9f06 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
@@ -55,7 +56,7 @@ public class TestStatusResource {
private static final Logger LOG = LoggerFactory.getLogger(TestStatusResource.class);
- private static final byte[] META_REGION_NAME = Bytes.toBytes(TableName.META_TABLE_NAME + ",,1");
+ private static final byte[] META_REGION_NAME = Bytes.toBytes(MetaTableName.getInstance() + ",,1");
private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
private static final HBaseRESTTestingUtility REST_TEST_UTIL = new HBaseRESTTestingUtility();
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
index 8310232890dd..c3761c397c8a 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/model/TestStorageClusterStatusModel.java
@@ -24,6 +24,7 @@
import java.util.Iterator;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.testclassification.RestTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
@@ -89,7 +90,7 @@ protected StorageClusterStatusModel buildTestModel() {
model.addLiveNode("test1", 1245219839331L, 128, 1024).addRegion(Bytes.toBytes("hbase:root,,0"),
1, 1, 0, 0, 0, 1, 1, 2, 1, 1, 1, 1, 1);
model.addLiveNode("test2", 1245239331198L, 512, 1024).addRegion(
- Bytes.toBytes(TableName.META_TABLE_NAME + ",,1246000043724"), 1, 1, 0, 0, 0, 1, 1, 2, 1, 1, 1,
+ Bytes.toBytes(MetaTableName.getInstance() + ",,1246000043724"), 1, 1, 0, 0, 0, 1, 1, 2, 1, 1, 1,
1, 1);
return model;
}
@@ -128,7 +129,7 @@ protected void checkModel(StorageClusterStatusModel model) {
assertEquals(1024, node.getMaxHeapSizeMB());
regions = node.getRegions().iterator();
region = regions.next();
- assertEquals(Bytes.toString(region.getName()), TableName.META_TABLE_NAME + ",,1246000043724");
+ assertEquals(Bytes.toString(region.getName()), MetaTableName.getInstance() + ",,1246000043724");
assertEquals(1, region.getStores());
assertEquals(1, region.getStorefiles());
assertEquals(0, region.getStorefileSizeMB());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java
index 3cac1f319dae..fc2d164e922d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MetaTableMetrics.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
@@ -133,7 +134,7 @@ private String getRegionIdFromOp(Row op) {
}
private boolean isMetaTableOp(ObserverContext extends RegionCoprocessorEnvironment> e) {
- return TableName.META_TABLE_NAME.equals(e.getEnvironment().getRegionInfo().getTable());
+ return MetaTableName.getInstance().equals(e.getEnvironment().getRegionInfo().getTable());
}
private void clientMetricRegisterAndMark() {
@@ -268,7 +269,7 @@ public void start(CoprocessorEnvironment env) throws IOException {
env instanceof RegionCoprocessorEnvironment
&& ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable() != null
&& ((RegionCoprocessorEnvironment) env).getRegionInfo().getTable()
- .equals(TableName.META_TABLE_NAME)
+ .equals(MetaTableName.getInstance())
) {
RegionCoprocessorEnvironment regionCoprocessorEnv = (RegionCoprocessorEnvironment) env;
registry = regionCoprocessorEnv.getMetricRegistryForRegionServer();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java
index 8bf32baada22..f82ac9bd42ec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationBarrierFamilyFormat.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
@@ -192,7 +193,7 @@ public static ReplicationBarrierResult getReplicationBarrierResult(Connection co
.addColumn(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER)
.addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions().setReversed(true)
.setCaching(10);
- try (Table table = conn.getTable(TableName.META_TABLE_NAME);
+ try (Table table = conn.getTable(MetaTableName.getInstance());
ResultScanner scanner = table.getScanner(scan)) {
for (Result result;;) {
result = scanner.next();
@@ -215,7 +216,7 @@ public static ReplicationBarrierResult getReplicationBarrierResult(Connection co
public static long[] getReplicationBarriers(Connection conn, byte[] regionName)
throws IOException {
- try (Table table = conn.getTable(TableName.META_TABLE_NAME)) {
+ try (Table table = conn.getTable(MetaTableName.getInstance())) {
Result result = table.get(new Get(regionName)
.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER)
.readAllVersions());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java
index 57d156ab1c2e..637785c2b62e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java
@@ -35,6 +35,7 @@
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.security.AccessDeniedException;
@@ -544,7 +545,7 @@ public AuthResult permissionGranted(String request, User user, Action permReques
TableName tableName, Map> families) {
// 1. All users need read access to hbase:meta table.
// this is a very common operation, so deal with it quickly.
- if (TableName.META_TABLE_NAME.equals(tableName)) {
+ if (MetaTableName.getInstance().equals(tableName)) {
if (permRequest == Action.READ) {
return AuthResult.allow(request, "All users allowed", user, permRequest, tableName,
families);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
index 4d6f57e22edc..6c206557027e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
@@ -66,6 +66,7 @@
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
@@ -660,21 +661,21 @@ private int getRegionIndex(List> startEndKeys, byte[] key)
private void checkRegionIndexValid(int idx, List> startEndKeys,
TableName tableName) throws IOException {
if (idx < 0) {
- throw new IOException("The first region info for table " + tableName
- + " can't be found in hbase:meta.Please use hbck tool to fix it first.");
+ throw new IOException("The first region info for table " + tableName + " can't be found in "
+ + MetaTableName.getInstance() + ". Please use hbck tool to fix it first.");
} else if (
(idx == startEndKeys.size() - 1)
&& !Bytes.equals(startEndKeys.get(idx).getSecond(), HConstants.EMPTY_BYTE_ARRAY)
) {
- throw new IOException("The last region info for table " + tableName
- + " can't be found in hbase:meta.Please use hbck tool to fix it first.");
+ throw new IOException("The last region info for table " + tableName + " can't be found in "
+ + MetaTableName.getInstance() + ". Please use hbck tool to fix it first.");
} else if (
idx + 1 < startEndKeys.size() && !(Bytes.compareTo(startEndKeys.get(idx).getSecond(),
startEndKeys.get(idx + 1).getFirst()) == 0)
) {
throw new IOException("The endkey of one region for table " + tableName
- + " is not equal to the startkey of the next region in hbase:meta."
- + "Please use hbck tool to fix it first.");
+ + " is not equal to the startkey of the next region in " + MetaTableName.getInstance() + "."
+ + " Please use hbck tool to fix it first.");
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 75bf721ef41e..28cb4bcb69e5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -48,6 +48,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
@@ -147,20 +148,20 @@ public static TableDescriptor tryUpdateAndGetMetaTableDescriptor(Configuration c
FileSystem fs, Path rootdir) throws IOException {
// see if we already have meta descriptor on fs. Write one if not.
Optional> opt = getTableDescriptorFromFs(fs,
- CommonFSUtils.getTableDir(rootdir, TableName.META_TABLE_NAME), false);
+ CommonFSUtils.getTableDir(rootdir, MetaTableName.getInstance()), false);
if (opt.isPresent()) {
return opt.get().getSecond();
}
TableDescriptorBuilder builder = createMetaTableDescriptorBuilder(conf);
TableDescriptor td = StoreFileTrackerFactory.updateWithTrackerConfigs(conf, builder.build());
- LOG.info("Creating new hbase:meta table descriptor {}", td);
+ LOG.info("Creating new {} table descriptor {}", MetaTableName.getInstance(), td);
TableName tableName = td.getTableName();
Path tableDir = CommonFSUtils.getTableDir(rootdir, tableName);
Path p = writeTableDescriptor(fs, td, tableDir, null);
if (p == null) {
- throw new IOException("Failed update hbase:meta table descriptor");
+ throw new IOException("Failed update " + MetaTableName.getInstance() + " table descriptor");
}
- LOG.info("Updated hbase:meta table descriptor to {}", p);
+ LOG.info("Updated {} table descriptor to {}", MetaTableName.getInstance(), p);
return td;
}
@@ -198,7 +199,7 @@ private static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Con
// TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now
// the META table data goes to File mode BC only. Test how that affect the system. If too much,
// we have to rethink about adding back the setCacheDataInL1 for META table CFs.
- return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
+ return TableDescriptorBuilder.newBuilder(MetaTableName.getInstance())
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY)
.setMaxVersions(
conf.getInt(HConstants.HBASE_META_VERSIONS, HConstants.DEFAULT_HBASE_META_VERSIONS))
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 3b446826b775..0b502c3c5ca6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -69,6 +69,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index c3eafa7c11d1..9bfbef4a2536 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -84,6 +84,7 @@
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Admin;
@@ -540,7 +541,7 @@ public void run() {
connection = ConnectionFactory.createConnection(getConf());
admin = connection.getAdmin();
- meta = connection.getTable(TableName.META_TABLE_NAME);
+ meta = connection.getTable(MetaTableName.getInstance());
status = admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS,
Option.MASTER, Option.BACKUP_MASTERS, Option.REGIONS_IN_TRANSITION, Option.HBASE_VERSION));
}
@@ -660,17 +661,19 @@ public int onlineConsistencyRepair() throws IOException, KeeperException, Interr
reportUnknownServers();
// Check if hbase:meta is found only once and in the right place
if (!checkMetaRegion()) {
- String errorMsg = "hbase:meta table is not consistent. ";
+ String errorMsg = MetaTableName.getInstance() + " table is not consistent. ";
if (shouldFixAssignments()) {
- errorMsg += "HBCK will try fixing it. Rerun once hbase:meta is back to consistent state.";
+ errorMsg += "HBCK will try fixing it. Rerun once " + MetaTableName.getInstance() + " is back "
+ + "to consistent state.";
} else {
- errorMsg += "Run HBCK with proper fix options to fix hbase:meta inconsistency.";
+ errorMsg += "Run HBCK with proper fix options to fix " + MetaTableName.getInstance()
+ + " inconsistency.";
}
errors.reportError(errorMsg + " Exiting...");
return -2;
}
// Not going with further consistency check for tables when hbase:meta itself is not consistent.
- LOG.info("Loading regionsinfo from the hbase:meta table");
+ LOG.info("Loading regionsinfo from the {} table", MetaTableName.getInstance());
boolean success = loadMetaEntries();
if (!success) return -1;
@@ -1219,7 +1222,7 @@ private boolean sidelineFile(FileSystem fs, Path hbaseRoot, Path path) throws IO
* TODO -- need to add tests for this.
*/
private void reportEmptyMetaCells() {
- errors.print("Number of empty REGIONINFO_QUALIFIER rows in hbase:meta: "
+ errors.print("Number of empty REGIONINFO_QUALIFIER rows in " + MetaTableName.getInstance() + ": "
+ emptyRegionInfoQualifiers.size());
if (details) {
for (Result r : emptyRegionInfoQualifiers) {
@@ -1371,7 +1374,7 @@ private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName,
*/
public void fixEmptyMetaCells() throws IOException {
if (shouldFixEmptyMetaCells() && !emptyRegionInfoQualifiers.isEmpty()) {
- LOG.info("Trying to fix empty REGIONINFO_QUALIFIER hbase:meta rows.");
+ LOG.info("Trying to fix empty REGIONINFO_QUALIFIER {} rows.", MetaTableName.getInstance());
for (Result region : emptyRegionInfoQualifiers) {
deleteMetaRegion(region.getRow());
errors.getErrorList().remove(ERROR_CODE.EMPTY_META_CELL);
@@ -1574,8 +1577,8 @@ private void loadTableStates() throws IOException {
// Add hbase:meta so this tool keeps working. In hbase2, meta is always enabled though it
// has no entry in the table states. HBCK doesn't work right w/ hbase2 but just do this in
// meantime.
- this.tableStates.put(TableName.META_TABLE_NAME,
- new TableState(TableName.META_TABLE_NAME, TableState.State.ENABLED));
+ this.tableStates.put(MetaTableName.getInstance(),
+ new TableState(MetaTableName.getInstance(), TableState.State.ENABLED));
}
/**
@@ -1604,7 +1607,7 @@ public void loadHdfsRegionDirs() throws IOException, InterruptedException {
TableName tableName = CommonFSUtils.getTableName(path);
if (
(!checkMetaOnly && isTableIncluded(tableName))
- || tableName.equals(TableName.META_TABLE_NAME)
+ || tableName.equals(MetaTableName.getInstance())
) {
tableDirs.add(fs.getFileStatus(path));
}
@@ -1649,7 +1652,7 @@ public void loadHdfsRegionDirs() throws IOException, InterruptedException {
*/
private boolean recordMetaRegion() throws IOException {
List locs;
- try (RegionLocator locator = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
+ try (RegionLocator locator = connection.getRegionLocator(MetaTableName.getInstance())) {
locs = locator.getRegionLocations(HConstants.EMPTY_START_ROW, true);
}
if (locs == null || locs.isEmpty()) {
@@ -2019,9 +2022,11 @@ void closeRegion(HbckRegionInfo hi) throws IOException, InterruptedException {
}
RegionInfo hri = h.getRegion();
if (hri == null) {
- LOG.warn("Unable to close region " + hi.getRegionNameAsString()
- + " because hbase:meta had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":"
- + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier value.");
+ LOG.warn(
+ "Unable to close region " + hi.getRegionNameAsString()
+ + " because {} had invalid or missing " + HConstants.CATALOG_FAMILY_STR + ":"
+ + Bytes.toString(HConstants.REGIONINFO_QUALIFIER) + " qualifier value.",
+ MetaTableName.getInstance());
continue;
}
// close the region -- close files and remove assignment
@@ -2140,8 +2145,9 @@ else if (!inMeta && !inHdfs && !isDeployed) {
assert false : "Entry for region with no data";
} else if (!inMeta && !inHdfs && isDeployed) {
errors.reportError(ERROR_CODE.NOT_IN_META_HDFS,
- "Region " + descriptiveName + ", key=" + key + ", not on HDFS or in hbase:meta but "
- + "deployed on " + Joiner.on(", ").join(hbi.getDeployedOn()));
+ "Region " + descriptiveName + ", key=" + key + ", not on HDFS or in "
+ + MetaTableName.getInstance() + " but deployed on "
+ + Joiner.on(", ").join(hbi.getDeployedOn()));
if (shouldFixAssignments()) {
undeployRegions(hbi);
}
@@ -2155,8 +2161,9 @@ else if (!inMeta && !inHdfs && !isDeployed) {
+ " got merge recently, its file(s) will be cleaned by CatalogJanitor later");
return;
}
- errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED, "Region " + descriptiveName
- + " on HDFS, but not listed in hbase:meta " + "or deployed on any region server");
+ errors.reportError(ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+ "Region " + descriptiveName + " on HDFS, but not listed in " + MetaTableName.getInstance()
+ + " or deployed on any region server");
// restore region consistency of an adopted orphan
if (shouldFixMeta()) {
if (!hbi.isHdfsRegioninfoPresent()) {
@@ -2196,7 +2203,7 @@ else if (!inMeta && !inHdfs && !isDeployed) {
}
}
}
- LOG.info("Patching hbase:meta with .regioninfo: " + hbi.getHdfsHRI());
+ LOG.info("Patching {} with .regioninfo: " + hbi.getHdfsHRI(), MetaTableName.getInstance());
int numReplicas = admin.getDescriptor(hbi.getTableName()).getRegionReplication();
HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(),
admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(),
@@ -2224,7 +2231,8 @@ else if (!inMeta && !inHdfs && !isDeployed) {
return;
}
- LOG.info("Patching hbase:meta with with .regioninfo: " + hbi.getHdfsHRI());
+ LOG.info("Patching {} with with .regioninfo: " + hbi.getHdfsHRI(),
+ MetaTableName.getInstance());
int numReplicas = admin.getDescriptor(hbi.getTableName()).getRegionReplication();
HBaseFsckRepair.fixMetaHoleOnlineAndAddReplicas(getConf(), hbi.getHdfsHRI(),
admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(),
@@ -2301,9 +2309,9 @@ else if (!inMeta && !inHdfs && !isDeployed) {
}
} else if (inMeta && inHdfs && isMultiplyDeployed) {
errors.reportError(ERROR_CODE.MULTI_DEPLOYED,
- "Region " + descriptiveName + " is listed in hbase:meta on region server "
- + hbi.getMetaEntry().regionServer + " but is multiply assigned to region servers "
- + Joiner.on(", ").join(hbi.getDeployedOn()));
+ "Region " + descriptiveName + " is listed in " + MetaTableName.getInstance()
+ + " on region server " + hbi.getMetaEntry().regionServer + " but is multiply assigned"
+ + " to region servers " + Joiner.on(", ").join(hbi.getDeployedOn()));
// If we are trying to fix the errors
if (shouldFixAssignments()) {
errors.print("Trying to fix assignment error...");
@@ -2313,8 +2321,8 @@ else if (!inMeta && !inHdfs && !isDeployed) {
}
} else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META,
- "Region " + descriptiveName + " listed in hbase:meta on region server "
- + hbi.getMetaEntry().regionServer + " but found on region server "
+ "Region " + descriptiveName + " listed in " + MetaTableName.getInstance()
+ + " on region server " + hbi.getMetaEntry().regionServer + " but found on region server "
+ hbi.getDeployedOn().get(0));
// If we are trying to fix the errors
if (shouldFixAssignments()) {
@@ -2599,7 +2607,7 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept
metaRegions.put(value.getReplicaId(), value);
}
}
- int metaReplication = admin.getDescriptor(TableName.META_TABLE_NAME).getRegionReplication();
+ int metaReplication = admin.getDescriptor(MetaTableName.getInstance()).getRegionReplication();
boolean noProblem = true;
// There will be always entries in regionInfoMap corresponding to hbase:meta & its replicas
// Check the deployed servers. It should be exactly one server for each replica.
@@ -2614,11 +2622,12 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept
if (servers.isEmpty()) {
assignMetaReplica(i);
} else if (servers.size() > 1) {
- errors.reportError(ERROR_CODE.MULTI_META_REGION, "hbase:meta, replicaId "
- + metaHbckRegionInfo.getReplicaId() + " is found on more than one region.");
+ errors.reportError(ERROR_CODE.MULTI_META_REGION,
+ MetaTableName.getInstance() + ", replicaId " + metaHbckRegionInfo.getReplicaId()
+ + " is found on more than one region.");
if (shouldFixAssignments()) {
- errors.print("Trying to fix a problem with hbase:meta, replicaId "
- + metaHbckRegionInfo.getReplicaId() + "..");
+ errors.print("Trying to fix a problem with " + MetaTableName.getInstance()
+ + ", replicaId " + metaHbckRegionInfo.getReplicaId() + "..");
setShouldRerun();
// try fix it (treat is a dupe assignment)
HBaseFsckRepair.fixMultiAssignment(connection,
@@ -2631,11 +2640,11 @@ boolean checkMetaRegion() throws IOException, KeeperException, InterruptedExcept
for (Map.Entry entry : metaRegions.entrySet()) {
noProblem = false;
errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
- "hbase:meta replicas are deployed in excess. Configured " + metaReplication + ", deployed "
- + metaRegions.size());
+ MetaTableName.getInstance() + " replicas are deployed in excess. Configured "
+ + metaReplication + ", deployed " + metaRegions.size());
if (shouldFixAssignments()) {
- errors.print(
- "Trying to undeploy excess replica, replicaId: " + entry.getKey() + " of hbase:meta..");
+ errors.print("Trying to undeploy excess replica, replicaId: " + entry.getKey() + " of "
+ + MetaTableName.getInstance() + "..");
setShouldRerun();
unassignMetaReplica(entry.getValue());
}
@@ -2655,9 +2664,9 @@ private void unassignMetaReplica(HbckRegionInfo hi)
private void assignMetaReplica(int replicaId)
throws IOException, KeeperException, InterruptedException {
errors.reportError(ERROR_CODE.NO_META_REGION,
- "hbase:meta, replicaId " + replicaId + " is not found on any region.");
+ MetaTableName.getInstance() + ", replicaId " + replicaId + " is not found on any region.");
if (shouldFixAssignments()) {
- errors.print("Trying to fix a problem with hbase:meta..");
+ errors.print("Trying to fix a problem with " + MetaTableName.getInstance() + "..");
setShouldRerun();
// try to fix it (treat it as unassigned region)
RegionInfo h = RegionReplicaUtil
@@ -2693,7 +2702,7 @@ public boolean visit(Result result) throws IOException {
if (rl == null) {
emptyRegionInfoQualifiers.add(result);
errors.reportError(ERROR_CODE.EMPTY_META_CELL,
- "Empty REGIONINFO_QUALIFIER found in hbase:meta");
+ "Empty REGIONINFO_QUALIFIER found in " + MetaTableName.getInstance());
return true;
}
ServerName sn = null;
@@ -2703,7 +2712,7 @@ public boolean visit(Result result) throws IOException {
) {
emptyRegionInfoQualifiers.add(result);
errors.reportError(ERROR_CODE.EMPTY_META_CELL,
- "Empty REGIONINFO_QUALIFIER found in hbase:meta");
+ "Empty REGIONINFO_QUALIFIER found in " + MetaTableName.getInstance());
return true;
}
RegionInfo hri = rl.getRegionLocation(RegionInfo.DEFAULT_REPLICA_ID).getRegion();
@@ -2731,7 +2740,8 @@ public boolean visit(Result result) throws IOException {
} else if (previous.getMetaEntry() == null) {
previous.setMetaEntry(m);
} else {
- throw new IOException("Two entries in hbase:meta are same " + previous);
+ throw new IOException(
+ "Two entries in " + MetaTableName.getInstance() + " are same " + previous);
}
}
List mergeParents = CatalogFamilyFormat.getMergeRegions(result.rawCells());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index 6ead66c16d9e..8550220f2a5f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncClusterConnection;
@@ -149,7 +150,7 @@ public static void closeRegionSilentlyAndWait(Connection connection, ServerName
public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf, RegionInfo hri,
Collection servers, int numReplicas) throws IOException {
Connection conn = ConnectionFactory.createConnection(conf);
- Table meta = conn.getTable(TableName.META_TABLE_NAME);
+ Table meta = conn.getTable(MetaTableName.getInstance());
Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
if (numReplicas > 1) {
Random rand = ThreadLocalRandom.current();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index c1f98edd75ab..dda03bad7525 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -57,6 +57,7 @@
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.MetaTableName;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
@@ -586,13 +587,13 @@ private void unloadRegions(ServerName server, List regionServers,
// For isolating hbase:meta, it should move explicitly in Ack mode,
// hence the forceMoveRegionByAck = true.
if (!metaSeverName.equals(server)) {
- LOG.info("Region of hbase:meta " + metaRegionInfo.getEncodedName() + " is on server "
- + metaSeverName + " moving to " + server);
+ LOG.info("Region of {} {} is on server {} moving to {}", MetaTableName.getInstance(),
+ metaRegionInfo.getEncodedName(), metaSeverName, server);
submitRegionMovesWhileUnloading(metaSeverName, Collections.singletonList(server),
movedRegions, Collections.singletonList(metaRegionInfo), true);
} else {
- LOG.info("Region of hbase:meta " + metaRegionInfo.getEncodedName() + " already exists"
- + " on server : " + server);
+ LOG.info("Region of {} {} already exists on server: {}", MetaTableName.getInstance(),
+ metaRegionInfo.getEncodedName(), server);
}
isolateRegionInfoList.add(RegionInfoBuilder.FIRST_META_REGIONINFO);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java
index b8f095eb03df..c14cf914b497 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java
@@ -17,8 +17,7 @@
*/
package org.apache.hadoop.hbase.wal;
-import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
-
+import org.apache.hadoop.hbase.MetaTableName;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.HashMap;
@@ -37,6 +36,6 @@
import org.apache.hadoop.hbase.MetaCellComparator;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
@@ -78,7 +78,7 @@ public BoundedRecoveredHFilesOutputSink(WALSplitter walSplitter,
void append(RegionEntryBuffer buffer) throws IOException {
Map> familyCells = new HashMap<>();
Map familySeqIds = new HashMap<>();
- boolean isMetaTable = buffer.tableName.equals(META_TABLE_NAME);
+ boolean isMetaTable = buffer.tableName.equals(MetaTableName.getInstance());
// First iterate all Cells to find which column families are present and to stamp Cell with
// sequence id.
for (WAL.Entry entry : buffer.entryBuffer) {
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp b/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp
index b965241afe2a..baaf6b68b153 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/catalogTables.jsp
@@ -20,6 +20,7 @@
<%@ page contentType="text/html;charset=UTF-8"
import="java.util.*"
+ import="org.apache.hadoop.hbase.MetaTableName"
import="org.apache.hadoop.hbase.NamespaceDescriptor"
import="org.apache.hadoop.hbase.TableName"
import="org.apache.hadoop.hbase.master.HMaster"
@@ -56,7 +57,7 @@
| <%= frags.get(tableName.getNameAsString()) != null ? frags.get(tableName.getNameAsString()) + "%" : "n/a" %> |
<% } %>
<% String description = null;
- if (tableName.equals(TableName.META_TABLE_NAME)){
+ if (tableName.equals(MetaTableName.getInstance())){
description = "The hbase:meta table holds references to all User Table regions.";
} else if (tableName.equals(CanaryTool.DEFAULT_WRITE_TABLE_NAME)){
description = "The hbase:canary table is used to sniff the write availability of"
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index d88d968e199f..d838cd3c50bd 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -41,6 +41,7 @@
+ import="org.apache.hadoop.hbase.MetaTableName"
import="org.apache.hadoop.hbase.ServerMetrics"
import="org.apache.hadoop.hbase.ServerName"
import="org.apache.hadoop.hbase.Size"
import="org.apache.hadoop.hbase.TableName"
import="org.apache.hadoop.hbase.client.AsyncAdmin"
import="org.apache.hadoop.hbase.client.AsyncConnection"
@@ -196,7 +197,7 @@
boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
boolean readOnly = !InfoServer.canUserModifyUI(request, getServletContext(), conf);
int numMetaReplicas =
- master.getTableDescriptors().get(TableName.META_TABLE_NAME).getRegionReplication();
+ master.getTableDescriptors().get(MetaTableName.getInstance()).getRegionReplication();
Map frags = null;
if (showFragmentation) {
frags = FSUtils.getTableFragmentation(master);
@@ -317,7 +318,7 @@
<% //Meta table.
- if(fqtn.equals(TableName.META_TABLE_NAME.getNameAsString())) { %>
+ if(fqtn.equals(MetaTableName.getInstance().getNameAsString())) { %>
Table Regions
@@ -653,7 +654,7 @@