diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java index 2d7aeb646db3..5f48bfc39ade 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.util.BackupUtils; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; @@ -156,7 +157,7 @@ private List getLogFilesForNewBackup(Map olderTimestamps, LOG.debug("currentLogFile: " + log.getPath().toString()); if (AbstractFSWALProvider.isMetaFile(log.getPath())) { if (LOG.isDebugEnabled()) { - LOG.debug("Skip hbase:meta log file: " + log.getPath().getName()); + LOG.debug("Skip {} log file: {}", TableName.META_TABLE_NAME, log.getPath().getName()); } continue; } diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java index 971e80e2f83a..65ddad050972 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/master/BackupLogCleaner.java @@ -204,6 +204,6 @@ private static boolean isHMasterWAL(Path path) { String fn = path.getName(); return fn.startsWith(WALProcedureStore.LOG_PREFIX) || fn.endsWith(MasterRegionFactory.ARCHIVED_WAL_SUFFIX) - || path.toString().contains("/%s/".formatted(MasterRegionFactory.MASTER_STORE_DIR)); + || path.toString().contains("/%s/".formatted(MasterRegionFactory.MASTER_REGION_DIR_NAME)); } } diff --git 
a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java index 02c18c73bfb5..74d49ec14c0b 100644 --- a/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java +++ b/hbase-balancer/src/main/java/org/apache/hadoop/hbase/master/SnapshotOfRegionAssignmentFromMeta.java @@ -170,7 +170,8 @@ private void processMetaRecord(Result result) throws IOException { * Initialize the region assignment snapshot by scanning the hbase:meta table */ public void initialize() throws IOException { - LOG.info("Start to scan the hbase:meta for the current region assignment " + "snappshot"); + LOG.info("Start to scan {} for the current region assignment snapshot", + TableName.META_TABLE_NAME); // Scan hbase:meta to pick up user regions try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME); ResultScanner scanner = metaTable.getScanner(HConstants.CATALOG_FAMILY)) { @@ -187,7 +188,8 @@ public void initialize() throws IOException { } } } - LOG.info("Finished to scan the hbase:meta for the current region assignment" + "snapshot"); + LOG.info("Finished scanning {} for the current region assignment snapshot", + TableName.META_TABLE_NAME); } private void addRegion(RegionInfo regionInfo) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ActiveClusterSuffix.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ActiveClusterSuffix.java new file mode 100644 index 000000000000..2864c456755f --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ActiveClusterSuffix.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; +import java.util.Objects; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.base.Strings; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ActiveClusterSuffixProtos; + +/** + * The read-replica cluster id for this cluster. It is serialized to the filesystem and up into + * zookeeper. This is a container for the id. Also knows how to serialize and deserialize the + * cluster id. 
+ */ +@InterfaceAudience.Private +public class ActiveClusterSuffix implements ClusterIdFile { + private final String cluster_id; + private final String suffix; + + public static class Parser implements ClusterIdFileParser { + + @Override + public String getFileName() { + return HConstants.ACTIVE_CLUSTER_SUFFIX_FILE_NAME; + } + + /** + * Parse the serialized representation of the {@link ActiveClusterSuffix} + * @param bytes A pb serialized {@link ActiveClusterSuffix} instance with pb magic prefix + * @return An instance of {@link ActiveClusterSuffix} made from bytes + * @see #toByteArray() + */ + @Override + public ActiveClusterSuffix parseFrom(byte[] bytes) throws DeserializationException { + if (ProtobufUtil.isPBMagicPrefix(bytes)) { + int pblen = ProtobufUtil.lengthOfPBMagic(); + ActiveClusterSuffixProtos.ActiveClusterSuffix.Builder builder = + ActiveClusterSuffixProtos.ActiveClusterSuffix.newBuilder(); + ActiveClusterSuffixProtos.ActiveClusterSuffix cs = null; + try { + ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen); + cs = builder.build(); + } catch (IOException e) { + throw new DeserializationException(e); + } + return convert(cs); + } else { + // Presume it was written out this way, the old way. 
+ return new ActiveClusterSuffix(Bytes.toString(bytes)); + } + } + + @Override + public ActiveClusterSuffix readString(String input) { + return new ActiveClusterSuffix(input); + } + } + + public ActiveClusterSuffix(final String ci, final String suffix) { + this.cluster_id = ci; + this.suffix = suffix; + } + + public ActiveClusterSuffix(final String input) { + String[] parts = input.split(":", 2); + this.cluster_id = parts[0]; + if (parts.length > 1) { + this.suffix = parts[1]; + } else { + this.suffix = ""; + } + } + + public static ActiveClusterSuffix parseFrom(byte[] bytes) throws DeserializationException { + return new Parser().parseFrom(bytes); + } + + public static ActiveClusterSuffix fromConfig(Configuration conf, ClusterId clusterId) { + return new ActiveClusterSuffix(clusterId.toString(), conf + .get(HConstants.HBASE_META_TABLE_SUFFIX, HConstants.HBASE_META_TABLE_SUFFIX_DEFAULT_VALUE)); + } + + /** Returns The active cluster suffix serialized using pb w/ pb magic prefix */ + public byte[] toByteArray() { + return ProtobufUtil.prependPBMagic(convert().toByteArray()); + } + + /** Returns A pb instance to represent this instance. */ + public ActiveClusterSuffixProtos.ActiveClusterSuffix convert() { + return ActiveClusterSuffixProtos.ActiveClusterSuffix.newBuilder().setClusterId(cluster_id) + .setSuffix(suffix).build(); + } + + /** Returns A {@link ActiveClusterSuffix} made from the passed in cs */ + public static ActiveClusterSuffix + convert(final ActiveClusterSuffixProtos.ActiveClusterSuffix cs) { + return new ActiveClusterSuffix(cs.getClusterId(), cs.getSuffix()); + } + + /** + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + return String.format("%s:%s", this.cluster_id, + Strings.isNullOrEmpty(this.suffix) ? 
"" : this.suffix); + } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) return false; + ActiveClusterSuffix that = (ActiveClusterSuffix) o; + return Objects.equals(cluster_id, that.cluster_id) && Objects.equals(suffix, that.suffix); + } + + @Override + public int hashCode() { + return Objects.hash(cluster_id, suffix); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java index 67438677dadd..19a2e95dbd8d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java @@ -31,9 +31,47 @@ * is a container for the id. Also knows how to serialize and deserialize the cluster id. */ @InterfaceAudience.Private -public class ClusterId { +public class ClusterId implements ClusterIdFile { private final String id; + public static class Parser implements ClusterIdFileParser { + + @Override + public String getFileName() { + return HConstants.CLUSTER_ID_FILE_NAME; + } + + /** + * Parse the serialized representation of the {@link ClusterId} + * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix + * @return An instance of {@link ClusterId} made from bytes + * @see #toByteArray() + */ + @Override + public ClusterId parseFrom(byte[] bytes) throws DeserializationException { + if (ProtobufUtil.isPBMagicPrefix(bytes)) { + int pblen = ProtobufUtil.lengthOfPBMagic(); + ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder(); + ClusterIdProtos.ClusterId cid = null; + try { + ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen); + cid = builder.build(); + } catch (IOException e) { + throw new DeserializationException(e); + } + return convert(cid); + } else { + // Presume it was written out this way, the old way. 
+ return new ClusterId(Bytes.toString(bytes)); + } + } + + @Override + public ClusterId readString(String input) { + return new ClusterId(input); + } + } + /** * New ClusterID. Generates a uniqueid. */ @@ -50,30 +88,6 @@ public byte[] toByteArray() { return ProtobufUtil.prependPBMagic(convert().toByteArray()); } - /** - * Parse the serialized representation of the {@link ClusterId} - * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix - * @return An instance of {@link ClusterId} made from bytes - * @see #toByteArray() - */ - public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException { - if (ProtobufUtil.isPBMagicPrefix(bytes)) { - int pblen = ProtobufUtil.lengthOfPBMagic(); - ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder(); - ClusterIdProtos.ClusterId cid = null; - try { - ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen); - cid = builder.build(); - } catch (IOException e) { - throw new DeserializationException(e); - } - return convert(cid); - } else { - // Presume it was written out this way, the old way. - return new ClusterId(Bytes.toString(bytes)); - } - } - /** Returns A pb instance to represent this instance. */ public ClusterIdProtos.ClusterId convert() { ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterIdFile.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterIdFile.java new file mode 100644 index 000000000000..8b3aec7d552f --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterIdFile.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Represents a cluster identification file on the master file system. e.g. Cluster ID = hbase.id + * Active read-replica cluster ID = active.cluster.suffix.id + */ +@InterfaceAudience.Private +public interface ClusterIdFile { + + /** + * Return file contents in a byte array. + */ + byte[] toByteArray(); + +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterIdFileParser.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterIdFileParser.java new file mode 100644 index 000000000000..39f34bdac94b --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterIdFileParser.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * Generic parser interface for Cluster Id files. + * @see ClusterIdFile + */ +@InterfaceAudience.Private +public interface ClusterIdFileParser { + + /** + * Get default file name of cluster id file. + */ + String getFileName(); + + /** + * Parse cluster id data from byte representation. + * @param bytes the protobuf data + * @return the cluster id data object + */ + T parseFrom(final byte[] bytes) throws DeserializationException; + + /** + * Parser cluster id data from String representation. 
+ * @param input the input string + * @return the cluster id data object + */ + T readString(String input); +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 65b3abcd413c..7370983d01a8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -2703,6 +2703,34 @@ List getLogEntries(Set serverNames, String logType, Server */ List getCachedFilesList(ServerName serverName) throws IOException; + /** + * Perform hbase:meta table refresh + */ + Long refreshMeta() throws IOException; + + /** + * Refresh HFiles for the table + * @param tableName table to refresh HFiles for + * @return ID of the procedure started for refreshing HFiles + * @throws IOException if a remote or network exception occurs + */ + Long refreshHFiles(final TableName tableName) throws IOException; + + /** + * Refresh HFiles for all the tables under given namespace + * @param namespace Namespace for which we should call refresh HFiles for all tables under it + * @return ID of the procedure started for refreshing HFiles + * @throws IOException if a remote or network exception occurs + */ + Long refreshHFiles(final String namespace) throws IOException; + + /** + * Refresh HFiles for all the tables + * @return ID of the procedure started for refreshing HFiles + * @throws IOException if a remote or network exception occurs + */ + Long refreshHFiles() throws IOException; + @InterfaceAudience.Private void restoreBackupSystemTable(String snapshotName) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index 7117fd4fd33f..ca0ffb67329f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -1153,6 +1153,26 @@ public List getCachedFilesList(ServerName serverName) throws IOException return get(admin.getCachedFilesList(serverName)); } + @Override + public Long refreshMeta() throws IOException { + return get(admin.refreshMeta()); + } + + @Override + public Long refreshHFiles(final TableName tableName) throws IOException { + return get(admin.refreshHFiles(tableName)); + } + + @Override + public Long refreshHFiles(final String namespace) throws IOException { + return get(admin.refreshHFiles(namespace)); + } + + @Override + public Long refreshHFiles() throws IOException { + return get(admin.refreshHFiles()); + } + @Override public void restoreBackupSystemTable(String snapshotName) throws IOException { get(admin.restoreBackupSystemTable(snapshotName)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 56211cedc493..114d103ce03c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1890,6 +1890,26 @@ CompletableFuture> getLogEntries(Set serverNames, Str */ CompletableFuture> getCachedFilesList(ServerName serverName); + /** + * Perform hbase:meta table refresh + */ + CompletableFuture refreshMeta(); + + /** + * Refresh HFiles for the table + */ + CompletableFuture refreshHFiles(final TableName tableName); + + /** + * Refresh HFiles for all the tables under given namespace + */ + CompletableFuture refreshHFiles(final String namespace); + + /** + * Refresh HFiles for all the tables + */ + CompletableFuture refreshHFiles(); + @InterfaceAudience.Private CompletableFuture restoreBackupSystemTable(String snapshotName); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index 8132b184809c..4cf02d91f0e9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -1021,6 +1021,26 @@ public CompletableFuture> getCachedFilesList(ServerName serverName) return wrap(rawAdmin.getCachedFilesList(serverName)); } + @Override + public CompletableFuture refreshMeta() { + return wrap(rawAdmin.refreshMeta()); + } + + @Override + public CompletableFuture refreshHFiles(final TableName tableName) { + return wrap(rawAdmin.refreshHFiles(tableName)); + } + + @Override + public CompletableFuture refreshHFiles(final String namespace) { + return wrap(rawAdmin.refreshHFiles(namespace)); + } + + @Override + public CompletableFuture refreshHFiles() { + return wrap(rawAdmin.refreshHFiles()); + } + @Override public CompletableFuture restoreBackupSystemTable(String snapshotName) { return wrap(rawAdmin.restoreBackupSystemTable(snapshotName)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index ea51d27b99a4..58409251cef3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -263,6 +263,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RecommissionRegionServerResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshHFilesRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshHFilesResponse; +import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshMetaRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RefreshMetaResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ReopenTableRegionsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ReopenTableRegionsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest; @@ -4697,4 +4701,63 @@ MasterProtos.RestoreBackupSystemTableResponse> procedureCall(request, MasterProtos.RestoreBackupSystemTableResponse::getProcId, new RestoreBackupSystemTableProcedureBiConsumer()); } + + private CompletableFuture internalRefershHFiles(RefreshHFilesRequest request) { + return this. newMasterCaller() + .action((controller, stub) -> this. call( + controller, stub, request, MasterService.Interface::refreshHFiles, + RefreshHFilesResponse::getProcId)) + .call(); + } + + @Override + public CompletableFuture refreshMeta() { + RefreshMetaRequest.Builder request = RefreshMetaRequest.newBuilder(); + request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce()); + return this. newMasterCaller() + .action((controller, stub) -> this. 
call( + controller, stub, request.build(), MasterService.Interface::refreshMeta, + RefreshMetaResponse::getProcId)) + .call(); + } + + @Override + public CompletableFuture refreshHFiles(final TableName tableName) { + if (tableName.isSystemTable()) { + LOG.warn("Refreshing HFiles for system table {} is not allowed", tableName.getNameAsString()); + throw new IllegalArgumentException( + "Not allowed to refresh HFiles for system table '" + tableName.getNameAsString() + "'"); + } + // Request builder + RefreshHFilesRequest.Builder request = RefreshHFilesRequest.newBuilder(); + request.setTableName(ProtobufUtil.toProtoTableName(tableName)); + request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce()); + return internalRefershHFiles(request.build()); + } + + @Override + public CompletableFuture refreshHFiles(final String namespace) { + if ( + namespace.equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR) + || namespace.equals(NamespaceDescriptor.BACKUP_NAMESPACE_NAME_STR) + ) { + LOG.warn("Refreshing HFiles for reserve namespace {} is not allowed", namespace); + throw new IllegalArgumentException( + "Not allowed to refresh HFiles for reserve namespace '" + namespace + "'"); + } + // Request builder + RefreshHFilesRequest.Builder request = RefreshHFilesRequest.newBuilder(); + request.setNamespace(namespace); + request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce()); + return internalRefershHFiles(request.build()); + } + + @Override + public CompletableFuture refreshHFiles() { + // Request builder + RefreshHFilesRequest.Builder request = RefreshHFilesRequest.newBuilder(); + // Set nonce + request.setNonceGroup(ng.getNonceGroup()).setNonce(ng.newNonce()); + return internalRefershHFiles(request.build()); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java index 10c554e26f79..82b8711b7762 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java @@ -431,7 +431,7 @@ static byte[] toByteArray(RegionInfo ri) { */ static String prettyPrint(final String encodedRegionName) { if (encodedRegionName.equals("1028785192")) { - return encodedRegionName + "/hbase:meta"; + return encodedRegionName + "/" + TableName.META_TABLE_NAME; } return encodedRegionName; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java index d31c0090f568..40ecc7694cab 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZKConnectionRegistry.java @@ -117,7 +117,7 @@ private static String getClusterId(byte[] data) throws DeserializationException return null; } data = removeMetaData(data); - return ClusterId.parseFrom(data).toString(); + return new ClusterId.Parser().parseFrom(data).toString(); } @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 9af711e7edfd..4ed6d3dc9d58 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -1665,6 +1665,31 @@ public enum OperationStatusCode { */ public final static boolean REJECT_DECOMMISSIONED_HOSTS_DEFAULT = false; + /** + * Adds a suffix to the meta table name: value=’test’ -> ‘hbase:meta_test’ Added in HBASE-XXXXX to + * support having multiple hbase:meta tables (with distinct names )to enable storage sharing by + * more than one clusters. 
+ */ + public final static String HBASE_META_TABLE_SUFFIX = "hbase.meta.table.suffix"; + + /** + * Default value of {@link #HBASE_META_TABLE_SUFFIX} + */ + public final static String HBASE_META_TABLE_SUFFIX_DEFAULT_VALUE = ""; + + /** + * Should HBase only serve Read Requests + */ + public final static String HBASE_GLOBAL_READONLY_ENABLED_KEY = "hbase.global.readonly.enabled"; + + /** + * Default value of {@link #HBASE_GLOBAL_READONLY_ENABLED_KEY} + */ + public final static boolean HBASE_GLOBAL_READONLY_ENABLED_DEFAULT = false; + + /** name of the file having active cluster suffix */ + public static final String ACTIVE_CLUSTER_SUFFIX_FILE_NAME = "active.cluster.suffix.id"; + private HConstants() { // Can't be instantiated with this ctor. } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java index b6d854c13784..55df897a12c9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java @@ -17,16 +17,21 @@ */ package org.apache.hadoop.hbase; +import com.google.errorprone.annotations.RestrictedApi; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Set; import java.util.concurrent.CopyOnWriteArraySet; import org.apache.commons.lang3.ArrayUtils; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; +import org.apache.hbase.thirdparty.com.google.common.base.Strings; /** * Immutable POJO class for representing a table name. 
Which is of the form: <table @@ -44,6 +49,7 @@ */ @InterfaceAudience.Public public final class TableName implements Comparable { + private static final Logger LOG = LoggerFactory.getLogger(TableName.class); /** See {@link #createTableNameIfNecessary(ByteBuffer, ByteBuffer)} */ private static final Set tableCache = new CopyOnWriteArraySet<>(); @@ -64,10 +70,42 @@ public final class TableName implements Comparable { // with NAMESPACE_DELIM as delimiter public static final String VALID_USER_TABLE_REGEX = "(?:(?:(?:" + VALID_NAMESPACE_REGEX + "\\" + NAMESPACE_DELIM + ")?)" + "(?:" + VALID_TABLE_QUALIFIER_REGEX + "))"; + public static final String VALID_META_TABLE_SUFFIX_REGEX = "[a-zA-Z0-9]+"; - /** The hbase:meta table's name. */ - public static final TableName META_TABLE_NAME = - valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta"); + /** + * The name of hbase meta table could either be hbase:meta_xxx or 'hbase:meta' otherwise. Config + * hbase.meta.table.suffix will govern the decision of adding suffix to the habase:meta + */ + public static final TableName META_TABLE_NAME; + static { + Configuration conf = HBaseConfiguration.create(); + META_TABLE_NAME = initializeHbaseMetaTableName(conf); + LOG.info("Meta table name: {}", META_TABLE_NAME); + } + + /* Visible for testing only */ + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") + public static TableName getDefaultNameOfMetaForReplica() { + return valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta"); + } + + public static TableName initializeHbaseMetaTableName(Configuration conf) { + String suffix_val = conf.get(HConstants.HBASE_META_TABLE_SUFFIX, + HConstants.HBASE_META_TABLE_SUFFIX_DEFAULT_VALUE); + LOG.debug("[Read-replica feature] suffix value: {}", + (suffix_val == null || suffix_val.isEmpty()) ? 
"" : suffix_val); + if (Strings.isNullOrEmpty(suffix_val)) { + return valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta"); + } else { + if (!suffix_val.matches(VALID_META_TABLE_SUFFIX_REGEX)) { + throw new IllegalArgumentException("Invalid value '" + suffix_val + "' for config '" + + HConstants.HBASE_META_TABLE_SUFFIX + "'. Suffix must only contain ASCII letters and " + + "digits matching: " + VALID_META_TABLE_SUFFIX_REGEX); + } + return valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta_" + suffix_val); + } + } /** * The Namespace table's name. diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestTableName.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestTableName.java index d9281d8953e8..f145842a23d1 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestTableName.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestTableName.java @@ -25,6 +25,7 @@ import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -164,4 +165,27 @@ private TableName validateNames(TableName expected, Names names) { assertArrayEquals(expected.getNamespace(), names.nsb); return expected; } + + @Test + public void testValidMetaTableSuffix() { + String[] validSuffixes = { "REPL1", "123", "123abc" }; + for (String suffix : validSuffixes) { + Configuration conf = HBaseConfiguration.create(); + conf.set(HConstants.HBASE_META_TABLE_SUFFIX, suffix); + TableName metaTableName = TableName.initializeHbaseMetaTableName(conf); + assertEquals("hbase:meta_" + suffix, metaTableName.getNameAsString()); + } + } + + @Test + public void testInvalidMetaTableSuffix() { + String[] invalidSuffixes = { "test_1", "test-1", "test.1", "test 1", "_test", "-test", ".test", + "has!special", "has:colon", " " }; + for 
(String suffix : invalidSuffixes) { + Configuration conf = HBaseConfiguration.create(); + conf.set(HConstants.HBASE_META_TABLE_SUFFIX, suffix); + assertThrows("Expected IllegalArgumentException for suffix: " + suffix, + IllegalArgumentException.class, () -> TableName.initializeHbaseMetaTableName(conf)); + } + } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java index 53ed8a25ed0e..579171e1c3d7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.master; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; @@ -49,7 +50,7 @@ public interface MetricsMasterFileSystemSource extends BaseSource { String SPLIT_SIZE_NAME = "hlogSplitSize"; String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()"; - String META_SPLIT_SIZE_DESC = "Size of hbase:meta WAL files being split"; + String META_SPLIT_SIZE_DESC = "Size of " + TableName.META_TABLE_NAME + " WAL files being split"; String SPLIT_TIME_DESC = "Time it takes to finish WAL.splitLog()"; String SPLIT_SIZE_DESC = "Size of WAL files being split"; diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ActiveClusterSuffix.proto b/hbase-protocol-shaded/src/main/protobuf/server/ActiveClusterSuffix.proto new file mode 100644 index 000000000000..7f25a040ad03 --- /dev/null +++ b/hbase-protocol-shaded/src/main/protobuf/server/ActiveClusterSuffix.proto @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +syntax = "proto2"; +// This file contains protocol buffers that are shared throughout HBase +package hbase.pb; + +option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated"; +option java_outer_classname = "ActiveClusterSuffixProtos"; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +/** + * Content of the '/hbase/active_cluster_suffix.id' file to indicate the active cluster. 
+ */ +message ActiveClusterSuffix { + // This is the active cluster id set by the user in the config, as a String + required string cluster_id = 1; + + // This is the active cluster suffix set by the user in the config, as a String + required string suffix = 2; +} diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index f475d26060d0..c774a93605ab 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -807,6 +807,17 @@ message ModifyColumnStoreFileTrackerResponse { optional uint64 proc_id = 1; } +message RefreshHFilesRequest { + optional TableName table_name = 1; + optional string namespace = 2; + optional uint64 nonce_group = 3 [default = 0]; + optional uint64 nonce = 4 [default = 0]; +} + +message RefreshHFilesResponse { + optional uint64 proc_id = 1; +} + message FlushMasterStoreRequest {} message FlushMasterStoreResponse {} @@ -819,6 +830,14 @@ message RollAllWALWritersResponse { optional uint64 proc_id = 1; } +message RefreshMetaRequest { + optional uint64 nonce_group = 1 [default = 0]; + optional uint64 nonce = 2 [default = 0]; +} +message RefreshMetaResponse { + optional uint64 proc_id = 1; +} + service MasterService { /** Used by the client to get the number of regions that have received the updated schema */ rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest) @@ -1303,6 +1322,12 @@ service MasterService { rpc rollAllWALWriters(RollAllWALWritersRequest) returns(RollAllWALWritersResponse); + + rpc RefreshMeta(RefreshMetaRequest) + returns(RefreshMetaResponse); + + rpc RefreshHFiles(RefreshHFilesRequest) + returns(RefreshHFilesResponse); } // HBCK Service definitions. 
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto index 7e6c6c8e2fc7..56086aed29e3 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/MasterProcedure.proto @@ -864,3 +864,34 @@ message LogRollRemoteProcedureResult { optional ServerName server_name = 1; optional uint64 last_highest_wal_filenum = 2; } + +enum RefreshMetaState { + REFRESH_META_INIT = 1; + REFRESH_META_SCAN_STORAGE = 2; + REFRESH_META_PREPARE = 3; + REFRESH_META_APPLY = 4; + REFRESH_META_FOLLOWUP = 5; + REFRESH_META_FINISH = 6; +} + +message RefreshMetaStateData { +} + +enum RefreshHFilesTableProcedureState { + REFRESH_HFILES_PREPARE = 1; + REFRESH_HFILES_REFRESH_REGION = 2; + REFRESH_HFILES_FINISH = 3; +} + +message RefreshHFilesTableProcedureStateData { + optional TableName table_name = 1; + optional string namespace_name = 2; +} + +message RefreshHFilesRegionProcedureStateData { + required RegionInfo region = 1; +} + +message RefreshHFilesRegionParameter { + required RegionInfo region = 1; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 05b049e27dbc..92d6a18486a5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -753,7 +753,7 @@ private static void deleteFromMetaTable(final Connection connection, final Delet * @param connection connection we're using * @param deletes Deletes to add to hbase:meta This list should support #remove. 
*/ - private static void deleteFromMetaTable(final Connection connection, final List deletes) + public static void deleteFromMetaTable(final Connection connection, final List deletes) throws IOException { try (Table t = getMetaHTable(connection)) { debugLogMutations(deletes); @@ -859,7 +859,7 @@ public static void addRegionsToMeta(Connection connection, List regi private static void updateTableState(Connection connection, TableState state) throws IOException { Put put = makePutFromTableState(state, EnvironmentEdgeManager.currentTime()); putToMetaTable(connection, put); - LOG.info("Updated {} in hbase:meta", state); + LOG.info("Updated {} in {}", state, TableName.META_TABLE_NAME); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java index d22e46383d30..32594ffce489 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java @@ -78,4 +78,11 @@ default void update(TableDescriptor htd) throws IOException { /** Returns Instance of table descriptor or null if none found. */ TableDescriptor remove(TableName tablename) throws IOException; + + /** + * Invalidates the table descriptor cache. 
+ */ + default void invalidateTableDescriptorCache() { + // do nothing by default + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index 3d5897c0a056..625ac1dc5842 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -36,6 +36,8 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.CoprocessorClassLoader; @@ -130,6 +132,39 @@ public Set getCoprocessorClassNames() { return returnValue; } + /** + * Used to help make the relevant loaded coprocessors dynamically configurable by registering them + * to the {@link ConfigurationManager}. Coprocessors are considered "relevant" if they implement + * the {@link ConfigurationObserver} interface. + * @param configurationManager the ConfigurationManager the coprocessors get registered to + */ + public void registerConfigurationObservers(ConfigurationManager configurationManager) { + Coprocessor foundCp; + Set coprocessors = this.getCoprocessors(); + for (String cp : coprocessors) { + foundCp = this.findCoprocessor(cp); + if (foundCp instanceof ConfigurationObserver) { + configurationManager.registerObserver((ConfigurationObserver) foundCp); + } + } + } + + /** + * Deregisters relevant coprocessors from the {@link ConfigurationManager}. Coprocessors are + * considered "relevant" if they implement the {@link ConfigurationObserver} interface. 
+ * @param configurationManager the ConfigurationManager the coprocessors get deregistered from + */ + public void deregisterConfigurationObservers(ConfigurationManager configurationManager) { + Coprocessor foundCp; + Set coprocessors = this.getCoprocessors(); + for (String cp : coprocessors) { + foundCp = this.findCoprocessor(cp); + if (foundCp instanceof ConfigurationObserver) { + configurationManager.deregisterObserver((ConfigurationObserver) foundCp); + } + } + } + /** * Load system coprocessors once only. Read the class names from configuration. Called by * constructor. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorReloadTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorReloadTask.java new file mode 100644 index 000000000000..0b826de333d6 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorReloadTask.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.coprocessor; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; + +/** + * This interface is used to perform whatever task is necessary for reloading coprocessors on + * HMaster, HRegionServer, and HRegion. Since the steps required to reload coprocessors vary for + * each of these types, this interface helps with code flexibility by allowing a lambda function to + * be provided for the {@link #reload(Configuration) reload()} method.
+ *
+ * See {@link org.apache.hadoop.hbase.util.CoprocessorConfigurationUtil#maybeUpdateCoprocessors + * CoprocessorConfigurationUtil.maybeUpdateCoprocessors()} and its usage in + * {@link org.apache.hadoop.hbase.conf.ConfigurationObserver#onConfigurationChange + * onConfigurationChange()} with HMaster, HRegionServer, and HRegion for an idea of how this + * interface is helpful. + */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +@FunctionalInterface +public interface CoprocessorReloadTask { + void reload(Configuration conf); +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java index 61c983468876..b6950f4c2e85 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterCoprocessorEnvironment.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -88,4 +89,6 @@ public interface MasterCoprocessorEnvironment extends CoprocessorEnvironment * RS_LOG_ROLL */ - RS_LOG_ROLL(91, ExecutorType.RS_LOG_ROLL); + RS_LOG_ROLL(91, ExecutorType.RS_LOG_ROLL), + + /** + * RS refresh hfiles for a region.
+ * RS_REFRESH_HFILES + */ + RS_REFRESH_HFILES(92, ExecutorType.RS_REFRESH_HFILES); private final int code; private final ExecutorType executor; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java index 668cd701c0d9..e2d357fbee61 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java @@ -57,7 +57,8 @@ public enum ExecutorType { RS_SNAPSHOT_OPERATIONS(36), RS_FLUSH_OPERATIONS(37), RS_RELOAD_QUOTAS_OPERATIONS(38), - RS_LOG_ROLL(39); + RS_LOG_ROLL(39), + RS_REFRESH_HFILES(40); ExecutorType(int value) { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java index 67d8ef80ce69..9cb2138642c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CachedClusterId.java @@ -102,7 +102,7 @@ private boolean attemptFetch() { // the waiting threads. 
try { cacheMisses.incrementAndGet(); - setClusterId(FSUtils.getClusterId(fs, rootDir)); + setClusterId(FSUtils.getClusterIdFile(fs, rootDir, new ClusterId.Parser())); } catch (IOException e) { LOG.warn("Error fetching cluster ID", e); } finally { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index a0f84c5672c3..8fd133f64f68 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.master; import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK; +import static org.apache.hadoop.hbase.HConstants.HBASE_GLOBAL_READONLY_ENABLED_DEFAULT; +import static org.apache.hadoop.hbase.HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY; import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS; import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; import static org.apache.hadoop.hbase.master.cleaner.HFileCleaner.CUSTOM_POOL_SIZE; @@ -84,6 +86,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.NamespaceNotFoundException; import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.PleaseRestartMasterException; import org.apache.hadoop.hbase.RegionMetrics; @@ -170,6 +173,8 @@ import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher; +import org.apache.hadoop.hbase.master.procedure.RefreshHFilesTableProcedure; +import org.apache.hadoop.hbase.master.procedure.RefreshMetaProcedure; import 
org.apache.hadoop.hbase.master.procedure.ReloadQuotasProcedure; import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure; import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; @@ -248,10 +253,12 @@ import org.apache.hadoop.hbase.security.SecurityConstants; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.UserProvider; +import org.apache.hadoop.hbase.security.access.AbstractReadOnlyController; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.ConfigurationUtil; import org.apache.hadoop.hbase.util.CoprocessorConfigurationUtil; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; @@ -493,6 +500,8 @@ public class HMaster extends HBaseServerBase implements Maste public static final String WARMUP_BEFORE_MOVE = "hbase.master.warmup.before.move"; private static final boolean DEFAULT_WARMUP_BEFORE_MOVE = true; + private volatile boolean isGlobalReadOnlyEnabled; + /** * Use RSProcedureDispatcher instance to initiate master -> rs remote procedure execution. Use * this config to extend RSProcedureDispatcher (mainly for testing purpose). 
@@ -578,6 +587,8 @@ public HMaster(final Configuration conf) throws IOException { getChoreService().scheduleChore(clusterStatusPublisherChore); } } + this.isGlobalReadOnlyEnabled = + conf.getBoolean(HBASE_GLOBAL_READONLY_ENABLED_KEY, HBASE_GLOBAL_READONLY_ENABLED_DEFAULT); this.activeMasterManager = createActiveMasterManager(zooKeeper, serverName, this); cachedClusterId = new CachedClusterId(this, conf); this.regionServerTracker = new RegionServerTracker(zooKeeper, this); @@ -611,6 +622,12 @@ protected String getUseThisHostnameInstead(Configuration conf) { private void registerConfigurationObservers() { configurationManager.registerObserver(this.rpcServices); configurationManager.registerObserver(this); + if (cpHost != null) { + cpHost.registerConfigurationObservers(configurationManager); + } else { + LOG.warn("Could not register HMaster coprocessors to the ConfigurationManager because " + + "MasterCoprocessorHost is null"); + } } // Main run loop. Calls through to the regionserver run loop AFTER becoming active Master; will @@ -619,7 +636,6 @@ private void registerConfigurationObservers() { public void run() { try { installShutdownHook(); - registerConfigurationObservers(); Threads.setDaemonThreadRunning(new Thread(TraceUtil.tracedRunnable(() -> { try { int infoPort = putUpJettyServer(); @@ -1080,7 +1096,11 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE if (!maintenanceMode) { startupTaskGroup.addTask("Initializing master coprocessors"); setQuotasObserver(conf); - this.cpHost = new MasterCoprocessorHost(this, conf); + CoprocessorConfigurationUtil.syncReadOnlyConfigurations(conf, + CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY); + AbstractReadOnlyController.manageActiveClusterIdFile( + ConfigurationUtil.isReadOnlyModeEnabled(conf), this.getMasterFileSystem()); + initializeCoprocessorHost(conf); } else { // start an in process region server for carrying system regions maintenanceRegionServer = @@ -1182,8 +1202,9 @@ private void 
finishActiveMasterInitialization() throws IOException, InterruptedE int existingReplicasCount = assignmentManager.getRegionStates().getRegionsOfTable(TableName.META_TABLE_NAME).size(); if (existingReplicasCount > metaDesc.getRegionReplication()) { - LOG.info("Update replica count of hbase:meta from {}(in TableDescriptor)" - + " to {}(existing ZNodes)", metaDesc.getRegionReplication(), existingReplicasCount); + LOG.info( + "Update replica count of {} from {}(in TableDescriptor)" + " to {}(existing ZNodes)", + TableName.META_TABLE_NAME, metaDesc.getRegionReplication(), existingReplicasCount); metaDesc = TableDescriptorBuilder.newBuilder(metaDesc) .setRegionReplication(existingReplicasCount).build(); tableDescriptors.update(metaDesc); @@ -1192,8 +1213,9 @@ private void finishActiveMasterInitialization() throws IOException, InterruptedE if (metaDesc.getRegionReplication() != replicasNumInConf) { LOG.info( "The {} config is {} while the replica count in TableDescriptor is {}" - + " for hbase:meta, altering...", - HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication()); + + " for {}, altering...", + HConstants.META_REPLICAS_NUM, replicasNumInConf, metaDesc.getRegionReplication(), + TableName.META_TABLE_NAME); procedureExecutor.submitProcedure(new ModifyTableProcedure( procedureExecutor.getEnvironment(), TableDescriptorBuilder.newBuilder(metaDesc) .setRegionReplication(replicasNumInConf).build(), @@ -3129,8 +3151,8 @@ public ClusterMetrics getClusterMetricsWithoutCoprocessor(EnumSet