Commits (33)
27cd35a
HBASE-29082: Support for custom meta table name suffix (#6632)
kabhishek4 Mar 11, 2025
ea9fd6c
HBASE-29083: Add global read-only mode to HBase (#6757)
sharmaar12 Apr 22, 2025
93bd974
HBASE-29236: Add Support for Dynamic Configuration at the Coprocessor…
kgeisz May 29, 2025
69448b9
HBASE-29228 Add support to prevent running multiple active clusters (…
kabhishek4 Jun 3, 2025
8297cc5
HBASE-29291: Add a command to refresh/sync hbase:meta table (#7058)
Kota-SH Sep 11, 2025
1ba1152
HBASE-29328: Implement new HBase command: refresh_hfiles (#7149)
sharmaar12 Sep 15, 2025
f040614
HBASE-29579: AssignmentManager is trying to pick up the other cluster…
Kota-SH Sep 19, 2025
1912562
HBASE-29597 Supply meta table name for replica to the tests in TestMe…
kabhishek4 Sep 19, 2025
4f77099
HBASE-29621: Remove the leading whitespace in the active.cluster.suff…
Kota-SH Sep 29, 2025
67c3320
HBASE-29580: Clean-up hardcoded meta table names from log entries (#7…
kgeisz Sep 30, 2025
6ffba71
HBASE-29594: Add suffix to Master Region data directory (#7330)
Kota-SH Oct 20, 2025
cea3652
HBASE-29611: With FILE based SFT, the list of HFiles we maintain in .…
sharmaar12 Oct 27, 2025
1685bf7
HBASE-29644: Refresh_meta triggering compaction on user table (#7385)
sharmaar12 Nov 13, 2025
a0b5f9f
HBASE-29642 Active cluster file is not being updated after promoting …
kabhishek4 Nov 21, 2025
4a800c4
HBASE-29693: Implement the missing observer functions in the read-onl…
sharmaar12 Dec 8, 2025
adc551b
HBASE-29715: AssignmentManager is trying to pick up the active cluste…
kgeisz Dec 15, 2025
9c5777d
HBASE-29778: Abort the retry operation if not allowed in read-only mo…
sharmaar12 Dec 18, 2025
8033e41
HBASE-29779: Call super coprocessor instead of returning for system t…
sharmaar12 Jan 9, 2026
e4f9ede
HBASE-29780: Addendum to HBASE-29715: Add an additional test case tha…
kgeisz Feb 4, 2026
7333bd2
HBASE-29841: Split bulky ReadOnlyController into multiple smaller con…
sharmaar12 Feb 6, 2026
285a77a
HBASE-29756: Programmatically register related co-processor during in…
sharmaar12 Feb 27, 2026
fc108e9
HBASE-29961 Secondary cluster is unable to replayWAL for meta (#7854)
sharmaar12 Mar 9, 2026
89bebf4
HBASE-29959 Cluster started in read-only mode mistakenly deletes suff…
sharmaar12 Mar 11, 2026
dbe796c
Build fix
anmolnar Mar 13, 2026
f9fc7e9
Spotless apply
anmolnar Mar 13, 2026
e8434af
Spotbugs check
anmolnar Mar 13, 2026
8bf1aa0
HBASE-29992: Implement regex check for configured replica suffix (#7923)
Kota-SH Mar 23, 2026
b808b90
HBASE-30014: refresh_meta not working due to regionNode lock (#7974)
Kota-SH Mar 25, 2026
17a943a
HBASE-29960 java.lang.IllegalStateException: Should not call create w…
sharmaar12 Mar 26, 2026
22b752b
HBASE-29958 Improve log messages (#7922)
sharmaar12 Mar 26, 2026
6ad2a57
HBASE-29965: Unable to dynamically change readonly flag (#7964)
kgeisz Mar 30, 2026
a43467d
HBASE-29993. Refactor ClusterId/ActiveClusterSuffix reading and writi…
anmolnar Apr 8, 2026
b3df7c5
HBASE-29081. Fix build errors
anmolnar Apr 8, 2026
@@ -28,6 +28,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
@@ -156,7 +157,7 @@ private List<String> getLogFilesForNewBackup(Map<String, Long> olderTimestamps,
LOG.debug("currentLogFile: " + log.getPath().toString());
if (AbstractFSWALProvider.isMetaFile(log.getPath())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Skip hbase:meta log file: " + log.getPath().getName());
LOG.debug("Skip {} log file: {}", TableName.META_TABLE_NAME, log.getPath().getName());
}
continue;
}
@@ -204,6 +204,6 @@ private static boolean isHMasterWAL(Path path) {
String fn = path.getName();
return fn.startsWith(WALProcedureStore.LOG_PREFIX)
|| fn.endsWith(MasterRegionFactory.ARCHIVED_WAL_SUFFIX)
-      || path.toString().contains("/%s/".formatted(MasterRegionFactory.MASTER_STORE_DIR));
+      || path.toString().contains("/%s/".formatted(MasterRegionFactory.MASTER_REGION_DIR_NAME));
}
}
@@ -170,7 +170,8 @@ private void processMetaRecord(Result result) throws IOException {
* Initialize the region assignment snapshot by scanning the hbase:meta table
*/
public void initialize() throws IOException {
LOG.info("Start to scan the hbase:meta for the current region assignment " + "snappshot");
LOG.info("Start to scan {} for the current region assignment snapshot",
TableName.META_TABLE_NAME);
// Scan hbase:meta to pick up user regions
try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME);
ResultScanner scanner = metaTable.getScanner(HConstants.CATALOG_FAMILY)) {
@@ -187,7 +188,8 @@ public void initialize() throws IOException {
}
}
}
LOG.info("Finished to scan the hbase:meta for the current region assignment" + "snapshot");
LOG.info("Finished scanning {} for the current region assignment snapshot",
TableName.META_TABLE_NAME);
}

private void addRegion(RegionInfo regionInfo) {
@@ -0,0 +1,142 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;

import java.io.IOException;
import java.util.Objects;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;

import org.apache.hbase.thirdparty.com.google.common.base.Strings;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ActiveClusterSuffixProtos;

/**
 * The active cluster suffix for this cluster: the cluster id paired with the configured
 * read-replica suffix. It is serialized to the filesystem and up into zookeeper. This is a
 * container for the id and suffix, and also knows how to serialize and deserialize them.
 */
@InterfaceAudience.Private
public class ActiveClusterSuffix implements ClusterIdFile {
private final String cluster_id;
private final String suffix;

public static class Parser implements ClusterIdFileParser<ActiveClusterSuffix> {

@Override
public String getFileName() {
return HConstants.ACTIVE_CLUSTER_SUFFIX_FILE_NAME;
}

/**
* Parse the serialized representation of the {@link ActiveClusterSuffix}
* @param bytes A pb serialized {@link ActiveClusterSuffix} instance with pb magic prefix
* @return An instance of {@link ActiveClusterSuffix} made from <code>bytes</code>
* @see #toByteArray()
*/
@Override
public ActiveClusterSuffix parseFrom(byte[] bytes) throws DeserializationException {
if (ProtobufUtil.isPBMagicPrefix(bytes)) {
int pblen = ProtobufUtil.lengthOfPBMagic();
ActiveClusterSuffixProtos.ActiveClusterSuffix.Builder builder =
ActiveClusterSuffixProtos.ActiveClusterSuffix.newBuilder();
ActiveClusterSuffixProtos.ActiveClusterSuffix cs = null;
try {
ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
cs = builder.build();
} catch (IOException e) {
throw new DeserializationException(e);
}
return convert(cs);
} else {
// Presume it was written out this way, the old way.
return new ActiveClusterSuffix(Bytes.toString(bytes));
}
}

@Override
public ActiveClusterSuffix readString(String input) {
return new ActiveClusterSuffix(input);
}
}

public ActiveClusterSuffix(final String ci, final String suffix) {
this.cluster_id = ci;
this.suffix = suffix;
}

public ActiveClusterSuffix(final String input) {
String[] parts = input.split(":", 2);
this.cluster_id = parts[0];
if (parts.length > 1) {
this.suffix = parts[1];
} else {
this.suffix = "";
}
}

public static ActiveClusterSuffix parseFrom(byte[] bytes) throws DeserializationException {
return new Parser().parseFrom(bytes);
}

public static ActiveClusterSuffix fromConfig(Configuration conf, ClusterId clusterId) {
return new ActiveClusterSuffix(clusterId.toString(), conf
.get(HConstants.HBASE_META_TABLE_SUFFIX, HConstants.HBASE_META_TABLE_SUFFIX_DEFAULT_VALUE));
}

/** Returns The active cluster suffix serialized using pb w/ pb magic prefix */
public byte[] toByteArray() {
return ProtobufUtil.prependPBMagic(convert().toByteArray());
}

/** Returns A pb instance to represent this instance. */
public ActiveClusterSuffixProtos.ActiveClusterSuffix convert() {
return ActiveClusterSuffixProtos.ActiveClusterSuffix.newBuilder().setClusterId(cluster_id)
.setSuffix(suffix).build();
}

/** Returns A {@link ActiveClusterSuffix} made from the passed in <code>cs</code> */
public static ActiveClusterSuffix
convert(final ActiveClusterSuffixProtos.ActiveClusterSuffix cs) {
return new ActiveClusterSuffix(cs.getClusterId(), cs.getSuffix());
}

/**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return String.format("%s:%s", this.cluster_id,
Strings.isNullOrEmpty(this.suffix) ? "<blank>" : this.suffix);
}

@Override
public boolean equals(Object o) {
if (o == null || getClass() != o.getClass()) return false;
ActiveClusterSuffix that = (ActiveClusterSuffix) o;
return Objects.equals(cluster_id, that.cluster_id) && Objects.equals(suffix, that.suffix);
}

@Override
public int hashCode() {
return Objects.hash(cluster_id, suffix);
}
}
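
For orientation, a minimal round-trip sketch for the new container (the sample id and suffix values are made up; only the constructor, toByteArray(), parseFrom(), equals() and toString() shown above are assumed):

import org.apache.hadoop.hbase.ActiveClusterSuffix;
import org.apache.hadoop.hbase.exceptions.DeserializationException;

public class ActiveClusterSuffixRoundTrip {
  public static void main(String[] args) throws DeserializationException {
    // Parse the "clusterId:suffix" string form used by the one-argument constructor.
    ActiveClusterSuffix acs =
      new ActiveClusterSuffix("d3f0e0a2-1a6e-4a9b-9e1a-0123456789ab:replica1");

    // Serialize with the pb magic prefix, as written to the suffix file on the filesystem.
    byte[] bytes = acs.toByteArray();

    // parseFrom() handles both the pb form and the legacy plain-string form.
    ActiveClusterSuffix roundTripped = ActiveClusterSuffix.parseFrom(bytes);

    // toString() renders "clusterId:suffix" and prints "<blank>" for an empty suffix.
    assert acs.equals(roundTripped) : "round trip should preserve both id and suffix";
    System.out.println(roundTripped);
  }
}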
hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java (39 additions, 25 deletions)
@@ -31,9 +31,47 @@
* is a container for the id. Also knows how to serialize and deserialize the cluster id.
*/
@InterfaceAudience.Private
-public class ClusterId {
+public class ClusterId implements ClusterIdFile {
private final String id;

public static class Parser implements ClusterIdFileParser<ClusterId> {

@Override
public String getFileName() {
return HConstants.CLUSTER_ID_FILE_NAME;
}

/**
* Parse the serialized representation of the {@link ClusterId}
* @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
* @return An instance of {@link ClusterId} made from <code>bytes</code>
* @see #toByteArray()
*/
@Override
public ClusterId parseFrom(byte[] bytes) throws DeserializationException {
if (ProtobufUtil.isPBMagicPrefix(bytes)) {
int pblen = ProtobufUtil.lengthOfPBMagic();
ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
ClusterIdProtos.ClusterId cid = null;
try {
ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
cid = builder.build();
} catch (IOException e) {
throw new DeserializationException(e);
}
return convert(cid);
} else {
// Presume it was written out this way, the old way.
return new ClusterId(Bytes.toString(bytes));
}
}

@Override
public ClusterId readString(String input) {
return new ClusterId(input);
}
}

/**
* New ClusterID. Generates a uniqueid.
*/
@@ -50,30 +88,6 @@ public byte[] toByteArray() {
return ProtobufUtil.prependPBMagic(convert().toByteArray());
}

-  /**
-   * Parse the serialized representation of the {@link ClusterId}
-   * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
-   * @return An instance of {@link ClusterId} made from <code>bytes</code>
-   * @see #toByteArray()
-   */
-  public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException {
-    if (ProtobufUtil.isPBMagicPrefix(bytes)) {
-      int pblen = ProtobufUtil.lengthOfPBMagic();
-      ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
-      ClusterIdProtos.ClusterId cid = null;
-      try {
-        ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
-        cid = builder.build();
-      } catch (IOException e) {
-        throw new DeserializationException(e);
-      }
-      return convert(cid);
-    } else {
-      // Presume it was written out this way, the old way.
-      return new ClusterId(Bytes.toString(bytes));
-    }
-  }

/** Returns A pb instance to represent this instance. */
public ClusterIdProtos.ClusterId convert() {
ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder();
@@ -0,0 +1,34 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;

import org.apache.yetus.audience.InterfaceAudience;

/**
 * Represents a cluster identification file on the master file system, e.g. the cluster ID file
 * (hbase.id) or the active read-replica cluster ID file (active.cluster.suffix.id).
 */
@InterfaceAudience.Private
public interface ClusterIdFile {

/**
* Return file contents in a byte array.
*/
byte[] toByteArray();

}
@@ -0,0 +1,48 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;

import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.yetus.audience.InterfaceAudience;

/**
* Generic parser interface for Cluster Id files.
* @see ClusterIdFile
*/
@InterfaceAudience.Private
public interface ClusterIdFileParser<T> {

  /**
   * Get the default file name of the cluster id file.
   */
String getFileName();

/**
* Parse cluster id data from byte representation.
* @param bytes the protobuf data
* @return the cluster id data object
*/
T parseFrom(final byte[] bytes) throws DeserializationException;

/**
* Parse cluster id data from its String representation.
* @param input the input string
* @return the cluster id data object
*/
T readString(String input);
}
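
A sketch of how the two interfaces compose; the generic read helper and the raw-bytes plumbing are hypothetical, and only the Parser types, toByteArray() and getFileName() come from this change:

import org.apache.hadoop.hbase.ActiveClusterSuffix;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.ClusterIdFile;
import org.apache.hadoop.hbase.ClusterIdFileParser;
import org.apache.hadoop.hbase.exceptions.DeserializationException;

public class ClusterIdFileReadSketch {
  // One generic code path can deserialize either identification file.
  static <T extends ClusterIdFile> T read(ClusterIdFileParser<T> parser, byte[] rawBytes)
    throws DeserializationException {
    // getFileName() names the file under the HBase root dir; here we assume the
    // caller has already fetched its bytes.
    System.out.println("Parsing contents of " + parser.getFileName());
    return parser.parseFrom(rawBytes);
  }

  public static void main(String[] args) throws DeserializationException {
    byte[] idBytes = new ClusterId().toByteArray();
    byte[] suffixBytes = new ActiveClusterSuffix("some-cluster-id", "replica1").toByteArray();

    ClusterId id = read(new ClusterId.Parser(), idBytes);
    ActiveClusterSuffix suffix = read(new ActiveClusterSuffix.Parser(), suffixBytes);
    System.out.println(id + " / " + suffix);
  }
}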
@@ -2703,6 +2703,34 @@ List<LogEntry> getLogEntries(Set<ServerName> serverNames, String logType, Server
*/
List<String> getCachedFilesList(ServerName serverName) throws IOException;

  /**
   * Perform hbase:meta table refresh
   * @return ID of the procedure started for refreshing meta
   * @throws IOException if a remote or network exception occurs
   */
  Long refreshMeta() throws IOException;

/**
* Refresh HFiles for the table
* @param tableName table to refresh HFiles for
* @return ID of the procedure started for refreshing HFiles
* @throws IOException if a remote or network exception occurs
*/
Long refreshHFiles(final TableName tableName) throws IOException;

/**
* Refresh HFiles for all the tables under given namespace
* @param namespace Namespace for which we should call refresh HFiles for all tables under it
* @return ID of the procedure started for refreshing HFiles
* @throws IOException if a remote or network exception occurs
*/
Long refreshHFiles(final String namespace) throws IOException;

/**
* Refresh HFiles for all the tables
* @return ID of the procedure started for refreshing HFiles
* @throws IOException if a remote or network exception occurs
*/
Long refreshHFiles() throws IOException;

@InterfaceAudience.Private
void restoreBackupSystemTable(String snapshotName) throws IOException;
}
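
A hedged usage sketch for the new Admin methods (the table and namespace names are hypothetical; the connection boilerplate is the standard HBase client API):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RefreshSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()) {
      // Re-sync the catalog table on a read-replica cluster; returns a procedure id.
      Long metaProcId = admin.refreshMeta();

      // Refresh HFiles at table, namespace, or cluster-wide scope.
      Long tableProcId = admin.refreshHFiles(TableName.valueOf("my_table"));
      Long nsProcId = admin.refreshHFiles("my_namespace");
      Long allProcId = admin.refreshHFiles();

      System.out.printf("procedure ids: %d %d %d %d%n",
        metaProcId, tableProcId, nsProcId, allProcId);
    }
  }
}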