File: BackupMasterObserver.java (new file)
@@ -0,0 +1,116 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupManager;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.backup.impl.BulkLoad;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

/**
 * An Observer to facilitate backup operations
 */
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class BackupMasterObserver implements MasterCoprocessor, MasterObserver {
  private static final Logger LOG = LoggerFactory.getLogger(BackupMasterObserver.class);

  @Override
  public Optional<MasterObserver> getMasterObserver() {
    return Optional.of(this);
  }

  @Override
  public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
    TableName tableName) throws IOException {
    Configuration cfg = ctx.getEnvironment().getConfiguration();
    if (!BackupManager.isBackupEnabled(cfg)) {
      LOG.debug("Skipping postDeleteTable hook since backup is disabled");
      return;
    }
    deleteBulkLoads(cfg, tableName, (ignored) -> true);
  }

  @Override
  public void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
    TableName tableName) throws IOException {
    Configuration cfg = ctx.getEnvironment().getConfiguration();
    if (!BackupManager.isBackupEnabled(cfg)) {
      LOG.debug("Skipping postTruncateTable hook since backup is disabled");
      return;
    }
    deleteBulkLoads(cfg, tableName, (ignored) -> true);
  }

  @Override
  public void postModifyTable(final ObserverContext<MasterCoprocessorEnvironment> ctx,
    final TableName tableName, TableDescriptor oldDescriptor, TableDescriptor currentDescriptor)
    throws IOException {
    Configuration cfg = ctx.getEnvironment().getConfiguration();
    if (!BackupManager.isBackupEnabled(cfg)) {
      LOG.debug("Skipping postModifyTable hook since backup is disabled");
      return;
    }

    Set<String> oldFamilies = Arrays.stream(oldDescriptor.getColumnFamilies())
      .map(ColumnFamilyDescriptor::getNameAsString).collect(Collectors.toSet());
    Set<String> newFamilies = Arrays.stream(currentDescriptor.getColumnFamilies())
      .map(ColumnFamilyDescriptor::getNameAsString).collect(Collectors.toSet());

    Set<String> removedFamilies = Sets.difference(oldFamilies, newFamilies);
    if (!removedFamilies.isEmpty()) {
      Predicate<BulkLoad> filter = bulkload -> removedFamilies.contains(bulkload.getColumnFamily());
      deleteBulkLoads(cfg, tableName, filter);
    }
  }

  /**
   * Deletes all bulk load entries for the given table, matching the provided predicate.
   */
  private void deleteBulkLoads(Configuration config, TableName tableName,
    Predicate<BulkLoad> filter) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(config);
      BackupSystemTable tbl = new BackupSystemTable(connection)) {
      List<BulkLoad> bulkLoads = tbl.readBulkloadRows(List.of(tableName));
      List<byte[]> rowsToDelete =
        bulkLoads.stream().filter(filter).map(BulkLoad::getRowKey).toList();
      tbl.deleteBulkLoadedRows(rowsToDelete);
    }
  }
}
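Not part of the diff: a self-contained sketch of the removed-family detection that postModifyTable performs above. The table name "demo" and the families "a" and "b" are hypothetical; bulk load entries whose column family ends up in removedFamilies are the ones handed to deleteBulkLoads.

import java.util.Arrays;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;

public class RemovedFamiliesSketch {
  public static void main(String[] args) {
    // Hypothetical descriptors: before the modify the table has families "a" and "b",
    // afterwards only "a" remains.
    TableDescriptor oldDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("a"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("b")).build();
    TableDescriptor currentDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("a")).build();

    Set<String> oldFamilies = Arrays.stream(oldDescriptor.getColumnFamilies())
      .map(ColumnFamilyDescriptor::getNameAsString).collect(Collectors.toSet());
    Set<String> newFamilies = Arrays.stream(currentDescriptor.getColumnFamilies())
      .map(ColumnFamilyDescriptor::getNameAsString).collect(Collectors.toSet());

    // Families present before the modify but not after; their bulk load records are stale.
    Set<String> removedFamilies = Sets.difference(oldFamilies, newFamilies);
    System.out.println(removedFamilies); // prints [b]
  }
}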
File: BackupRestoreConstants.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.backup;
 
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -98,16 +99,17 @@ public interface BackupRestoreConstants {
 
   String JOB_NAME_CONF_KEY = "mapreduce.job.name";
 
-  String BACKUP_CONFIG_STRING =
-    BackupRestoreConstants.BACKUP_ENABLE_KEY + "=true\n" + "hbase.master.logcleaner.plugins="
-      + "YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n"
-      + "hbase.procedure.master.classes=YOUR_CLASSES,"
-      + "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n"
-      + "hbase.procedure.regionserver.classes=YOUR_CLASSES,"
-      + "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n"
-      + "hbase.coprocessor.region.classes=YOUR_CLASSES,"
-      + "org.apache.hadoop.hbase.backup.BackupObserver\n" + "and restart the cluster\n"
-      + "For more information please see http://hbase.apache.org/book.html#backuprestore\n";
+  String BACKUP_CONFIG_STRING = BackupRestoreConstants.BACKUP_ENABLE_KEY + "=true\n"
+    + "hbase.master.logcleaner.plugins="
+    + "YOUR_PLUGINS,org.apache.hadoop.hbase.backup.master.BackupLogCleaner\n"
+    + "hbase.procedure.master.classes=YOUR_CLASSES,"
+    + "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager\n"
+    + "hbase.procedure.regionserver.classes=YOUR_CLASSES,"
+    + "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager\n"
+    + CoprocessorHost.REGION_COPROCESSOR_CONF_KEY + "=YOUR_CLASSES,"
+    + BackupObserver.class.getSimpleName() + "\n" + CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY
+    + "=YOUR_CLASSES," + BackupMasterObserver.class.getSimpleName() + "\nand restart the cluster\n"
+    + "For more information please see http://hbase.apache.org/book.html#backuprestore\n";
   String ENABLE_BACKUP = "Backup is not enabled. To enable backup, " + "in hbase-site.xml, set:\n "
     + BACKUP_CONFIG_STRING;

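Not part of the diff: a sketch of applying the settings from the help text above programmatically, for example when assembling a test Configuration. It assumes no pre-existing procedure or coprocessor classes (the YOUR_CLASSES placeholders); a real deployment would put the same properties in hbase-site.xml and restart the cluster, as the message says.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupMasterObserver;
import org.apache.hadoop.hbase.backup.BackupObserver;
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;

public class EnableBackupConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
    // Append the backup log cleaner to whatever log cleaners are already configured.
    String plugins = conf.get("hbase.master.logcleaner.plugins");
    conf.set("hbase.master.logcleaner.plugins", (plugins == null ? "" : plugins + ",")
      + "org.apache.hadoop.hbase.backup.master.BackupLogCleaner");
    conf.set("hbase.procedure.master.classes",
      "org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager");
    conf.set("hbase.procedure.regionserver.classes",
      "org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager");
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, BackupObserver.class.getName());
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, BackupMasterObserver.class.getName());
  }
}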
File: BackupManager.java
@@ -119,11 +119,17 @@ public static void decorateMasterConfiguration(Configuration conf) {
     plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
     conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
       (plugins == null ? "" : plugins + ",") + BackupHFileCleaner.class.getName());
+
+    String observerClass = BackupObserver.class.getName();
+    String masterCoProc = conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY);
+    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
+      (masterCoProc == null ? "" : masterCoProc + ",") + observerClass);
+
     if (LOG.isDebugEnabled()) {
       LOG.debug(
         "Added log cleaner: {}. Added master procedure manager: {}."
-          + "Added master procedure manager: {}",
-        cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName());
+          + " Added master procedure manager: {}. Added master observer: {}",
+        cleanerClass, masterProcedureClass, BackupHFileCleaner.class.getName(), observerClass);
     }
   }

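Not part of the diff: a sketch of what the decorated configuration would look like for a deployment that already registers its own master coprocessor. com.example.MyMasterObserver is a made-up class name; the expected output simply reflects the append-if-present logic in the hunk above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupRestoreConstants;
import org.apache.hadoop.hbase.backup.impl.BackupManager;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;

public class DecorateMasterConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, true);
    // Hypothetical pre-existing master coprocessor; decoration should append, not replace.
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, "com.example.MyMasterObserver");

    BackupManager.decorateMasterConfiguration(conf);

    // Expected to print the existing entry followed by the backup observer, e.g.
    // com.example.MyMasterObserver,org.apache.hadoop.hbase.backup.BackupObserver
    System.out.println(conf.get(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY));
  }
}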
File: BackupSystemTable.java
@@ -411,25 +411,24 @@ public void registerBulkLoad(TableName tableName, byte[] region,
     try (BufferedMutator bufferedMutator = connection.getBufferedMutator(bulkLoadTableName)) {
       List<Put> puts = BackupSystemTable.createPutForBulkLoad(tableName, region, cfToHfilePath);
       bufferedMutator.mutate(puts);
-      LOG.debug("Written {} rows for bulk load of {}", puts.size(), tableName);
+      LOG.debug("Written {} rows for bulk load of table {}", puts.size(), tableName);
     }
   }
 
-  /*
-   * Removes rows recording bulk loaded hfiles from backup table
-   * @param lst list of table names
-   * @param rows the rows to be deleted
+  /**
+   * Removes entries from the table that tracks all bulk loaded hfiles.
+   * @param rows the row keys of the entries to be deleted
    */
   public void deleteBulkLoadedRows(List<byte[]> rows) throws IOException {
     try (BufferedMutator bufferedMutator = connection.getBufferedMutator(bulkLoadTableName)) {
-      List<Delete> lstDels = new ArrayList<>();
+      List<Delete> deletes = new ArrayList<>();
       for (byte[] row : rows) {
         Delete del = new Delete(row);
-        lstDels.add(del);
-        LOG.debug("orig deleting the row: " + Bytes.toString(row));
+        deletes.add(del);
+        LOG.debug("Deleting bulk load entry with key: {}", Bytes.toString(row));
       }
-      bufferedMutator.mutate(lstDels);
-      LOG.debug("deleted " + rows.size() + " original bulkload rows");
+      bufferedMutator.mutate(deletes);
+      LOG.debug("Deleted {} bulk load entries.", rows.size());
     }
   }

@@ -1522,16 +1521,6 @@ public static void deleteSnapshot(Connection conn) throws IOException {
     }
   }
 
-  public static List<Delete> createDeleteForOrigBulkLoad(List<TableName> lst) {
-    List<Delete> lstDels = new ArrayList<>(lst.size());
-    for (TableName table : lst) {
-      Delete del = new Delete(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM));
-      del.addFamily(BackupSystemTable.META_FAMILY);
-      lstDels.add(del);
-    }
-    return lstDels;
-  }
-
   private Put createPutForDeleteOperation(String[] backupIdList) {
     byte[] value = Bytes.toBytes(StringUtils.join(backupIdList, ","));
     Put put = new Put(DELETE_OP_ROW);
File: FullTableBackupClient.java
@@ -26,6 +26,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupCopyJob;
@@ -152,6 +153,11 @@ public void execute() throws IOException {
       // the snapshot.
       LOG.info("Execute roll log procedure for full backup ...");
 
+      // Gather the bulk loads being tracked by the system, which can be deleted (since their data
+      // will be part of the snapshot being taken). We gather this list before taking the actual
+      // snapshots for the same reason as the log rolls.
+      List<BulkLoad> bulkLoadsToDelete = backupManager.readBulkloadRows(tableList);
+
       Map<String, String> props = new HashMap<>();
       props.put("backupRoot", backupInfo.getBackupRootDir());
       admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
@@ -192,6 +198,9 @@
         BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
       backupManager.writeBackupStartCode(newStartCode);
 
+      backupManager
+        .deleteBulkLoadedRows(bulkLoadsToDelete.stream().map(BulkLoad::getRowKey).toList());
@hgromer (Contributor), Mar 18, 2025:
At my company, we have a similar patch applied to our fork, and we've run into issues with batch sizes that cause backup failures. This seems to happen when there are too many rows to delete; you end up with something like:

Caused by: org.apache.hbase.thirdparty.com.google.protobuf.ServiceException: Rejecting large batch operation for current batch with firstRegionName: backup:system_bulk,,1739970553683.c3828af81a4b3847aa0f1612bf638713. , Requested Number of Rows: 2048 , Size Threshold: 1500
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.checkBatchSizeAndLogLargeSize(RSRpcServices.java:2721)
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.multi(RSRpcServices.java:2757)
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43520)
	at org.apache.ha

It might be worth splitting up this delete call when the number of rows is exceptionally large.
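A sketch of the kind of splitting being suggested (illustrative only, not code from this PR; the chunk size of 1000 is an arbitrary example):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;

public final class ChunkedBulkLoadDelete {
  private static final int CHUNK_SIZE = 1000; // arbitrary example value

  // Issue several smaller delete batches instead of one huge multi-row mutation,
  // so each RPC stays under the region server's batch size threshold.
  public static void deleteInChunks(BackupSystemTable systemTable, List<byte[]> rowsToDelete)
    throws IOException {
    for (int start = 0; start < rowsToDelete.size(); start += CHUNK_SIZE) {
      int end = Math.min(start + CHUNK_SIZE, rowsToDelete.size());
      systemTable.deleteBulkLoadedRows(rowsToDelete.subList(start, end));
    }
  }
}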

Member:
@hgromer do you feel that splitting up the delete is required for this patch?

Member:
@DieterDP-ng any concerns about making this change?

Contributor:
It might be worth splitting this up. Introducing this means we can get into a situation where backups are unable to succeed.

Contributor (author):
No objection to splitting it. Updating the PR...

Contributor (author):
Rather than doing batching here, I did it at BackupSystemTable.deleteBulkLoadedRows, since that method is used in multiple locations, and I guess they would all benefit from batching. See added commit (can be squashed).

Contributor (author):
(For future reference: turned out batching was already built-in, see #6506 (comment))


       // backup complete
       completeBackup(conn, backupInfo, BackupType.FULL, conf);
     } catch (Exception e) {