Use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in project hbase by apache.
The class MapReduceBackupMergeJob, method run.
@Override
public void run(String[] backupIds) throws IOException {
  String bulkOutputConfKey;
  // TODO : run player on remote cluster
  player = new MapReduceHFileSplitterJob();
  bulkOutputConfKey = MapReduceHFileSplitterJob.BULK_OUTPUT_CONF_KEY;
  // Player reads all files in arbitrary directory structure and creates
  // a Map task for each file
  String bids = StringUtils.join(backupIds, ",");
  if (LOG.isDebugEnabled()) {
    LOG.debug("Merge backup images " + bids);
  }
  List<Pair<TableName, Path>> processedTableList = new ArrayList<>();
  boolean finishedTables = false;
  Connection conn = ConnectionFactory.createConnection(getConf());
  BackupSystemTable table = new BackupSystemTable(conn);
  FileSystem fs = FileSystem.get(getConf());
  try {
    // Get exclusive lock on backup system
    table.startBackupExclusiveOperation();
    // Start merge operation
    table.startMergeOperation(backupIds);
    // Select most recent backup id
    String mergedBackupId = BackupUtils.findMostRecentBackupId(backupIds);
    TableName[] tableNames = getTableNamesInBackupImages(backupIds);
    BackupInfo bInfo = table.readBackupInfo(backupIds[0]);
    String backupRoot = bInfo.getBackupRootDir();
    for (int i = 0; i < tableNames.length; i++) {
      LOG.info("Merge backup images for " + tableNames[i]);
      // Find input directories for table
      Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
      String dirs = StringUtils.join(dirPaths, ",");
      Path bulkOutputPath = BackupUtils.getBulkOutputDir(
        BackupUtils.getFileNameCompatibleString(tableNames[i]), getConf(), false);
      // Delete content if exists
      if (fs.exists(bulkOutputPath)) {
        if (!fs.delete(bulkOutputPath, true)) {
          LOG.warn("Can not delete: " + bulkOutputPath);
        }
      }
      Configuration conf = getConf();
      conf.set(bulkOutputConfKey, bulkOutputPath.toString());
      String[] playerArgs = { dirs, tableNames[i].getNameAsString() };
      player.setConf(getConf());
      int result = player.run(playerArgs);
      if (!succeeded(result)) {
        throw new IOException("Can not merge backup images for " + dirs
          + " (check Hadoop/MR and HBase logs). Player return code =" + result);
      }
      // Add to processed table list
      processedTableList.add(new Pair<>(tableNames[i], bulkOutputPath));
      LOG.debug("Merge Job finished:" + result);
    }
    List<TableName> tableList = toTableNameList(processedTableList);
    table.updateProcessedTablesForMerge(tableList);
    finishedTables = true;
    // PHASE 2 (modification of a backup file system)
    // Move existing mergedBackupId data into tmp directory
    // we will need it later in case of a failure
    Path tmpBackupDir = HBackupFileSystem.getBackupTmpDirPathForBackupId(backupRoot, mergedBackupId);
    Path backupDirPath = HBackupFileSystem.getBackupPath(backupRoot, mergedBackupId);
    if (!fs.rename(backupDirPath, tmpBackupDir)) {
      throw new IOException("Failed to rename " + backupDirPath + " to " + tmpBackupDir);
    } else {
      LOG.debug("Renamed " + backupDirPath + " to " + tmpBackupDir);
    }
    // Move new data into backup dest
    for (Pair<TableName, Path> tn : processedTableList) {
      moveData(fs, backupRoot, tn.getSecond(), tn.getFirst(), mergedBackupId);
    }
    // Update backup manifest
    List<String> backupsToDelete = getBackupIdsToDelete(backupIds, mergedBackupId);
    updateBackupManifest(tmpBackupDir.getParent().toString(), mergedBackupId, backupsToDelete);
    // Copy meta files back from tmp to backup dir
    copyMetaData(fs, tmpBackupDir, backupDirPath);
    // Delete tmp dir (Rename back during repair)
    if (!fs.delete(tmpBackupDir, true)) {
      // WARN and ignore
      LOG.warn("Could not delete tmp dir: " + tmpBackupDir);
    }
    // Delete old data
    deleteBackupImages(backupsToDelete, conn, fs, backupRoot);
    // Finish merge session
    table.finishMergeOperation();
    // Release lock
    table.finishBackupExclusiveOperation();
  } catch (RuntimeException e) {
    throw e;
  } catch (Exception e) {
    LOG.error(e.toString(), e);
    if (!finishedTables) {
      // cleanup bulk directories and finish merge
      // merge MUST be repeated (no need for repair)
      cleanupBulkLoadDirs(fs, toPathList(processedTableList));
      table.finishMergeOperation();
      table.finishBackupExclusiveOperation();
      throw new IOException("Backup merge operation failed, you should try it again", e);
    } else {
      // backup repair must be run
      throw new IOException(
        "Backup merge operation failed, run backup repair tool to restore system's integrity", e);
    }
  } finally {
    table.close();
    conn.close();
  }
}
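The method above brackets all of its work between an exclusive backup lock and a merge session on BackupSystemTable, and its catch block decides between "retry" (lock released, merge marker cleared) and "repair" (both left in place once the backup file system has been modified). Below is a minimal sketch of just that bracketing pattern, reduced to the BackupSystemTable calls used above; error handling is intentionally omitted, and mergeTableData is a hypothetical placeholder, not an HBase API.

// Sketch only (not HBase code): the lock/session bracketing used by run() above.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MergeSessionSketch {
  public void merge(Configuration conf, String[] backupIds) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupSystemTable table = new BackupSystemTable(conn)) {
      table.startBackupExclusiveOperation();  // exclusive lock on the backup system
      table.startMergeOperation(backupIds);   // record that a merge is in progress
      mergeTableData(conf, backupIds);        // hypothetical placeholder for the per-table work
      table.finishMergeOperation();           // clear the merge marker
      table.finishBackupExclusiveOperation(); // release the lock
    }
  }

  private void mergeTableData(Configuration conf, String[] backupIds) {
    // placeholder for the MapReduceHFileSplitterJob runs and file moves shown above
  }
}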
Use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in project hbase by apache.
The class TestBackupDeleteWithFailures, method testBackupDeleteWithFailuresAfter.
private void testBackupDeleteWithFailuresAfter(int expected, Failure... failures) throws Exception {
  LOG.info("test repair backup delete on a single table with data and failures " + failures[0]);
  List<TableName> tableList = Lists.newArrayList(table1);
  String backupId = fullTableBackup(tableList);
  assertTrue(checkSucceeded(backupId));
  LOG.info("backup complete");
  String[] backupIds = new String[] { backupId };
  BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection());
  BackupInfo info = table.readBackupInfo(backupId);
  Path path = new Path(info.getBackupRootDir(), backupId);
  FileSystem fs = FileSystem.get(path.toUri(), conf1);
  assertTrue(fs.exists(path));
  Connection conn = TEST_UTIL.getConnection();
  Admin admin = conn.getAdmin();
  MasterSnapshotObserver observer = getMasterSnapshotObserver();
  observer.setFailures(failures);
  try {
    getBackupAdmin().deleteBackups(backupIds);
  } catch (IOException e) {
    if (expected != 1) {
      assertTrue(false);
    }
  }
  // Verify that history length == expected after delete failure
  assertTrue(table.getBackupHistory().size() == expected);
  String[] ids = table.getListOfBackupIdsFromDeleteOperation();
  // Verify that we still have delete record in backup system table
  if (expected == 1) {
    assertTrue(ids.length == 1);
    assertTrue(ids[0].equals(backupId));
  } else {
    assertNull(ids);
  }
  // Now run repair command to repair "failed" delete operation
  String[] args = new String[] { "repair" };
  observer.setFailures(Failure.NO_FAILURES);
  // Run repair
  int ret = ToolRunner.run(conf1, new BackupDriver(), args);
  assertTrue(ret == 0);
  // Verify that history length == 0
  assertTrue(table.getBackupHistory().size() == 0);
  ids = table.getListOfBackupIdsFromDeleteOperation();
  // Verify that we do not have delete record in backup system table
  assertNull(ids);
  table.close();
  admin.close();
}
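The test relies on two pieces shown above: BackupSystemTable.getListOfBackupIdsFromDeleteOperation() returns the backup ids recorded for an interrupted delete (or null when there is none), and the repair tool is simply BackupDriver run with the "repair" argument. A minimal sketch of using that pair outside a test, assuming only the calls that appear above:

// Sketch only: detect an interrupted delete and run the repair tool.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupDriver;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.util.ToolRunner;

public class RepairIfNeededSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupSystemTable table = new BackupSystemTable(conn)) {
      String[] pending = table.getListOfBackupIdsFromDeleteOperation();
      if (pending != null) {
        // An earlier delete did not finish; run the repair command.
        int ret = ToolRunner.run(conf, new BackupDriver(), new String[] { "repair" });
        System.out.println("repair exit code: " + ret);
      }
    }
  }
}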
Use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in project hbase by apache.
The class TestBackupSystemTable, method testBackupSetRemove.
@Test
public void testBackupSetRemove() throws IOException {
  try (BackupSystemTable table = new BackupSystemTable(conn)) {
    String[] tables = new String[] { "table1", "table2", "table3", "table4" };
    String setName = "name";
    table.addToBackupSet(setName, tables);
    String[] removeTables = new String[] { "table4", "table3" };
    table.removeFromBackupSet(setName, removeTables);
    Set<String> expectedTables = new HashSet<>(Arrays.asList("table1", "table2"));
    List<TableName> tnames = table.describeBackupSet(setName);
    assertTrue(tnames != null);
    assertTrue(tnames.size() == expectedTables.size());
    for (TableName tableName : tnames) {
      assertTrue(expectedTables.remove(tableName.getNameAsString()));
    }
    cleanBackupTable();
  }
}
Use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in project hbase by apache.
The class TestBackupSystemTable, method testBackupSetAddExists.
@Test
public void testBackupSetAddExists() throws IOException {
  try (BackupSystemTable table = new BackupSystemTable(conn)) {
    String[] tables = new String[] { "table1", "table2", "table3" };
    String setName = "name";
    table.addToBackupSet(setName, tables);
    String[] addTables = new String[] { "table4", "table5", "table6" };
    table.addToBackupSet(setName, addTables);
    Set<String> expectedTables =
      new HashSet<>(Arrays.asList("table1", "table2", "table3", "table4", "table5", "table6"));
    List<TableName> tnames = table.describeBackupSet(setName);
    assertTrue(tnames != null);
    assertTrue(tnames.size() == expectedTables.size());
    for (TableName tableName : tnames) {
      assertTrue(expectedTables.remove(tableName.getNameAsString()));
    }
    cleanBackupTable();
  }
}
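The two tests above exercise the backup-set API: addToBackupSet appends to an existing set, removeFromBackupSet drops members, and describeBackupSet returns the current membership as TableName objects. A short sketch of that round trip written as plain client code, using only the calls shown above; "myset" and the table names are arbitrary examples.

// Sketch only: backup-set round trip with BackupSystemTable.
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BackupSetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupSystemTable table = new BackupSystemTable(conn)) {
      table.addToBackupSet("myset", new String[] { "table1", "table2", "table3" });
      table.removeFromBackupSet("myset", new String[] { "table3" });
      List<TableName> members = table.describeBackupSet("myset"); // expect table1, table2
      for (TableName tn : members) {
        System.out.println(tn.getNameAsString());
      }
    }
  }
}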
Use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in project hbase by apache.
The class TestBackupSystemTable, method testBackupDelete.
@Test
public void testBackupDelete() throws IOException {
  try (BackupSystemTable table = new BackupSystemTable(conn)) {
    int n = 10;
    List<BackupInfo> list = createBackupInfoList(n);
    // Load data
    for (BackupInfo bc : list) {
      // Make sure we set right status
      bc.setState(BackupState.COMPLETE);
      table.updateBackupInfo(bc);
    }
    // Verify exists
    for (BackupInfo bc : list) {
      assertNotNull(table.readBackupInfo(bc.getBackupId()));
    }
    // Delete all
    for (BackupInfo bc : list) {
      table.deleteBackupInfo(bc.getBackupId());
    }
    // Verify do not exists
    for (BackupInfo bc : list) {
      assertNull(table.readBackupInfo(bc.getBackupId()));
    }
    cleanBackupTable();
  }
}
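The test above shows the record lifecycle in the backup system table: updateBackupInfo writes or updates a record, readBackupInfo returns it (or null once it is gone), and deleteBackupInfo removes it. A small sketch of reading and deleting a single record, using only the BackupSystemTable calls exercised above; "backup_12345" is an arbitrary example id.

// Sketch only: read and delete one record from the backup system table.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BackupInfoDeleteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    String backupId = "backup_12345"; // example id only
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BackupSystemTable table = new BackupSystemTable(conn)) {
      BackupInfo info = table.readBackupInfo(backupId); // null if no such record
      if (info != null) {
        System.out.println("Deleting record for " + info.getBackupId()
            + " rooted at " + info.getBackupRootDir());
        table.deleteBackupInfo(backupId);
      }
    }
  }
}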