use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in project hbase by apache.
the class TestFullBackupSetRestoreSet method testFullRestoreSetToSameTable.
@Test
public void testFullRestoreSetToSameTable() throws Exception {
  LOG.info("Test full restore set to same table");
  // Create set
  try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
    String name = "name1";
    table.addToBackupSet(name, new String[] { table1.getNameAsString() });
    List<TableName> names = table.describeBackupSet(name);
    assertNotNull(names);
    assertTrue(names.size() == 1);
    assertTrue(names.get(0).equals(table1));
    String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", name };
    // Run backup
    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
    assertTrue(ret == 0);
    List<BackupInfo> backups = table.getBackupHistory();
    String backupId = backups.get(0).getBackupId();
    assertTrue(checkSucceeded(backupId));
    LOG.info("backup complete");
    int count = TEST_UTIL.countRows(table1);
    TEST_UTIL.deleteTable(table1);
    // Restore from set into the same table, overwriting it if it exists ("-o")
    args = new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-o" };
    // Run restore
    ret = ToolRunner.run(conf1, new RestoreDriver(), args);
    assertTrue(ret == 0);
    Admin hba = TEST_UTIL.getAdmin();
    assertTrue(hba.tableExists(table1));
    // Verify the restored row count matches the pre-backup count
    assertEquals(count, TEST_UTIL.countRows(table1));
    LOG.info("restore into same table is complete");
    hba.close();
  }
}
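For reference, the backup-set API exercised by this test can also be driven directly, outside the BackupDriver/RestoreDriver tools. A minimal sketch, where the set name "demo_set" and the table "default:usertable" are hypothetical placeholders, and which assumes BackupSystemTable.deleteBackupSet(String) for removing the set:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BackupSetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        BackupSystemTable table = new BackupSystemTable(conn)) {
      // "demo_set" and "default:usertable" are placeholders for this sketch
      table.addToBackupSet("demo_set", new String[] { "default:usertable" });
      List<TableName> members = table.describeBackupSet("demo_set");
      System.out.println("members of demo_set: " + members);
      // Drop the whole set when done
      table.deleteBackupSet("demo_set");
    }
  }
}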
use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in project hbase by apache.
the class BackupObserver method preCommitStoreFile.
@Override
public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
    final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
  Configuration cfg = ctx.getEnvironment().getConfiguration();
  if (pairs == null || pairs.isEmpty() || !BackupManager.isBackupEnabled(cfg)) {
    LOG.debug("Skipping recording bulk load in preCommitStoreFile: "
        + "backup is disabled or there are no files to record");
    return;
  }
  try (Connection connection = ConnectionFactory.createConnection(cfg);
      BackupSystemTable tbl = new BackupSystemTable(connection)) {
    List<TableName> fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
    RegionInfo info = ctx.getEnvironment().getRegionInfo();
    TableName tableName = info.getTable();
    if (!fullyBackedUpTables.contains(tableName)) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(tableName + " has not gone through a full backup");
      }
      return;
    }
    tbl.writeFilesForBulkLoadPreCommit(tableName, info.getEncodedNameAsBytes(), family, pairs);
  }
}
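BackupObserver only records bulk loads when the backup feature is switched on, so the observer has to be registered and backup enabled in the cluster configuration. A minimal wiring sketch in Java, using the standard keys hbase.backup.enable (the switch checked by BackupManager.isBackupEnabled above) and hbase.coprocessor.region.classes:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BackupObserverWiringSketch {
  public static Configuration backupEnabledConf() {
    Configuration conf = HBaseConfiguration.create();
    // Switch checked by BackupManager.isBackupEnabled(cfg)
    conf.setBoolean("hbase.backup.enable", true);
    // Register the observer on every region; a real deployment sets this in hbase-site.xml
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.backup.BackupObserver");
    return conf;
  }
}

A complete backup deployment needs more than these two keys (WAL and procedure-manager settings are also required per the HBase backup documentation); this sketch shows only the pieces the observer itself depends on.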
use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in project hbase by apache.
the class MapReduceBackupMergeJob method getTableNamesInBackupImages.
protected TableName[] getTableNamesInBackupImages(String[] backupIds) throws IOException {
  Set<TableName> allSet = new HashSet<>();
  try (Connection conn = ConnectionFactory.createConnection(conf);
      BackupSystemTable table = new BackupSystemTable(conn)) {
    for (String backupId : backupIds) {
      BackupInfo bInfo = table.readBackupInfo(backupId);
      allSet.addAll(bInfo.getTableNames());
    }
  }
  TableName[] ret = new TableName[allSet.size()];
  return allSet.toArray(ret);
}
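Note that the loop above dereferences bInfo without checking it. A defensive variant, under the assumption that readBackupInfo returns null for an unknown backup id, might look like this sketch:

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.impl.BackupSystemTable;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BackupImageTablesSketch {
  // Defensive variant of getTableNamesInBackupImages; assumes readBackupInfo
  // returns null when the backup id is unknown.
  static TableName[] tablesInBackupImages(Configuration conf, String[] backupIds)
      throws IOException {
    Set<TableName> all = new HashSet<>();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        BackupSystemTable table = new BackupSystemTable(conn)) {
      for (String backupId : backupIds) {
        BackupInfo info = table.readBackupInfo(backupId);
        if (info == null) {
          throw new IOException("No backup info found for backup id " + backupId);
        }
        all.addAll(info.getTableNames());
      }
    }
    return all.toArray(new TableName[0]);
  }
}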
use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in project hbase by apache.
the class BackupHFileCleaner method loadHFileRefs.
private Set<String> loadHFileRefs(List<TableName> tableList) throws IOException {
  if (connection == null) {
    connection = ConnectionFactory.createConnection(conf);
  }
  try (BackupSystemTable tbl = new BackupSystemTable(connection)) {
    Map<byte[], List<Path>>[] res = tbl.readBulkLoadedFiles(null, tableList);
    secondPrevReadFromBackupTbl = prevReadFromBackupTbl;
    prevReadFromBackupTbl = EnvironmentEdgeManager.currentTime();
    return getFilenameFromBulkLoad(res);
  }
}
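getFilenameFromBulkLoad is a private helper of the cleaner that is not shown here. Based on how loadHFileRefs uses its result, a plausible sketch (not the verbatim implementation) flattens the per-family path lists into bare HFile names:

import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.fs.Path;

// Sketch only: flattens the array of family-to-paths maps returned by
// BackupSystemTable.readBulkLoadedFiles into a set of HFile names.
static Set<String> getFilenameFromBulkLoad(Map<byte[], List<Path>>[] maps) {
  Set<String> filenames = new HashSet<>();
  for (Map<byte[], List<Path>> map : maps) {
    if (map == null) {
      continue;
    }
    for (List<Path> paths : map.values()) {
      for (Path p : paths) {
        // Only the file name is kept; getDeletableFiles compares names, not full paths
        filenames.add(p.getName());
      }
    }
  }
  return filenames;
}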
use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in project hbase by apache.
the class BackupHFileCleaner method getDeletableFiles.
@Override
public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
  if (conf == null) {
    return files;
  }
  // Refresh the list of fully backed-up tables so their bulk-loaded files can be filtered below
  if (checkForFullyBackedUpTables) {
    if (connection == null) {
      return files;
    }
    try (BackupSystemTable tbl = new BackupSystemTable(connection)) {
      fullyBackedUpTables = tbl.getTablesForBackupType(BackupType.FULL);
    } catch (IOException ioe) {
      LOG.error("Failed to get tables which have been fully backed up, skipping checking", ioe);
      return Collections.emptyList();
    }
    Collections.sort(fullyBackedUpTables);
  }
  final Set<String> hfileRefs;
  try {
    hfileRefs = loadHFileRefs(fullyBackedUpTables);
  } catch (IOException ioe) {
    LOG.error("Failed to read hfile references, skipping checking deletable files", ioe);
    return Collections.emptyList();
  }
  Iterable<FileStatus> deletables = Iterables.filter(files, file -> {
    // If the file is recent, be conservative and wait for one more scan of the backup:system table
    if (file.getModificationTime() > secondPrevReadFromBackupTbl) {
      return false;
    }
    String hfile = file.getPath().getName();
    boolean foundHFileRef = hfileRefs.contains(hfile);
    return !foundHFileRef;
  });
  return deletables;
}
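For this cleaner to be consulted at all, it must sit in the master's HFile-cleaner chain. A configuration sketch using the standard hbase.master.hfilecleaner.plugins key:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BackupCleanerWiringSketch {
  public static Configuration withBackupHFileCleaner() {
    Configuration conf = HBaseConfiguration.create();
    // Master-side HFile cleaner chain; BackupHFileCleaner vetoes deletion of
    // bulk-loaded HFiles still referenced by the backup:system table.
    conf.set("hbase.master.hfilecleaner.plugins",
        "org.apache.hadoop.hbase.backup.BackupHFileCleaner");
    return conf;
  }
}

In practice the backup cleaner is appended to the existing comma-separated plugin list rather than replacing it, so that default cleaners such as the TTL cleaner keep running.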