Usage example of org.apache.hadoop.hbase.backup.impl.BackupSystemTable from the Apache HBase project.
From class TestBackupSystemTable, method testBackupSetAddNotExists.
/**
 * Verifies that adding tables to a not-yet-existing backup set implicitly creates the set,
 * and that describing the set returns exactly the tables that were added, in order.
 */
@Test
public void testBackupSetAddNotExists() throws IOException {
  try (BackupSystemTable table = new BackupSystemTable(conn)) {
    final String setName = "name";
    final String[] expectedTables = { "table1", "table2", "table3" };

    table.addToBackupSet(setName, expectedTables);

    List<TableName> described = table.describeBackupSet(setName);
    assertTrue(described != null);
    assertTrue(described.size() == expectedTables.length);
    int idx = 0;
    for (TableName tn : described) {
      assertTrue(tn.getNameAsString().equals(expectedTables[idx++]));
    }
    cleanBackupTable();
  }
}
Usage example of org.apache.hadoop.hbase.backup.impl.BackupSystemTable from the Apache HBase project.
From class BackupLogCleaner, method getDeletableFiles.
/**
 * Returns the subset of the given WAL files that are safe to delete, i.e. files that the
 * backup system table records as no longer needed by any backup session.
 * <p>
 * If backup is disabled, there are no recorded sessions, or the system table cannot be
 * consulted because it does not exist yet, all files are returned unfiltered (nothing is
 * held back). If reading the system table fails with an IOException, an empty list is
 * returned so that no file is deleted until the table is reachable again.
 *
 * @param files candidate WAL files proposed for deletion by the cleaner chore
 * @return the files that may be deleted
 */
@Override
public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
  // No configuration or backup disabled: we cannot filter the files.
  if (this.getConf() == null || !BackupManager.isBackupEnabled(getConf())) {
    LOG.warn("Backup is not enabled. Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY + " setting");
    return files;
  }
  List<FileStatus> list = new ArrayList<FileStatus>();
  try (final BackupSystemTable table = new BackupSystemTable(conn)) {
    // If we do not have recorded backup sessions, nothing needs to be retained.
    try {
      if (!table.hasBackupSessions()) {
        LOG.trace("BackupLogCleaner has no backup sessions");
        return files;
      }
    } catch (TableNotFoundException tnfe) {
      LOG.warn("Backup system table is not available: " + tnfe.getMessage());
      return files;
    }
    for (FileStatus file : files) {
      String wal = file.getPath().toString();
      boolean logInSystemTable = table.isWALFileDeletable(wal);
      // BUG FIX: list.add(file) was previously nested inside LOG.isDebugEnabled(), so
      // deletable WALs were only collected when debug logging happened to be enabled.
      // Collect unconditionally; keep the debug logging as a side channel only.
      if (logInSystemTable) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Found log file in backup system table, deleting: " + wal);
        }
        list.add(file);
      } else if (LOG.isDebugEnabled()) {
        LOG.debug("Didn't find this log in backup system table, keeping: " + wal);
      }
    }
    return list;
  } catch (IOException e) {
    LOG.error("Failed to get backup system table, therefore will keep all files", e);
    // Fail safe: delete nothing until the system table can be read again.
    return new ArrayList<FileStatus>();
  }
}
Usage example of org.apache.hadoop.hbase.backup.impl.BackupSystemTable from the Apache HBase project.
From class TestBackupDescribe, method testBackupDescribeCommand.
/**
 * Runs a full backup of a single table, then exercises the command-line
 * {@code describe <backupId>} path and checks that the captured console output mentions
 * the backup id, the COMPLETE state, and the short description stored in the backup
 * system table.
 */
@Test
public void testBackupDescribeCommand() throws Exception {
  LOG.info("test backup describe on a single table with data: command-line");
  List<TableName> tableList = Lists.newArrayList(table1);
  String backupId = fullTableBackup(tableList);
  LOG.info("backup complete");
  assertTrue(checkSucceeded(backupId));

  BackupInfo info = getBackupAdmin().getBackupInfo(backupId);
  assertTrue(info.getState() == BackupState.COMPLETE);

  // BUG FIX: System.out was replaced but never restored, leaking the redirected stream
  // into every subsequently running test. Capture stdout and restore it in a finally block.
  PrintStream originalOut = System.out;
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  System.setOut(new PrintStream(baos));
  String response;
  try {
    // Run the describe command through the normal driver entry point.
    String[] args = new String[] { "describe", backupId };
    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
    assertTrue(ret == 0);
    response = baos.toString();
  } finally {
    System.setOut(originalOut);
  }
  assertTrue(response.indexOf(backupId) > 0);
  assertTrue(response.indexOf("COMPLETE") > 0);

  // BUG FIX: the table was closed manually, so it leaked if readBackupInfo or an
  // assertion threw. try-with-resources guarantees the close.
  try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
    BackupInfo status = table.readBackupInfo(backupId);
    String desc = status.getShortDescription();
    assertTrue(response.indexOf(desc) >= 0);
  }
}
Usage example of org.apache.hadoop.hbase.backup.impl.BackupSystemTable from the Apache HBase project.
From class TestBackupSystemTable, method testIncrementalBackupTableSet.
/**
 * Verifies that incremental backup table sets accumulate across additions for the same
 * backup root: adding {t1,t2,t3} then {t3,t4,t5} must yield the 5-element union, and the
 * stored set must iterate in the same (sorted) order as the expected TreeSet.
 */
@Test
public void testIncrementalBackupTableSet() throws IOException {
  TreeSet<TableName> tables1 = new TreeSet<>();
  tables1.add(TableName.valueOf("t1"));
  tables1.add(TableName.valueOf("t2"));
  tables1.add(TableName.valueOf("t3"));
  TreeSet<TableName> tables2 = new TreeSet<>();
  tables2.add(TableName.valueOf("t3"));
  tables2.add(TableName.valueOf("t4"));
  tables2.add(TableName.valueOf("t5"));

  // BUG FIX: `table` was used on the line BEFORE its declaration (use-before-declare,
  // which does not compile). Declare it first — and in try-with-resources so it is
  // closed, which the original never did.
  try (BackupSystemTable table = new BackupSystemTable(conn)) {
    table.addIncrementalBackupTableSet(tables1, "root");
    TreeSet<TableName> res1 = (TreeSet<TableName>) table.getIncrementalBackupTableSet("root");
    assertTrue(tables1.size() == res1.size());
    Iterator<TableName> desc1 = tables1.descendingIterator();
    Iterator<TableName> desc2 = res1.descendingIterator();
    while (desc1.hasNext()) {
      assertEquals(desc1.next(), desc2.next());
    }

    table.addIncrementalBackupTableSet(tables2, "root");
    TreeSet<TableName> res2 = (TreeSet<TableName>) table.getIncrementalBackupTableSet("root");
    // t3 appears in both input sets, hence the union has one fewer element than the sum.
    assertTrue((tables2.size() + tables1.size() - 1) == res2.size());
    tables1.addAll(tables2);
    desc1 = tables1.descendingIterator();
    desc2 = res2.descendingIterator();
    while (desc1.hasNext()) {
      assertEquals(desc1.next(), desc2.next());
    }
    cleanBackupTable();
  }
}
Usage example of org.apache.hadoop.hbase.backup.impl.BackupSystemTable from the Apache HBase project.
From class TestBackupSystemTable, method testBackupSetDelete.
/**
 * Verifies that a backup set can be deleted: after creating a set and deleting it,
 * describing the set yields {@code null}.
 */
@Test
public void testBackupSetDelete() throws IOException {
  try (BackupSystemTable table = new BackupSystemTable(conn)) {
    final String setName = "name";
    table.addToBackupSet(setName, new String[] { "table1", "table2", "table3", "table4" });
    table.deleteBackupSet(setName);
    // The deleted set must no longer be describable.
    assertTrue(table.describeBackupSet(setName) == null);
    cleanBackupTable();
  }
}
Aggregations