Example usage of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in the Apache HBase project, taken from the testFullBackupMultipleCommand method of the TestFullBackup class.
/**
 * Verifies that a full backup of multiple tables can be created through the
 * command-line {@code BackupDriver} and that exactly one new entry is added to
 * the backup history, with every recorded backup in a succeeded state.
 */
@Test
public void testFullBackupMultipleCommand() throws Exception {
  LOG.info("test full backup on a multiple tables with data: command-line");
  try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
    int before = table.getBackupHistory().size();
    String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-t",
      table1.getNameAsString() + "," + table2.getNameAsString() };
    // Run backup via the command-line entry point.
    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
    assertEquals(0, ret);
    // Fetch the history once and reuse it for both the size check and the
    // per-backup status check (the original issued a second, redundant fetch).
    List<BackupInfo> backups = table.getBackupHistory();
    assertEquals(before + 1, backups.size());
    for (BackupInfo data : backups) {
      assertTrue(checkSucceeded(data.getBackupId()));
    }
  }
  LOG.info("backup complete");
}
Example usage of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in the Apache HBase project, taken from the testFullRestoreSetToOtherTable method of the TestFullBackupSetRestoreSet class.
/**
 * Verifies the backup-set workflow end to end: a named set is created with one
 * table, a full backup of the set is taken via the command line, and the backup
 * is then restored from the set into a different table ({@code table1_restore}),
 * whose row count must match the source table.
 */
@Test
public void testFullRestoreSetToOtherTable() throws Exception {
  LOG.info("Test full restore set");
  // Create the backup set and verify its contents.
  try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
    String name = "name";
    table.addToBackupSet(name, new String[] { table1.getNameAsString() });
    List<TableName> names = table.describeBackupSet(name);
    assertNotNull(names);
    // assertEquals gives the actual size/value on failure, unlike assertTrue(x == y).
    assertEquals(1, names.size());
    assertEquals(table1, names.get(0));
    String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", name };
    // Run the full backup of the set.
    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
    assertEquals(0, ret);
    List<BackupInfo> backups = table.getBackupHistory();
    assertEquals(1, backups.size());
    String backupId = backups.get(0).getBackupId();
    assertTrue(checkSucceeded(backupId));
    LOG.info("backup complete");
    // Restore from the set into another table ("-m" maps to the target,
    // "-o" allows overwrite).
    args = new String[] { BACKUP_ROOT_DIR, backupId, "-s", name, "-m",
      table1_restore.getNameAsString(), "-o" };
    ret = ToolRunner.run(conf1, new RestoreDriver(), args);
    assertEquals(0, ret);
    Admin hba = TEST_UTIL.getAdmin();
    assertTrue(hba.tableExists(table1_restore));
    // Verify the restored table holds the same number of rows as the source.
    assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore));
    TEST_UTIL.deleteTable(table1_restore);
    LOG.info("restore into other table is complete");
    hba.close();
  }
}
Example usage of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in the Apache HBase project, taken from the testBackupDescribeCommand method of the TestBackupDescribe class.
/**
 * Verifies the command-line "describe" command: after a full backup of a single
 * table completes, {@code describe <backupId>} must print the backup id, its
 * COMPLETE state, and the backup's short description to stdout.
 */
@Test
public void testBackupDescribeCommand() throws Exception {
  LOG.info("test backup describe on a single table with data: command-line");
  List<TableName> tableList = Lists.newArrayList(table1);
  String backupId = fullTableBackup(tableList);
  LOG.info("backup complete");
  assertTrue(checkSucceeded(backupId));
  BackupInfo info = getBackupAdmin().getBackupInfo(backupId);
  assertTrue(info.getState() == BackupState.COMPLETE);
  // Capture stdout so the describe command's output can be inspected.
  PrintStream oldOut = System.out;
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  System.setOut(new PrintStream(baos));
  String response;
  try {
    String[] args = new String[] { "describe", backupId };
    // Run the describe command.
    int ret = ToolRunner.run(conf1, new BackupDriver(), args);
    assertEquals(0, ret);
    response = baos.toString();
  } finally {
    // Restore stdout even if the command or an assertion fails. The original
    // left System.out permanently redirected, silencing every later test.
    System.setOut(oldOut);
  }
  assertTrue(response.indexOf(backupId) > 0);
  assertTrue(response.indexOf("COMPLETE") > 0);
  // try-with-resources: the original leaked the table if readBackupInfo threw.
  try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
    BackupInfo status = table.readBackupInfo(backupId);
    String desc = status.getShortDescription();
    assertTrue(response.indexOf(desc) >= 0);
  }
}
Example usage of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in the Apache HBase project, taken from the testBackupDelete method of the TestBackupDelete class.
/**
 * Verify that full backup is created on a single table with data correctly, and
 * that deleting the backup removes the backup directory while leaving the
 * backup root directory in place.
 *
 * @throws Exception if doing the backup or an operation on the tables fails
 */
@Test
public void testBackupDelete() throws Exception {
  LOG.info("test backup delete on a single table with data");
  List<TableName> tableList = Lists.newArrayList(table1);
  String backupId = fullTableBackup(tableList);
  assertTrue(checkSucceeded(backupId));
  LOG.info("backup complete");
  String[] backupIds = new String[] { backupId };
  // try-with-resources: the original leaked the table on any failure below
  // (it only reached table.close() when every assertion passed).
  try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
    BackupInfo info = table.readBackupInfo(backupId);
    Path path = new Path(info.getBackupRootDir(), backupId);
    FileSystem fs = FileSystem.get(path.toUri(), conf1);
    assertTrue(fs.exists(path));
    int deleted = getBackupAdmin().deleteBackups(backupIds);
    // The backup's own directory is gone, but the root dir must survive.
    assertFalse(fs.exists(path));
    assertTrue(fs.exists(new Path(info.getBackupRootDir())));
    assertEquals(1, deleted);
  }
  LOG.info("delete_backup");
}
Example usage of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in the Apache HBase project, taken from the testGetDeletableFiles method of the TestBackupHFileCleaner class.
/**
 * Exercises BackupHFileCleaner: a file with no bulk-load reference in the
 * backup system table must be reported deletable; once the same file is
 * registered as bulk-loaded, the cleaner must refuse to delete it.
 */
@Test
public void testGetDeletableFiles() throws IOException {
  // Create a candidate file and confirm it exists on the filesystem.
  Path hfile = new Path(root, "testIsFileDeletableWithNoHFileRefs");
  fs.createNewFile(hfile);
  assertTrue("Test file not created!", fs.exists(hfile));

  BackupHFileCleaner cleaner = new BackupHFileCleaner();
  cleaner.setConf(conf);
  cleaner.setCheckForFullyBackedUpTables(false);

  // With no bulk-load reference recorded, the file should be deletable.
  FileStatus candidate = fs.getFileStatus(hfile);
  List<FileStatus> candidates = new ArrayList<>();
  candidates.add(candidate);
  // Query the cleaner twice, exactly as the original test did — presumably to
  // cover repeated invocation; TODO confirm whether the first call primes state.
  Iterable<FileStatus> deletable = cleaner.getDeletableFiles(candidates);
  deletable = cleaner.getDeletableFiles(candidates);
  boolean present = false;
  for (FileStatus returned : deletable) {
    if (candidate.equals(returned)) {
      present = true;
    }
  }
  assertTrue("Cleaner should allow to delete this file as there is no hfile reference for it.",
    present);

  // Register the same file as a bulk-loaded HFile in the backup system table.
  List<Path> bulkLoaded = new ArrayList<>(1);
  bulkLoaded.add(hfile);
  try (Connection conn = ConnectionFactory.createConnection(conf);
    BackupSystemTable sysTbl = new BackupSystemTable(conn)) {
    List<TableName> sourceTables = new ArrayList<>();
    sourceTables.add(tableName);
    Map<byte[], List<Path>>[] familyMaps = new Map[1];
    familyMaps[0] = new HashMap<>();
    familyMaps[0].put(Bytes.toBytes(famName), bulkLoaded);
    sysTbl.writeBulkLoadedFiles(sourceTables, familyMaps, "1");
  }

  // Now that a reference exists, the cleaner must not offer the file for deletion.
  deletable = cleaner.getDeletableFiles(candidates);
  deletable = cleaner.getDeletableFiles(candidates);
  present = false;
  for (FileStatus returned : deletable) {
    if (candidate.equals(returned)) {
      present = true;
    }
  }
  assertFalse("Cleaner should not allow to delete this file as there is a hfile reference for it.",
    present);
}
Aggregations