Example usage of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in the Apache HBase project,
from the method TestIncBackupDeleteTable of class TestIncrementalBackupDeleteTable.
// implement all test cases in 1 test since incremental backup/restore has dependencies
@Test
public void TestIncBackupDeleteTable() throws Exception {
  // #1 - create full backup for all tables
  LOG.info("create full backup image for all tables");
  List<TableName> tables = Lists.newArrayList(table1, table2);
  // try-with-resources: the original leaked the connection, admin and tables
  // whenever an assertion failed before the explicit close() calls at the end
  try (Connection conn = ConnectionFactory.createConnection(conf1);
      HBaseAdmin admin = (HBaseAdmin) conn.getAdmin()) {
    BackupAdminImpl client = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdFull));
    // #2 - insert some data to table table1
    try (HTable t1 = (HTable) conn.getTable(table1)) {
      Put p1;
      for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
        p1 = new Put(Bytes.toBytes("row-t1" + i));
        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t1.put(p1);
      }
      // assertEquals takes (expected, actual) — the original had the order
      // reversed, which yields misleading failure messages
      Assert.assertEquals(NB_ROWS_IN_BATCH * 2, TEST_UTIL.countRows(t1));
    }
    // Delete table table2 so the incremental backup only covers table1
    admin.disableTable(table2);
    admin.deleteTable(table2);
    // #3 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple));
    // #4 - restore full backup for all tables, without overwrite
    TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
    TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
      tablesRestoreFull, tablesMapFull, false));
    // #5.1 - check tables for full restore
    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
    assertTrue(hAdmin.tableExists(table1_restore));
    assertTrue(hAdmin.tableExists(table2_restore));
    // #5.2 - checking row count of tables for full restore
    try (HTable hTable = (HTable) conn.getTable(table1_restore)) {
      Assert.assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(hTable));
    }
    try (HTable hTable = (HTable) conn.getTable(table2_restore)) {
      Assert.assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(hTable));
    }
    // #6 - restore incremental backup for table1, with overwrite
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false,
      tablesRestoreIncMultiple, tablesMapIncMultiple, true));
    try (HTable hTable = (HTable) conn.getTable(table1_restore)) {
      Assert.assertEquals(NB_ROWS_IN_BATCH * 2, TEST_UTIL.countRows(hTable));
    }
  }
}
Example usage of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in the Apache HBase project,
from the method backupTables of class TestBackupBase.
/**
 * Runs a backup of the given type for the given tables and returns the backup id.
 *
 * @param type   full or incremental backup
 * @param tables tables to include in the backup image
 * @param path   backup destination root directory
 * @return the id of the completed backup session
 * @throws IOException if the backup fails or the cluster cannot be reached
 */
protected String backupTables(BackupType type, List<TableName> tables, String path)
    throws IOException {
  // try-with-resources replaces the manual null-checked finally block; resources
  // are closed in reverse declaration order (admin before connection), matching
  // the original cleanup order
  try (Connection conn = ConnectionFactory.createConnection(conf1);
      BackupAdmin badmin = new BackupAdminImpl(conn)) {
    BackupRequest request = createBackupRequest(type, tables, path);
    return badmin.backupTables(request);
  }
}
Example usage of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in the Apache HBase project,
from the method testIncBackupDeleteTable of class TestIncrementalBackupDeleteTable.
// implement all test cases in 1 test since incremental backup/restore has dependencies
@Test
public void testIncBackupDeleteTable() throws Exception {
  // #1 - create full backup for all tables
  LOG.info("create full backup image for all tables");
  List<TableName> tables = Lists.newArrayList(table1, table2);
  // try-with-resources: the original leaked the connection, admin and tables
  // whenever an assertion failed before the explicit close() calls at the end
  try (Connection conn = ConnectionFactory.createConnection(conf1);
      Admin admin = conn.getAdmin()) {
    BackupAdminImpl client = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdFull));
    // #2 - insert some data to table table1
    try (Table t1 = conn.getTable(table1)) {
      Put p1;
      for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
        p1 = new Put(Bytes.toBytes("row-t1" + i));
        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t1.put(p1);
      }
      // assertEquals takes (expected, actual) — the original had the order
      // reversed, which yields misleading failure messages
      Assert.assertEquals(NB_ROWS_IN_BATCH * 2, TEST_UTIL.countRows(t1));
    }
    // Delete table table2 so the incremental backup only covers table1
    admin.disableTable(table2);
    admin.deleteTable(table2);
    // #3 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple));
    // #4 - restore full backup for all tables, without overwrite
    TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
    TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
      tablesRestoreFull, tablesMapFull, false));
    // #5.1 - check tables for full restore
    Admin hAdmin = TEST_UTIL.getAdmin();
    assertTrue(hAdmin.tableExists(table1_restore));
    assertTrue(hAdmin.tableExists(table2_restore));
    // #5.2 - checking row count of tables for full restore
    try (Table hTable = conn.getTable(table1_restore)) {
      Assert.assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(hTable));
    }
    try (Table hTable = conn.getTable(table2_restore)) {
      Assert.assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(hTable));
    }
    // #6 - restore incremental backup for table1, with overwrite
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false,
      tablesRestoreIncMultiple, tablesMapIncMultiple, true));
    try (Table hTable = conn.getTable(table1_restore)) {
      Assert.assertEquals(NB_ROWS_IN_BATCH * 2, TEST_UTIL.countRows(hTable));
    }
  }
}
Example usage of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in the Apache HBase project,
from the method testIncBackupRestore of class TestIncrementalBackupWithFailures.
// implement all test cases in 1 test since incremental backup/restore has dependencies
@Test
public void testIncBackupRestore() throws Exception {
  final int ADD_ROWS = 99;
  // #1 - create full backup for all tables
  LOG.info("create full backup image for all tables");
  List<TableName> tables = Lists.newArrayList(table1, table2);
  // add a third column family to table1 before taking the full backup
  final byte[] fam3Name = Bytes.toBytes("f3");
  TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name)).build();
  TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
  // try-with-resources: the original leaked the connection when an assertion
  // failed; the Admin it also opened was never used and has been removed
  try (Connection conn = ConnectionFactory.createConnection(conf1)) {
    int NB_ROWS_FAM3 = 6;
    insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
    BackupAdminImpl client = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdFull));
    // #2 - insert some data to table
    try (Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS)) {
      LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
      // assertEquals takes (expected, actual) — the original had the order reversed
      Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3, TEST_UTIL.countRows(t1));
    }
    LOG.debug("written " + ADD_ROWS + " rows to " + table1);
    try (Table t2 = conn.getTable(table2)) {
      Put p2;
      for (int i = 0; i < 5; i++) {
        p2 = new Put(Bytes.toBytes("row-t2" + i));
        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t2.put(p2);
      }
      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, TEST_UTIL.countRows(t2));
    }
    LOG.debug("written " + 5 + " rows to " + table2);
    // #3 - incremental backup for multiple tables, exercising the failure paths
    incrementalBackupWithFailures();
  }
}
Example usage of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in the Apache HBase project,
from the method runTest of class IntegrationTestBackupRestore.
/**
 * End-to-end backup/restore scenario: full backup of two tables, extra writes,
 * incremental backup, then full and incremental restores with row-count checks.
 *
 * @throws IOException if any backup, restore or cluster operation fails
 */
private void runTest() throws IOException {
  try (Connection conn = util.getConnection();
      Admin admin = conn.getAdmin();
      BackupAdmin client = new BackupAdminImpl(conn)) {
    // #0- insert some data to table TABLE_NAME1, TABLE_NAME2
    loadData(TABLE_NAME1, rowsInBatch);
    loadData(TABLE_NAME2, rowsInBatch);
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
    BackupRequest.Builder builder = new BackupRequest.Builder();
    BackupRequest request = builder.withBackupType(BackupType.FULL).withTableList(tables)
      .withTargetRootDir(BACKUP_ROOT_DIR).build();
    String backupIdFull = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdFull));
    // #2 - insert some data to table
    loadData(TABLE_NAME1, rowsInBatch);
    loadData(TABLE_NAME2, rowsInBatch);
    // program to the Table interface instead of the deprecated (HTable) cast,
    // and close each table even when an assertion fails
    try (Table t1 = conn.getTable(TABLE_NAME1)) {
      // assertEquals takes (expected, actual) — the original had the order reversed
      Assert.assertEquals(rowsInBatch * 2, util.countRows(t1));
    }
    try (Table t2 = conn.getTable(TABLE_NAME2)) {
      Assert.assertEquals(rowsInBatch * 2, util.countRows(t2));
    }
    // #3 - incremental backup for tables
    tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
    builder = new BackupRequest.Builder();
    request = builder.withBackupType(BackupType.INCREMENTAL).withTableList(tables)
      .withTargetRootDir(BACKUP_ROOT_DIR).build();
    String backupIdIncMultiple = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple));
    // #4 - restore full backup for all tables, without overwrite
    TableName[] tablesRestoreFull = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull,
      null, true));
    // #5.1 - check tables for full restore
    assertTrue(admin.tableExists(TABLE_NAME1));
    assertTrue(admin.tableExists(TABLE_NAME2));
    // #5.2 - checking row count of tables for full restore
    try (Table hTable = conn.getTable(TABLE_NAME1)) {
      Assert.assertEquals(rowsInBatch, util.countRows(hTable));
    }
    try (Table hTable = conn.getTable(TABLE_NAME2)) {
      Assert.assertEquals(rowsInBatch, util.countRows(hTable));
    }
    // #6 - restore incremental backup for multiple tables, with overwrite
    TableName[] tablesRestoreIncMultiple = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
    client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false,
      tablesRestoreIncMultiple, null, true));
    try (Table hTable = conn.getTable(TABLE_NAME1)) {
      Assert.assertEquals(rowsInBatch * 2, util.countRows(hTable));
    }
    try (Table hTable = conn.getTable(TABLE_NAME2)) {
      Assert.assertEquals(rowsInBatch * 2, util.countRows(hTable));
    }
  }
}
Aggregations