Search in sources :

Example 1 with BackupAdmin

Usage example of org.apache.hadoop.hbase.backup.BackupAdmin in the Apache HBase project.

The runTest method of the class IntegrationTestBackupRestore.

/**
 * Runs an end-to-end backup/restore scenario against two tables:
 * load data, take a full backup, load more data, take an incremental
 * backup, then restore each image in turn and verify table existence
 * and row counts after every restore.
 *
 * @throws IOException if a load, backup, restore, or table operation fails
 */
private void runTest() throws IOException {
    try (Connection conn = util.getConnection();
        Admin admin = conn.getAdmin();
        BackupAdmin client = new BackupAdminImpl(conn)) {
        // #0 - insert some data to table TABLE_NAME1, TABLE_NAME2
        loadData(TABLE_NAME1, rowsInBatch);
        loadData(TABLE_NAME2, rowsInBatch);
        // #1 - create full backup for all tables
        LOG.info("create full backup image for all tables");
        List<TableName> tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
        BackupRequest request = new BackupRequest.Builder()
            .withBackupType(BackupType.FULL)
            .withTableList(tables)
            .withTargetRootDir(BACKUP_ROOT_DIR)
            .build();
        String backupIdFull = client.backupTables(request);
        assertTrue(checkSucceeded(backupIdFull));
        // #2 - insert a second batch of data into both tables
        loadData(TABLE_NAME1, rowsInBatch);
        loadData(TABLE_NAME2, rowsInBatch);
        assertRowCount(conn, TABLE_NAME1, rowsInBatch * 2);
        assertRowCount(conn, TABLE_NAME2, rowsInBatch * 2);
        // #3 - incremental backup for the same tables (list contents unchanged)
        request = new BackupRequest.Builder()
            .withBackupType(BackupType.INCREMENTAL)
            .withTableList(tables)
            .withTargetRootDir(BACKUP_ROOT_DIR)
            .build();
        String backupIdIncMultiple = client.backupTables(request);
        assertTrue(checkSucceeded(backupIdIncMultiple));
        // #4 - restore full backup for all tables
        // NOTE(review): the final createRestoreRequest argument is true, which looks
        // like overwrite=true, yet the original comment said "without overwrite" —
        // confirm against createRestoreRequest's signature.
        TableName[] tablesRestoreFull = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
        client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull, null, true));
        // #5.1 - tables must exist after the full restore
        assertTrue(admin.tableExists(TABLE_NAME1));
        assertTrue(admin.tableExists(TABLE_NAME2));
        // #5.2 - full restore rolls both tables back to the first batch only
        assertRowCount(conn, TABLE_NAME1, rowsInBatch);
        assertRowCount(conn, TABLE_NAME2, rowsInBatch);
        // #6 - restore incremental backup for multiple tables, with overwrite
        TableName[] tablesRestoreIncMultiple = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
        client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false, tablesRestoreIncMultiple, null, true));
        // incremental restore brings back both batches of rows
        assertRowCount(conn, TABLE_NAME1, rowsInBatch * 2);
        assertRowCount(conn, TABLE_NAME2, rowsInBatch * 2);
    }
}

/**
 * Asserts that {@code table} contains exactly {@code expectedRows} rows.
 * Uses try-with-resources so the table handle is closed even when the
 * assertion fails (the inlined original closed it only on the success
 * path, leaking the handle on failure).
 *
 * @param table        the table whose rows are counted
 * @param expectedRows the expected row count
 * @throws IOException if the table cannot be opened or scanned
 */
private void assertRowCount(Connection conn, TableName table, int expectedRows) throws IOException {
    try (HTable hTable = (HTable) conn.getTable(table)) {
        // JUnit convention: expected value first, actual value second —
        // the original had these swapped, inverting failure messages.
        Assert.assertEquals(expectedRows, util.countRows(hTable));
    }
}
Also used : BackupRequest(org.apache.hadoop.hbase.backup.BackupRequest) BackupAdmin(org.apache.hadoop.hbase.backup.BackupAdmin) BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) Connection(org.apache.hadoop.hbase.client.Connection) BackupAdmin(org.apache.hadoop.hbase.backup.BackupAdmin) Admin(org.apache.hadoop.hbase.client.Admin) HTable(org.apache.hadoop.hbase.client.HTable)

Aggregations

BackupAdmin (org.apache.hadoop.hbase.backup.BackupAdmin)1 BackupRequest (org.apache.hadoop.hbase.backup.BackupRequest)1 BackupAdminImpl (org.apache.hadoop.hbase.backup.impl.BackupAdminImpl)1 Admin (org.apache.hadoop.hbase.client.Admin)1 Connection (org.apache.hadoop.hbase.client.Connection)1 HTable (org.apache.hadoop.hbase.client.HTable)1