Search in sources:

Example 1 with BackupAdminImpl

Use of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in the Apache HBase project.

From class TestIncrementalBackupDeleteTable, method TestIncBackupDeleteTable:

// implement all test cases in 1 test since incremental backup/restore has dependencies
@Test
public void TestIncBackupDeleteTable() throws Exception {
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    HBaseAdmin admin = null;
    Connection conn = ConnectionFactory.createConnection(conf1);
    admin = (HBaseAdmin) conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdFull));
    // #2 - insert some data to table table1
    HTable t1 = (HTable) conn.getTable(table1);
    Put p1;
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
        p1 = new Put(Bytes.toBytes("row-t1" + i));
        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t1.put(p1);
    }
    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH * 2);
    t1.close();
    // Delete table table2
    admin.disableTable(table2);
    admin.deleteTable(table2);
    // #3 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple));
    // #4 - restore full backup for all tables, without overwrite
    TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
    TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull, tablesMapFull, false));
    // #5.1 - check tables for full restore
    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
    assertTrue(hAdmin.tableExists(table1_restore));
    assertTrue(hAdmin.tableExists(table2_restore));
    // #5.2 - checking row count of tables for full restore
    HTable hTable = (HTable) conn.getTable(table1_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
    hTable.close();
    hTable = (HTable) conn.getTable(table2_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
    hTable.close();
    // #6 - restore incremental backup for table1
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
    hTable = (HTable) conn.getTable(table1_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2);
    hTable.close();
    admin.close();
    conn.close();
}
Also used : TableName(org.apache.hadoop.hbase.TableName) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) Connection(org.apache.hadoop.hbase.client.Connection) HTable(org.apache.hadoop.hbase.client.HTable) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)

Example 2 with BackupAdminImpl

Use of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in the Apache HBase project.

From class TestBackupBase, method backupTables:

protected String backupTables(BackupType type, List<TableName> tables, String path) throws IOException {
    Connection conn = null;
    BackupAdmin badmin = null;
    String backupId;
    try {
        conn = ConnectionFactory.createConnection(conf1);
        badmin = new BackupAdminImpl(conn);
        BackupRequest request = createBackupRequest(type, tables, path);
        backupId = badmin.backupTables(request);
    } finally {
        if (badmin != null) {
            badmin.close();
        }
        if (conn != null) {
            conn.close();
        }
    }
    return backupId;
}
Also used : BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) Connection(org.apache.hadoop.hbase.client.Connection)

Example 3 with BackupAdminImpl

Use of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in the Apache HBase project.

From class TestIncrementalBackupDeleteTable, method testIncBackupDeleteTable:

// implement all test cases in 1 test since incremental backup/restore has dependencies
@Test
public void testIncBackupDeleteTable() throws Exception {
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    Connection conn = ConnectionFactory.createConnection(conf1);
    Admin admin = conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdFull));
    // #2 - insert some data to table table1
    Table t1 = conn.getTable(table1);
    Put p1;
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
        p1 = new Put(Bytes.toBytes("row-t1" + i));
        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t1.put(p1);
    }
    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH * 2);
    t1.close();
    // Delete table table2
    admin.disableTable(table2);
    admin.deleteTable(table2);
    // #3 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple));
    // #4 - restore full backup for all tables, without overwrite
    TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
    TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull, tablesMapFull, false));
    // #5.1 - check tables for full restore
    Admin hAdmin = TEST_UTIL.getAdmin();
    assertTrue(hAdmin.tableExists(table1_restore));
    assertTrue(hAdmin.tableExists(table2_restore));
    // #5.2 - checking row count of tables for full restore
    Table hTable = conn.getTable(table1_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
    hTable.close();
    hTable = conn.getTable(table2_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
    hTable.close();
    // #6 - restore incremental backup for table1
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
    hTable = conn.getTable(table1_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2);
    hTable.close();
    admin.close();
    conn.close();
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)

Example 4 with BackupAdminImpl

Use of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in the Apache HBase project.

From class TestIncrementalBackupWithFailures, method testIncBackupRestore:

// implement all test cases in 1 test since incremental backup/restore has dependencies
@Test
public void testIncBackupRestore() throws Exception {
    int ADD_ROWS = 99;
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    final byte[] fam3Name = Bytes.toBytes("f3");
    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc).setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name)).build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
    Connection conn = ConnectionFactory.createConnection(conf1);
    int NB_ROWS_FAM3 = 6;
    insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
    Admin admin = conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdFull));
    // #2 - insert some data to table
    Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
    LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
    t1.close();
    LOG.debug("written " + ADD_ROWS + " rows to " + table1);
    Table t2 = conn.getTable(table2);
    Put p2;
    for (int i = 0; i < 5; i++) {
        p2 = new Put(Bytes.toBytes("row-t2" + i));
        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t2.put(p2);
    }
    Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + 5);
    t2.close();
    LOG.debug("written " + 5 + " rows to " + table2);
    // #3 - incremental backup for multiple tables
    incrementalBackupWithFailures();
    admin.close();
    conn.close();
}
Also used : TableName(org.apache.hadoop.hbase.TableName) BackupSystemTable(org.apache.hadoop.hbase.backup.impl.BackupSystemTable) Table(org.apache.hadoop.hbase.client.Table) BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)

Example 5 with BackupAdminImpl

Use of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in the Apache HBase project.

From class IntegrationTestBackupRestore, method runTest:

private void runTest() throws IOException {
    try (Connection conn = util.getConnection();
        Admin admin = conn.getAdmin();
        BackupAdmin client = new BackupAdminImpl(conn)) {
        // #0- insert some data to table TABLE_NAME1, TABLE_NAME2
        loadData(TABLE_NAME1, rowsInBatch);
        loadData(TABLE_NAME2, rowsInBatch);
        // #1 - create full backup for all tables
        LOG.info("create full backup image for all tables");
        List<TableName> tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
        BackupRequest.Builder builder = new BackupRequest.Builder();
        BackupRequest request = builder.withBackupType(BackupType.FULL).withTableList(tables).withTargetRootDir(BACKUP_ROOT_DIR).build();
        String backupIdFull = client.backupTables(request);
        assertTrue(checkSucceeded(backupIdFull));
        // #2 - insert some data to table
        loadData(TABLE_NAME1, rowsInBatch);
        loadData(TABLE_NAME2, rowsInBatch);
        HTable t1 = (HTable) conn.getTable(TABLE_NAME1);
        Assert.assertEquals(util.countRows(t1), rowsInBatch * 2);
        t1.close();
        HTable t2 = (HTable) conn.getTable(TABLE_NAME2);
        Assert.assertEquals(util.countRows(t2), rowsInBatch * 2);
        t2.close();
        // #3 - incremental backup for tables
        tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
        builder = new BackupRequest.Builder();
        request = builder.withBackupType(BackupType.INCREMENTAL).withTableList(tables).withTargetRootDir(BACKUP_ROOT_DIR).build();
        String backupIdIncMultiple = client.backupTables(request);
        assertTrue(checkSucceeded(backupIdIncMultiple));
        // #4 - restore full backup for all tables, without overwrite
        TableName[] tablesRestoreFull = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
        client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull, null, true));
        // #5.1 - check tables for full restore
        assertTrue(admin.tableExists(TABLE_NAME1));
        assertTrue(admin.tableExists(TABLE_NAME2));
        // #5.2 - checking row count of tables for full restore
        HTable hTable = (HTable) conn.getTable(TABLE_NAME1);
        Assert.assertEquals(util.countRows(hTable), rowsInBatch);
        hTable.close();
        hTable = (HTable) conn.getTable(TABLE_NAME2);
        Assert.assertEquals(util.countRows(hTable), rowsInBatch);
        hTable.close();
        // #6 - restore incremental backup for multiple tables, with overwrite
        TableName[] tablesRestoreIncMultiple = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
        client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false, tablesRestoreIncMultiple, null, true));
        hTable = (HTable) conn.getTable(TABLE_NAME1);
        Assert.assertEquals(util.countRows(hTable), rowsInBatch * 2);
        hTable.close();
        hTable = (HTable) conn.getTable(TABLE_NAME2);
        Assert.assertEquals(util.countRows(hTable), rowsInBatch * 2);
        hTable.close();
    }
}
Also used : BackupRequest(org.apache.hadoop.hbase.backup.BackupRequest) BackupAdmin(org.apache.hadoop.hbase.backup.BackupAdmin) BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) Connection(org.apache.hadoop.hbase.client.Connection) BackupAdmin(org.apache.hadoop.hbase.backup.BackupAdmin) Admin(org.apache.hadoop.hbase.client.Admin) HTable(org.apache.hadoop.hbase.client.HTable)

Aggregations

BackupAdminImpl (org.apache.hadoop.hbase.backup.impl.BackupAdminImpl)12 Connection (org.apache.hadoop.hbase.client.Connection)12 TableName (org.apache.hadoop.hbase.TableName)9 Admin (org.apache.hadoop.hbase.client.Admin)9 Table (org.apache.hadoop.hbase.client.Table)8 Test (org.junit.Test)8 Put (org.apache.hadoop.hbase.client.Put)6 BackupSystemTable (org.apache.hadoop.hbase.backup.impl.BackupSystemTable)4 IOException (java.io.IOException)2 BackupAdmin (org.apache.hadoop.hbase.backup.BackupAdmin)2 BackupRequest (org.apache.hadoop.hbase.backup.BackupRequest)2 HTable (org.apache.hadoop.hbase.client.HTable)2 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)2 ArrayList (java.util.ArrayList)1 HashSet (java.util.HashSet)1 List (java.util.List)1 Map (java.util.Map)1 Configuration (org.apache.hadoop.conf.Configuration)1 SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster)1 ColumnFamilyDescriptorBuilder (org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder)1