
Example 6 with BackupAdminImpl

use of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in project hbase by apache.

From the class IntegrationTestBackupRestore, method runTestSingle:

private void runTestSingle(TableName table) throws IOException {
    List<String> backupIds = new ArrayList<String>();
    List<Integer> tableSizes = new ArrayList<Integer>();
    try (Connection conn = util.getConnection();
        Admin admin = conn.getAdmin();
        BackupAdmin client = new BackupAdminImpl(conn)) {
        // #0 - insert some data to table 'table'
        loadData(table, rowsInIteration);
        tableSizes.add(rowsInIteration);
        // #1 - create full backup for table first
        LOG.info("create full backup image for {}", table);
        List<TableName> tables = Lists.newArrayList(table);
        BackupRequest.Builder builder = new BackupRequest.Builder();
        BackupRequest request = builder.withBackupType(BackupType.FULL)
            .withTableList(tables)
            .withTargetRootDir(BACKUP_ROOT_DIR)
            .build();
        String backupIdFull = backup(request, client);
        assertTrue(checkSucceeded(backupIdFull));
        backupIds.add(backupIdFull);
        // Now continue with incremental backups
        int count = 1;
        while (count++ < numIterations) {
            // Load data
            loadData(table, rowsInIteration);
            tableSizes.add(rowsInIteration * count);
            // Do incremental backup
            builder = new BackupRequest.Builder();
            request = builder.withBackupType(BackupType.INCREMENTAL)
                .withTableList(tables)
                .withTargetRootDir(BACKUP_ROOT_DIR)
                .build();
            String backupId = backup(request, client);
            assertTrue(checkSucceeded(backupId));
            backupIds.add(backupId);
            // Restore incremental backup for table, with overwrite for previous backup
            String previousBackupId = backupIds.get(backupIds.size() - 2);
            restoreVerifyTable(conn, client, table, previousBackupId, rowsInIteration * (count - 1));
            // Restore incremental backup for table, with overwrite for last backup
            restoreVerifyTable(conn, client, table, backupId, rowsInIteration * count);
        }
        // Now merge all incremental and restore
        String[] incBackupIds = allIncremental(backupIds);
        merge(incBackupIds, client);
        // Restore last one
        String backupId = incBackupIds[incBackupIds.length - 1];
        // restore incremental backup for table, with overwrite
        TableName[] tablesRestoreIncMultiple = new TableName[] { table };
        restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tablesRestoreIncMultiple, null, true), client);
        Table hTable = conn.getTable(table);
        Assert.assertEquals(util.countRows(hTable), rowsInIteration * numIterations);
        hTable.close();
        LOG.info("{} loop {} finished.", Thread.currentThread().getName(), (count - 1));
    }
}
Also used : BackupRequest(org.apache.hadoop.hbase.backup.BackupRequest) BackupSystemTable(org.apache.hadoop.hbase.backup.impl.BackupSystemTable) Table(org.apache.hadoop.hbase.client.Table) BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) TableDescriptorBuilder(org.apache.hadoop.hbase.client.TableDescriptorBuilder) ColumnFamilyDescriptorBuilder(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) ArrayList(java.util.ArrayList) Connection(org.apache.hadoop.hbase.client.Connection) BackupAdmin(org.apache.hadoop.hbase.backup.BackupAdmin) Admin(org.apache.hadoop.hbase.client.Admin)
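
The helpers restore(), createRestoreRequest() and restoreVerifyTable() called above belong to IntegrationTestBackupRestore and are not part of this snippet. A minimal sketch of what restoreVerifyTable could look like, inferred from the restore-and-count pattern at the end of the method (the exact signature and body are assumptions, not verbatim source):

// Hedged sketch: restore one backup image into the original table and verify the row count.
private void restoreVerifyTable(Connection conn, BackupAdmin client, TableName table,
        String backupId, long expectedRows) throws IOException {
    TableName[] tablesToRestore = new TableName[] { table };
    // createRestoreRequest(rootDir, backupId, check, fromTables, toTables, overwrite) is
    // assumed to mirror BackupUtils.createRestoreRequest used in the other examples.
    restore(createRestoreRequest(BACKUP_ROOT_DIR, backupId, false, tablesToRestore, null, true),
        client);
    try (Table hTable = conn.getTable(table)) {
        Assert.assertEquals(expectedRows, util.countRows(hTable));
    }
}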

Example 7 with BackupAdminImpl

use of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in project hbase by apache.

From the class TestBackupMerge, method TestIncBackupMergeRestore:

@Test
public void TestIncBackupMergeRestore() throws Exception {
    int ADD_ROWS = 99;
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    // Set custom Merge Job implementation
    Connection conn = ConnectionFactory.createConnection(conf1);
    Admin admin = conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdFull));
    // #2 - insert some data to table1
    Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
    LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS);
    t1.close();
    LOG.debug("written " + ADD_ROWS + " rows to " + table1);
    Table t2 = insertIntoTable(conn, table2, famName, 1, ADD_ROWS);
    Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + ADD_ROWS);
    t2.close();
    LOG.debug("written " + ADD_ROWS + " rows to " + table2);
    // #3 - incremental backup for multiple tables
    tables = Lists.newArrayList(table1, table2);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple));
    t1 = insertIntoTable(conn, table1, famName, 2, ADD_ROWS);
    t1.close();
    t2 = insertIntoTable(conn, table2, famName, 2, ADD_ROWS);
    t2.close();
    // #4 - second incremental backup for multiple tables
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple2 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple2));
    try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
        String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 };
        bAdmin.mergeBackups(backups);
    }
    // #6 - restore incremental backup for multiple tables, with overwrite
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
    Table hTable = conn.getTable(table1_restore);
    LOG.debug("After incremental restore: " + hTable.getDescriptor());
    int countRows = TEST_UTIL.countRows(hTable, famName);
    LOG.debug("f1 has " + countRows + " rows");
    Assert.assertEquals(NB_ROWS_IN_BATCH + 2 * ADD_ROWS, countRows);
    hTable.close();
    hTable = conn.getTable(table2_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + 2 * ADD_ROWS);
    hTable.close();
    admin.close();
    conn.close();
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) Test(org.junit.Test)
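
The helpers createBackupRequest() and checkSucceeded() used here (and in the remaining examples) come from the shared test base class and are not shown. A rough sketch of both, assuming the request is built with BackupRequest.Builder exactly as in Example 6 and that the backup status is read back through BackupSystemTable (the details are assumptions, not verbatim HBase source):

// Hedged sketch of the shared test helpers (assumed shape).
protected BackupRequest createBackupRequest(BackupType type, List<TableName> tables,
        String targetRootDir) {
    // Same builder pattern as in Example 6.
    return new BackupRequest.Builder()
        .withBackupType(type)
        .withTableList(tables)
        .withTargetRootDir(targetRootDir)
        .build();
}

protected boolean checkSucceeded(String backupId) throws IOException {
    // Read the persisted backup status and verify it reached COMPLETE.
    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
        BackupInfo info = table.readBackupInfo(backupId);
        return info != null && info.getState() == BackupInfo.BackupState.COMPLETE;
    }
}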

Example 8 with BackupAdminImpl

use of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in project hbase by apache.

From the class TestIncrementalBackup, method TestIncBackupRestore:

// implement all test cases in 1 test since incremental
// backup/restore has dependencies
@Test
public void TestIncBackupRestore() throws Exception {
    int ADD_ROWS = 99;
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    final byte[] fam3Name = Bytes.toBytes("f3");
    final byte[] mobName = Bytes.toBytes("mob");
    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(mobName)
            .setMobEnabled(true)
            .setMobThreshold(5L)
            .build())
        .build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
    try (Connection conn = ConnectionFactory.createConnection(conf1)) {
        int NB_ROWS_FAM3 = 6;
        insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
        insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close();
        Admin admin = conn.getAdmin();
        BackupAdminImpl client = new BackupAdminImpl(conn);
        BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
        String backupIdFull = client.backupTables(request);
        assertTrue(checkSucceeded(backupIdFull));
        // #2 - insert some data to table
        Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
        LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
        Assert.assertEquals(HBaseTestingUtil.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
        LOG.debug("written " + ADD_ROWS + " rows to " + table1);
        // additionally, insert rows to MOB cf
        int NB_ROWS_MOB = 111;
        insertIntoTable(conn, table1, mobName, 3, NB_ROWS_MOB);
        LOG.debug("written " + NB_ROWS_MOB + " rows to " + table1 + " to Mob enabled CF");
        t1.close();
        Assert.assertEquals(HBaseTestingUtil.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB);
        Table t2 = conn.getTable(table2);
        Put p2;
        for (int i = 0; i < 5; i++) {
            p2 = new Put(Bytes.toBytes("row-t2" + i));
            p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
            t2.put(p2);
        }
        Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(t2));
        t2.close();
        LOG.debug("written " + 5 + " rows to " + table2);
        // split table1
        SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
        List<HRegion> regions = cluster.getRegions(table1);
        byte[] name = regions.get(0).getRegionInfo().getRegionName();
        long startSplitTime = EnvironmentEdgeManager.currentTime();
        try {
            admin.splitRegionAsync(name).get();
        } catch (Exception e) {
            // If the region is not splittable, the split fails here with an exception;
            // that should not affect the checks that follow, so just log it and move on.
            LOG.debug("region is not splittable, because " + e);
        }
        while (!admin.isTableAvailable(table1)) {
            Thread.sleep(100);
        }
        long endSplitTime = EnvironmentEdgeManager.currentTime();
        // split finished
        LOG.debug("split finished in =" + (endSplitTime - startSplitTime));
        // #3 - incremental backup for multiple tables
        tables = Lists.newArrayList(table1, table2);
        request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
        String backupIdIncMultiple = client.backupTables(request);
        assertTrue(checkSucceeded(backupIdIncMultiple));
        // add column family f2 to table1
        // drop column family f3
        final byte[] fam2Name = Bytes.toBytes("f2");
        newTable1Desc = TableDescriptorBuilder.newBuilder(newTable1Desc)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam2Name))
            .removeColumnFamily(fam3Name)
            .build();
        TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
        int NB_ROWS_FAM2 = 7;
        Table t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
        t3.close();
        // Wait for 5 sec to make sure that old WALs were deleted
        Thread.sleep(5000);
        // #4 - additional incremental backup for multiple tables
        request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
        String backupIdIncMultiple2 = client.backupTables(request);
        assertTrue(checkSucceeded(backupIdIncMultiple2));
        // #5 - restore full backup for all tables
        TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
        TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
        LOG.debug("Restoring full " + backupIdFull);
        client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull, tablesMapFull, true));
        // #6.1 - check tables for full restore
        Admin hAdmin = TEST_UTIL.getAdmin();
        assertTrue(hAdmin.tableExists(table1_restore));
        assertTrue(hAdmin.tableExists(table2_restore));
        hAdmin.close();
        // #6.2 - checking row count of tables for full restore
        Table hTable = conn.getTable(table1_restore);
        Assert.assertEquals(HBaseTestingUtil.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
        hTable.close();
        hTable = conn.getTable(table2_restore);
        Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtil.countRows(hTable));
        hTable.close();
        // #7 - restore incremental backup for multiple tables, with overwrite
        TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
        TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
        client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
        hTable = conn.getTable(table1_restore);
        LOG.debug("After incremental restore: " + hTable.getDescriptor());
        int countFamName = TEST_UTIL.countRows(hTable, famName);
        LOG.debug("f1 has " + countFamName + " rows");
        Assert.assertEquals(countFamName, NB_ROWS_IN_BATCH + ADD_ROWS);
        int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
        LOG.debug("f2 has " + countFam2Name + " rows");
        Assert.assertEquals(countFam2Name, NB_ROWS_FAM2);
        int countMobName = TEST_UTIL.countRows(hTable, mobName);
        LOG.debug("mob has " + countMobName + " rows");
        Assert.assertEquals(countMobName, NB_ROWS_MOB);
        hTable.close();
        hTable = conn.getTable(table2_restore);
        Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtil.countRows(hTable));
        hTable.close();
        admin.close();
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) Table(org.apache.hadoop.hbase.client.Table) BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) TableName(org.apache.hadoop.hbase.TableName) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Test(org.junit.Test)
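
insertIntoTable() is another base-class helper; it writes a batch of rows into the given column family and returns the still-open Table handle so the caller can count rows or close it. A minimal sketch, with the row-key and value layout assumed from the explicit Put loops in Examples 8 and 10:

// Hedged sketch: write numRows puts into the given family and return the open table.
protected Table insertIntoTable(Connection conn, TableName tableName, byte[] family, int id,
        int numRows) throws IOException {
    Table t = conn.getTable(tableName);
    for (int i = 0; i < numRows; i++) {
        // Row-key format "row-t<id>-<i>" is an assumption, not verbatim source.
        Put p = new Put(Bytes.toBytes("row-t" + id + "-" + i));
        p.addColumn(family, qualName, Bytes.toBytes("val" + i));
        t.put(p);
    }
    return t;
}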

Example 9 with BackupAdminImpl

use of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in project hbase by apache.

From the class TestIncrementalBackupMergeWithFailures, method TestIncBackupMergeRestore:

@Test
public void TestIncBackupMergeRestore() throws Exception {
    int ADD_ROWS = 99;
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    // Set custom Merge Job implementation
    conf1.setClass(BackupRestoreFactory.HBASE_BACKUP_MERGE_IMPL_CLASS, BackupMergeJobWithFailures.class, BackupMergeJob.class);
    Connection conn = ConnectionFactory.createConnection(conf1);
    Admin admin = conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdFull));
    // #2 - insert some data to table1
    Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
    LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS);
    t1.close();
    LOG.debug("written " + ADD_ROWS + " rows to " + table1);
    Table t2 = insertIntoTable(conn, table2, famName, 1, ADD_ROWS);
    Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + ADD_ROWS);
    t2.close();
    LOG.debug("written " + ADD_ROWS + " rows to " + table2);
    // #3 - incremental backup for multiple tables
    tables = Lists.newArrayList(table1, table2);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple));
    t1 = insertIntoTable(conn, table1, famName, 2, ADD_ROWS);
    t1.close();
    t2 = insertIntoTable(conn, table2, famName, 2, ADD_ROWS);
    t2.close();
    // #4 - second incremental backup for multiple tables
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple2 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple2));
    for (FailurePhase phase : FailurePhase.values()) {
        Configuration conf = conn.getConfiguration();
        conf.set(FAILURE_PHASE_KEY, phase.toString());
        try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
            String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 };
            bAdmin.mergeBackups(backups);
            Assert.fail("Expected IOException");
        } catch (IOException e) {
            BackupSystemTable table = new BackupSystemTable(conn);
            if (phase.ordinal() < FailurePhase.PHASE4.ordinal()) {
                // No need to repair:
                // Both Merge and backup exclusive operations are finished
                assertFalse(table.isMergeInProgress());
                try {
                    table.finishBackupExclusiveOperation();
                    Assert.fail("IOException is expected");
                } catch (IOException ee) {
                // Expected
                }
            } else {
                // Repair is required
                assertTrue(table.isMergeInProgress());
                try {
                    table.startBackupExclusiveOperation();
                    Assert.fail("IOException is expected");
                } catch (IOException ee) {
                // Expected - clean up before proceeding
                // table.finishMergeOperation();
                // table.finishBackupExclusiveOperation();
                }
            }
            table.close();
            LOG.debug("Expected :" + e.getMessage());
        }
    }
    // Now merge w/o failures
    Configuration conf = conn.getConfiguration();
    conf.unset(FAILURE_PHASE_KEY);
    conf.unset(BackupRestoreFactory.HBASE_BACKUP_MERGE_IMPL_CLASS);
    // Now run repair
    BackupSystemTable sysTable = new BackupSystemTable(conn);
    BackupCommands.RepairCommand.repairFailedBackupMergeIfAny(conn, sysTable);
    // Now repeat merge
    try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
        String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 };
        bAdmin.mergeBackups(backups);
    }
    // #6 - restore incremental backup for multiple tables, with overwrite
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2, false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
    Table hTable = conn.getTable(table1_restore);
    LOG.debug("After incremental restore: " + hTable.getDescriptor());
    LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " rows");
    Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH + 2 * ADD_ROWS);
    hTable.close();
    hTable = conn.getTable(table2_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + 2 * ADD_ROWS);
    hTable.close();
    admin.close();
    conn.close();
}
Also used : BackupSystemTable(org.apache.hadoop.hbase.backup.impl.BackupSystemTable) Table(org.apache.hadoop.hbase.client.Table) Configuration(org.apache.hadoop.conf.Configuration) BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) TableName(org.apache.hadoop.hbase.TableName) Test(org.junit.Test)
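
Example 9 injects failures by swapping in BackupMergeJobWithFailures through BackupRestoreFactory.HBASE_BACKUP_MERGE_IMPL_CLASS and selecting the failing phase via FAILURE_PHASE_KEY. Neither the FailurePhase enum nor the injection check is shown above; a hedged sketch of how such a hook could look (only PHASE4 appears in the test, so the other phase names and the check itself are assumptions):

// Hedged sketch of the failure-injection hook driven by FAILURE_PHASE_KEY (assumed, not
// the actual BackupMergeJobWithFailures source).
enum FailurePhase {
    PHASE1, PHASE2, PHASE3, PHASE4
}

// Inside the custom merge job, each phase could call something like this before it runs:
static void failIfRequested(Configuration conf, FailurePhase currentPhase) throws IOException {
    String requested = conf.get(FAILURE_PHASE_KEY);
    if (requested != null && FailurePhase.valueOf(requested) == currentPhase) {
        throw new IOException("Injected failure in phase " + currentPhase);
    }
}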

Example 10 with BackupAdminImpl

use of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in project hbase by apache.

From the class TestIncrementalBackupWithBulkLoad, method TestIncBackupDeleteTable:

// implement all test cases in 1 test since incremental backup/restore has dependencies
@Test
public void TestIncBackupDeleteTable() throws Exception {
    String testName = "TestIncBackupDeleteTable";
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1);
    Connection conn = ConnectionFactory.createConnection(conf1);
    Admin admin = conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdFull));
    // #2 - insert some data to table table1
    Table t1 = conn.getTable(table1);
    Put p1;
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
        p1 = new Put(Bytes.toBytes("row-t1" + i));
        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t1.put(p1);
    }
    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH * 2);
    t1.close();
    int NB_ROWS2 = 20;
    LOG.debug("bulk loading into " + testName);
    int actual = TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, qualName,
        false, null,
        new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("cccc") },
            new byte[][] { Bytes.toBytes("ddd"), Bytes.toBytes("ooo") } },
        true, false, true, NB_ROWS_IN_BATCH * 2, NB_ROWS2);
    // #3 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple));
    // #4 bulk load again
    LOG.debug("bulk loading into " + testName);
    int actual1 = TestBulkLoadHFiles.loadHFiles(testName, table1Desc, TEST_UTIL, famName, qualName,
        false, null,
        new byte[][][] { new byte[][] { Bytes.toBytes("ppp"), Bytes.toBytes("qqq") },
            new byte[][] { Bytes.toBytes("rrr"), Bytes.toBytes("sss") } },
        true, false, true, NB_ROWS_IN_BATCH * 2 + actual, NB_ROWS2);
    // #5 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple1 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple1));
    // Delete all data in table1
    TEST_UTIL.deleteTableData(table1);
    // #5.1 - check tables for full restore
    Admin hAdmin = TEST_UTIL.getAdmin();
    // #6 - restore incremental backup for table1
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
    // TableName[] tablesMapIncMultiple = new TableName[] { table1_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple1, false, tablesRestoreIncMultiple, tablesRestoreIncMultiple, true));
    Table hTable = conn.getTable(table1);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2 + actual + actual1);
    request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    backupIdFull = client.backupTables(request);
    try (final BackupSystemTable table = new BackupSystemTable(conn)) {
        Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> pair = table.readBulkloadRows(tables);
        assertTrue("map still has " + pair.getSecond().size() + " entries", pair.getSecond().isEmpty());
    }
    assertTrue(checkSucceeded(backupIdFull));
    hTable.close();
    admin.close();
    conn.close();
}
Also used : BackupSystemTable(org.apache.hadoop.hbase.backup.impl.BackupSystemTable) Table(org.apache.hadoop.hbase.client.Table) BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) Put(org.apache.hadoop.hbase.client.Put) TableName(org.apache.hadoop.hbase.TableName) List(java.util.List) Map(java.util.Map) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)
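
Taken together, the examples above follow the same pattern: open a Connection, create a BackupAdminImpl, take a full backup, take incremental backups, optionally merge them, and restore. A condensed sketch of that flow using only the APIs shown above (conf, backupRootDir and the table name are placeholders supplied by the caller; imports match those listed under Aggregations below):

// Condensed usage sketch distilled from the examples above.
try (Connection conn = ConnectionFactory.createConnection(conf);
        BackupAdmin admin = new BackupAdminImpl(conn)) {
    List<TableName> tables = Lists.newArrayList(TableName.valueOf("t1"));

    // Full backup first.
    String fullId = admin.backupTables(new BackupRequest.Builder()
        .withBackupType(BackupType.FULL).withTableList(tables)
        .withTargetRootDir(backupRootDir).build());

    // Two incremental backups (more data is written between them), then merge them.
    String incId1 = admin.backupTables(new BackupRequest.Builder()
        .withBackupType(BackupType.INCREMENTAL).withTableList(tables)
        .withTargetRootDir(backupRootDir).build());
    String incId2 = admin.backupTables(new BackupRequest.Builder()
        .withBackupType(BackupType.INCREMENTAL).withTableList(tables)
        .withTargetRootDir(backupRootDir).build());
    admin.mergeBackups(new String[] { incId1, incId2 });

    // Restore the merged image back over the original table, with overwrite.
    admin.restore(BackupUtils.createRestoreRequest(backupRootDir, incId2, false,
        tables.toArray(new TableName[0]), null, true));
}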

Aggregations

BackupAdminImpl (org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) - 12
Connection (org.apache.hadoop.hbase.client.Connection) - 12
TableName (org.apache.hadoop.hbase.TableName) - 9
Admin (org.apache.hadoop.hbase.client.Admin) - 9
Table (org.apache.hadoop.hbase.client.Table) - 8
Test (org.junit.Test) - 8
Put (org.apache.hadoop.hbase.client.Put) - 6
BackupSystemTable (org.apache.hadoop.hbase.backup.impl.BackupSystemTable) - 4
IOException (java.io.IOException) - 2
BackupAdmin (org.apache.hadoop.hbase.backup.BackupAdmin) - 2
BackupRequest (org.apache.hadoop.hbase.backup.BackupRequest) - 2
HTable (org.apache.hadoop.hbase.client.HTable) - 2
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor) - 2
ArrayList (java.util.ArrayList) - 1
HashSet (java.util.HashSet) - 1
List (java.util.List) - 1
Map (java.util.Map) - 1
Configuration (org.apache.hadoop.conf.Configuration) - 1
SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster) - 1
ColumnFamilyDescriptorBuilder (org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder) - 1