Example 11 with BackupAdminImpl

Use of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in project hbase by apache.

From the class TestBackupMultipleDeletes, method testBackupMultipleDeletes.
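
The test interleaves full and incremental backups of table1 and table2 with data loads, adds a separate full plus incremental backup chain for table3, then deletes backupIdInc2 and verifies that the incremental images depending on it (backupIdInc3, backupIdInc4) are removed while the table3 chain is untouched: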

@Test
public void testBackupMultipleDeletes() throws Exception {
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    Connection conn = ConnectionFactory.createConnection(conf1);
    Admin admin = conn.getAdmin();
    BackupAdmin client = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdFull));
    // #2 - insert some data to table table1
    Table t1 = conn.getTable(table1);
    Put p1;
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
        p1 = new Put(Bytes.toBytes("row-t1" + i));
        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t1.put(p1);
    }
    // table1 already contains NB_ROWS_IN_BATCH rows from the test setup
    Assert.assertEquals(NB_ROWS_IN_BATCH * 2, TEST_UTIL.countRows(t1));
    t1.close();
    // #3 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdInc1 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdInc1));
    // #4 - insert some data to table table2
    Table t2 = conn.getTable(table2);
    Put p2 = null;
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
        p2 = new Put(Bytes.toBytes("row-t2" + i));
        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t2.put(p2);
    }
    // #5 - incremental backup for table1, table2
    tables = Lists.newArrayList(table1, table2);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdInc2 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdInc2));
    // #6 - insert some data to table table1
    t1 = conn.getTable(table1);
    for (int i = NB_ROWS_IN_BATCH; i < 2 * NB_ROWS_IN_BATCH; i++) {
        p1 = new Put(Bytes.toBytes("row-t1" + i));
        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t1.put(p1);
    }
    // #7 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdInc3 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdInc3));
    // #8 - insert some data to table table2
    t2 = conn.getTable(table2);
    for (int i = NB_ROWS_IN_BATCH; i < 2 * NB_ROWS_IN_BATCH; i++) {
        p2 = new Put(Bytes.toBytes("row-t1" + i));
        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t2.put(p2);
    }
    // #9 - incremental backup for table1, table2
    tables = Lists.newArrayList(table1, table2);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdInc4 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdInc4));
    // #10 full backup for table3
    tables = Lists.newArrayList(table3);
    request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull2 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdFull2));
    // #11 - incremental backup for table3
    tables = Lists.newArrayList(table3);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdInc5 = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdInc5));
    LOG.error("Delete backupIdInc2");
    client.deleteBackups(new String[] { backupIdInc2 });
    LOG.error("Delete backupIdInc2 done");
    List<BackupInfo> list = client.getHistory(100);
    // First check the number of backup images remaining after the delete
    assertEquals(4, list.size());
    // then verify that backupIdInc2, backupIdInc3 and backupIdInc4 are gone:
    // deleting an incremental image also removes the images that depend on it
    Set<String> ids = new HashSet<String>();
    ids.add(backupIdInc2);
    ids.add(backupIdInc3);
    ids.add(backupIdInc4);
    for (BackupInfo info : list) {
        String backupId = info.getBackupId();
        if (ids.contains(backupId)) {
            Assert.fail("Backup image " + backupId + " was not deleted");
        }
    }
    // Verify that backupIdInc5 contains only table3
    boolean found = false;
    for (BackupInfo info : list) {
        String backupId = info.getBackupId();
        if (backupId.equals(backupIdInc5)) {
            assertEquals(1, info.getTables().size());
            assertEquals(table3, info.getTableNames().get(0));
            found = true;
        }
    }
    assertTrue(found);
    admin.close();
    conn.close();
}
Also used : Table(org.apache.hadoop.hbase.client.Table) BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) Put(org.apache.hadoop.hbase.client.Put) TableName(org.apache.hadoop.hbase.TableName) HashSet(java.util.HashSet) Test(org.junit.Test)
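
For orientation, here is a minimal standalone sketch of the same create/list/delete flow. Note that createBackupRequest in the test above is a test helper; the sketch builds the request directly with BackupRequest.Builder instead (an assumption about the public API, hedged accordingly), the imports mirror the "Also used" list above, and the root directory and table name are placeholders:

// Minimal sketch, assuming BackupRequest.Builder; path and table are placeholders.
Configuration conf = HBaseConfiguration.create();
try (Connection conn = ConnectionFactory.createConnection(conf);
    BackupAdmin client = new BackupAdminImpl(conn)) {
    BackupRequest request = new BackupRequest.Builder()
        .withBackupType(BackupType.FULL)
        .withTableList(Lists.newArrayList(TableName.valueOf("demo_table")))
        .withTargetRootDir("hdfs://namenode:8020/backup_root")
        .build();
    // backupTables runs the backup and returns its id on success
    String backupId = client.backupTables(request);
    // getHistory(n) returns up to n most recent backup images
    List<BackupInfo> history = client.getHistory(10);
    // deleteBackups removes the image and any incremental images built on it,
    // which is exactly what the test above verifies
    client.deleteBackups(new String[] { backupId });
}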

Example 12 with BackupAdminImpl

Use of org.apache.hadoop.hbase.backup.impl.BackupAdminImpl in project hbase by apache.

From the class RestoreDriver, method parseAndRun.
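
The method validates the restore command line (backup must be enabled, -s and -t are mutually exclusive, and exactly two positional arguments are expected: the backup root directory and a backup id), resolves the table list from either a backup set or the -t option, and then delegates to BackupAdmin.restore: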

private int parseAndRun(String[] args) throws IOException {
    // Check if backup is enabled
    if (!BackupManager.isBackupEnabled(getConf())) {
        System.err.println(BackupRestoreConstants.ENABLE_BACKUP);
        return -1;
    }
    // enable debug logging
    if (cmd.hasOption(OPTION_DEBUG)) {
        Log4jUtils.setLogLevel("org.apache.hadoop.hbase.backup", "DEBUG");
    }
    // whether to overwrite to existing table if any, false by default
    boolean overwrite = cmd.hasOption(OPTION_OVERWRITE);
    if (overwrite) {
        LOG.debug("Found -overwrite option in restore command, " + "will overwrite to existing table if any in the restore target");
    }
    // whether to only check the dependencies, false by default
    boolean check = cmd.hasOption(OPTION_CHECK);
    if (check) {
        LOG.debug("Found -check option in restore command, " + "will check and verify the dependencies");
    }
    if (cmd.hasOption(OPTION_SET) && cmd.hasOption(OPTION_TABLE)) {
        System.err.println("Options -s and -t are mutaully exclusive," + " you can not specify both of them.");
        printToolUsage();
        return -1;
    }
    if (!cmd.hasOption(OPTION_SET) && !cmd.hasOption(OPTION_TABLE)) {
        System.err.println("You have to specify either set name or table list to restore");
        printToolUsage();
        return -1;
    }
    if (cmd.hasOption(OPTION_YARN_QUEUE_NAME)) {
        String queueName = cmd.getOptionValue(OPTION_YARN_QUEUE_NAME);
        // Set system property value for MR job
        System.setProperty("mapreduce.job.queuename", queueName);
    }
    // parse main restore command options
    String[] remainArgs = cmd.getArgs();
    if (remainArgs.length != 2) {
        printToolUsage();
        return -1;
    }
    String backupRootDir = remainArgs[0];
    String backupId = remainArgs[1];
    String tables;
    String tableMapping = cmd.hasOption(OPTION_TABLE_MAPPING) ? cmd.getOptionValue(OPTION_TABLE_MAPPING) : null;
    try (final Connection conn = ConnectionFactory.createConnection(conf);
        BackupAdmin client = new BackupAdminImpl(conn)) {
        // Check backup set
        if (cmd.hasOption(OPTION_SET)) {
            String setName = cmd.getOptionValue(OPTION_SET);
            try {
                tables = getTablesForSet(conn, setName, conf);
            } catch (IOException e) {
                System.out.println("ERROR: " + e.getMessage() + " for setName=" + setName);
                printToolUsage();
                return -2;
            }
            if (tables == null) {
                System.out.println("ERROR: Backup set '" + setName + "' is either empty or does not exist");
                printToolUsage();
                return -3;
            }
        } else {
            tables = cmd.getOptionValue(OPTION_TABLE);
        }
        TableName[] sTableArray = BackupUtils.parseTableNames(tables);
        TableName[] tTableArray = BackupUtils.parseTableNames(tableMapping);
        if (sTableArray != null && tTableArray != null && (sTableArray.length != tTableArray.length)) {
            System.out.println("ERROR: table mapping mismatch: " + tables + " : " + tableMapping);
            printToolUsage();
            return -4;
        }
        client.restore(BackupUtils.createRestoreRequest(backupRootDir, backupId, check, sTableArray, tTableArray, overwrite));
    } catch (Exception e) {
        LOG.error("Error while running restore backup", e);
        return -5;
    }
    return 0;
}
Also used : TableName(org.apache.hadoop.hbase.TableName) BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException)
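
The same restore can also be driven programmatically. A minimal sketch, reusing BackupUtils.createRestoreRequest with the argument order shown above; the root directory, backup id and table name are placeholders, not values from the examples:

// Minimal sketch of a programmatic restore; all literal values are placeholders.
TableName[] fromTables = new TableName[] { TableName.valueOf("demo_table") };
// null restores into the original table names; pass a same-length array to remap
TableName[] toTables = null;
try (Connection conn = ConnectionFactory.createConnection(conf);
    BackupAdmin client = new BackupAdminImpl(conn)) {
    RestoreRequest request = BackupUtils.createRestoreRequest(
        "hdfs://namenode:8020/backup_root", // backupRootDir
        "backup_1700000000000", // backupId: placeholder
        false, // check: run the restore, not just the dependency check
        fromTables, toTables,
        true); // overwrite existing tables in the restore target
    client.restore(request);
}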

Aggregations

BackupAdminImpl (org.apache.hadoop.hbase.backup.impl.BackupAdminImpl): 12 uses
Connection (org.apache.hadoop.hbase.client.Connection): 12 uses
TableName (org.apache.hadoop.hbase.TableName): 9 uses
Admin (org.apache.hadoop.hbase.client.Admin): 9 uses
Table (org.apache.hadoop.hbase.client.Table): 8 uses
Test (org.junit.Test): 8 uses
Put (org.apache.hadoop.hbase.client.Put): 6 uses
BackupSystemTable (org.apache.hadoop.hbase.backup.impl.BackupSystemTable): 4 uses
IOException (java.io.IOException): 2 uses
BackupAdmin (org.apache.hadoop.hbase.backup.BackupAdmin): 2 uses
BackupRequest (org.apache.hadoop.hbase.backup.BackupRequest): 2 uses
HTable (org.apache.hadoop.hbase.client.HTable): 2 uses
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 2 uses
ArrayList (java.util.ArrayList): 1 use
HashSet (java.util.HashSet): 1 use
List (java.util.List): 1 use
Map (java.util.Map): 1 use
Configuration (org.apache.hadoop.conf.Configuration): 1 use
SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster): 1 use
ColumnFamilyDescriptorBuilder (org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder): 1 use