Example 1 with BackupRequest

Use of org.apache.hadoop.hbase.backup.BackupRequest in project hbase by apache.

The class BackupAdminImpl, method backupTables:

@Override
public String backupTables(BackupRequest request) throws IOException {
    BackupType type = request.getBackupType();
    String targetRootDir = request.getTargetRootDir();
    List<TableName> tableList = request.getTableList();
    String backupId = BackupRestoreConstants.BACKUPID_PREFIX + EnvironmentEdgeManager.currentTime();
    if (type == BackupType.INCREMENTAL) {
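        // For an incremental backup, the effective table list comes from the backup system
        // table: only tables that already have a full backup can be backed up incrementally.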
        Set<TableName> incrTableSet = null;
        try (BackupSystemTable table = new BackupSystemTable(conn)) {
            incrTableSet = table.getIncrementalBackupTableSet(targetRootDir);
        }
        if (incrTableSet.isEmpty()) {
            String msg = "Incremental backup table set contains no tables. " + "You need to run full backup first " + (tableList != null ? "on " + StringUtils.join(tableList, ",") : "");
            throw new IOException(msg);
        }
        if (tableList != null) {
            tableList.removeAll(incrTableSet);
            if (!tableList.isEmpty()) {
                String extraTables = StringUtils.join(tableList, ",");
                String msg = "Some tables (" + extraTables + ") haven't gone through full backup. " + "Perform full backup on " + extraTables + " first, " + "then retry the command";
                throw new IOException(msg);
            }
        }
        tableList = Lists.newArrayList(incrTableSet);
    }
    if (tableList != null && !tableList.isEmpty()) {
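        // Fail fast if a per-table target directory for this backup id already exists.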
        for (TableName table : tableList) {
            String targetTableBackupDir = HBackupFileSystem.getTableBackupDir(targetRootDir, backupId, table);
            Path targetTableBackupDirPath = new Path(targetTableBackupDir);
            FileSystem outputFs = FileSystem.get(targetTableBackupDirPath.toUri(), conn.getConfiguration());
            if (outputFs.exists(targetTableBackupDirPath)) {
                throw new IOException("Target backup directory " + targetTableBackupDir + " exists already.");
            }
        }
        ArrayList<TableName> nonExistingTableList = null;
        try (Admin admin = conn.getAdmin()) {
            for (TableName tableName : tableList) {
                if (!admin.tableExists(tableName)) {
                    if (nonExistingTableList == null) {
                        nonExistingTableList = new ArrayList<>();
                    }
                    nonExistingTableList.add(tableName);
                }
            }
        }
        if (nonExistingTableList != null) {
            if (type == BackupType.INCREMENTAL) {
                // Update incremental backup set
                tableList = excludeNonExistingTables(tableList, nonExistingTableList);
            } else {
                // Throw exception only in full mode - we try to backup non-existing table
                throw new IOException("Non-existing tables found in the table list: " + nonExistingTableList);
            }
        }
    }
    // update table list
    BackupRequest.Builder builder = new BackupRequest.Builder();
    request = builder.withBackupType(request.getBackupType())
        .withTableList(tableList)
        .withTargetRootDir(request.getTargetRootDir())
        .withBackupSetName(request.getBackupSetName())
        .withTotalTasks(request.getTotalTasks())
        .withBandwidthPerTasks((int) request.getBandwidth())
        .build();
    if (type == BackupType.FULL) {
        new FullTableBackupClient(conn, backupId, request).execute();
    } else {
        new IncrementalTableBackupClient(conn, backupId, request).execute();
    }
    return backupId;
}
Also used: Path (org.apache.hadoop.fs.Path), BackupRequest (org.apache.hadoop.hbase.backup.BackupRequest), IOException (java.io.IOException), BackupAdmin (org.apache.hadoop.hbase.backup.BackupAdmin), Admin (org.apache.hadoop.hbase.client.Admin), TableName (org.apache.hadoop.hbase.TableName), FileSystem (org.apache.hadoop.fs.FileSystem), HBackupFileSystem (org.apache.hadoop.hbase.backup.HBackupFileSystem), BackupType (org.apache.hadoop.hbase.backup.BackupType)
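
For context, callers build the request with BackupRequest.Builder and hand it to backupTables, as Example 2 below does. A minimal sketch, assuming hbase-site.xml is on the classpath and using an illustrative table name and backup root directory (both placeholders, not values from the examples):

Configuration conf = HBaseConfiguration.create(); // assumes cluster settings on the classpath
try (Connection conn = ConnectionFactory.createConnection(conf);
     BackupAdmin backupAdmin = new BackupAdminImpl(conn)) {
    // Table name and target root dir below are placeholders.
    List<TableName> tables = Lists.newArrayList(TableName.valueOf("my_table"));
    BackupRequest request = new BackupRequest.Builder()
        .withBackupType(BackupType.FULL)
        .withTableList(tables)
        .withTargetRootDir("hdfs://namenode:8020/hbase-backups")
        .build();
    // Returns the generated backup id (BACKUPID_PREFIX + timestamp, as shown above).
    String backupId = backupAdmin.backupTables(request);
}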

Example 2 with BackupRequest

Use of org.apache.hadoop.hbase.backup.BackupRequest in project hbase by apache.

The class IntegrationTestBackupRestore, method runTest:

private void runTest() throws IOException {
    try (Connection conn = util.getConnection();
        Admin admin = conn.getAdmin();
        BackupAdmin client = new BackupAdminImpl(conn)) {
        // #0- insert some data to table TABLE_NAME1, TABLE_NAME2
        loadData(TABLE_NAME1, rowsInBatch);
        loadData(TABLE_NAME2, rowsInBatch);
        // #1 - create full backup for all tables
        LOG.info("create full backup image for all tables");
        List<TableName> tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
        BackupRequest.Builder builder = new BackupRequest.Builder();
        BackupRequest request = builder.withBackupType(BackupType.FULL)
            .withTableList(tables)
            .withTargetRootDir(BACKUP_ROOT_DIR)
            .build();
        String backupIdFull = client.backupTables(request);
        assertTrue(checkSucceeded(backupIdFull));
        // #2 - insert some data to table
        loadData(TABLE_NAME1, rowsInBatch);
        loadData(TABLE_NAME2, rowsInBatch);
        HTable t1 = (HTable) conn.getTable(TABLE_NAME1);
        Assert.assertEquals(util.countRows(t1), rowsInBatch * 2);
        t1.close();
        HTable t2 = (HTable) conn.getTable(TABLE_NAME2);
        Assert.assertEquals(util.countRows(t2), rowsInBatch * 2);
        t2.close();
        // #3 - incremental backup for tables
        tables = Lists.newArrayList(TABLE_NAME1, TABLE_NAME2);
        builder = new BackupRequest.Builder();
        request = builder.withBackupType(BackupType.INCREMENTAL)
            .withTableList(tables)
            .withTargetRootDir(BACKUP_ROOT_DIR)
            .build();
        String backupIdIncMultiple = client.backupTables(request);
        assertTrue(checkSucceeded(backupIdIncMultiple));
        // #4 - restore full backup for all tables, without overwrite
        TableName[] tablesRestoreFull = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
        client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull, null, true));
        // #5.1 - check tables for full restore
        assertTrue(admin.tableExists(TABLE_NAME1));
        assertTrue(admin.tableExists(TABLE_NAME2));
        // #5.2 - checking row count of tables for full restore
        HTable hTable = (HTable) conn.getTable(TABLE_NAME1);
        Assert.assertEquals(util.countRows(hTable), rowsInBatch);
        hTable.close();
        hTable = (HTable) conn.getTable(TABLE_NAME2);
        Assert.assertEquals(util.countRows(hTable), rowsInBatch);
        hTable.close();
        // #6 - restore incremental backup for multiple tables, with overwrite
        TableName[] tablesRestoreIncMultiple = new TableName[] { TABLE_NAME1, TABLE_NAME2 };
        client.restore(createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false, tablesRestoreIncMultiple, null, true));
        hTable = (HTable) conn.getTable(TABLE_NAME1);
        Assert.assertEquals(util.countRows(hTable), rowsInBatch * 2);
        hTable.close();
        hTable = (HTable) conn.getTable(TABLE_NAME2);
        Assert.assertEquals(util.countRows(hTable), rowsInBatch * 2);
        hTable.close();
    }
}
Also used: BackupRequest (org.apache.hadoop.hbase.backup.BackupRequest), BackupAdmin (org.apache.hadoop.hbase.backup.BackupAdmin), BackupAdminImpl (org.apache.hadoop.hbase.backup.impl.BackupAdminImpl), Connection (org.apache.hadoop.hbase.client.Connection), Admin (org.apache.hadoop.hbase.client.Admin), HTable (org.apache.hadoop.hbase.client.HTable)
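
The createRestoreRequest(...) helper called above belongs to the test class and is not shown in this example. A plausible reconstruction, assuming the RestoreRequest.Builder setters from the org.apache.hadoop.hbase.backup package and inferring the parameter order from the call sites above:

// Hypothetical sketch; the actual helper in IntegrationTestBackupRestore may differ.
private RestoreRequest createRestoreRequest(String backupRootDir, String backupId,
        boolean check, TableName[] fromTables, TableName[] toTables, boolean overwrite) {
    return new RestoreRequest.Builder()
        .withBackupRootDir(backupRootDir)
        .withBackupId(backupId)
        .withCheck(check)
        .withFromTables(fromTables)
        .withToTables(toTables)
        // Setter spelling below follows the builder in the HBase backup module.
        .withOvewrite(overwrite)
        .build();
}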

Aggregations

BackupAdmin (org.apache.hadoop.hbase.backup.BackupAdmin): 2 uses
BackupRequest (org.apache.hadoop.hbase.backup.BackupRequest): 2 uses
Admin (org.apache.hadoop.hbase.client.Admin): 2 uses
IOException (java.io.IOException): 1 use
FileSystem (org.apache.hadoop.fs.FileSystem): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
TableName (org.apache.hadoop.hbase.TableName): 1 use
BackupType (org.apache.hadoop.hbase.backup.BackupType): 1 use
HBackupFileSystem (org.apache.hadoop.hbase.backup.HBackupFileSystem): 1 use
BackupAdminImpl (org.apache.hadoop.hbase.backup.impl.BackupAdminImpl): 1 use
Connection (org.apache.hadoop.hbase.client.Connection): 1 use
HTable (org.apache.hadoop.hbase.client.HTable): 1 use