Search in sources :

Example 21 with BackupSystemTable

Use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in the Apache HBase project.

From the class TestFullBackup, method testFullBackupMultipleCommand.

@Test
public void testFullBackupMultipleCommand() throws Exception {
    LOG.info("test full backup on a multiple tables with data: command-line");
    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
        int before = table.getBackupHistory().size();
        String[] args = new String[] { "create", "full", BACKUP_ROOT_DIR, "-t",
            table1.getNameAsString() + "," + table2.getNameAsString() };
        // Run the full backup of both tables through the command-line driver.
        int ret = ToolRunner.run(conf1, new BackupDriver(), args);
        assertTrue(ret == 0);
        // Fetch the history once and reuse it for both the count and the
        // per-backup status checks. The original fetched it twice, so the
        // size check and the loop could observe different snapshots.
        List<BackupInfo> backups = table.getBackupHistory();
        int after = backups.size();
        assertTrue(after == before + 1);
        for (BackupInfo data : backups) {
            String backupId = data.getBackupId();
            assertTrue(checkSucceeded(backupId));
        }
    }
    LOG.info("backup complete");
}
Also used : BackupSystemTable(org.apache.hadoop.hbase.backup.impl.BackupSystemTable) Test(org.junit.Test)

Example 22 with BackupSystemTable

Use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in the Apache HBase project.

From the class TestFullBackupSetRestoreSet, method testFullRestoreSetToOtherTable.

@Test
public void testFullRestoreSetToOtherTable() throws Exception {
    LOG.info("Test full restore set");
    // Register table1 under a named backup set, back it up by set name, then
    // restore the set into a different table and compare row counts.
    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
        String setName = "name";
        table.addToBackupSet(setName, new String[] { table1.getNameAsString() });
        List<TableName> members = table.describeBackupSet(setName);
        assertNotNull(members);
        assertTrue(members.size() == 1);
        assertTrue(members.get(0).equals(table1));
        // Full backup of the set via the command-line driver.
        String[] cmd = new String[] { "create", "full", BACKUP_ROOT_DIR, "-s", setName };
        int exitCode = ToolRunner.run(conf1, new BackupDriver(), cmd);
        assertTrue(exitCode == 0);
        List<BackupInfo> history = table.getBackupHistory();
        assertTrue(history.size() == 1);
        String backupId = history.get(0).getBackupId();
        assertTrue(checkSucceeded(backupId));
        LOG.info("backup complete");
        // Restore the set into the mapped target table, overwriting if it exists.
        cmd = new String[] { BACKUP_ROOT_DIR, backupId, "-s", setName, "-m",
            table1_restore.getNameAsString(), "-o" };
        exitCode = ToolRunner.run(conf1, new RestoreDriver(), cmd);
        assertTrue(exitCode == 0);
        Admin admin = TEST_UTIL.getAdmin();
        assertTrue(admin.tableExists(table1_restore));
        // Restored table must contain exactly the same number of rows as the source.
        assertEquals(TEST_UTIL.countRows(table1), TEST_UTIL.countRows(table1_restore));
        TEST_UTIL.deleteTable(table1_restore);
        LOG.info("restore into other table is complete");
        admin.close();
    }
}
Also used : BackupSystemTable(org.apache.hadoop.hbase.backup.impl.BackupSystemTable) TableName(org.apache.hadoop.hbase.TableName) Admin(org.apache.hadoop.hbase.client.Admin) Test(org.junit.Test)

Example 23 with BackupSystemTable

Use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in the Apache HBase project.

From the class TestBackupDescribe, method testBackupDescribeCommand.

@Test
public void testBackupDescribeCommand() throws Exception {
    LOG.info("test backup describe on a single table with data: command-line");
    List<TableName> tableList = Lists.newArrayList(table1);
    String backupId = fullTableBackup(tableList);
    LOG.info("backup complete");
    assertTrue(checkSucceeded(backupId));
    BackupInfo info = getBackupAdmin().getBackupInfo(backupId);
    assertTrue(info.getState() == BackupState.COMPLETE);
    // Capture the driver's console output so we can inspect the "describe" text.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintStream originalOut = System.out;
    System.setOut(new PrintStream(baos));
    String response;
    try {
        String[] args = new String[] { "describe", backupId };
        // Run the describe command through the command-line driver.
        int ret = ToolRunner.run(conf1, new BackupDriver(), args);
        assertTrue(ret == 0);
        response = baos.toString();
    } finally {
        // Restore stdout even if the driver or an assertion fails; the
        // original left System.out permanently redirected.
        System.setOut(originalOut);
    }
    assertTrue(response.indexOf(backupId) > 0);
    assertTrue(response.indexOf("COMPLETE") > 0);
    // try-with-resources: the original closed the system table manually and
    // leaked it when an earlier assertion failed.
    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
        BackupInfo status = table.readBackupInfo(backupId);
        String desc = status.getShortDescription();
        assertTrue(response.indexOf(desc) >= 0);
    }
}
Also used : BackupSystemTable(org.apache.hadoop.hbase.backup.impl.BackupSystemTable) TableName(org.apache.hadoop.hbase.TableName) PrintStream(java.io.PrintStream) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Test(org.junit.Test)

Example 24 with BackupSystemTable

Use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in the Apache HBase project.

From the class TestBackupDelete, method testBackupDelete.

/**
 * Verify that full backup is created on a single table with data correctly. Verify that history
 * works as expected.
 *
 * @throws Exception if doing the backup or an operation on the tables fails
 */
@Test
public void testBackupDelete() throws Exception {
    LOG.info("test backup delete on a single table with data");
    List<TableName> tableList = Lists.newArrayList(table1);
    String backupId = fullTableBackup(tableList);
    assertTrue(checkSucceeded(backupId));
    LOG.info("backup complete");
    String[] backupIds = new String[] { backupId };
    // try-with-resources: the original closed the system table manually, so
    // any failed assertion before close() leaked it.
    try (BackupSystemTable table = new BackupSystemTable(TEST_UTIL.getConnection())) {
        BackupInfo info = table.readBackupInfo(backupId);
        Path path = new Path(info.getBackupRootDir(), backupId);
        FileSystem fs = FileSystem.get(path.toUri(), conf1);
        assertTrue(fs.exists(path));
        int deleted = getBackupAdmin().deleteBackups(backupIds);
        // The backup's own directory must be gone, but the backup root
        // directory must survive the delete.
        assertTrue(!fs.exists(path));
        assertTrue(fs.exists(new Path(info.getBackupRootDir())));
        assertTrue(1 == deleted);
    }
    LOG.info("delete_backup");
}
Also used : BackupSystemTable(org.apache.hadoop.hbase.backup.impl.BackupSystemTable) Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)

Example 25 with BackupSystemTable

Use of org.apache.hadoop.hbase.backup.impl.BackupSystemTable in the Apache HBase project.

From the class TestBackupHFileCleaner, method testGetDeletableFiles.

/**
 * Verifies that BackupHFileCleaner only protects files referenced as bulk-loaded
 * HFiles in the backup system table: an unreferenced file is reported deletable,
 * and the same file stops being deletable once a bulk-load reference for it is
 * written via {@code writeBulkLoadedFiles}.
 */
@Test
public void testGetDeletableFiles() throws IOException {
    // 1. Create a file
    Path file = new Path(root, "testIsFileDeletableWithNoHFileRefs");
    fs.createNewFile(file);
    // 2. Assert file is successfully created
    assertTrue("Test file not created!", fs.exists(file));
    BackupHFileCleaner cleaner = new BackupHFileCleaner();
    cleaner.setConf(conf);
    // Disable the fully-backed-up-tables check so only the hfile-reference
    // lookup decides deletability in this test.
    cleaner.setCheckForFullyBackedUpTables(false);
    // 3. Assert that file as is should be deletable
    List<FileStatus> stats = new ArrayList<>();
    FileStatus stat = fs.getFileStatus(file);
    stats.add(stat);
    Iterable<FileStatus> deletable = cleaner.getDeletableFiles(stats);
    // NOTE(review): getDeletableFiles is called twice back-to-back here (and
    // again in step 5). Presumably the first call warms internal state in the
    // cleaner — confirm whether the duplicate call is intentional before
    // removing it.
    deletable = cleaner.getDeletableFiles(stats);
    boolean found = false;
    for (FileStatus stat1 : deletable) {
        if (stat.equals(stat1)) {
            found = true;
        }
    }
    assertTrue("Cleaner should allow to delete this file as there is no hfile reference " + "for it.", found);
    // 4. Add the file as bulk load
    List<Path> list = new ArrayList<>(1);
    list.add(file);
    try (Connection conn = ConnectionFactory.createConnection(conf);
        BackupSystemTable sysTbl = new BackupSystemTable(conn)) {
        List<TableName> sTableList = new ArrayList<>();
        sTableList.add(tableName);
        // One map per region, keyed by column family, valued by the bulk-loaded paths.
        Map<byte[], List<Path>>[] maps = new Map[1];
        maps[0] = new HashMap<>();
        maps[0].put(Bytes.toBytes(famName), list);
        sysTbl.writeBulkLoadedFiles(sTableList, maps, "1");
    }
    // 5. Assert file should not be deletable
    deletable = cleaner.getDeletableFiles(stats);
    deletable = cleaner.getDeletableFiles(stats);
    found = false;
    for (FileStatus stat1 : deletable) {
        if (stat.equals(stat1)) {
            found = true;
        }
    }
    assertFalse("Cleaner should not allow to delete this file as there is a hfile reference " + "for it.", found);
}
Also used : Path(org.apache.hadoop.fs.Path) BackupSystemTable(org.apache.hadoop.hbase.backup.impl.BackupSystemTable) FileStatus(org.apache.hadoop.fs.FileStatus) ArrayList(java.util.ArrayList) Connection(org.apache.hadoop.hbase.client.Connection) TableName(org.apache.hadoop.hbase.TableName) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test)

Aggregations

BackupSystemTable (org.apache.hadoop.hbase.backup.impl.BackupSystemTable)30 TableName (org.apache.hadoop.hbase.TableName)24 Test (org.junit.Test)20 Connection (org.apache.hadoop.hbase.client.Connection)10 Admin (org.apache.hadoop.hbase.client.Admin)7 Path (org.apache.hadoop.fs.Path)6 IOException (java.io.IOException)5 HashSet (java.util.HashSet)5 Configuration (org.apache.hadoop.conf.Configuration)4 FileSystem (org.apache.hadoop.fs.FileSystem)4 Map (java.util.Map)3 FileStatus (org.apache.hadoop.fs.FileStatus)3 Table (org.apache.hadoop.hbase.client.Table)3 ArrayList (java.util.ArrayList)2 HashMap (java.util.HashMap)2 BackupInfo (org.apache.hadoop.hbase.backup.BackupInfo)2 BackupAdminImpl (org.apache.hadoop.hbase.backup.impl.BackupAdminImpl)2 Put (org.apache.hadoop.hbase.client.Put)2 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)2 Pair (org.apache.hadoop.hbase.util.Pair)2