Example 26 with Connection

Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

The class CreateSnapshot, method doWork.

@Override
protected int doWork() throws Exception {
    Connection connection = null;
    Admin admin = null;
    try {
        connection = ConnectionFactory.createConnection(getConf());
        admin = connection.getAdmin();
        admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType));
    } catch (Exception e) {
        System.err.println("failed to take the snapshot: " + e.getMessage());
        return -1;
    } finally {
        if (admin != null) {
            admin.close();
        }
        if (connection != null) {
            connection.close();
        }
    }
    return 0;
}
Also used : Connection(org.apache.hadoop.hbase.client.Connection) SnapshotDescription(org.apache.hadoop.hbase.client.SnapshotDescription) Admin(org.apache.hadoop.hbase.client.Admin)
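
The null-checked finally block above predates try-with-resources; since both Connection and Admin are Closeable, the same logic can be written more compactly. A minimal sketch, assuming the same snapshotName, tableName, and snapshotType fields:

@Override
protected int doWork() throws Exception {
    // Connection and Admin are AutoCloseable, so try-with-resources
    // closes both (in reverse order) whether or not the snapshot succeeds.
    try (Connection connection = ConnectionFactory.createConnection(getConf());
         Admin admin = connection.getAdmin()) {
        admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType));
    } catch (Exception e) {
        System.err.println("failed to take the snapshot: " + e.getMessage());
        return -1;
    }
    return 0;
}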

Example 27 with Connection

Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

The class TestBackupLogCleaner, method testBackupLogCleaner.

// Implements all test cases in one test, since full backup and
// incremental backup have dependencies on each other
@Test
public void testBackupLogCleaner() throws Exception {
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tableSetFullList = Lists.newArrayList(table1, table2, table3, table4);
    try (BackupSystemTable systemTable = new BackupSystemTable(TEST_UTIL.getConnection())) {
        // Verify that we have no backup sessions yet
        assertFalse(systemTable.hasBackupSessions());
        List<FileStatus> walFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
        List<String> swalFiles = convert(walFiles);
        BackupLogCleaner cleaner = new BackupLogCleaner();
        cleaner.setConf(TEST_UTIL.getConfiguration());
        cleaner.init(null);
        Iterable<FileStatus> deletable = cleaner.getDeletableFiles(walFiles);
        int size = Iterables.size(deletable);
        // We can delete all files because no backup sessions have been recorded yet
        assertTrue(size == walFiles.size());
        systemTable.addWALFiles(swalFiles, "backup", "root");
        String backupIdFull = fullTableBackup(tableSetFullList);
        assertTrue(checkSucceeded(backupIdFull));
        // Check one more time
        deletable = cleaner.getDeletableFiles(walFiles);
        // We can delete the WAL files because they were saved into the backup system table
        size = Iterables.size(deletable);
        assertTrue(size == walFiles.size());
        List<FileStatus> newWalFiles = getListOfWALFiles(TEST_UTIL.getConfiguration());
        LOG.debug("WAL list after full backup");
        convert(newWalFiles);
        // The new list of WAL files is longer than the previous one,
        // because a new WAL per region server was opened after the full backup
        assertTrue(walFiles.size() < newWalFiles.size());
        Connection conn = ConnectionFactory.createConnection(conf1);
        // #2 - insert some data into the table
        HTable t1 = (HTable) conn.getTable(table1);
        Put p1;
        for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
            p1 = new Put(Bytes.toBytes("row-t1" + i));
            p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
            t1.put(p1);
        }
        t1.close();
        HTable t2 = (HTable) conn.getTable(table2);
        Put p2;
        for (int i = 0; i < 5; i++) {
            p2 = new Put(Bytes.toBytes("row-t2" + i));
            p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
            t2.put(p2);
        }
        t2.close();
        // #3 - incremental backup for multiple tables
        List<TableName> tableSetIncList = Lists.newArrayList(table1, table2, table3);
        String backupIdIncMultiple = backupTables(BackupType.INCREMENTAL, tableSetIncList, BACKUP_ROOT_DIR);
        assertTrue(checkSucceeded(backupIdIncMultiple));
        deletable = cleaner.getDeletableFiles(newWalFiles);
        assertTrue(Iterables.size(deletable) == newWalFiles.size());
        conn.close();
    }
}
Also used : BackupSystemTable(org.apache.hadoop.hbase.backup.impl.BackupSystemTable) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) Connection(org.apache.hadoop.hbase.client.Connection) HTable(org.apache.hadoop.hbase.client.HTable) Put(org.apache.hadoop.hbase.client.Put) TableName(org.apache.hadoop.hbase.TableName) Test(org.junit.Test)
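
The insert loop in this test (and in the examples that follow) always has the same shape: obtain a Table from the Connection, build one Put per row key with a single column, and write it. A sketch of that pattern as a standalone helper; loadRows is a hypothetical name introduced here for illustration, not part of the HBase test code:

// Hypothetical helper illustrating the row-loading pattern used in these tests.
private static void loadRows(Connection conn, TableName table, byte[] family,
        byte[] qualifier, String rowPrefix, int rowCount) throws IOException {
    // Table is Closeable, so try-with-resources releases it after the loop.
    try (Table t = conn.getTable(table)) {
        for (int i = 0; i < rowCount; i++) {
            Put p = new Put(Bytes.toBytes(rowPrefix + i));
            p.addColumn(family, qualifier, Bytes.toBytes("val" + i));
            t.put(p);
        }
    }
}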

Example 28 with Connection

Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

The class TestIncrementalBackupDeleteTable, method TestIncBackupDeleteTable.

// Implements all test cases in one test, since incremental backup/restore has dependencies
@Test
public void TestIncBackupDeleteTable() throws Exception {
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    Connection conn = ConnectionFactory.createConnection(conf1);
    HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdFull));
    // #2 - insert some data into table1
    HTable t1 = (HTable) conn.getTable(table1);
    Put p1;
    for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
        p1 = new Put(Bytes.toBytes("row-t1" + i));
        p1.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t1.put(p1);
    }
    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH * 2);
    t1.close();
    // Delete table table2
    admin.disableTable(table2);
    admin.deleteTable(table2);
    // #3 - incremental backup for table1
    tables = Lists.newArrayList(table1);
    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
    String backupIdIncMultiple = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdIncMultiple));
    // #4 - restore full backup for all tables, without overwrite
    TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
    TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false, tablesRestoreFull, tablesMapFull, false));
    // #5.1 - check tables for full restore
    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
    assertTrue(hAdmin.tableExists(table1_restore));
    assertTrue(hAdmin.tableExists(table2_restore));
    // #5.2 - checking row count of tables for full restore
    HTable hTable = (HTable) conn.getTable(table1_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
    hTable.close();
    hTable = (HTable) conn.getTable(table2_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
    hTable.close();
    // #6 - restore incremental backup for table1
    TableName[] tablesRestoreIncMultiple = new TableName[] { table1 };
    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore };
    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple, false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
    hTable = (HTable) conn.getTable(table1_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH * 2);
    hTable.close();
    admin.close();
    conn.close();
}
Also used : TableName(org.apache.hadoop.hbase.TableName) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) Connection(org.apache.hadoop.hbase.client.Connection) HTable(org.apache.hadoop.hbase.client.HTable) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)
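
The positional booleans in createRestoreRequest are easy to misread. Going by how this test uses it, the arguments line up as in the sketch below; the parameter meanings in the comments are inferred from usage, not copied from the HBase source:

RestoreRequest req = BackupUtils.createRestoreRequest(
    BACKUP_ROOT_DIR,     // root directory the backup image was written to
    backupIdFull,        // backup id returned by backupTables()
    false,               // check: verify-only, no actual restore, when true
    tablesRestoreFull,   // source tables recorded in the backup image
    tablesMapFull,       // target tables to restore into
    false);              // overwrite existing target tables when true
client.restore(req);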

Example 29 with Connection

Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

The class TestRemoteBackup, method testFullBackupRemote.

/**
   * Verify that a remote full backup of a single table with data is created correctly.
   * @throws Exception
   */
@Test
public void testFullBackupRemote() throws Exception {
    LOG.info("test remote full backup on a single table");
    final CountDownLatch latch = new CountDownLatch(1);
    final int NB_ROWS_IN_FAM3 = 6;
    final byte[] fam3Name = Bytes.toBytes("f3");
    final byte[] fam2Name = Bytes.toBytes("f2");
    final Connection conn = ConnectionFactory.createConnection(conf1);
    Thread t = new Thread() {

        @Override
        public void run() {
            try {
                latch.await();
            } catch (InterruptedException ie) {
                // Ignored: if the await() is interrupted, the writer thread simply proceeds.
            }
            try {
                HTable t1 = (HTable) conn.getTable(table1);
                Put p1;
                for (int i = 0; i < NB_ROWS_IN_FAM3; i++) {
                    p1 = new Put(Bytes.toBytes("row-t1" + i));
                    p1.addColumn(fam3Name, qualName, Bytes.toBytes("val" + i));
                    t1.put(p1);
                }
                LOG.debug("Wrote " + NB_ROWS_IN_FAM3 + " rows into family3");
                t1.close();
            } catch (IOException ioe) {
                throw new RuntimeException(ioe);
            }
        }
    };
    t.start();
    table1Desc.addFamily(new HColumnDescriptor(fam3Name));
    // family 2 is MOB enabled
    HColumnDescriptor hcd = new HColumnDescriptor(fam2Name);
    hcd.setMobEnabled(true);
    hcd.setMobThreshold(0L);
    table1Desc.addFamily(hcd);
    HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
    SnapshotTestingUtils.loadData(TEST_UTIL, table1, 50, fam2Name);
    HTable t1 = (HTable) conn.getTable(table1);
    int rows0 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
    latch.countDown();
    String backupId = backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
    assertTrue(checkSucceeded(backupId));
    LOG.info("backup complete " + backupId);
    Assert.assertEquals(TEST_UTIL.countRows(t1, famName), NB_ROWS_IN_BATCH);
    t.join();
    Assert.assertEquals(TEST_UTIL.countRows(t1, fam3Name), NB_ROWS_IN_FAM3);
    t1.close();
    TableName[] tablesRestoreFull = new TableName[] { table1 };
    TableName[] tablesMapFull = new TableName[] { table1_restore };
    BackupAdmin client = getBackupAdmin();
    client.restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tablesRestoreFull, tablesMapFull, false));
    // check tables for full restore
    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
    assertTrue(hAdmin.tableExists(table1_restore));
    // #5.2 - checking row count of tables for full restore
    HTable hTable = (HTable) conn.getTable(table1_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH);
    int cnt3 = TEST_UTIL.countRows(hTable, fam3Name);
    Assert.assertTrue(cnt3 >= 0 && cnt3 <= NB_ROWS_IN_FAM3);
    int rows1 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
    Assert.assertEquals(rows0, rows1);
    hTable.close();
    hAdmin.close();
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) HTable(org.apache.hadoop.hbase.client.HTable) Put(org.apache.hadoop.hbase.client.Put) TableName(org.apache.hadoop.hbase.TableName) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) Test(org.junit.Test)
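
HColumnDescriptor is deprecated in later HBase releases in favor of the builder API. A sketch of the same MOB-enabled family setup against the HBase 2.x client, offered as an assumption about the newer API rather than a drop-in replacement for this test:

// Equivalent MOB-enabled column family via ColumnFamilyDescriptorBuilder (HBase 2.x).
ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
    .newBuilder(fam2Name)
    .setMobEnabled(true)    // store large cell values as MOB files
    .setMobThreshold(0L)    // threshold 0 makes every cell a MOB
    .build();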

Example 30 with Connection

Use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

The class TestBackupBase, method backupTables.

protected String backupTables(BackupType type, List<TableName> tables, String path) throws IOException {
    Connection conn = null;
    BackupAdmin badmin = null;
    String backupId;
    try {
        conn = ConnectionFactory.createConnection(conf1);
        badmin = new BackupAdminImpl(conn);
        BackupRequest request = createBackupRequest(type, tables, path);
        backupId = badmin.backupTables(request);
    } finally {
        if (badmin != null) {
            badmin.close();
        }
        if (conn != null) {
            conn.close();
        }
    }
    return backupId;
}
Also used : BackupAdminImpl(org.apache.hadoop.hbase.backup.impl.BackupAdminImpl) Connection(org.apache.hadoop.hbase.client.Connection)
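
This helper is what the earlier tests call; a typical invocation, as in Example 27:

String backupId = backupTables(BackupType.FULL,
    Lists.newArrayList(table1, table2), BACKUP_ROOT_DIR);
assertTrue(checkSucceeded(backupId));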

Aggregations

Connection (org.apache.hadoop.hbase.client.Connection): 297
Table (org.apache.hadoop.hbase.client.Table): 191
Test (org.junit.Test): 171
IOException (java.io.IOException): 113
TableName (org.apache.hadoop.hbase.TableName): 103
Result (org.apache.hadoop.hbase.client.Result): 101
Admin (org.apache.hadoop.hbase.client.Admin): 86
Scan (org.apache.hadoop.hbase.client.Scan): 79
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 75
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 71
Put (org.apache.hadoop.hbase.client.Put): 68
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 57
Delete (org.apache.hadoop.hbase.client.Delete): 55
Configuration (org.apache.hadoop.conf.Configuration): 53
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 51
Get (org.apache.hadoop.hbase.client.Get): 48
InterruptedIOException (java.io.InterruptedIOException): 45
Cell (org.apache.hadoop.hbase.Cell): 41
CellScanner (org.apache.hadoop.hbase.CellScanner): 34
ArrayList (java.util.ArrayList): 25