
Example 16 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestRemoteBackup, method testFullBackupRemote.

/**
 * Verify that a full backup of a single table containing data is correctly created on a remote
 * destination.
 *
 * @throws Exception if an operation on the table fails
 */
@Test
public void testFullBackupRemote() throws Exception {
    LOG.info("test remote full backup on a single table");
    final CountDownLatch latch = new CountDownLatch(1);
    final int NB_ROWS_IN_FAM3 = 6;
    final byte[] fam3Name = Bytes.toBytes("f3");
    final byte[] fam2Name = Bytes.toBytes("f2");
    final Connection conn = ConnectionFactory.createConnection(conf1);
    Thread t = new Thread(() -> {
        try {
            latch.await();
        } catch (InterruptedException ie) {
            // Restore the interrupt status instead of swallowing it silently.
            Thread.currentThread().interrupt();
        }
        try {
            Table t1 = conn.getTable(table1);
            Put p1;
            for (int i = 0; i < NB_ROWS_IN_FAM3; i++) {
                p1 = new Put(Bytes.toBytes("row-t1" + i));
                p1.addColumn(fam3Name, qualName, Bytes.toBytes("val" + i));
                t1.put(p1);
            }
            LOG.debug("Wrote " + NB_ROWS_IN_FAM3 + " rows into family3");
            t1.close();
        } catch (IOException ioe) {
            throw new RuntimeException(ioe);
        }
    });
    t.start();
    // family 2 is MOB enabled
    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(fam2Name)
            .setMobEnabled(true)
            .setMobThreshold(0L)
            .build())
        .build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
    SnapshotTestingUtils.loadData(TEST_UTIL, table1, 50, fam2Name);
    Table t1 = conn.getTable(table1);
    int rows0 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
    latch.countDown();
    String backupId = backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
    assertTrue(checkSucceeded(backupId));
    LOG.info("backup complete " + backupId);
    Assert.assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(t1, famName));
    t.join();
    Assert.assertEquals(NB_ROWS_IN_FAM3, TEST_UTIL.countRows(t1, fam3Name));
    t1.close();
    TableName[] tablesRestoreFull = new TableName[] { table1 };
    TableName[] tablesMapFull = new TableName[] { table1_restore };
    BackupAdmin client = getBackupAdmin();
    client.restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tablesRestoreFull, tablesMapFull, false));
    // check tables for full restore
    Admin hAdmin = TEST_UTIL.getAdmin();
    assertTrue(hAdmin.tableExists(table1_restore));
    // #5.2 - checking row count of tables for full restore
    Table hTable = conn.getTable(table1_restore);
    Assert.assertEquals(NB_ROWS_IN_BATCH, TEST_UTIL.countRows(hTable, famName));
    int cnt3 = TEST_UTIL.countRows(hTable, fam3Name);
    Assert.assertTrue(cnt3 >= 0 && cnt3 <= NB_ROWS_IN_FAM3);
    int rows1 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
    Assert.assertEquals(rows0, rows1);
    hTable.close();
    hAdmin.close();
}
Also used: Table (org.apache.hadoop.hbase.client.Table), Connection (org.apache.hadoop.hbase.client.Connection), IOException (java.io.IOException), CountDownLatch (java.util.concurrent.CountDownLatch), Admin (org.apache.hadoop.hbase.client.Admin), Put (org.apache.hadoop.hbase.client.Put), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), TableName (org.apache.hadoop.hbase.TableName), Test (org.junit.Test)
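
The MOB setup is the interesting part of this example. Below is a minimal sketch of just that step, assuming an existing Admin handle, a tableName, and the usual HBase client imports; the names are illustrative, not taken from the test.

// Descriptors are immutable: fetch the current one, rebuild it with a
// MOB-enabled family, and apply the result with modifyTable.
TableDescriptor current = admin.getDescriptor(tableName);
ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f2"))
    .setMobEnabled(true)
    // a threshold of 0 pushes every cell into MOB storage, as the test above does
    .setMobThreshold(0L)
    .build();
admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
    .setColumnFamily(mobFamily)
    .build());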

Example 17 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestIncrementalBackupWithFailures, method testIncBackupRestore.

// implement all test cases in one test, since the incremental backup/restore steps depend on each other
@Test
public void testIncBackupRestore() throws Exception {
    int ADD_ROWS = 99;
    // #1 - create full backup for all tables
    LOG.info("create full backup image for all tables");
    List<TableName> tables = Lists.newArrayList(table1, table2);
    final byte[] fam3Name = Bytes.toBytes("f3");
    TableDescriptor newTable1Desc = TableDescriptorBuilder.newBuilder(table1Desc)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(fam3Name))
        .build();
    TEST_UTIL.getAdmin().modifyTable(newTable1Desc);
    Connection conn = ConnectionFactory.createConnection(conf1);
    int NB_ROWS_FAM3 = 6;
    insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
    Admin admin = conn.getAdmin();
    BackupAdminImpl client = new BackupAdminImpl(conn);
    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
    String backupIdFull = client.backupTables(request);
    assertTrue(checkSucceeded(backupIdFull));
    // #2 - insert some data to table
    Table t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
    LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
    Assert.assertEquals(NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3, TEST_UTIL.countRows(t1));
    t1.close();
    LOG.debug("written " + ADD_ROWS + " rows to " + table1);
    Table t2 = conn.getTable(table2);
    Put p2;
    for (int i = 0; i < 5; i++) {
        p2 = new Put(Bytes.toBytes("row-t2" + i));
        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
        t2.put(p2);
    }
    Assert.assertEquals(NB_ROWS_IN_BATCH + 5, TEST_UTIL.countRows(t2));
    t2.close();
    LOG.debug("written " + 5 + " rows to " + table2);
    // #3 - incremental backup for multiple tables
    incrementalBackupWithFailures();
    admin.close();
    conn.close();
}
Also used: TableName (org.apache.hadoop.hbase.TableName), BackupSystemTable (org.apache.hadoop.hbase.backup.impl.BackupSystemTable), Table (org.apache.hadoop.hbase.client.Table), BackupAdminImpl (org.apache.hadoop.hbase.backup.impl.BackupAdminImpl), Connection (org.apache.hadoop.hbase.client.Connection), Admin (org.apache.hadoop.hbase.client.Admin), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), Put (org.apache.hadoop.hbase.client.Put), Test (org.junit.Test)
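
The same copy-and-rebuild pattern appears here: TableDescriptor is immutable, so adding a family means constructing a new descriptor from the old one. A minimal sketch of the pattern, assuming an Admin handle and illustrative names:

// Rebuild the descriptor with the extra family, then apply it to the live table.
TableDescriptor updated = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("f3")))
    .build();
admin.modifyTable(updated);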

Example 18 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestBatchCoprocessorEndpoint, method setupBeforeClass.

@BeforeClass
public static void setupBeforeClass() throws Exception {
    // set the configuration to indicate which coprocessors should be loaded
    Configuration conf = util.getConfiguration();
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(),
        ProtobufCoprocessorService.class.getName(),
        ColumnAggregationEndpointWithErrors.class.getName(),
        ColumnAggregationEndpointNullResponse.class.getName());
    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, ProtobufCoprocessorService.class.getName());
    util.startMiniCluster(2);
    Admin admin = util.getAdmin();
    TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TABLE)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY))
        .build();
    admin.createTable(tableDescriptor, new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] });
    util.waitUntilAllRegionsAssigned(TEST_TABLE);
    admin.close();
    Table table = util.getConnection().getTable(TEST_TABLE);
    for (int i = 0; i < ROWSIZE; i++) {
        Put put = new Put(ROWS[i]);
        put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
        table.put(put);
    }
    table.close();
}
Also used: Table (org.apache.hadoop.hbase.client.Table), Configuration (org.apache.hadoop.conf.Configuration), Admin (org.apache.hadoop.hbase.client.Admin), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), Put (org.apache.hadoop.hbase.client.Put), BeforeClass (org.junit.BeforeClass)
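
The createTable call above passes explicit split keys so the table starts out pre-split. A minimal standalone sketch of that pattern, with illustrative names:

// Each split key becomes a region boundary, so n keys yield n + 1 initial regions.
TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
    .build();
byte[][] splitKeys = new byte[][] { Bytes.toBytes("m"), Bytes.toBytes("t") };
admin.createTable(desc, splitKeys);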

Example 19 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestVerifyReplication, method testVerifyRepJobWithRawOptions.

/**
 * Load a row into a table, make sure the replicated data is really the same, delete the row,
 * make sure the delete marker is replicated, then run verify replication with and without the
 * raw option to check the results.
 */
@Test
public void testVerifyRepJobWithRawOptions() throws Exception {
    LOG.info(name.getMethodName());
    final TableName tableName = TableName.valueOf(name.getMethodName());
    byte[] familyname = Bytes.toBytes("fam_raw");
    byte[] row = Bytes.toBytes("row_raw");
    Table lHtable1 = null;
    Table lHtable2 = null;
    try {
        ColumnFamilyDescriptor fam = ColumnFamilyDescriptorBuilder.newBuilder(familyname)
            .setMaxVersions(100)
            .setScope(HConstants.REPLICATION_SCOPE_GLOBAL)
            .build();
        TableDescriptor table = TableDescriptorBuilder.newBuilder(tableName)
            .setColumnFamily(fam)
            .build();
        Connection connection1 = ConnectionFactory.createConnection(CONF1);
        Connection connection2 = ConnectionFactory.createConnection(CONF2);
        try (Admin admin1 = connection1.getAdmin()) {
            admin1.createTable(table, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE);
        }
        try (Admin admin2 = connection2.getAdmin()) {
            admin2.createTable(table, HBaseTestingUtil.KEYS_FOR_HBA_CREATE_TABLE);
        }
        UTIL1.waitUntilAllRegionsAssigned(tableName);
        UTIL2.waitUntilAllRegionsAssigned(tableName);
        lHtable1 = UTIL1.getConnection().getTable(tableName);
        lHtable2 = UTIL2.getConnection().getTable(tableName);
        Put put = new Put(row);
        put.addColumn(familyname, row, row);
        lHtable1.put(put);
        Get get = new Get(row);
        for (int i = 0; i < NB_RETRIES; i++) {
            if (i == NB_RETRIES - 1) {
                fail("Waited too much time for put replication");
            }
            Result res = lHtable2.get(get);
            if (res.isEmpty()) {
                LOG.info("Row not available");
                Thread.sleep(SLEEP_TIME);
            } else {
                assertArrayEquals(row, res.value());
                break;
            }
        }
        Delete del = new Delete(row);
        lHtable1.delete(del);
        get = new Get(row);
        for (int i = 0; i < NB_RETRIES; i++) {
            if (i == NB_RETRIES - 1) {
                fail("Waited too much time for del replication");
            }
            Result res = lHtable2.get(get);
            if (res.size() >= 1) {
                LOG.info("Row not deleted");
                Thread.sleep(SLEEP_TIME);
            } else {
                break;
            }
        }
        // Checking verifyReplication for the default behavior.
        String[] argsWithoutRaw = new String[] { PEER_ID, tableName.getNameAsString() };
        runVerifyReplication(argsWithoutRaw, 0, 0);
        // Checking verifyReplication with raw
        String[] argsWithRawAsTrue = new String[] { "--raw", PEER_ID, tableName.getNameAsString() };
        runVerifyReplication(argsWithRawAsTrue, 1, 0);
    } finally {
        if (lHtable1 != null) {
            lHtable1.close();
        }
        if (lHtable2 != null) {
            lHtable2.close();
        }
    }
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), Table (org.apache.hadoop.hbase.client.Table), Connection (org.apache.hadoop.hbase.client.Connection), ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor), Admin (org.apache.hadoop.hbase.client.Admin), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result), TableName (org.apache.hadoop.hbase.TableName), Get (org.apache.hadoop.hbase.client.Get), Test (org.junit.Test)
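
The descriptor built at the top of this test is what makes the replication assertions possible: only column families whose scope is REPLICATION_SCOPE_GLOBAL are shipped to peer clusters. A minimal sketch of that setting, with illustrative names:

// The default scope, HConstants.REPLICATION_SCOPE_LOCAL (0), disables replication
// for a family; REPLICATION_SCOPE_GLOBAL (1) enables it.
TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("replicated_demo"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("fam_raw"))
        .setScope(HConstants.REPLICATION_SCOPE_GLOBAL)
        .build())
    .build();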

Example 20 with TableDescriptor

Use of org.apache.hadoop.hbase.client.TableDescriptor in project hbase by apache.

From the class TestVerifyReplicationCrossDiffHdfs, method createTestingTable.

private static void createTestingTable(Admin admin) throws IOException {
    TableDescriptor table = TableDescriptorBuilder.newBuilder(TABLE_NAME)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY)
            .setMaxVersions(100)
            .setScope(HConstants.REPLICATION_SCOPE_GLOBAL)
            .build())
        .build();
    admin.createTable(table);
}
Also used: TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)
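
Descriptors can also be read back from a running cluster, which is useful for verifying settings like the ones above. A minimal sketch, assuming an Admin handle:

// Fetch the live descriptor and print the per-family settings.
TableDescriptor fetched = admin.getDescriptor(TABLE_NAME);
for (ColumnFamilyDescriptor cf : fetched.getColumnFamilies()) {
    System.out.println(cf.getNameAsString() + " maxVersions=" + cf.getMaxVersions()
        + " scope=" + cf.getScope());
}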

Aggregations

TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 639
Test (org.junit.Test): 356
TableName (org.apache.hadoop.hbase.TableName): 237
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 180
IOException (java.io.IOException): 151
Put (org.apache.hadoop.hbase.client.Put): 142
Admin (org.apache.hadoop.hbase.client.Admin): 136
Path (org.apache.hadoop.fs.Path): 124
Table (org.apache.hadoop.hbase.client.Table): 121
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor): 96
Configuration (org.apache.hadoop.conf.Configuration): 91
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 77
ArrayList (java.util.ArrayList): 75
FileSystem (org.apache.hadoop.fs.FileSystem): 66
Result (org.apache.hadoop.hbase.client.Result): 66
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 64
Connection (org.apache.hadoop.hbase.client.Connection): 59
Scan (org.apache.hadoop.hbase.client.Scan): 50
Get (org.apache.hadoop.hbase.client.Get): 49
List (java.util.List): 39