Search in sources:

Example 31 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class TestRemoteBackup, method testFullBackupRemote.

/**
 * Verify that a remote full backup is created on a single table with data correctly.
 * A writer thread adds rows to a third column family concurrently with the backup,
 * so the restored count for that family is only guaranteed to be in
 * [0, NB_ROWS_IN_FAM3]. MOB row counts must survive the backup/restore round trip.
 * @throws Exception if backup, restore, or verification fails
 */
@Test
public void testFullBackupRemote() throws Exception {
    LOG.info("test remote full backup on a single table");
    final CountDownLatch latch = new CountDownLatch(1);
    final int NB_ROWS_IN_FAM3 = 6;
    final byte[] fam3Name = Bytes.toBytes("f3");
    final byte[] fam2Name = Bytes.toBytes("f2");
    final Connection conn = ConnectionFactory.createConnection(conf1);
    Thread t = new Thread() {

        @Override
        public void run() {
            try {
                latch.await();
            } catch (InterruptedException ie) {
                // BUGFIX: do not swallow the interrupt silently — restore the
                // thread's interrupt status per the InterruptedException contract.
                Thread.currentThread().interrupt();
            }
            try {
                HTable t1 = (HTable) conn.getTable(table1);
                Put p1;
                for (int i = 0; i < NB_ROWS_IN_FAM3; i++) {
                    p1 = new Put(Bytes.toBytes("row-t1" + i));
                    p1.addColumn(fam3Name, qualName, Bytes.toBytes("val" + i));
                    t1.put(p1);
                }
                LOG.debug("Wrote " + NB_ROWS_IN_FAM3 + " rows into family3");
                t1.close();
            } catch (IOException ioe) {
                throw new RuntimeException(ioe);
            }
        }
    };
    t.start();
    // Add the family the writer thread targets, plus a MOB-enabled family.
    table1Desc.addFamily(new HColumnDescriptor(fam3Name));
    // family 2 is MOB enabled
    HColumnDescriptor hcd = new HColumnDescriptor(fam2Name);
    hcd.setMobEnabled(true);
    hcd.setMobThreshold(0L);
    table1Desc.addFamily(hcd);
    HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
    SnapshotTestingUtils.loadData(TEST_UTIL, table1, 50, fam2Name);
    HTable t1 = (HTable) conn.getTable(table1);
    int rows0 = MobSnapshotTestingUtils.countMobRows(t1, fam2Name);
    // Release the writer so its puts race with the backup below.
    latch.countDown();
    String backupId = backupTables(BackupType.FULL, Lists.newArrayList(table1), BACKUP_REMOTE_ROOT_DIR);
    assertTrue(checkSucceeded(backupId));
    LOG.info("backup complete " + backupId);
    Assert.assertEquals(TEST_UTIL.countRows(t1, famName), NB_ROWS_IN_BATCH);
    t.join();
    Assert.assertEquals(TEST_UTIL.countRows(t1, fam3Name), NB_ROWS_IN_FAM3);
    t1.close();
    TableName[] tablesRestoreFull = new TableName[] { table1 };
    TableName[] tablesMapFull = new TableName[] { table1_restore };
    BackupAdmin client = getBackupAdmin();
    client.restore(BackupUtils.createRestoreRequest(BACKUP_REMOTE_ROOT_DIR, backupId, false, tablesRestoreFull, tablesMapFull, false));
    // check tables for full restore
    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
    assertTrue(hAdmin.tableExists(table1_restore));
    // #5.2 - checking row count of tables for full restore
    HTable hTable = (HTable) conn.getTable(table1_restore);
    Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH);
    // fam3 rows were written concurrently with the backup; only a range is guaranteed.
    int cnt3 = TEST_UTIL.countRows(hTable, fam3Name);
    Assert.assertTrue(cnt3 >= 0 && cnt3 <= NB_ROWS_IN_FAM3);
    // BUGFIX: the original counted MOB rows on t1, which was already closed above
    // (use-after-close), and the restore check must run against the restored table.
    int rows1 = MobSnapshotTestingUtils.countMobRows(hTable, fam2Name);
    Assert.assertEquals(rows0, rows1);
    hTable.close();
    hAdmin.close();
    // BUGFIX: close the connection this test created to avoid leaking resources.
    conn.close();
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) HTable(org.apache.hadoop.hbase.client.HTable) Put(org.apache.hadoop.hbase.client.Put) TableName(org.apache.hadoop.hbase.TableName) HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) Test(org.junit.Test)

Example 32 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class TestSizeFailures, method setUpBeforeClass.

/**
 * Starts a mini cluster, creates a pre-split test table, and loads NUM_ROWS
 * rows of NUM_COLS random 50-byte values in batches of 1000 puts.
 * @throws Exception if cluster startup, table creation, or loading fails
 */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
    // Uncomment the following lines if more verbosity is needed for
    // debugging (see HBASE-12285 for details).
    //((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
    //((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
    //((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
    Configuration conf = TEST_UTIL.getConfiguration();
    // NOTE(review): the original comment said "ignore sanity checks in the
    // server", but 'true' enables them — confirm which setting was intended.
    conf.setBoolean("hbase.table.sanity.checks", true);
    TEST_UTIL.startMiniCluster(SLAVES);
    // Write a bunch of data
    TABLENAME = TableName.valueOf("testSizeFailures");
    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
    HTableDescriptor desc = new HTableDescriptor(TABLENAME);
    desc.addFamily(hcd);
    // Nine two-byte split keys encoding the values 49..57 ('1'..'9') big-endian.
    byte[][] splits = new byte[9][2];
    for (int i = 1; i < 10; i++) {
        int split = 48 + i;
        splits[i - 1][0] = (byte) (split >>> 8);
        // BUGFIX: the low byte belongs in index 1; the original assigned
        // index 0 twice, clobbering the high byte and leaving byte 1 zero.
        splits[i - 1][1] = (byte) (split);
    }
    TEST_UTIL.getAdmin().createTable(desc, splits);
    Connection conn = TEST_UTIL.getConnection();
    try (Table table = conn.getTable(TABLENAME)) {
        List<Put> puts = new LinkedList<>();
        for (int i = 0; i < NUM_ROWS; i++) {
            Put p = new Put(Bytes.toBytes(Integer.toString(i)));
            for (int j = 0; j < NUM_COLS; j++) {
                byte[] value = new byte[50];
                Bytes.random(value);
                p.addColumn(FAMILY, Bytes.toBytes(Integer.toString(j)), value);
            }
            puts.add(p);
            // Flush in batches of 1000 puts to bound client-side memory use.
            if (puts.size() == 1000) {
                table.batch(puts, null);
                puts.clear();
            }
        }
        // Flush any remaining partial batch.
        if (puts.size() > 0) {
            table.batch(puts, null);
        }
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) ArrayList(java.util.ArrayList) LinkedList(java.util.LinkedList) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) BeforeClass(org.junit.BeforeClass)

Example 33 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class TestSnapshotCloneIndependence, method runTestSnapshotMetadataChangesIndependent.

/**
 * Adds a new column family to the original table and verifies that the
 * schema change is confined to it — the cloned table must stay unchanged.
 */
private void runTestSnapshotMetadataChangesIndependent() throws Exception {
    // Attach a brand-new family to the original table (disable first, as required).
    byte[] newFamilyName = Bytes.toBytes("fam2");
    admin.disableTable(originalTableName);
    admin.addColumnFamily(originalTableName, new HColumnDescriptor(newFamilyName));
    admin.enableTable(originalTableName);
    UTIL.waitTableAvailable(originalTableName);
    // Fetch both schemas and check: the shared family is present in both,
    // while the freshly added family appears only in the original.
    HTableDescriptor originalSchema = originalTable.getTableDescriptor();
    HTableDescriptor cloneSchema = admin.getTableDescriptor(cloneTableName);
    Assert.assertTrue("The original family was not found. There is something wrong. ", originalSchema.hasFamily(TEST_FAM));
    Assert.assertTrue("The original family was not found in the clone. There is something wrong. ", cloneSchema.hasFamily(TEST_FAM));
    Assert.assertTrue("The new family was not found. ", originalSchema.hasFamily(newFamilyName));
    Assert.assertTrue("The new family was not found. ", !cloneSchema.hasFamily(newFamilyName));
}
Also used : HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)

Example 34 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class TestMasterCoprocessorExceptionWithRemove, method testExceptionFromCoprocessorWhenCreatingTable.

/**
 * Verifies that a coprocessor throwing from preCreateTable causes a
 * DoNotRetryIOException for the client but does NOT abort the master
 * (hbase.coprocessor.abortonerror defaults to false), and that the buggy
 * coprocessor is removed so a subsequent create succeeds.
 * @throws IOException if an admin operation fails unexpectedly
 */
@Test(timeout = 30000)
public void testExceptionFromCoprocessorWhenCreatingTable() throws IOException {
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    MasterCoprocessorHost host = master.getMasterCoprocessorHost();
    BuggyMasterObserver cp = (BuggyMasterObserver) host.findCoprocessor(BuggyMasterObserver.class.getName());
    assertFalse("No table created yet", cp.wasCreateTableCalled());
    // Set a watch on the zookeeper /hbase/master node. If the master dies,
    // the node will be deleted.
    // Master should *NOT* die:
    // we are testing that the default setting of hbase.coprocessor.abortonerror
    // =false
    // is respected.
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(), "unittest", new Abortable() {

        @Override
        public void abort(String why, Throwable e) {
            throw new RuntimeException("Fatal ZK error: " + why, e);
        }

        @Override
        public boolean isAborted() {
            return false;
        }
    });
    MasterTracker masterTracker = new MasterTracker(zkw, "/hbase/master", new Abortable() {

        @Override
        public void abort(String why, Throwable e) {
            // BUGFIX: the original message ended with "why=" but never
            // appended the reason, losing the diagnostic information.
            throw new RuntimeException("Fatal ZooKeeper tracker error, why=" + why, e);
        }

        @Override
        public boolean isAborted() {
            return false;
        }
    });
    masterTracker.start();
    zkw.registerListener(masterTracker);
    // Test (part of the) output that should have be printed by master when it aborts:
    // (namely the part that shows the set of loaded coprocessors).
    // In this test, there is only a single coprocessor (BuggyMasterObserver).
    String coprocessorName = BuggyMasterObserver.class.getName();
    assertTrue(HMaster.getLoadedCoprocessors().contains(coprocessorName));
    HTableDescriptor htd1 = new HTableDescriptor(TableName.valueOf(TEST_TABLE1));
    htd1.addFamily(new HColumnDescriptor(TEST_FAMILY1));
    boolean threwDNRE = false;
    try {
        Admin admin = UTIL.getAdmin();
        admin.createTable(htd1);
    } catch (IOException e) {
        // Only the expected DoNotRetryIOException counts; any other IOException
        // leaves threwDNRE false and fails the assertion below.
        if (e.getClass().getName().equals("org.apache.hadoop.hbase.DoNotRetryIOException")) {
            threwDNRE = true;
        }
    } finally {
        assertTrue(threwDNRE);
    }
    // wait for a few seconds to make sure that the Master hasn't aborted.
    try {
        Thread.sleep(3000);
    } catch (InterruptedException e) {
        // BUGFIX: restore the interrupt status before failing, per the
        // InterruptedException contract.
        Thread.currentThread().interrupt();
        fail("InterruptedException while sleeping.");
    }
    assertFalse("Master survived coprocessor NPE, as expected.", masterTracker.masterZKNodeWasDeleted);
    String loadedCoprocessors = HMaster.getLoadedCoprocessors();
    assertTrue(loadedCoprocessors.contains(coprocessorName));
    // Verify that BuggyMasterObserver has been removed due to its misbehavior
    // by creating another table: should not have a problem this time.
    HTableDescriptor htd2 = new HTableDescriptor(TableName.valueOf(TEST_TABLE2));
    htd2.addFamily(new HColumnDescriptor(TEST_FAMILY2));
    Admin admin = UTIL.getAdmin();
    try {
        admin.createTable(htd2);
    } catch (IOException e) {
        fail("Failed to create table after buggy coprocessor removal: " + e);
    }
    // BUGFIX: release the ZooKeeper watcher this test created.
    zkw.close();
}
Also used : MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) Abortable(org.apache.hadoop.hbase.Abortable) HMaster(org.apache.hadoop.hbase.master.HMaster) Test(org.junit.Test)

Example 35 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in project hbase by apache.

The class TestMasterObserver, method testTableOperations.

/**
 * Walks a table through its full lifecycle (create, region merge, disable,
 * enable, modify table, add/modify/delete column family, truncate, delete)
 * twice: first with coprocessor bypass enabled, then disabled, asserting
 * after each operation which CPMasterObserver hooks fired. The exact order
 * of operations matters — several admin calls require the table to be in a
 * specific enabled/disabled state.
 */
@Test(timeout = 180000)
public void testTableOperations() throws Exception {
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HMaster master = cluster.getMaster();
    MasterCoprocessorHost host = master.getMasterCoprocessorHost();
    CPMasterObserver cp = (CPMasterObserver) host.findCoprocessor(CPMasterObserver.class.getName());
    // Phase 1: bypass enabled — the "pre*CalledOnly" assertions below verify
    // that bypassable actions were short-circuited.
    cp.enableBypass(true);
    cp.resetStates();
    assertFalse("No table created yet", cp.wasCreateTableCalled());
    // create a table
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
    try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
        Admin admin = connection.getAdmin()) {
        tableCreationLatch = new CountDownLatch(1);
        admin.createTable(htd, Arrays.copyOfRange(HBaseTestingUtility.KEYS, 1, HBaseTestingUtility.KEYS.length));
        // preCreateTable can't bypass default action.
        assertTrue("Test table should be created", cp.wasCreateTableCalled());
        tableCreationLatch.await();
        assertTrue("Table pre create handler called.", cp.wasPreCreateTableActionCalled());
        assertTrue("Table create handler should be called.", cp.wasCreateTableActionCalled());
        // Merge the first two regions (forced merge) and check the hook fired.
        RegionLocator regionLocator = connection.getRegionLocator(htd.getTableName());
        List<HRegionLocation> regions = regionLocator.getAllRegionLocations();
        admin.mergeRegionsAsync(regions.get(0).getRegionInfo().getEncodedNameAsBytes(), regions.get(1).getRegionInfo().getEncodedNameAsBytes(), true);
        assertTrue("Coprocessor should have been called on region merge", cp.wasMergeRegionsCalled());
        tableCreationLatch = new CountDownLatch(1);
        admin.disableTable(tableName);
        assertTrue(admin.isTableDisabled(tableName));
        // preDisableTable can't bypass default action.
        assertTrue("Coprocessor should have been called on table disable", cp.wasDisableTableCalled());
        assertTrue("Disable table handler should be called.", cp.wasDisableTableActionCalled());
        // enable
        assertFalse(cp.wasEnableTableCalled());
        admin.enableTable(tableName);
        assertTrue(admin.isTableEnabled(tableName));
        // preEnableTable can't bypass default action.
        assertTrue("Coprocessor should have been called on table enable", cp.wasEnableTableCalled());
        assertTrue("Enable table handler should be called.", cp.wasEnableTableActionCalled());
        // Table must be disabled again before the schema modifications below.
        admin.disableTable(tableName);
        assertTrue(admin.isTableDisabled(tableName));
        // modify table
        htd.setMaxFileSize(512 * 1024 * 1024);
        modifyTableSync(admin, tableName, htd);
        // preModifyTable can't bypass default action.
        assertTrue("Test table should have been modified", cp.wasModifyTableCalled());
        // add a column family — with bypass on, only the "pre" hook runs.
        admin.addColumnFamily(tableName, new HColumnDescriptor(TEST_FAMILY2));
        assertTrue("New column family shouldn't have been added to test table", cp.preAddColumnCalledOnly());
        // modify a column family — likewise only the "pre" hook runs.
        HColumnDescriptor hcd1 = new HColumnDescriptor(TEST_FAMILY2);
        hcd1.setMaxVersions(25);
        admin.modifyColumnFamily(tableName, hcd1);
        assertTrue("Second column family should be modified", cp.preModifyColumnCalledOnly());
        // truncate table
        admin.truncateTable(tableName, false);
        // delete table
        admin.disableTable(tableName);
        assertTrue(admin.isTableDisabled(tableName));
        deleteTable(admin, tableName);
        assertFalse("Test table should have been deleted", admin.tableExists(tableName));
        // preDeleteTable can't bypass default action.
        assertTrue("Coprocessor should have been called on table delete", cp.wasDeleteTableCalled());
        assertTrue("Delete table handler should be called.", cp.wasDeleteTableActionCalled());
        // Phase 2: turn off bypass, run the tests again — now every post-action
        // hook should fire too.
        cp.enableBypass(false);
        cp.resetStates();
        admin.createTable(htd);
        assertTrue("Test table should be created", cp.wasCreateTableCalled());
        tableCreationLatch.await();
        assertTrue("Table pre create handler called.", cp.wasPreCreateTableActionCalled());
        assertTrue("Table create handler should be called.", cp.wasCreateTableActionCalled());
        // disable
        assertFalse(cp.wasDisableTableCalled());
        assertFalse(cp.wasDisableTableActionCalled());
        admin.disableTable(tableName);
        assertTrue(admin.isTableDisabled(tableName));
        assertTrue("Coprocessor should have been called on table disable", cp.wasDisableTableCalled());
        assertTrue("Disable table handler should be called.", cp.wasDisableTableActionCalled());
        // modify table
        htd.setMaxFileSize(512 * 1024 * 1024);
        modifyTableSync(admin, tableName, htd);
        assertTrue("Test table should have been modified", cp.wasModifyTableCalled());
        // add a column family — without bypass the action hook fires as well.
        admin.addColumnFamily(tableName, new HColumnDescriptor(TEST_FAMILY2));
        assertTrue("New column family should have been added to test table", cp.wasAddColumnCalled());
        assertTrue("Add column handler should be called.", cp.wasAddColumnFamilyActionCalled());
        // modify a column family
        HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY2);
        hcd.setMaxVersions(25);
        admin.modifyColumnFamily(tableName, hcd);
        assertTrue("Second column family should be modified", cp.wasModifyColumnCalled());
        assertTrue("Modify table handler should be called.", cp.wasModifyColumnFamilyActionCalled());
        // enable
        assertFalse(cp.wasEnableTableCalled());
        assertFalse(cp.wasEnableTableActionCalled());
        admin.enableTable(tableName);
        assertTrue(admin.isTableEnabled(tableName));
        assertTrue("Coprocessor should have been called on table enable", cp.wasEnableTableCalled());
        assertTrue("Enable table handler should be called.", cp.wasEnableTableActionCalled());
        // disable again — deleteColumnFamily below requires a disabled table.
        admin.disableTable(tableName);
        assertTrue(admin.isTableDisabled(tableName));
        // delete column
        assertFalse("No column family deleted yet", cp.wasDeleteColumnCalled());
        assertFalse("Delete table column handler should not be called.", cp.wasDeleteColumnFamilyActionCalled());
        admin.deleteColumnFamily(tableName, TEST_FAMILY2);
        HTableDescriptor tableDesc = admin.getTableDescriptor(tableName);
        assertNull("'" + Bytes.toString(TEST_FAMILY2) + "' should have been removed", tableDesc.getFamily(TEST_FAMILY2));
        assertTrue("Coprocessor should have been called on column delete", cp.wasDeleteColumnCalled());
        assertTrue("Delete table column handler should be called.", cp.wasDeleteColumnFamilyActionCalled());
        // delete table
        assertFalse("No table deleted yet", cp.wasDeleteTableCalled());
        assertFalse("Delete table handler should not be called.", cp.wasDeleteTableActionCalled());
        deleteTable(admin, tableName);
        assertFalse("Test table should have been deleted", admin.tableExists(tableName));
        assertTrue("Coprocessor should have been called on table delete", cp.wasDeleteTableCalled());
        assertTrue("Delete table handler should be called.", cp.wasDeleteTableActionCalled());
    }
}
Also used : RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Connection(org.apache.hadoop.hbase.client.Connection) MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster) Admin(org.apache.hadoop.hbase.client.Admin) CountDownLatch(java.util.concurrent.CountDownLatch) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) HMaster(org.apache.hadoop.hbase.master.HMaster) Test(org.junit.Test)

Aggregations

HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)679 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)561 Test (org.junit.Test)358 TableName (org.apache.hadoop.hbase.TableName)200 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)137 Put (org.apache.hadoop.hbase.client.Put)132 Table (org.apache.hadoop.hbase.client.Table)118 IOException (java.io.IOException)112 Admin (org.apache.hadoop.hbase.client.Admin)112 Path (org.apache.hadoop.fs.Path)81 HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin)74 ArrayList (java.util.ArrayList)66 Configuration (org.apache.hadoop.conf.Configuration)65 Connection (org.apache.hadoop.hbase.client.Connection)52 Scan (org.apache.hadoop.hbase.client.Scan)50 Result (org.apache.hadoop.hbase.client.Result)45 FileSystem (org.apache.hadoop.fs.FileSystem)44 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)42 Connection (java.sql.Connection)41 Properties (java.util.Properties)38