Example 91 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in the Apache HBase project.

From the class TestMasterCoprocessorExceptionWithRemove, method testExceptionFromCoprocessorWhenCreatingTable.

@Test(timeout = 30000)
public void testExceptionFromCoprocessorWhenCreatingTable() throws IOException {
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    MasterCoprocessorHost host = master.getMasterCoprocessorHost();
    BuggyMasterObserver cp = (BuggyMasterObserver) host.findCoprocessor(BuggyMasterObserver.class.getName());
    assertFalse("No table created yet", cp.wasCreateTableCalled());
    // Set a watch on the zookeeper /hbase/master node. If the master dies,
    // the node will be deleted.
    // Master should *NOT* die: we are testing that the default setting of
    // hbase.coprocessor.abortonerror=false is respected.
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(), "unittest", new Abortable() {

        @Override
        public void abort(String why, Throwable e) {
            throw new RuntimeException("Fatal ZK error: " + why, e);
        }

        @Override
        public boolean isAborted() {
            return false;
        }
    });
    MasterTracker masterTracker = new MasterTracker(zkw, "/hbase/master", new Abortable() {

        @Override
        public void abort(String why, Throwable e) {
            throw new RuntimeException("Fatal ZooKeeper tracker error, why=", e);
        }

        @Override
        public boolean isAborted() {
            return false;
        }
    });
    masterTracker.start();
    zkw.registerListener(masterTracker);
    // Test (part of the) output that should have been printed by the master when it aborts:
    // (namely the part that shows the set of loaded coprocessors).
    // In this test, there is only a single coprocessor (BuggyMasterObserver).
    String coprocessorName = BuggyMasterObserver.class.getName();
    assertTrue(HMaster.getLoadedCoprocessors().contains(coprocessorName));
    HTableDescriptor htd1 = new HTableDescriptor(TableName.valueOf(TEST_TABLE1));
    htd1.addFamily(new HColumnDescriptor(TEST_FAMILY1));
    boolean threwDNRE = false;
    try {
        Admin admin = UTIL.getAdmin();
        admin.createTable(htd1);
    } catch (IOException e) {
        if (e.getClass().getName().equals("org.apache.hadoop.hbase.DoNotRetryIOException")) {
            threwDNRE = true;
        }
    } finally {
        assertTrue(threwDNRE);
    }
    // wait for a few seconds to make sure that the Master hasn't aborted.
    try {
        Thread.sleep(3000);
    } catch (InterruptedException e) {
        fail("InterruptedException while sleeping.");
    }
    assertFalse("Master survived coprocessor NPE, as expected.", masterTracker.masterZKNodeWasDeleted);
    String loadedCoprocessors = HMaster.getLoadedCoprocessors();
    assertTrue(loadedCoprocessors.contains(coprocessorName));
    // Verify that BuggyMasterObserver has been removed due to its misbehavior
    // by creating another table: should not have a problem this time.
    HTableDescriptor htd2 = new HTableDescriptor(TableName.valueOf(TEST_TABLE2));
    htd2.addFamily(new HColumnDescriptor(TEST_FAMILY2));
    Admin admin = UTIL.getAdmin();
    try {
        admin.createTable(htd2);
    } catch (IOException e) {
        fail("Failed to create table after buggy coprocessor removal: " + e);
    }
}
Also used: MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost), HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor), MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster), IOException(java.io.IOException), Admin(org.apache.hadoop.hbase.client.Admin), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher), Abortable(org.apache.hadoop.hbase.Abortable), HMaster(org.apache.hadoop.hbase.master.HMaster), Test(org.junit.Test)
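
The BuggyMasterObserver referenced above is defined elsewhere in the test class and is not shown on this page. As a rough, hypothetical sketch only (the hook choice, class body, and use of the pre-2.0 BaseMasterObserver base class are assumptions, not the actual fixture), a misbehaving observer of this kind could look like:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical sketch: record that the create-table hook fired, then throw an
// unchecked exception. With the default hbase.coprocessor.abortonerror=false,
// the master is expected to unload the coprocessor rather than abort.
public class BuggyMasterObserverSketch extends BaseMasterObserver {

    private volatile boolean createTableCalled = false;

    @Override
    public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
            HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
        createTableCalled = true;
        throw new NullPointerException("deliberate failure from buggy coprocessor");
    }

    public boolean wasCreateTableCalled() {
        return createTableCalled;
    }
}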

Example 92 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in the Apache HBase project.

From the class TestMasterObserver, method testTableOperations.

@Test(timeout = 180000)
public void testTableOperations() throws Exception {
    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HMaster master = cluster.getMaster();
    MasterCoprocessorHost host = master.getMasterCoprocessorHost();
    CPMasterObserver cp = (CPMasterObserver) host.findCoprocessor(CPMasterObserver.class.getName());
    cp.enableBypass(true);
    cp.resetStates();
    assertFalse("No table created yet", cp.wasCreateTableCalled());
    // create a table
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
    try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
        Admin admin = connection.getAdmin()) {
        tableCreationLatch = new CountDownLatch(1);
        admin.createTable(htd, Arrays.copyOfRange(HBaseTestingUtility.KEYS, 1, HBaseTestingUtility.KEYS.length));
        // preCreateTable can't bypass default action.
        assertTrue("Test table should be created", cp.wasCreateTableCalled());
        tableCreationLatch.await();
        assertTrue("Table pre create handler called.", cp.wasPreCreateTableActionCalled());
        assertTrue("Table create handler should be called.", cp.wasCreateTableActionCalled());
        RegionLocator regionLocator = connection.getRegionLocator(htd.getTableName());
        List<HRegionLocation> regions = regionLocator.getAllRegionLocations();
        admin.mergeRegionsAsync(regions.get(0).getRegionInfo().getEncodedNameAsBytes(), regions.get(1).getRegionInfo().getEncodedNameAsBytes(), true);
        assertTrue("Coprocessor should have been called on region merge", cp.wasMergeRegionsCalled());
        tableCreationLatch = new CountDownLatch(1);
        admin.disableTable(tableName);
        assertTrue(admin.isTableDisabled(tableName));
        // preDisableTable can't bypass default action.
        assertTrue("Coprocessor should have been called on table disable", cp.wasDisableTableCalled());
        assertTrue("Disable table handler should be called.", cp.wasDisableTableActionCalled());
        // enable
        assertFalse(cp.wasEnableTableCalled());
        admin.enableTable(tableName);
        assertTrue(admin.isTableEnabled(tableName));
        // preEnableTable can't bypass default action.
        assertTrue("Coprocessor should have been called on table enable", cp.wasEnableTableCalled());
        assertTrue("Enable table handler should be called.", cp.wasEnableTableActionCalled());
        admin.disableTable(tableName);
        assertTrue(admin.isTableDisabled(tableName));
        // modify table
        htd.setMaxFileSize(512 * 1024 * 1024);
        modifyTableSync(admin, tableName, htd);
        // preModifyTable can't bypass default action.
        assertTrue("Test table should have been modified", cp.wasModifyTableCalled());
        // add a column family
        admin.addColumnFamily(tableName, new HColumnDescriptor(TEST_FAMILY2));
        assertTrue("New column family shouldn't have been added to test table", cp.preAddColumnCalledOnly());
        // modify a column family
        HColumnDescriptor hcd1 = new HColumnDescriptor(TEST_FAMILY2);
        hcd1.setMaxVersions(25);
        admin.modifyColumnFamily(tableName, hcd1);
        assertTrue("Second column family should be modified", cp.preModifyColumnCalledOnly());
        // truncate table
        admin.truncateTable(tableName, false);
        // delete table
        admin.disableTable(tableName);
        assertTrue(admin.isTableDisabled(tableName));
        deleteTable(admin, tableName);
        assertFalse("Test table should have been deleted", admin.tableExists(tableName));
        // preDeleteTable can't bypass default action.
        assertTrue("Coprocessor should have been called on table delete", cp.wasDeleteTableCalled());
        assertTrue("Delete table handler should be called.", cp.wasDeleteTableActionCalled());
        // turn off bypass, run the tests again
        cp.enableBypass(false);
        cp.resetStates();
        admin.createTable(htd);
        assertTrue("Test table should be created", cp.wasCreateTableCalled());
        tableCreationLatch.await();
        assertTrue("Table pre create handler called.", cp.wasPreCreateTableActionCalled());
        assertTrue("Table create handler should be called.", cp.wasCreateTableActionCalled());
        // disable
        assertFalse(cp.wasDisableTableCalled());
        assertFalse(cp.wasDisableTableActionCalled());
        admin.disableTable(tableName);
        assertTrue(admin.isTableDisabled(tableName));
        assertTrue("Coprocessor should have been called on table disable", cp.wasDisableTableCalled());
        assertTrue("Disable table handler should be called.", cp.wasDisableTableActionCalled());
        // modify table
        htd.setMaxFileSize(512 * 1024 * 1024);
        modifyTableSync(admin, tableName, htd);
        assertTrue("Test table should have been modified", cp.wasModifyTableCalled());
        // add a column family
        admin.addColumnFamily(tableName, new HColumnDescriptor(TEST_FAMILY2));
        assertTrue("New column family should have been added to test table", cp.wasAddColumnCalled());
        assertTrue("Add column handler should be called.", cp.wasAddColumnFamilyActionCalled());
        // modify a column family
        HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY2);
        hcd.setMaxVersions(25);
        admin.modifyColumnFamily(tableName, hcd);
        assertTrue("Second column family should be modified", cp.wasModifyColumnCalled());
        assertTrue("Modify table handler should be called.", cp.wasModifyColumnFamilyActionCalled());
        // enable
        assertFalse(cp.wasEnableTableCalled());
        assertFalse(cp.wasEnableTableActionCalled());
        admin.enableTable(tableName);
        assertTrue(admin.isTableEnabled(tableName));
        assertTrue("Coprocessor should have been called on table enable", cp.wasEnableTableCalled());
        assertTrue("Enable table handler should be called.", cp.wasEnableTableActionCalled());
        // disable again
        admin.disableTable(tableName);
        assertTrue(admin.isTableDisabled(tableName));
        // delete column
        assertFalse("No column family deleted yet", cp.wasDeleteColumnCalled());
        assertFalse("Delete table column handler should not be called.", cp.wasDeleteColumnFamilyActionCalled());
        admin.deleteColumnFamily(tableName, TEST_FAMILY2);
        HTableDescriptor tableDesc = admin.getTableDescriptor(tableName);
        assertNull("'" + Bytes.toString(TEST_FAMILY2) + "' should have been removed", tableDesc.getFamily(TEST_FAMILY2));
        assertTrue("Coprocessor should have been called on column delete", cp.wasDeleteColumnCalled());
        assertTrue("Delete table column handler should be called.", cp.wasDeleteColumnFamilyActionCalled());
        // delete table
        assertFalse("No table deleted yet", cp.wasDeleteTableCalled());
        assertFalse("Delete table handler should not be called.", cp.wasDeleteTableActionCalled());
        deleteTable(admin, tableName);
        assertFalse("Test table should have been deleted", admin.tableExists(tableName));
        assertTrue("Coprocessor should have been called on table delete", cp.wasDeleteTableCalled());
        assertTrue("Delete table handler should be called.", cp.wasDeleteTableActionCalled());
    }
}
Also used: RegionLocator(org.apache.hadoop.hbase.client.RegionLocator), MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost), HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor), Connection(org.apache.hadoop.hbase.client.Connection), MiniHBaseCluster(org.apache.hadoop.hbase.MiniHBaseCluster), Admin(org.apache.hadoop.hbase.client.Admin), CountDownLatch(java.util.concurrent.CountDownLatch), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), TableName(org.apache.hadoop.hbase.TableName), HRegionLocation(org.apache.hadoop.hbase.HRegionLocation), HMaster(org.apache.hadoop.hbase.master.HMaster), Test(org.junit.Test)
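
The modifyTableSync helper called above is defined elsewhere in the test class and is not shown on this page. A minimal sketch of how such a helper might be written, assuming the pre-2.0 Admin.modifyTable call is asynchronous and simply polling until the master reports the new descriptor (the polling loop and timeout are illustrative assumptions):

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

final class ModifyTableSyncSketch {
    // Hypothetical helper: submit the modification, then poll until the
    // updated descriptor is visible or roughly ten seconds have passed.
    static void modifyTableSync(Admin admin, TableName tableName, HTableDescriptor htd)
            throws IOException, InterruptedException {
        admin.modifyTable(tableName, htd);
        for (int i = 0; i < 100; i++) {
            if (admin.getTableDescriptor(tableName).equals(htd)) {
                return;
            }
            Thread.sleep(100);
        }
        throw new IOException("Table " + tableName + " was not modified in time");
    }
}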

Example 93 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in the Apache HBase project.

From the class TestCoprocessorMetrics, method testRegionObserverAfterRegionClosed.

@Test
public void testRegionObserverAfterRegionClosed() throws IOException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
        Admin admin = connection.getAdmin()) {
        // create with 2 regions
        admin.createTable(new HTableDescriptor(tableName).addFamily(new HColumnDescriptor(foo))
            .addCoprocessor(CustomRegionObserver.class.getName()), new byte[][] { foo });
        try (Table table = connection.getTable(tableName)) {
            table.get(new Get(foo));
            // 2 gets
            table.get(new Get(foo));
        }
        assertPreGetRequestsCounter(CustomRegionObserver.class);
        // close one of the regions
        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
            HRegionLocation loc = locator.getRegionLocation(foo);
            admin.closeRegion(loc.getServerName(), loc.getRegionInfo());
            HRegionServer server = UTIL.getMiniHBaseCluster().getRegionServer(loc.getServerName());
            UTIL.waitFor(30000, () -> server.getOnlineRegion(loc.getRegionInfo().getRegionName()) == null);
            assertNull(server.getOnlineRegion(loc.getRegionInfo().getRegionName()));
        }
        // with only 1 region remaining, we should still be able to find the Counter
        assertPreGetRequestsCounter(CustomRegionObserver.class);
        // close the table
        admin.disableTable(tableName);
        MetricRegistryInfo info = MetricsCoprocessor.createRegistryInfoForRegionCoprocessor(CustomRegionObserver.class.getName());
        // ensure that MetricRegistry is deleted
        Optional<MetricRegistry> registry = MetricRegistries.global().get(info);
        assertFalse(registry.isPresent());
    }
}
Also used: RegionLocator(org.apache.hadoop.hbase.client.RegionLocator), Table(org.apache.hadoop.hbase.client.Table), HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor), MetricRegistry(org.apache.hadoop.hbase.metrics.MetricRegistry), Connection(org.apache.hadoop.hbase.client.Connection), Admin(org.apache.hadoop.hbase.client.Admin), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer), TableName(org.apache.hadoop.hbase.TableName), HRegionLocation(org.apache.hadoop.hbase.HRegionLocation), Get(org.apache.hadoop.hbase.client.Get), MetricRegistryInfo(org.apache.hadoop.hbase.metrics.MetricRegistryInfo), Test(org.junit.Test)
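
CustomRegionObserver and assertPreGetRequestsCounter are defined elsewhere in TestCoprocessorMetrics and are not shown on this page. A hedged sketch of what such an observer could look like, assuming the pre-2.0 BaseRegionObserver base class and assuming the region coprocessor environment exposes a per-region-server MetricRegistry via getMetricRegistryForRegionServer() (that accessor and the counter name are assumptions for illustration):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.metrics.Counter;

// Hypothetical sketch: count preGetOp invocations in a coprocessor-scoped
// metric so a test can assert on the counter after issuing Gets.
public class CustomRegionObserverSketch extends BaseRegionObserver {

    private Counter preGetCounter;

    @Override
    public void start(CoprocessorEnvironment env) throws IOException {
        if (env instanceof RegionCoprocessorEnvironment) {
            // Assumption: the environment hands out a MetricRegistry for this coprocessor.
            preGetCounter = ((RegionCoprocessorEnvironment) env)
                .getMetricRegistryForRegionServer().counter("preGetRequests");
        }
    }

    @Override
    public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,
            List<Cell> results) throws IOException {
        if (preGetCounter != null) {
            preGetCounter.increment();
        }
    }
}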

Example 94 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in the Apache HBase project.

From the class TestCoprocessorMetrics, method testRegionObserverMultiTable.

@Test
public void testRegionObserverMultiTable() throws IOException {
    final TableName tableName1 = TableName.valueOf(name.getMethodName() + "1");
    final TableName tableName2 = TableName.valueOf(name.getMethodName() + "2");
    try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
        Admin admin = connection.getAdmin()) {
        admin.createTable(new HTableDescriptor(tableName1).addFamily(new HColumnDescriptor(foo)).addCoprocessor(CustomRegionObserver.class.getName()));
        admin.createTable(new HTableDescriptor(tableName2).addFamily(new HColumnDescriptor(foo)).addCoprocessor(CustomRegionObserver.class.getName()));
        try (Table table1 = connection.getTable(tableName1);
            Table table2 = connection.getTable(tableName2)) {
            table1.get(new Get(bar));
            // 2 gets to 2 separate tables
            table2.get(new Get(foo));
        }
    }
    assertPreGetRequestsCounter(CustomRegionObserver.class);
}
Also used: TableName(org.apache.hadoop.hbase.TableName), Table(org.apache.hadoop.hbase.client.Table), HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor), Get(org.apache.hadoop.hbase.client.Get), Connection(org.apache.hadoop.hbase.client.Connection), Admin(org.apache.hadoop.hbase.client.Admin), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), Test(org.junit.Test)

Example 95 with HColumnDescriptor

Use of org.apache.hadoop.hbase.HColumnDescriptor in the Apache HBase project.

From the class TestCoprocessorMetrics, method testRegionObserverMultiRegion.

@Test
public void testRegionObserverMultiRegion() throws IOException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
        Admin admin = connection.getAdmin()) {
        // create with 2 regions
        admin.createTable(new HTableDescriptor(tableName).addFamily(new HColumnDescriptor(foo))
            .addCoprocessor(CustomRegionObserver.class.getName()), new byte[][] { foo });
        try (Table table = connection.getTable(tableName);
            RegionLocator locator = connection.getRegionLocator(tableName)) {
            table.get(new Get(bar));
            // 2 gets to 2 separate regions
            table.get(new Get(foo));
            assertEquals(2, locator.getAllRegionLocations().size());
            assertNotEquals(locator.getRegionLocation(bar).getRegionInfo(), locator.getRegionLocation(foo).getRegionInfo());
        }
    }
    assertPreGetRequestsCounter(CustomRegionObserver.class);
}
Also used: TableName(org.apache.hadoop.hbase.TableName), RegionLocator(org.apache.hadoop.hbase.client.RegionLocator), Table(org.apache.hadoop.hbase.client.Table), HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor), Get(org.apache.hadoop.hbase.client.Get), Connection(org.apache.hadoop.hbase.client.Connection), Admin(org.apache.hadoop.hbase.client.Admin), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), Test(org.junit.Test)

Aggregations

HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 671
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 554
Test (org.junit.Test): 358
TableName (org.apache.hadoop.hbase.TableName): 200
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 137
Put (org.apache.hadoop.hbase.client.Put): 132
Table (org.apache.hadoop.hbase.client.Table): 117
Admin (org.apache.hadoop.hbase.client.Admin): 110
IOException (java.io.IOException): 109
Path (org.apache.hadoop.fs.Path): 81
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 71
ArrayList (java.util.ArrayList): 66
Configuration (org.apache.hadoop.conf.Configuration): 65
Connection (org.apache.hadoop.hbase.client.Connection): 51
Scan (org.apache.hadoop.hbase.client.Scan): 50
Result (org.apache.hadoop.hbase.client.Result): 45
FileSystem (org.apache.hadoop.fs.FileSystem): 44
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 42
Connection (java.sql.Connection): 41
Properties (java.util.Properties): 38
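
As the aggregation shows, HColumnDescriptor is almost always used together with HTableDescriptor, TableName, and Admin. A minimal, self-contained sketch of that common pattern (the table name, family name, and tuning values below are placeholders, not taken from any of the examples above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Describe a column family: keep up to 3 versions, expire cells after a day.
            HColumnDescriptor family = new HColumnDescriptor("cf");
            family.setMaxVersions(3);
            family.setTimeToLive(24 * 60 * 60);
            // Attach the family to a table descriptor and create the table.
            HTableDescriptor table = new HTableDescriptor(TableName.valueOf("example_table"));
            table.addFamily(family);
            if (!admin.tableExists(table.getTableName())) {
                admin.createTable(table);
            }
        }
    }
}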