Example 26 with SingleProcessHBaseCluster

Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

From the class TestMasterObserver, method testTableNamesEnumeration:

@Test
public void testTableNamesEnumeration() throws Exception {
    SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    MasterCoprocessorHost host = master.getMasterCoprocessorHost();
    CPMasterObserver cp = host.findCoprocessor(CPMasterObserver.class);
    cp.resetStates();
    master.getMasterRpcServices().getTableNames(null, GetTableNamesRequest.newBuilder().build());
    assertTrue("Coprocessor should be called on table names request", cp.wasGetTableNamesCalled());
}
Also used: SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster), MasterCoprocessorHost (org.apache.hadoop.hbase.master.MasterCoprocessorHost), HMaster (org.apache.hadoop.hbase.master.HMaster), Test (org.junit.Test)
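
CPMasterObserver itself is not shown in this snippet. As a hedged sketch only (the real class in the HBase test suite tracks many more hooks than this one), an observer that records a getTableNames call can be built on the MasterObserver.postGetTableNames hook; the class name below is hypothetical:

import java.io.IOException;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical name; the real CPMasterObserver covers far more master hooks.
public class TableNamesTrackingObserver implements MasterCoprocessor, MasterObserver {

    private volatile boolean getTableNamesCalled;

    @Override
    public Optional<MasterObserver> getMasterObserver() {
        // Route observer callbacks to this same instance.
        return Optional.of(this);
    }

    @Override
    public void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
            List<TableName> tableNames, String regex) throws IOException {
        // Record that the master invoked the hook for a getTableNames request.
        getTableNamesCalled = true;
    }

    public void resetStates() {
        getTableNamesCalled = false;
    }

    public boolean wasGetTableNamesCalled() {
        return getTableNamesCalled;
    }
}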

Example 27 with SingleProcessHBaseCluster

Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

From the class TestMasterCoprocessorExceptionWithRemove, method testExceptionFromCoprocessorWhenCreatingTable:

@Test
public void testExceptionFromCoprocessorWhenCreatingTable() throws IOException {
    SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    MasterCoprocessorHost host = master.getMasterCoprocessorHost();
    BuggyMasterObserver cp = host.findCoprocessor(BuggyMasterObserver.class);
    assertFalse("No table created yet", cp.wasCreateTableCalled());
    // Set a watch on the ZooKeeper /hbase/master node. If the master dies,
    // the node will be deleted.
    // The master should *NOT* die: we are testing that the default setting of
    // hbase.coprocessor.abortonerror=false is respected.
    ZKWatcher zkw = new ZKWatcher(UTIL.getConfiguration(), "unittest", new Abortable() {

        @Override
        public void abort(String why, Throwable e) {
            throw new RuntimeException("Fatal ZK error: " + why, e);
        }

        @Override
        public boolean isAborted() {
            return false;
        }
    });
    MasterTracker masterTracker = new MasterTracker(zkw, "/hbase/master", new Abortable() {

        @Override
        public void abort(String why, Throwable e) {
            throw new RuntimeException("Fatal ZooKeeper tracker error, why=", e);
        }

        @Override
        public boolean isAborted() {
            return false;
        }
    });
    masterTracker.start();
    zkw.registerListener(masterTracker);
    // Test (part of) the output that should have been printed by the master when
    // it aborts (namely the part that shows the set of loaded coprocessors).
    // In this test, there is only a single coprocessor (BuggyMasterObserver).
    String coprocessorName = BuggyMasterObserver.class.getName();
    assertTrue(HMaster.getLoadedCoprocessors().contains(coprocessorName));
    TableDescriptor tableDescriptor1 = TableDescriptorBuilder.newBuilder(TableName.valueOf(TEST_TABLE1)).setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY1)).build();
    boolean threwDNRE = false;
    try {
        Admin admin = UTIL.getAdmin();
        admin.createTable(tableDescriptor1);
    } catch (IOException e) {
        if (e.getClass().getName().equals("org.apache.hadoop.hbase.DoNotRetryIOException")) {
            threwDNRE = true;
        }
    } finally {
        assertTrue(threwDNRE);
    }
    // Wait for a few seconds to make sure that the master hasn't aborted.
    try {
        Thread.sleep(3000);
    } catch (InterruptedException e) {
        fail("InterruptedException while sleeping.");
    }
    assertFalse("Master survived coprocessor NPE, as expected.", masterTracker.masterZKNodeWasDeleted);
    String loadedCoprocessors = HMaster.getLoadedCoprocessors();
    assertTrue(loadedCoprocessors.contains(coprocessorName));
    // Verify that BuggyMasterObserver has been removed due to its misbehavior
    // by creating another table: should not have a problem this time.
    TableDescriptor tableDescriptor2 = TableDescriptorBuilder.newBuilder(TableName.valueOf(TEST_TABLE2)).setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY2)).build();
    Admin admin = UTIL.getAdmin();
    try {
        admin.createTable(tableDescriptor2);
    } catch (IOException e) {
        fail("Failed to create table after buggy coprocessor removal: " + e);
    }
}
Also used: SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster), MasterCoprocessorHost (org.apache.hadoop.hbase.master.MasterCoprocessorHost), IOException (java.io.IOException), Admin (org.apache.hadoop.hbase.client.Admin), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), Abortable (org.apache.hadoop.hbase.Abortable), ZKWatcher (org.apache.hadoop.hbase.zookeeper.ZKWatcher), HMaster (org.apache.hadoop.hbase.master.HMaster), Test (org.junit.Test)
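
BuggyMasterObserver and MasterTracker are not shown in this snippet; MasterTracker simply flips masterZKNodeWasDeleted when the /hbase/master znode disappears. As a hedged sketch of the other half (the actual BuggyMasterObserver may differ), a master observer that deliberately throws from its create-table hook could look like the following; with hbase.coprocessor.abortonerror left at false, the coprocessor host unloads the offender and the client sees a DoNotRetryIOException:

import java.io.IOException;
import java.util.Optional;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

// Hypothetical sketch of a misbehaving observer like BuggyMasterObserver.
public class BuggyObserverSketch implements MasterCoprocessor, MasterObserver {

    private volatile boolean createTableCalled;

    @Override
    public Optional<MasterObserver> getMasterObserver() {
        return Optional.of(this);
    }

    @Override
    public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
            TableDescriptor desc, RegionInfo[] regions) throws IOException {
        createTableCalled = true;
        // Simulate a coprocessor bug; the host catches this and, since
        // abortonerror=false, unloads the coprocessor instead of killing the master.
        throw new NullPointerException("buggy coprocessor");
    }

    public boolean wasCreateTableCalled() {
        return createTableCalled;
    }
}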

Example 28 with SingleProcessHBaseCluster

Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

From the class TestSCPBase, method tearDown:

@After
public void tearDown() throws Exception {
    SingleProcessHBaseCluster cluster = this.util.getHBaseCluster();
    HMaster master = cluster == null ? null : cluster.getMaster();
    if (master != null && master.getMasterProcedureExecutor() != null) {
        ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(master.getMasterProcedureExecutor(), false);
    }
    this.util.shutdownMiniCluster();
}
Also used: SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster), HMaster (org.apache.hadoop.hbase.master.HMaster), After (org.junit.After)
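
This tearDown clears a kill-and-toggle flag that the test presumably sets earlier. A minimal sketch of the matching setup step, assuming the usual ProcedureTestingUtility pattern (the actual TestSCPBase setup is not shown and may differ): with the flag on, the executor simulates a crash before each procedure state is persisted, which exercises ServerCrashProcedure recovery.

import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;

public class ScpKillToggleSketch {

    // Hypothetical helper; enables crash injection on the master's executor.
    static void enableKillBeforeStoreUpdate(HMaster master) {
        ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
        // true = kill the executor before each store update; the "toggle" part
        // flips the flag after each kill so the procedure can eventually finish.
        ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
    }
}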

Example 29 with SingleProcessHBaseCluster

Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

From the class TestHRegion, method testgetHDFSBlocksDistribution:

@Test
public void testgetHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtil htu = new HBaseTestingUtil();
    // Why do we set the block size in this test? If we set it smaller than the kvs,
    // then we'll break up the file into more pieces that can be distributed across
    // the three nodes and we won't be able to have the condition this test asserts:
    // that at least one node has a copy of all replicas -- with a small block size,
    // blocks are spread evenly across the three nodes. hfilev3 with tags seems to
    // put us over the block size. St.Ack.
    // final int DEFAULT_BLOCK_SIZE = 1024;
    // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
    htu.getConfiguration().setInt("dfs.replication", 2);
    // set up a cluster with 3 nodes
    SingleProcessHBaseCluster cluster = null;
    String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
    int regionServersCount = 3;
    try {
        StartTestingClusterOption option = StartTestingClusterOption.builder().numRegionServers(regionServersCount).dataNodeHosts(dataNodeHosts).build();
        cluster = htu.startMiniCluster(option);
        byte[][] families = { fam1, fam2 };
        Table ht = htu.createTable(tableName, families);
        // Setting up region
        byte[] row = Bytes.toBytes("row1");
        byte[] col = Bytes.toBytes("col1");
        Put put = new Put(row);
        put.addColumn(fam1, col, 1, Bytes.toBytes("test1"));
        put.addColumn(fam2, col, 1, Bytes.toBytes("test2"));
        ht.put(put);
        HRegion firstRegion = htu.getHBaseCluster().getRegions(tableName).get(0);
        firstRegion.flush(true);
        HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();
        // Given that the replication factor is set to 2 and we have 2 HFiles, we
        // will have a total of 4 block replicas on 3 datanodes; thus there must be
        // at least one host that has replicas of both HFiles. That host's weight
        // will be equal to the unique blocks' total weight.
        long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
        StringBuilder sb = new StringBuilder();
        for (String host : blocksDistribution1.getTopHosts()) {
            if (sb.length() > 0)
                sb.append(", ");
            sb.append(host);
            sb.append("=");
            sb.append(blocksDistribution1.getWeight(host));
        }
        String topHost = blocksDistribution1.getTopHosts().get(0);
        long topHostWeight = blocksDistribution1.getWeight(topHost);
        String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" + topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
        LOG.info(msg);
        assertEquals(msg, uniqueBlocksWeight1, topHostWeight);
        // use the static method to compute the value, it should be the same.
        // static method is used by load balancer or other components
        HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(htu.getConfiguration(), firstRegion.getTableDescriptor(), firstRegion.getRegionInfo());
        long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();
        assertEquals(uniqueBlocksWeight1, uniqueBlocksWeight2);
        ht.close();
    } finally {
        if (cluster != null) {
            htu.shutdownMiniCluster();
        }
    }
}
Also used: SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster), Table (org.apache.hadoop.hbase.client.Table), ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString), ByteString (org.apache.hbase.thirdparty.com.google.protobuf.ByteString), HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil), HDFSBlocksDistribution (org.apache.hadoop.hbase.HDFSBlocksDistribution), Put (org.apache.hadoop.hbase.client.Put), StartTestingClusterOption (org.apache.hadoop.hbase.StartTestingClusterOption), Test (org.junit.Test)
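
The assertion rests on HDFSBlocksDistribution's bookkeeping: it tracks a weight per host plus the total weight of all unique blocks, and the two coincide exactly when one host holds a replica of every block. A small self-contained illustration of that arithmetic, with synthetic hosts and weights that are not taken from the test:

import org.apache.hadoop.hbase.HDFSBlocksDistribution;

public class BlocksDistributionSketch {

    public static void main(String[] args) {
        HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
        // Two HFiles, one block each, both with a replica on host1.
        dist.addHostsAndBlockWeight(new String[] { "host1", "host2" }, 1024);
        dist.addHostsAndBlockWeight(new String[] { "host1", "host3" }, 2048);
        long uniqueWeight = dist.getUniqueBlocksTotalWeight(); // 1024 + 2048 = 3072
        String topHost = dist.getTopHosts().get(0);
        long topHostWeight = dist.getWeight(topHost); // host1 carries both blocks
        // Holds because host1 has a replica of every unique block.
        System.out.println(uniqueWeight == topHostWeight); // true
    }
}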

Example 30 with SingleProcessHBaseCluster

Use of org.apache.hadoop.hbase.SingleProcessHBaseCluster in project hbase by apache.

From the class TestHRegionOnCluster, method testDataCorrectnessReplayingRecoveredEdits:

@Test
public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
    final int NUM_RS = 3;
    Admin hbaseAdmin = null;
    TEST_UTIL.startMiniCluster(NUM_RS);
    try {
        final TableName tableName = TableName.valueOf(name.getMethodName());
        final byte[] FAMILY = Bytes.toBytes("family");
        SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
        HMaster master = cluster.getMaster();
        // Create table
        TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build();
        hbaseAdmin = master.getConnection().getAdmin();
        hbaseAdmin.createTable(tableDescriptor);
        assertTrue(hbaseAdmin.isTableAvailable(tableName));
        // Put data: r1->v1
        LOG.info("Loading r1 to v1 into " + tableName);
        Table table = TEST_UTIL.getConnection().getTable(tableName);
        putDataAndVerify(table, "r1", FAMILY, "v1", 1);
        TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
        // Move region to target server
        RegionInfo regionInfo;
        try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
            regionInfo = locator.getRegionLocation(Bytes.toBytes("r1")).getRegion();
        }
        int originServerNum = cluster.getServerWith(regionInfo.getRegionName());
        HRegionServer originServer = cluster.getRegionServer(originServerNum);
        int targetServerNum = (originServerNum + 1) % NUM_RS;
        HRegionServer targetServer = cluster.getRegionServer(targetServerNum);
        assertFalse(originServer.equals(targetServer));
        TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
        LOG.info("Moving " + regionInfo.getEncodedName() + " to " + targetServer.getServerName());
        hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), targetServer.getServerName());
        do {
            Thread.sleep(1);
        } while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum);
        // Put data: r2->v2
        LOG.info("Loading r2 to v2 into " + tableName);
        putDataAndVerify(table, "r2", FAMILY, "v2", 2);
        TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
        // Move region to origin server
        LOG.info("Moving " + regionInfo.getEncodedName() + " to " + originServer.getServerName());
        hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), originServer.getServerName());
        do {
            Thread.sleep(1);
        } while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum);
        // Put data: r3->v3
        LOG.info("Loading r3 to v3 into " + tableName);
        putDataAndVerify(table, "r3", FAMILY, "v3", 3);
        // Kill target server
        LOG.info("Killing target server " + targetServer.getServerName());
        targetServer.kill();
        cluster.getRegionServerThreads().get(targetServerNum).join();
        // Wait until finish processing of shutdown
        while (master.getServerManager().areDeadServersInProgress()) {
            Thread.sleep(5);
        }
        // Kill origin server
        LOG.info("Killing origin server " + targetServer.getServerName());
        originServer.kill();
        cluster.getRegionServerThreads().get(originServerNum).join();
        // Put data: r4->v4
        LOG.info("Loading r4 to v4 into " + tableName);
        putDataAndVerify(table, "r4", FAMILY, "v4", 4);
    } finally {
        if (hbaseAdmin != null)
            hbaseAdmin.close();
        TEST_UTIL.shutdownMiniCluster();
    }
}
Also used: SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster), TableName (org.apache.hadoop.hbase.TableName), RegionLocator (org.apache.hadoop.hbase.client.RegionLocator), Table (org.apache.hadoop.hbase.client.Table), HMaster (org.apache.hadoop.hbase.master.HMaster), RegionInfo (org.apache.hadoop.hbase.client.RegionInfo), Admin (org.apache.hadoop.hbase.client.Admin), TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor), Test (org.junit.Test)
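
putDataAndVerify is called throughout but not shown in this snippet. A plausible reconstruction, assuming it writes a single cell and then scans the table to check the expected row count (the actual helper in TestHRegionOnCluster may differ):

import static org.junit.Assert.assertEquals;

import java.io.IOException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class PutDataAndVerifySketch {

    // Hypothetical reconstruction; the qualifier "q1" is an assumption.
    static void putDataAndVerify(Table table, String row, byte[] family,
            String value, int expectedRows) throws IOException {
        Put put = new Put(Bytes.toBytes(row));
        put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes(value));
        table.put(put);
        // Count rows with a full scan and compare to the expectation.
        int rows = 0;
        try (ResultScanner scanner = table.getScanner(new Scan())) {
            for (Result ignored : scanner) {
                rows++;
            }
        }
        assertEquals(expectedRows, rows);
    }
}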

Aggregations

SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster): 85
Test (org.junit.Test): 69
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer): 31
TableName (org.apache.hadoop.hbase.TableName): 26
Admin (org.apache.hadoop.hbase.client.Admin): 24
Table (org.apache.hadoop.hbase.client.Table): 22
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 22
HMaster (org.apache.hadoop.hbase.master.HMaster): 21
ServerName (org.apache.hadoop.hbase.ServerName): 18
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 18
HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil): 14
MasterCoprocessorHost (org.apache.hadoop.hbase.master.MasterCoprocessorHost): 13
IOException (java.io.IOException): 12
Configuration (org.apache.hadoop.conf.Configuration): 12
Put (org.apache.hadoop.hbase.client.Put): 12
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 12
TableDescriptorBuilder (org.apache.hadoop.hbase.client.TableDescriptorBuilder): 10
File (java.io.File): 9
Path (org.apache.hadoop.fs.Path): 9
RegionMoverBuilder (org.apache.hadoop.hbase.util.RegionMover.RegionMoverBuilder): 9