
Example 46 with HMaster

use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.

the class TestMasterCoprocessorExceptionWithRemove method testExceptionFromCoprocessorWhenCreatingTable.

@Test
public void testExceptionFromCoprocessorWhenCreatingTable() throws IOException {
    SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    MasterCoprocessorHost host = master.getMasterCoprocessorHost();
    BuggyMasterObserver cp = host.findCoprocessor(BuggyMasterObserver.class);
    assertFalse("No table created yet", cp.wasCreateTableCalled());
    // Set a watch on the ZooKeeper /hbase/master node. If the master dies,
    // the node will be deleted.
    // The master should *NOT* die: we are testing that the
    // hbase.coprocessor.abortonerror=false setting is respected.
    ZKWatcher zkw = new ZKWatcher(UTIL.getConfiguration(), "unittest", new Abortable() {

        @Override
        public void abort(String why, Throwable e) {
            throw new RuntimeException("Fatal ZK error: " + why, e);
        }

        @Override
        public boolean isAborted() {
            return false;
        }
    });
    MasterTracker masterTracker = new MasterTracker(zkw, "/hbase/master", new Abortable() {

        @Override
        public void abort(String why, Throwable e) {
            throw new RuntimeException("Fatal ZooKeeper tracker error, why=", e);
        }

        @Override
        public boolean isAborted() {
            return false;
        }
    });
    masterTracker.start();
    zkw.registerListener(masterTracker);
    // Test (part of) the output that should have been printed by the master when it aborts
    // (namely the part that shows the set of loaded coprocessors).
    // In this test, there is only a single coprocessor (BuggyMasterObserver).
    String coprocessorName = BuggyMasterObserver.class.getName();
    assertTrue(HMaster.getLoadedCoprocessors().contains(coprocessorName));
    TableDescriptor tableDescriptor1 = TableDescriptorBuilder.newBuilder(TableName.valueOf(TEST_TABLE1)).setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY1)).build();
    boolean threwDNRE = false;
    try {
        Admin admin = UTIL.getAdmin();
        admin.createTable(tableDescriptor1);
    } catch (IOException e) {
        if (e.getClass().getName().equals("org.apache.hadoop.hbase.DoNotRetryIOException")) {
            threwDNRE = true;
        }
    } finally {
        assertTrue(threwDNRE);
    }
    // wait for a few seconds to make sure that the Master hasn't aborted.
    try {
        Thread.sleep(3000);
    } catch (InterruptedException e) {
        fail("InterruptedException while sleeping.");
    }
    assertFalse("Master survived coprocessor NPE, as expected.", masterTracker.masterZKNodeWasDeleted);
    String loadedCoprocessors = HMaster.getLoadedCoprocessors();
    assertTrue(loadedCoprocessors.contains(coprocessorName));
    // Verify that BuggyMasterObserver has been removed due to its misbehavior
    // by creating another table: should not have a problem this time.
    TableDescriptor tableDescriptor2 = TableDescriptorBuilder.newBuilder(TableName.valueOf(TEST_TABLE2)).setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY2)).build();
    Admin admin = UTIL.getAdmin();
    try {
        admin.createTable(tableDescriptor2);
    } catch (IOException e) {
        fail("Failed to create table after buggy coprocessor removal: " + e);
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) MasterCoprocessorHost(org.apache.hadoop.hbase.master.MasterCoprocessorHost) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Abortable(org.apache.hadoop.hbase.Abortable) ZKWatcher(org.apache.hadoop.hbase.zookeeper.ZKWatcher) HMaster(org.apache.hadoop.hbase.master.HMaster) Test(org.junit.Test)
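
The snippet relies on BuggyMasterObserver having been registered on the master before the cluster started, which is not shown above. A minimal sketch of that class-level setup, assuming the standard org.apache.hadoop.hbase.coprocessor.CoprocessorHost configuration keys (the @BeforeClass method itself is an assumption, not copied from the test):

@BeforeClass
public static void setupBeforeClass() throws Exception {
    Configuration conf = UTIL.getConfiguration();
    // Load the buggy observer on the master at startup.
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, BuggyMasterObserver.class.getName());
    // Do not abort the master when a coprocessor throws; the faulty coprocessor
    // should be removed instead, which is what the test asserts.
    conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);
    UTIL.startMiniCluster();
}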

Example 47 with HMaster

use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.

the class TestReopenTableRegionsProcedureInfiniteLoop method testInfiniteLoop.

@Test
public void testInfiniteLoop() throws IOException {
    HMaster master = UTIL.getMiniHBaseCluster().getMaster();
    AssignmentManager am = master.getAssignmentManager();
    ProcedureExecutor<MasterProcedureEnv> exec = master.getMasterProcedureExecutor();
    RegionInfo regionInfo = UTIL.getAdmin().getRegions(TABLE_NAME).get(0);
    RegionStateNode regionNode = am.getRegionStates().getRegionStateNode(regionInfo);
    long procId;
    ReopenTableRegionsProcedure proc = new ReopenTableRegionsProcedure(TABLE_NAME);
    regionNode.lock();
    try {
        procId = exec.submitProcedure(proc);
        UTIL.waitFor(30000, () -> proc.hasLock());
        TransitRegionStateProcedure trsp = TransitRegionStateProcedure.reopen(exec.getEnvironment(), regionInfo);
        regionNode.setProcedure(trsp);
        exec.submitProcedure(trsp);
    } finally {
        regionNode.unlock();
    }
    UTIL.waitFor(60000, () -> exec.isFinished(procId));
}
Also used : TransitRegionStateProcedure(org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure) HMaster(org.apache.hadoop.hbase.master.HMaster) AssignmentManager(org.apache.hadoop.hbase.master.assignment.AssignmentManager) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) RegionStateNode(org.apache.hadoop.hbase.master.assignment.RegionStateNode) Test(org.junit.Test)
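
One natural follow-up check, sketched here as an assumption (it is not part of the test above): after both procedures finish, the region should be reported as open again by the assignment manager's RegionStates.

// Assumed follow-up assertion, not in the original test: the region is ONLINE again.
RegionStates regionStates = am.getRegionStates();
assertTrue(regionStates.getRegionState(regionInfo).isOpened());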

Example 48 with HMaster

use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.

the class TestRegisterPeerWorkerWhenRestarting method testRestart.

@Test
public void testRestart() throws Exception {
    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.STANDBY);
    UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.ACTIVE);
    UTIL1.getAdmin().disableReplicationPeer(PEER_ID);
    write(UTIL1, 0, 100);
    Thread.sleep(2000);
    // The peer is disabled, so no data has been replicated yet.
    verifyNotReplicatedThroughRegion(UTIL2, 0, 100);
    // Transition the peer from A (ACTIVE) to DA (DOWNGRADE_ACTIVE) first to avoid too many error logs.
    UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.DOWNGRADE_ACTIVE);
    HMaster master = UTIL2.getHBaseCluster().getMaster();
    // Make sure the transition cannot succeed.
    FAIL = true;
    ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
    Thread t = new Thread() {

        @Override
        public void run() {
            try {
                UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.DOWNGRADE_ACTIVE);
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        }
    };
    t.start();
    // Wait until we are in one of the states where the peer worker must be re-registered on restart.
    UTIL2.waitFor(60000, () -> procExec.getProcedures().stream().filter(p -> p instanceof RecoverStandbyProcedure).map(p -> (RecoverStandbyProcedure) p).anyMatch(p -> p.getCurrentStateId() == DISPATCH_WALS_VALUE || p.getCurrentStateId() == UNREGISTER_PEER_FROM_WORKER_STORAGE_VALUE));
    // failover to another master
    MasterThread mt = UTIL2.getMiniHBaseCluster().getMasterThread();
    mt.getMaster().abort("for testing");
    mt.join();
    FAIL = false;
    t.join();
    // make sure the new master can finish the transition
    UTIL2.waitFor(60000, () -> UTIL2.getAdmin().getReplicationPeerSyncReplicationState(PEER_ID) == SyncReplicationState.DOWNGRADE_ACTIVE);
    verify(UTIL2, 0, 100);
}
Also used : BeforeClass(org.junit.BeforeClass) UNREGISTER_PEER_FROM_WORKER_STORAGE_VALUE(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverStandbyState.UNREGISTER_PEER_FROM_WORKER_STORAGE_VALUE) SyncReplicationState(org.apache.hadoop.hbase.replication.SyncReplicationState) HBaseClassTestRule(org.apache.hadoop.hbase.HBaseClassTestRule) LargeTests(org.apache.hadoop.hbase.testclassification.LargeTests) IOException(java.io.IOException) Test(org.junit.Test) DISPATCH_WALS_VALUE(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverStandbyState.DISPATCH_WALS_VALUE) MasterProcedureEnv(org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv) Category(org.junit.experimental.categories.Category) MasterThread(org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread) UncheckedIOException(java.io.UncheckedIOException) HConstants(org.apache.hadoop.hbase.HConstants) SyncReplicationTestBase(org.apache.hadoop.hbase.replication.SyncReplicationTestBase) ProcedureExecutor(org.apache.hadoop.hbase.procedure2.ProcedureExecutor) Configuration(org.apache.hadoop.conf.Configuration) MasterTests(org.apache.hadoop.hbase.testclassification.MasterTests) ClassRule(org.junit.ClassRule) HMaster(org.apache.hadoop.hbase.master.HMaster)
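
The failover step above relies on the cluster having a backup master (that setup is part of the test base class and is not shown). A generic sketch of the same abort-and-wait-for-failover step, written as an assumption against the SingleProcessHBaseCluster API used elsewhere in these examples, not as the test's exact code:

// Assumed alternative to the MasterThread-based failover in the test above.
SingleProcessHBaseCluster cluster = UTIL2.getMiniHBaseCluster();
cluster.getMaster().abort("for testing");
// Block until a backup master has become active and initialized (up to 60s).
cluster.waitForActiveAndReadyMaster(60000);
HMaster newActiveMaster = cluster.getMaster();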

Example 49 with HMaster

use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.

the class TestSCPBase method tearDown.

@After
public void tearDown() throws Exception {
    SingleProcessHBaseCluster cluster = this.util.getHBaseCluster();
    HMaster master = cluster == null ? null : cluster.getMaster();
    if (master != null && master.getMasterProcedureExecutor() != null) {
        ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(master.getMasterProcedureExecutor(), false);
    }
    this.util.shutdownMiniCluster();
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) HMaster(org.apache.hadoop.hbase.master.HMaster) After(org.junit.After)
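
This tearDown undoes a kill-and-toggle flag that the tests built on this base class typically turn on while exercising ServerCrashProcedure; that setup is not shown in the snippet. A minimal sketch of the enabling side, assuming the same ProcedureTestingUtility helper (everything except the helper call is an assumption):

ProcedureExecutor<MasterProcedureEnv> procExec =
    util.getHBaseCluster().getMaster().getMasterProcedureExecutor();
// Make the executor "kill" itself before procedure-store updates (the flag toggles
// on each use), so recovery of in-flight procedures is exercised after restart.
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);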

Example 50 with HMaster

use of org.apache.hadoop.hbase.master.HMaster in project hbase by apache.

the class TestHRegionOnCluster method testDataCorrectnessReplayingRecoveredEdits.

@Test
public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
    final int NUM_RS = 3;
    Admin hbaseAdmin = null;
    TEST_UTIL.startMiniCluster(NUM_RS);
    try {
        final TableName tableName = TableName.valueOf(name.getMethodName());
        final byte[] FAMILY = Bytes.toBytes("family");
        SingleProcessHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
        HMaster master = cluster.getMaster();
        // Create table
        TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(tableName).setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).build();
        hbaseAdmin = master.getConnection().getAdmin();
        hbaseAdmin.createTable(tableDescriptor);
        assertTrue(hbaseAdmin.isTableAvailable(tableName));
        // Put data: r1->v1
        LOG.info("Loading r1 to v1 into " + tableName);
        Table table = TEST_UTIL.getConnection().getTable(tableName);
        putDataAndVerify(table, "r1", FAMILY, "v1", 1);
        TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
        // Move region to target server
        RegionInfo regionInfo;
        try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
            regionInfo = locator.getRegionLocation(Bytes.toBytes("r1")).getRegion();
        }
        int originServerNum = cluster.getServerWith(regionInfo.getRegionName());
        HRegionServer originServer = cluster.getRegionServer(originServerNum);
        int targetServerNum = (originServerNum + 1) % NUM_RS;
        HRegionServer targetServer = cluster.getRegionServer(targetServerNum);
        assertFalse(originServer.equals(targetServer));
        TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
        LOG.info("Moving " + regionInfo.getEncodedName() + " to " + targetServer.getServerName());
        hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), targetServer.getServerName());
        do {
            Thread.sleep(1);
        } while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum);
        // Put data: r2->v2
        LOG.info("Loading r2 to v2 into " + tableName);
        putDataAndVerify(table, "r2", FAMILY, "v2", 2);
        TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
        // Move region to origin server
        LOG.info("Moving " + regionInfo.getEncodedName() + " to " + originServer.getServerName());
        hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), originServer.getServerName());
        do {
            Thread.sleep(1);
        } while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum);
        // Put data: r3->v3
        LOG.info("Loading r3 to v3 into " + tableName);
        putDataAndVerify(table, "r3", FAMILY, "v3", 3);
        // Kill target server
        LOG.info("Killing target server " + targetServer.getServerName());
        targetServer.kill();
        cluster.getRegionServerThreads().get(targetServerNum).join();
        // Wait until the shutdown processing of the dead server finishes.
        while (master.getServerManager().areDeadServersInProgress()) {
            Thread.sleep(5);
        }
        // Kill origin server
        LOG.info("Killing origin server " + targetServer.getServerName());
        originServer.kill();
        cluster.getRegionServerThreads().get(originServerNum).join();
        // Put data: r4->v4
        LOG.info("Loading r4 to v4 into " + tableName);
        putDataAndVerify(table, "r4", FAMILY, "v4", 4);
    } finally {
        if (hbaseAdmin != null)
            hbaseAdmin.close();
        TEST_UTIL.shutdownMiniCluster();
    }
}
Also used : SingleProcessHBaseCluster(org.apache.hadoop.hbase.SingleProcessHBaseCluster) TableName(org.apache.hadoop.hbase.TableName) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) HMaster(org.apache.hadoop.hbase.master.HMaster) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) Admin(org.apache.hadoop.hbase.client.Admin) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Test(org.junit.Test)
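
The putDataAndVerify helper used above is defined elsewhere in TestHRegionOnCluster and is not part of the snippet. A hypothetical sketch of what such a helper does, using org.apache.hadoop.hbase.client.Put/Scan/Result/ResultScanner and Bytes (the qualifier name and the row-count check are assumptions, not the actual implementation):

private void putDataAndVerify(Table table, String row, byte[] family, String value,
        int expectedRows) throws IOException {
    // Write row -> value under an assumed qualifier, then count all rows with a full scan.
    Put put = new Put(Bytes.toBytes(row));
    put.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes(value));
    table.put(put);
    int rows = 0;
    try (ResultScanner scanner = table.getScanner(new Scan())) {
        for (Result ignored : scanner) {
            rows++;
        }
    }
    assertEquals(expectedRows, rows);
}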

Aggregations

HMaster (org.apache.hadoop.hbase.master.HMaster): 132
Test (org.junit.Test): 91
TableName (org.apache.hadoop.hbase.TableName): 42
IOException (java.io.IOException): 33
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo): 31
ServerName (org.apache.hadoop.hbase.ServerName): 24
Admin (org.apache.hadoop.hbase.client.Admin): 23
Table (org.apache.hadoop.hbase.client.Table): 23
SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster): 22
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 16
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer): 15
Configuration (org.apache.hadoop.conf.Configuration): 13
MasterCoprocessorHost (org.apache.hadoop.hbase.master.MasterCoprocessorHost): 12
BeforeClass (org.junit.BeforeClass): 11
AssignmentManager (org.apache.hadoop.hbase.master.assignment.AssignmentManager): 10
List (java.util.List): 9
HBaseClassTestRule (org.apache.hadoop.hbase.HBaseClassTestRule): 9
HRegionLocation (org.apache.hadoop.hbase.HRegionLocation): 9
RegionStates (org.apache.hadoop.hbase.master.assignment.RegionStates): 9
ClassRule (org.junit.ClassRule): 9
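
Across these examples the access pattern is the same: obtain the HMaster from the running mini cluster, then drill into the subsystem under test. A condensed sketch of that pattern, using only getters that appear in the snippets above (the UTIL testing-utility instance and variable names are assumptions):

SingleProcessHBaseCluster cluster = UTIL.getHBaseCluster();
HMaster master = cluster.getMaster();
// Subsystems commonly pulled off the master in these tests:
MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
AssignmentManager am = master.getAssignmentManager();
ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
ServerManager serverManager = master.getServerManager();
Connection conn = master.getConnection();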