
Example 86 with ZooKeeperWatcher

Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.

Class TestMasterWalManager, method testRemoveStaleRecoveringRegionsDuringMasterInitialization.

@Test
public void testRemoveStaleRecoveringRegionsDuringMasterInitialization() throws Exception {
    // this test is for when distributed log replay is enabled
    if (!UTIL.getConfiguration().getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false))
        return;
    LOG.info("Starting testRemoveStaleRecoveringRegionsDuringMasterInitialization");
    HMaster master = UTIL.getMiniHBaseCluster().getMaster();
    MasterWalManager mwm = master.getMasterWalManager();
    String failedRegion = "failedRegion1";
    String staleRegion = "staleRegion";
    ServerName inRecoveryServerName = ServerName.valueOf("mgr,1,1");
    ServerName previouslyFailedServerName = ServerName.valueOf("previous,1,1");
    String walPath = "/hbase/data/.logs/" + inRecoveryServerName.getServerName() + "-splitting/test";
    // Create a ZKW to use in the test
    ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL);
    zkw.getRecoverableZooKeeper().create(ZKSplitLog.getEncodedNodeName(zkw, walPath),
        new SplitLogTask.Owned(inRecoveryServerName, mwm.getLogRecoveryMode()).toByteArray(),
        Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    String staleRegionPath = ZKUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, staleRegion);
    ZKUtil.createWithParents(zkw, staleRegionPath);
    String inRecoveringRegionPath = ZKUtil.joinZNode(zkw.znodePaths.recoveringRegionsZNode, failedRegion);
    inRecoveringRegionPath = ZKUtil.joinZNode(inRecoveringRegionPath, inRecoveryServerName.getServerName());
    ZKUtil.createWithParents(zkw, inRecoveringRegionPath);
    Set<ServerName> servers = new HashSet<>();
    servers.add(previouslyFailedServerName);
    mwm.removeStaleRecoveringRegionsFromZK(servers);
    // verification: the stale region znode should be gone, the in-recovery one should remain
    assertTrue(ZKUtil.checkExists(zkw, staleRegionPath) == -1);
    assertTrue(ZKUtil.checkExists(zkw, inRecoveringRegionPath) != -1);
    ZKUtil.deleteChildrenRecursively(zkw, zkw.znodePaths.recoveringRegionsZNode);
    ZKUtil.deleteChildrenRecursively(zkw, zkw.znodePaths.splitLogZNode);
    zkw.close();
}
Also used : ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) ServerName(org.apache.hadoop.hbase.ServerName) HashSet(java.util.HashSet) Test(org.junit.Test)
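
The helper ZKSplitLog.getEncodedNodeName used above is not part of the snippet. As a rough sketch only (the exact encoding lives in ZKSplitLog and may differ), it is assumed to URL-encode the full WAL path into a single child node of the split-log znode:

// Rough sketch only (not the project's actual implementation): what
// ZKSplitLog.getEncodedNodeName(zkw, walPath) is assumed to boil down to.
static String encodedSplitLogNode(ZooKeeperWatcher zkw, String walPath)
        throws java.io.UnsupportedEncodingException {
    // URL-encode the full WAL path so it becomes a single legal znode name
    // directly under the split-log znode.
    return ZKUtil.joinZNode(zkw.znodePaths.splitLogZNode,
        java.net.URLEncoder.encode(walPath, "UTF-8"));
}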

Example 87 with ZooKeeperWatcher

Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.

Class TestLogsCleaner, method testZooKeeperAbort.

/**
   * ReplicationLogCleaner should be able to ride over ZooKeeper errors without
   * aborting.
   */
@Test
public void testZooKeeperAbort() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    ReplicationLogCleaner cleaner = new ReplicationLogCleaner();
    List<FileStatus> dummyFiles = Lists.newArrayList(
        new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log1")),
        new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log2")));
    FaultyZooKeeperWatcher faultyZK = new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null);
    try {
        faultyZK.init();
        cleaner.setConf(conf, faultyZK);
        // should keep all files, because reading the replication queue znodes fails with ConnectionLossException
        Iterable<FileStatus> toDelete = cleaner.getDeletableFiles(dummyFiles);
        assertFalse(toDelete.iterator().hasNext());
        assertFalse(cleaner.isStopped());
    } finally {
        faultyZK.close();
    }
    // when zk is working both files should be returned
    cleaner = new ReplicationLogCleaner();
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testZooKeeperAbort-normal", null);
    try {
        cleaner.setConf(conf, zkw);
        Iterable<FileStatus> filesToDelete = cleaner.getDeletableFiles(dummyFiles);
        Iterator<FileStatus> iter = filesToDelete.iterator();
        assertTrue(iter.hasNext());
        assertEquals(new Path("log1"), iter.next().getPath());
        assertTrue(iter.hasNext());
        assertEquals(new Path("log2"), iter.next().getPath());
        assertFalse(iter.hasNext());
    } finally {
        zkw.close();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) ReplicationLogCleaner(org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) Test(org.junit.Test)
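
FaultyZooKeeperWatcher is an inner class of the test and is not included in the snippet above. A minimal sketch of how such a watcher could be built, assuming Mockito is available and that failing RecoverableZooKeeper reads with ConnectionLossException is enough to simulate the outage (the exact call stubbed in the real test may differ):

// Illustrative sketch only: a ZooKeeperWatcher whose RecoverableZooKeeper is replaced by a
// Mockito spy that fails reads with ConnectionLossException.
private static class FaultyZooKeeperWatcher extends ZooKeeperWatcher {

    private RecoverableZooKeeper zk;

    public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable abortable)
            throws ZooKeeperConnectionException, IOException {
        super(conf, identifier, abortable);
    }

    public void init() throws Exception {
        // Spy on the real client and make getChildren fail as if the connection were lost.
        this.zk = Mockito.spy(super.getRecoverableZooKeeper());
        Mockito.doThrow(new KeeperException.ConnectionLossException())
            .when(zk).getChildren(Mockito.anyString(), Mockito.any(Watcher.class));
    }

    @Override
    public RecoverableZooKeeper getRecoverableZooKeeper() {
        return zk;
    }
}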

Example 88 with ZooKeeperWatcher

Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.

Class TestSplitLogManager, method setup.

@Before
public void setup() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    TEST_UTIL.startMiniZKCluster();
    conf = TEST_UTIL.getConfiguration();
    // Use a different ZK wrapper instance for each test.
    zkw = new ZooKeeperWatcher(conf, "split-log-manager-tests" + UUID.randomUUID().toString(), null);
    master = new DummyMasterServices(zkw, conf);
    ZKUtil.deleteChildrenRecursively(zkw, zkw.znodePaths.baseZNode);
    ZKUtil.createAndFailSilent(zkw, zkw.znodePaths.baseZNode);
    assertTrue(ZKUtil.checkExists(zkw, zkw.znodePaths.baseZNode) != -1);
    LOG.debug(zkw.znodePaths.baseZNode + " created");
    ZKUtil.createAndFailSilent(zkw, zkw.znodePaths.splitLogZNode);
    assertTrue(ZKUtil.checkExists(zkw, zkw.znodePaths.splitLogZNode) != -1);
    LOG.debug(zkw.znodePaths.splitLogZNode + " created");
    resetCounters();
    // By default, we let the test manage the error as before, so the server
    // does not appear dead from the master's point of view, only from the split-log manager's.
    Mockito.when(sm.isServerOnline(Mockito.any(ServerName.class))).thenReturn(true);
    to = 12000;
    conf.setInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT, to);
    conf.setInt("hbase.splitlog.manager.unassigned.timeout", 2 * to);
    conf.setInt("hbase.splitlog.manager.timeoutmonitor.period", 100);
    to = to + 16 * 100;
    this.mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false) ? RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING);
}
Also used : HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) ServerName(org.apache.hadoop.hbase.ServerName) Before(org.junit.Before)
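
The setup above creates a fresh mini ZK cluster and watcher per test; the matching teardown is not shown. A minimal sketch, assuming the fields from setup(), org.junit.After, and the usual Stoppable stop(String) method on the dummy master:

@After
public void teardown() throws Exception {
    // Stop the dummy master, close the per-test watcher, and shut down the mini ZK
    // cluster started in setup(). (Field and method names are assumed from setup() above.)
    master.stop("test cleanup");
    zkw.close();
    TEST_UTIL.shutdownMiniZKCluster();
}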

Example 89 with ZooKeeperWatcher

Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.

Class TestTableStateManager, method testUpgradeFromZk.

@Test(timeout = 60000)
public void testUpgradeFromZk() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    TEST_UTIL.startMiniCluster(2, 1);
    TEST_UTIL.shutdownMiniHBaseCluster();
    ZooKeeperWatcher watcher = TEST_UTIL.getZooKeeperWatcher();
    setTableStateInZK(watcher, tableName, ZooKeeperProtos.DeprecatedTableState.State.DISABLED);
    TEST_UTIL.restartHBaseCluster(1);
    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    Assert.assertEquals(TableState.State.DISABLED, master.getTableStateManager().getTableState(tableName));
}
Also used : TableName(org.apache.hadoop.hbase.TableName) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) Test(org.junit.Test)
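
setTableStateInZK is a private helper of the test and is not included above. A rough sketch of what it could look like, assuming the legacy state is written as a PB-magic-prefixed DeprecatedTableState message directly on the table's znode (the actual helper in the project may differ):

// Illustrative sketch only: write a pre-1.x style table state into ZooKeeper so the
// restarted master is forced to migrate it into the new TableStateManager.
private void setTableStateInZK(ZooKeeperWatcher watcher, TableName tableName,
        ZooKeeperProtos.DeprecatedTableState.State state) throws Exception {
    String znode = ZKUtil.joinZNode(watcher.znodePaths.tableZNode, tableName.getNameAsString());
    ZKUtil.createAndFailSilent(watcher, znode);
    byte[] data = ProtobufUtil.prependPBMagic(
        ZooKeeperProtos.DeprecatedTableState.newBuilder().setState(state).build().toByteArray());
    ZKUtil.setData(watcher, znode, data);
}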

Example 90 with ZooKeeperWatcher

Use of org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher in project hbase by apache.

Class TestZKProcedure, method runCommit.

private void runCommit(String... members) throws Exception {
    // treat a null members argument as an empty list
    if (members == null) {
        members = new String[0];
    }
    List<String> expected = Arrays.asList(members);
    // setup the constants
    ZooKeeperWatcher coordZkw = newZooKeeperWatcher();
    String opDescription = "coordination test - " + members.length + " cohort members";
    // start running the controller
    ZKProcedureCoordinator coordinatorComms = new ZKProcedureCoordinator(coordZkw, opDescription, COORDINATOR_NODE_NAME);
    ThreadPoolExecutor pool = ProcedureCoordinator.defaultPool(COORDINATOR_NODE_NAME, POOL_SIZE, KEEP_ALIVE);
    ProcedureCoordinator coordinator = new ProcedureCoordinator(coordinatorComms, pool) {

        @Override
        public Procedure createProcedure(ForeignExceptionDispatcher fed, String procName, byte[] procArgs, List<String> expectedMembers) {
            return Mockito.spy(super.createProcedure(fed, procName, procArgs, expectedMembers));
        }
    };
    // build and start members
    // NOTE: There is a single subprocedure builder for all members here.
    SubprocedureFactory subprocFactory = Mockito.mock(SubprocedureFactory.class);
    List<Pair<ProcedureMember, ZKProcedureMemberRpcs>> procMembers = new ArrayList<>(members.length);
    // start each member
    for (String member : members) {
        ZooKeeperWatcher watcher = newZooKeeperWatcher();
        ZKProcedureMemberRpcs comms = new ZKProcedureMemberRpcs(watcher, opDescription);
        ThreadPoolExecutor pool2 = ProcedureMember.defaultPool(member, 1, KEEP_ALIVE);
        ProcedureMember procMember = new ProcedureMember(comms, pool2, subprocFactory);
        procMembers.add(new Pair<>(procMember, comms));
        comms.start(member, procMember);
    }
    // setup mock member subprocedures
    final List<Subprocedure> subprocs = new ArrayList<>();
    for (int i = 0; i < procMembers.size(); i++) {
        ForeignExceptionDispatcher cohortMonitor = new ForeignExceptionDispatcher();
        Subprocedure commit = Mockito.spy(new SubprocedureImpl(procMembers.get(i).getFirst(), opName, cohortMonitor, WAKE_FREQUENCY, TIMEOUT));
        subprocs.add(commit);
    }
    // link subprocedure to buildNewOperation invocation.
    // NOTE: would be racy if not an AtomicInteger
    final AtomicInteger i = new AtomicInteger(0);
    Mockito.when(subprocFactory.buildSubprocedure(Mockito.eq(opName),
        (byte[]) Mockito.argThat(new ArrayEquals(data)))).thenAnswer(new Answer<Subprocedure>() {

        @Override
        public Subprocedure answer(InvocationOnMock invocation) throws Throwable {
            int index = i.getAndIncrement();
            LOG.debug("Task size:" + subprocs.size() + ", getting:" + index);
            Subprocedure commit = subprocs.get(index);
            return commit;
        }
    });
    // setup spying on the coordinator
    //    Procedure proc = Mockito.spy(procBuilder.createProcedure(coordinator, opName, data, expected));
    //    Mockito.when(procBuilder.build(coordinator, opName, data, expected)).thenReturn(proc);
    // start running the operation
    Procedure task = coordinator.startProcedure(new ForeignExceptionDispatcher(), opName, data, expected);
    //    assertEquals("Didn't mock coordinator task", proc, task);
    // verify all things ran as expected
    //    waitAndVerifyProc(proc, once, once, never(), once, false);
    waitAndVerifyProc(task, once, once, never(), once, false);
    verifyCohortSuccessful(expected, subprocFactory, subprocs, once, once, never(), once, false);
    // close all the things
    closeAll(coordinator, coordinatorComms, procMembers);
}
Also used : ArrayList(java.util.ArrayList) ArrayEquals(org.mockito.internal.matchers.ArrayEquals) SubprocedureImpl(org.apache.hadoop.hbase.procedure.Subprocedure.SubprocedureImpl) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) ArrayList(java.util.ArrayList) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) ForeignExceptionDispatcher(org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InvocationOnMock(org.mockito.invocation.InvocationOnMock) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor)
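
newZooKeeperWatcher() is a small private helper of the test and is not part of the snippet. A plausible sketch, assuming the test's shared HBaseTestingUtility (referred to as UTIL here) and an Abortable that turns any unexpected abort into a test failure:

// Illustrative sketch only: each coordinator and member gets its own watcher; an abort is
// never expected in this test, so it is surfaced as a RuntimeException.
private ZooKeeperWatcher newZooKeeperWatcher() throws IOException {
    return new ZooKeeperWatcher(UTIL.getConfiguration(), "testing utility", new Abortable() {

        @Override
        public void abort(String why, Throwable e) {
            throw new RuntimeException("Unexpected abort in the procedure test: " + why, e);
        }

        @Override
        public boolean isAborted() {
            return false;
        }
    });
}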

Aggregations

ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 105
Test (org.junit.Test): 46
Configuration (org.apache.hadoop.conf.Configuration): 33
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 21
Table (org.apache.hadoop.hbase.client.Table): 20
IOException (java.io.IOException): 19
ServerName (org.apache.hadoop.hbase.ServerName): 16
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer): 15
Ignore (org.junit.Ignore): 15
ArrayList (java.util.ArrayList): 14
RegionServerThread (org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread): 13
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 12
BeforeClass (org.junit.BeforeClass): 12
HBaseTestingUtility (org.apache.hadoop.hbase.HBaseTestingUtility): 11
List (java.util.List): 10
KeeperException (org.apache.zookeeper.KeeperException): 10
TimeoutException (java.util.concurrent.TimeoutException): 9
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 9
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 9
Waiter (org.apache.hadoop.hbase.Waiter): 9