
Example 51 with ZKWatcher

Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.

From the class TestNamespaceReplicationWithBulkLoadedData, method testBulkLoadReplicationActiveActive.

@Test
@Override
public void testBulkLoadReplicationActiveActive() throws Exception {
    Table peer1TestTable = UTIL1.getConnection().getTable(TestReplicationBase.tableName);
    Table peer2TestTable = UTIL2.getConnection().getTable(TestReplicationBase.tableName);
    Table peer3TestTable = UTIL3.getConnection().getTable(TestReplicationBase.tableName);
    Table notPeerTable = UTIL4.getConnection().getTable(TestReplicationBase.tableName);
    Table ns1Table = UTIL4.getConnection().getTable(NS1_TABLE);
    Table ns2Table = UTIL4.getConnection().getTable(NS2_TABLE);
    // case1: The ns1 tables will be replicated to cluster4
    byte[] row = Bytes.toBytes("002_ns_peer");
    byte[] value = Bytes.toBytes("v2");
    bulkLoadOnCluster(ns1Table.getName(), row, value, UTIL1);
    waitForReplication(ns1Table, 1, NB_RETRIES);
    assertTableHasValue(ns1Table, row, value);
    // case2: The ns2:t2_syncup table will be replicated to cluster4.
    // Without the fix for HBASE-23098, ns_peer1's hfile-refs (in ZK) would build up a backlog.
    row = Bytes.toBytes("003_ns_table_peer");
    value = Bytes.toBytes("v2");
    bulkLoadOnCluster(ns2Table.getName(), row, value, UTIL1);
    waitForReplication(ns2Table, 1, NB_RETRIES);
    assertTableHasValue(ns2Table, row, value);
    // case3: The table 'test' will be replicated to cluster1, cluster2 and cluster3,
    // but not to cluster4, because no peer is configured for it there.
    row = Bytes.toBytes("001_nopeer");
    value = Bytes.toBytes("v1");
    assertBulkLoadConditions(tableName, row, value, UTIL1, peer1TestTable, peer2TestTable, peer3TestTable);
    // 1 -> 4, table is empty
    assertTableNoValue(notPeerTable, row, value);
    // Verify the hfile-refs for cluster1's ns_peer1; they are expected to be empty.
    MiniZooKeeperCluster zkCluster = UTIL1.getZkCluster();
    ZKWatcher watcher = new ZKWatcher(UTIL1.getConfiguration(), "TestZnodeHFiles-refs", null);
    RecoverableZooKeeper zk = RecoverableZooKeeper.connect(UTIL1.getConfiguration(), watcher);
    ZKReplicationQueueStorage replicationQueueStorage = new ZKReplicationQueueStorage(watcher, UTIL1.getConfiguration());
    Set<String> hfiles = replicationQueueStorage.getAllHFileRefs();
    assertTrue(hfiles.isEmpty());
}
Also used: Table (org.apache.hadoop.hbase.client.Table), RecoverableZooKeeper (org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper), ZKWatcher (org.apache.hadoop.hbase.zookeeper.ZKWatcher), MiniZooKeeperCluster (org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster), Test (org.junit.Test)
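
The ZooKeeper-specific tail of this test (from the ZKWatcher construction to the getAllHFileRefs() call) is the reusable part. Below is a minimal standalone sketch of that pattern, kept close to the calls used above; the class name HFileRefsCheck, the watcher identifier, and the main() wiring are illustrative additions, and a reachable ZooKeeper/HBase configuration on the classpath is assumed.

import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.replication.ZKReplicationQueueStorage;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public class HFileRefsCheck {

    // Open a short-lived ZKWatcher, wrap it in the ZK-backed replication queue storage,
    // and read the set of hfile references still queued for replication.
    public static Set<String> pendingHFileRefs(Configuration conf) throws Exception {
        try (ZKWatcher watcher = new ZKWatcher(conf, "hfile-refs-check", null)) {
            ZKReplicationQueueStorage queueStorage = new ZKReplicationQueueStorage(watcher, conf);
            return queueStorage.getAllHFileRefs();
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println("Pending hfile refs: " + pendingHFileRefs(HBaseConfiguration.create()));
    }
}

In the test above, the same query backs the final assertion: once replication has drained, getAllHFileRefs() is expected to come back empty.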

Example 52 with ZKWatcher

Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.

From the class TestPerTableCFReplication, method setUpBeforeClass.

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    conf1 = HBaseConfiguration.create();
    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
    // smaller block size and capacity to trigger more operations
    // and test them
    conf1.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20);
    conf1.setInt("replication.source.size.capacity", 1024);
    conf1.setLong("replication.source.sleepforretries", 100);
    conf1.setInt("hbase.regionserver.maxlogs", 10);
    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
    conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, "org.apache.hadoop.hbase.replication.TestMasterReplication$CoprocessorCounter");
    utility1 = new HBaseTestingUtil(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    // Constructing the watcher with canCreateBaseZNode=true ensures the "/1" base znode exists
    new ZKWatcher(conf1, "cluster1", null, true);
    conf2 = new Configuration(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf3 = new Configuration(conf1);
    conf3.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/3");
    utility2 = new HBaseTestingUtil(conf2);
    utility2.setZkCluster(miniZK);
    new ZKWatcher(conf2, "cluster2", null, true);
    utility3 = new HBaseTestingUtil(conf3);
    utility3.setZkCluster(miniZK);
    new ZKWatcher(conf3, "cluster3", null, true);
    table = TableDescriptorBuilder.newBuilder(tableName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(famName).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(noRepfamName))
        .build();
    tabA = TableDescriptorBuilder.newBuilder(tabAName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f1Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f2Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f3Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .build();
    tabB = TableDescriptorBuilder.newBuilder(tabBName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f1Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f2Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f3Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .build();
    tabC = TableDescriptorBuilder.newBuilder(tabCName)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f1Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f2Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(f3Name).setScope(HConstants.REPLICATION_SCOPE_GLOBAL).build())
        .build();
    utility1.startMiniCluster();
    utility2.startMiniCluster();
    utility3.startMiniCluster();
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration), ZKWatcher (org.apache.hadoop.hbase.zookeeper.ZKWatcher), MiniZooKeeperCluster (org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster), HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil), BeforeClass (org.junit.BeforeClass)
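
The bare new ZKWatcher(...) calls above appear to exist only for their side effect: with the fourth constructor argument (canCreateBaseZNode) set to true, building the watcher creates the cluster's parent znode ("/1", "/2", "/3") on the shared mini ZooKeeper cluster. A minimal sketch of that idea, with configuration values and identifiers made up for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public class PerClusterZNodeParent {

    public static void main(String[] args) throws Exception {
        Configuration base = HBaseConfiguration.create();
        // Each logical cluster gets its own parent znode under the shared ZooKeeper ensemble.
        String[] parents = { "/1", "/2", "/3" };
        for (int i = 0; i < parents.length; i++) {
            Configuration conf = new Configuration(base);
            conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parents[i]);
            // canCreateBaseZNode=true lets the constructor create the parent znode if it is missing.
            try (ZKWatcher watcher = new ZKWatcher(conf, "cluster" + (i + 1), null, true)) {
                System.out.println("base znode ready: " + watcher.getZNodePaths().baseZNode);
            }
        }
    }
}

This is why the test can run three HBaseTestingUtil instances against one MiniZooKeeperCluster without them trampling each other's znodes.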

Example 53 with ZKWatcher

Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.

From the class TestSplitLogManager, method setup.

@Before
public void setup() throws Exception {
    TEST_UTIL = new HBaseTestingUtil();
    TEST_UTIL.startMiniZKCluster();
    conf = TEST_UTIL.getConfiguration();
    // Use a different ZK wrapper instance for each test.
    zkw = new ZKWatcher(conf, "split-log-manager-tests" + TEST_UTIL.getRandomUUID().toString(), null);
    master = new DummyMasterServices(zkw, conf);
    ZKUtil.deleteChildrenRecursively(zkw, zkw.getZNodePaths().baseZNode);
    ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().baseZNode);
    assertTrue(ZKUtil.checkExists(zkw, zkw.getZNodePaths().baseZNode) != -1);
    LOG.debug(zkw.getZNodePaths().baseZNode + " created");
    ZKUtil.createAndFailSilent(zkw, zkw.getZNodePaths().splitLogZNode);
    assertTrue(ZKUtil.checkExists(zkw, zkw.getZNodePaths().splitLogZNode) != -1);
    LOG.debug(zkw.getZNodePaths().splitLogZNode + " created");
    resetCounters();
    // By default, we let the test manage the error as before, so the server
    // does not appear dead from the master's point of view, only from the split log manager's.
    Mockito.when(sm.isServerOnline(Mockito.any())).thenReturn(true);
    to = 12000;
    conf.setInt(HConstants.HBASE_SPLITLOG_MANAGER_TIMEOUT, to);
    conf.setInt("hbase.splitlog.manager.unassigned.timeout", 2 * to);
    conf.setInt("hbase.splitlog.manager.timeoutmonitor.period", 100);
    to = to + 16 * 100;
}
Also used: ZKWatcher (org.apache.hadoop.hbase.zookeeper.ZKWatcher), HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil), Before (org.junit.Before)
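
The setup above combines ZKWatcher with the static helpers in ZKUtil to guarantee a clean, existing znode layout before each test. Pulled out into a standalone sketch (class and method names are illustrative, and the configuration is assumed to point at a running ZooKeeper):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public class ZNodeBootstrap {

    // Same bootstrap pattern as the test: wipe any children, then make sure the base znode
    // exists before the component under test is started.
    public static void bootstrap(Configuration conf) throws Exception {
        try (ZKWatcher zkw = new ZKWatcher(conf, "znode-bootstrap", null)) {
            String base = zkw.getZNodePaths().baseZNode;
            ZKUtil.deleteChildrenRecursively(zkw, base); // start from a clean slate
            ZKUtil.createAndFailSilent(zkw, base);       // no-op if the node already exists
            // checkExists returns the znode's version, or -1 if the node is absent
            if (ZKUtil.checkExists(zkw, base) == -1) {
                throw new IllegalStateException("base znode was not created: " + base);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        bootstrap(HBaseConfiguration.create());
    }
}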

Example 54 with ZKWatcher

Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.

From the class TestZKProcedure, method runCommit.

private void runCommit(String... members) throws Exception {
    // make sure we just have an empty list
    if (members == null) {
        members = new String[0];
    }
    List<String> expected = Arrays.asList(members);
    // setup the constants
    ZKWatcher coordZkw = newZooKeeperWatcher();
    String opDescription = "coordination test - " + members.length + " cohort members";
    // start running the controller
    ZKProcedureCoordinator coordinatorComms = new ZKProcedureCoordinator(coordZkw, opDescription, COORDINATOR_NODE_NAME);
    ThreadPoolExecutor pool = ProcedureCoordinator.defaultPool(COORDINATOR_NODE_NAME, POOL_SIZE, KEEP_ALIVE);
    ProcedureCoordinator coordinator = new ProcedureCoordinator(coordinatorComms, pool) {

        @Override
        public Procedure createProcedure(ForeignExceptionDispatcher fed, String procName, byte[] procArgs, List<String> expectedMembers) {
            return Mockito.spy(super.createProcedure(fed, procName, procArgs, expectedMembers));
        }
    };
    // build and start members
    // NOTE: There is a single subprocedure builder for all members here.
    SubprocedureFactory subprocFactory = Mockito.mock(SubprocedureFactory.class);
    List<Pair<ProcedureMember, ZKProcedureMemberRpcs>> procMembers = new ArrayList<>(members.length);
    // start each member
    for (String member : members) {
        ZKWatcher watcher = newZooKeeperWatcher();
        ZKProcedureMemberRpcs comms = new ZKProcedureMemberRpcs(watcher, opDescription);
        ThreadPoolExecutor pool2 = ProcedureMember.defaultPool(member, 1, KEEP_ALIVE);
        ProcedureMember procMember = new ProcedureMember(comms, pool2, subprocFactory);
        procMembers.add(new Pair<>(procMember, comms));
        comms.start(member, procMember);
    }
    // setup mock member subprocedures
    final List<Subprocedure> subprocs = new ArrayList<>();
    for (int i = 0; i < procMembers.size(); i++) {
        ForeignExceptionDispatcher cohortMonitor = new ForeignExceptionDispatcher();
        Subprocedure commit = Mockito.spy(new SubprocedureImpl(procMembers.get(i).getFirst(), opName, cohortMonitor, WAKE_FREQUENCY, TIMEOUT));
        subprocs.add(commit);
    }
    // link subprocedure to buildNewOperation invocation.
    // NOTE: would be racy if not an AtomicInteger
    final AtomicInteger i = new AtomicInteger(0);
    Mockito.when(subprocFactory.buildSubprocedure(Mockito.eq(opName), (byte[]) Mockito.argThat(new ArrayEquals(data)))).thenAnswer(new Answer<Subprocedure>() {

        @Override
        public Subprocedure answer(InvocationOnMock invocation) throws Throwable {
            int index = i.getAndIncrement();
            LOG.debug("Task size:" + subprocs.size() + ", getting:" + index);
            Subprocedure commit = subprocs.get(index);
            return commit;
        }
    });
    // setup spying on the coordinator
    // Procedure proc = Mockito.spy(procBuilder.createProcedure(coordinator, opName, data, expected));
    // Mockito.when(procBuilder.build(coordinator, opName, data, expected)).thenReturn(proc);
    // start running the operation
    Procedure task = coordinator.startProcedure(new ForeignExceptionDispatcher(), opName, data, expected);
    // assertEquals("Didn't mock coordinator task", proc, task);
    // verify all things ran as expected
    // waitAndVerifyProc(proc, once, once, never(), once, false);
    waitAndVerifyProc(task, once, once, never(), once, false);
    verifyCohortSuccessful(expected, subprocFactory, subprocs, once, once, never(), once, false);
    // close all the things
    closeAll(coordinator, coordinatorComms, procMembers);
}
Also used: ArrayList (java.util.ArrayList), ArrayEquals (org.mockito.internal.matchers.ArrayEquals), SubprocedureImpl (org.apache.hadoop.hbase.procedure.Subprocedure.SubprocedureImpl), ZKWatcher (org.apache.hadoop.hbase.zookeeper.ZKWatcher), List (java.util.List), Pair (org.apache.hadoop.hbase.util.Pair), ForeignExceptionDispatcher (org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), InvocationOnMock (org.mockito.invocation.InvocationOnMock), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor)
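
Both TestZKProcedure methods call a private newZooKeeperWatcher() helper that is not part of this excerpt. A self-contained guess at what such a helper could look like (the class name, identifier string, and fail-fast Abortable here are assumptions, not the project's actual implementation):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public final class TestWatchers {

    // Opens a fresh watcher whose Abortable fails loudly instead of silently swallowing
    // ZooKeeper problems during a test run.
    public static ZKWatcher newZooKeeperWatcher(Configuration conf) throws IOException {
        return new ZKWatcher(conf, "procedure-test-watcher", new Abortable() {
            @Override
            public void abort(String why, Throwable e) {
                throw new RuntimeException("Unexpected ZooKeeper abort: " + why, e);
            }

            @Override
            public boolean isAborted() {
                return false;
            }
        });
    }
}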

Example 55 with ZKWatcher

Use of org.apache.hadoop.hbase.zookeeper.ZKWatcher in project hbase by apache.

From the class TestZKProcedure, method testMultiCohortWithMemberTimeoutDuringPrepare.

/**
 * Test a distributed commit with multiple cohort members, where one of the cohort members has a
 * timeout exception during the prepare stage.
 */
@Test
public void testMultiCohortWithMemberTimeoutDuringPrepare() throws Exception {
    String opDescription = "error injection coordination";
    String[] cohortMembers = new String[] { "one", "two", "three" };
    List<String> expected = Lists.newArrayList(cohortMembers);
    // error constants
    final int memberErrorIndex = 2;
    final CountDownLatch coordinatorReceivedErrorLatch = new CountDownLatch(1);
    // start running the coordinator and its controller
    ZKWatcher coordinatorWatcher = newZooKeeperWatcher();
    ZKProcedureCoordinator coordinatorController = new ZKProcedureCoordinator(coordinatorWatcher, opDescription, COORDINATOR_NODE_NAME);
    ThreadPoolExecutor pool = ProcedureCoordinator.defaultPool(COORDINATOR_NODE_NAME, POOL_SIZE, KEEP_ALIVE);
    ProcedureCoordinator coordinator = spy(new ProcedureCoordinator(coordinatorController, pool));
    // start a member for each node
    SubprocedureFactory subprocFactory = Mockito.mock(SubprocedureFactory.class);
    List<Pair<ProcedureMember, ZKProcedureMemberRpcs>> members = new ArrayList<>(expected.size());
    for (String member : expected) {
        ZKWatcher watcher = newZooKeeperWatcher();
        ZKProcedureMemberRpcs controller = new ZKProcedureMemberRpcs(watcher, opDescription);
        ThreadPoolExecutor pool2 = ProcedureMember.defaultPool(member, 1, KEEP_ALIVE);
        ProcedureMember mem = new ProcedureMember(controller, pool2, subprocFactory);
        members.add(new Pair<>(mem, controller));
        controller.start(member, mem);
    }
    // setup mock subprocedures
    final List<Subprocedure> cohortTasks = new ArrayList<>();
    final int[] elem = new int[1];
    for (int i = 0; i < members.size(); i++) {
        ForeignExceptionDispatcher cohortMonitor = new ForeignExceptionDispatcher();
        final ProcedureMember comms = members.get(i).getFirst();
        Subprocedure commit = Mockito.spy(new SubprocedureImpl(comms, opName, cohortMonitor, WAKE_FREQUENCY, TIMEOUT));
        // This nasty bit has one of the impls throw a TimeoutException
        Mockito.doAnswer(new Answer<Void>() {

            @Override
            public Void answer(InvocationOnMock invocation) throws Throwable {
                int index = elem[0];
                if (index == memberErrorIndex) {
                    LOG.debug("Sending error to coordinator");
                    ForeignException remoteCause = new ForeignException("TIMER", new TimeoutException("subprocTimeout", 1, 2, 0));
                    Subprocedure r = ((Subprocedure) invocation.getMock());
                    LOG.error("Remote commit failure, not propagating error:" + remoteCause);
                    comms.receiveAbortProcedure(r.getName(), remoteCause);
                    assertTrue(r.isComplete());
                    // wait for the coordinator-received-error notification
                    // (this ensures that we never progress past prepare)
                    try {
                        Procedure.waitForLatch(coordinatorReceivedErrorLatch, new ForeignExceptionDispatcher(), WAKE_FREQUENCY, "coordinator received error");
                    } catch (InterruptedException e) {
                        LOG.debug("Wait for latch interrupted, done:" + (coordinatorReceivedErrorLatch.getCount() == 0));
                        // reset the interrupt status on the thread
                        Thread.currentThread().interrupt();
                    }
                }
                elem[0] = ++index;
                return null;
            }
        }).when(commit).acquireBarrier();
        cohortTasks.add(commit);
    }
    // pass out a task per member
    final AtomicInteger taskIndex = new AtomicInteger();
    Mockito.when(subprocFactory.buildSubprocedure(Mockito.eq(opName), (byte[]) Mockito.argThat(new ArrayEquals(data)))).thenAnswer(new Answer<Subprocedure>() {

        @Override
        public Subprocedure answer(InvocationOnMock invocation) throws Throwable {
            int index = taskIndex.getAndIncrement();
            Subprocedure commit = cohortTasks.get(index);
            return commit;
        }
    });
    // setup spying on the coordinator
    ForeignExceptionDispatcher coordinatorTaskErrorMonitor = Mockito.spy(new ForeignExceptionDispatcher());
    Procedure coordinatorTask = Mockito.spy(new Procedure(coordinator, coordinatorTaskErrorMonitor, WAKE_FREQUENCY, TIMEOUT, opName, data, expected));
    when(coordinator.createProcedure(any(), eq(opName), eq(data), anyListOf(String.class))).thenReturn(coordinatorTask);
    // count down the error latch when we get the remote error
    Mockito.doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            // pass on the error to the master
            invocation.callRealMethod();
            // then count down the got error latch
            coordinatorReceivedErrorLatch.countDown();
            return null;
        }
    }).when(coordinatorTask).receive(Mockito.any());
    // ----------------------------
    // start running the operation
    // ----------------------------
    Procedure task = coordinator.startProcedure(coordinatorTaskErrorMonitor, opName, data, expected);
    assertEquals("Didn't mock coordinator task", coordinatorTask, task);
    // wait for the task to complete
    try {
        task.waitForCompleted();
    } catch (ForeignException fe) {
    // this may get caught or may not
    }
    // -------------
    // verification
    // -------------
    // always expect prepared, never committed, and possible to have cleanup and finish (racy since
    // error case)
    waitAndVerifyProc(coordinatorTask, once, never(), once, atMost(1), true);
    verifyCohortSuccessful(expected, subprocFactory, cohortTasks, once, never(), once, once, true);
    // close all the open things
    closeAll(coordinator, coordinatorController, members);
}
Also used: ArrayList (java.util.ArrayList), ArrayEquals (org.mockito.internal.matchers.ArrayEquals), SubprocedureImpl (org.apache.hadoop.hbase.procedure.Subprocedure.SubprocedureImpl), ZKWatcher (org.apache.hadoop.hbase.zookeeper.ZKWatcher), ForeignException (org.apache.hadoop.hbase.errorhandling.ForeignException), Pair (org.apache.hadoop.hbase.util.Pair), TimeoutException (org.apache.hadoop.hbase.errorhandling.TimeoutException), CountDownLatch (java.util.concurrent.CountDownLatch), ForeignExceptionDispatcher (org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), InvocationOnMock (org.mockito.invocation.InvocationOnMock), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), Test (org.junit.Test)
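
The error-injection path above hinges on HBase's errorhandling primitives: a TimeoutException is wrapped in a ForeignException and then handed off so the coordinator learns about the remote failure. A tiny standalone sketch of just that wrapping, outside the coordinator/member wiring that the real test supplies (the test's mocked acquireBarrier() hands the exception to the member's receiveAbortProcedure; here it is simply delivered to a plain dispatcher to show the mechanics):

import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.errorhandling.TimeoutException;

public class ErrorInjectionSketch {

    public static void main(String[] args) {
        // Simulate a member timing out during prepare: wrap the timeout in a ForeignException
        // and deliver it to a dispatcher, which would normally fan it out to its listeners.
        TimeoutException timeout = new TimeoutException("subprocTimeout", 1, 2, 0);
        ForeignException remoteCause = new ForeignException("TIMER", timeout);
        ForeignExceptionDispatcher dispatcher = new ForeignExceptionDispatcher();
        dispatcher.receive(remoteCause);
        System.out.println("dispatcher has exception: " + dispatcher.hasException());
    }
}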

Aggregations

ZKWatcher (org.apache.hadoop.hbase.zookeeper.ZKWatcher): 66
Configuration (org.apache.hadoop.conf.Configuration): 27
Test (org.junit.Test): 24
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration): 13
ArrayList (java.util.ArrayList): 10
ServerName (org.apache.hadoop.hbase.ServerName): 10
IOException (java.io.IOException): 8
HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil): 8
Before (org.junit.Before): 8
BeforeClass (org.junit.BeforeClass): 8
Admin (org.apache.hadoop.hbase.client.Admin): 7
Abortable (org.apache.hadoop.hbase.Abortable): 6
HMaster (org.apache.hadoop.hbase.master.HMaster): 6
MiniZooKeeperCluster (org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster): 6
List (java.util.List): 5
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 5
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor): 5
ReplicationPeerConfig (org.apache.hadoop.hbase.replication.ReplicationPeerConfig): 5
KeeperException (org.apache.zookeeper.KeeperException): 5
CountDownLatch (java.util.concurrent.CountDownLatch): 4