
Example 16 with CuratorFramework

Use of org.apache.curator.framework.CuratorFramework in project flink by apache.

The class JobManagerLeaderElectionTest, method createJobManagerProps:

private Props createJobManagerProps(Configuration configuration) throws Exception {
    LeaderElectionService leaderElectionService;
    if (HighAvailabilityMode.fromConfig(configuration) == HighAvailabilityMode.NONE) {
        leaderElectionService = new StandaloneLeaderElectionService();
    } else {
        CuratorFramework client = ZooKeeperUtils.startCuratorFramework(configuration);
        leaderElectionService = ZooKeeperUtils.createLeaderElectionService(client, configuration);
    }
    // We don't need recovery in this test
    SubmittedJobGraphStore submittedJobGraphStore = new StandaloneSubmittedJobGraphStore();
    CheckpointRecoveryFactory checkpointRecoveryFactory = new StandaloneCheckpointRecoveryFactory();
    return Props.create(
            TestingJobManager.class,
            configuration,
            TestingUtils.defaultExecutor(),
            TestingUtils.defaultExecutor(),
            new InstanceManager(),
            new Scheduler(TestingUtils.defaultExecutionContext()),
            new BlobLibraryCacheManager(new BlobServer(configuration), 10L),
            ActorRef.noSender(),
            new NoRestartStrategy.NoRestartStrategyFactory(),
            AkkaUtils.getDefaultTimeoutAsFiniteDuration(),
            leaderElectionService,
            submittedJobGraphStore,
            checkpointRecoveryFactory,
            AkkaUtils.getDefaultTimeoutAsFiniteDuration(),
            Option.apply(null));
}
Also used : StandaloneSubmittedJobGraphStore (org.apache.flink.runtime.jobmanager.StandaloneSubmittedJobGraphStore), SubmittedJobGraphStore (org.apache.flink.runtime.jobmanager.SubmittedJobGraphStore), BlobLibraryCacheManager (org.apache.flink.runtime.execution.librarycache.BlobLibraryCacheManager), StandaloneCheckpointRecoveryFactory (org.apache.flink.runtime.checkpoint.StandaloneCheckpointRecoveryFactory), InstanceManager (org.apache.flink.runtime.instance.InstanceManager), Scheduler (org.apache.flink.runtime.jobmanager.scheduler.Scheduler), CheckpointRecoveryFactory (org.apache.flink.runtime.checkpoint.CheckpointRecoveryFactory), NoRestartStrategy (org.apache.flink.runtime.executiongraph.restart.NoRestartStrategy), CuratorFramework (org.apache.curator.framework.CuratorFramework), BlobServer (org.apache.flink.runtime.blob.BlobServer)
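
For context, ZooKeeperUtils.startCuratorFramework builds and starts a Curator client from the Flink HA configuration. Outside Flink, the equivalent can be done directly with the Curator API; a minimal sketch follows, with a placeholder connection string and retry settings that are not taken from the test above.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class CuratorClientSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder quorum address; Flink reads this from its HA configuration.
        String connectString = "localhost:2181";
        // Retry with exponential backoff: 1 s base sleep, at most 3 retries.
        CuratorFramework client = CuratorFrameworkFactory.newClient(
                connectString, new ExponentialBackoffRetry(1000, 3));
        client.start(); // must be started before any recipe (e.g. leader election) uses it
        try {
            client.blockUntilConnected();
            System.out.println("connected to " + connectString);
        } finally {
            client.close();
        }
    }
}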

Example 17 with CuratorFramework

Use of org.apache.curator.framework.CuratorFramework in project hadoop by apache.

The class ZKDelegationTokenSecretManager, method startThreads:

@Override
public void startThreads() throws IOException {
    if (!isExternalClient) {
        try {
            zkClient.start();
        } catch (Exception e) {
            throw new IOException("Could not start Curator Framework", e);
        }
    } else {
        // If namespace parents are implicitly created, they won't have ACLs.
        // So, let's explicitly create them.
        CuratorFramework nullNsFw = zkClient.usingNamespace(null);
        EnsurePath ensureNs = nullNsFw.newNamespaceAwareEnsurePath("/" + zkClient.getNamespace());
        try {
            ensureNs.ensure(nullNsFw.getZookeeperClient());
        } catch (Exception e) {
            throw new IOException("Could not create namespace", e);
        }
    }
    listenerThreadPool = Executors.newSingleThreadExecutor();
    try {
        delTokSeqCounter = new SharedCount(zkClient, ZK_DTSM_SEQNUM_ROOT, 0);
        if (delTokSeqCounter != null) {
            delTokSeqCounter.start();
        }
    } catch (Exception e) {
        throw new IOException("Could not start Sequence Counter", e);
    }
    try {
        keyIdSeqCounter = new SharedCount(zkClient, ZK_DTSM_KEYID_ROOT, 0);
        if (keyIdSeqCounter != null) {
            keyIdSeqCounter.start();
        }
    } catch (Exception e) {
        throw new IOException("Could not start KeyId Counter", e);
    }
    try {
        createPersistentNode(ZK_DTSM_MASTER_KEY_ROOT);
        createPersistentNode(ZK_DTSM_TOKENS_ROOT);
    } catch (Exception e) {
        throw new RuntimeException("Could not create ZK paths", e);
    }
    try {
        keyCache = new PathChildrenCache(zkClient, ZK_DTSM_MASTER_KEY_ROOT, true);
        if (keyCache != null) {
            keyCache.start(StartMode.BUILD_INITIAL_CACHE);
            keyCache.getListenable().addListener(new PathChildrenCacheListener() {

                @Override
                public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                    switch(event.getType()) {
                        case CHILD_ADDED:
                            processKeyAddOrUpdate(event.getData().getData());
                            break;
                        case CHILD_UPDATED:
                            processKeyAddOrUpdate(event.getData().getData());
                            break;
                        case CHILD_REMOVED:
                            processKeyRemoved(event.getData().getPath());
                            break;
                        default:
                            break;
                    }
                }
            }, listenerThreadPool);
            loadFromZKCache(false);
        }
    } catch (Exception e) {
        throw new IOException("Could not start PathChildrenCache for keys", e);
    }
    try {
        tokenCache = new PathChildrenCache(zkClient, ZK_DTSM_TOKENS_ROOT, true);
        if (tokenCache != null) {
            tokenCache.start(StartMode.BUILD_INITIAL_CACHE);
            tokenCache.getListenable().addListener(new PathChildrenCacheListener() {

                @Override
                public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                    switch(event.getType()) {
                        case CHILD_ADDED:
                            processTokenAddOrUpdate(event.getData());
                            break;
                        case CHILD_UPDATED:
                            processTokenAddOrUpdate(event.getData());
                            break;
                        case CHILD_REMOVED:
                            processTokenRemoved(event.getData());
                            break;
                        default:
                            break;
                    }
                }
            }, listenerThreadPool);
            loadFromZKCache(true);
        }
    } catch (Exception e) {
        throw new IOException("Could not start PathChildrenCache for tokens", e);
    }
    super.startThreads();
}
Also used : CuratorFramework (org.apache.curator.framework.CuratorFramework), EnsurePath (org.apache.curator.utils.EnsurePath), SharedCount (org.apache.curator.framework.recipes.shared.SharedCount), PathChildrenCacheListener (org.apache.curator.framework.recipes.cache.PathChildrenCacheListener), PathChildrenCacheEvent (org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent), PathChildrenCache (org.apache.curator.framework.recipes.cache.PathChildrenCache), IOException (java.io.IOException), KeeperException (org.apache.zookeeper.KeeperException), NoNodeException (org.apache.zookeeper.KeeperException.NoNodeException)
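
The snippet above assumes a zkClient field that is either built internally or supplied as an external, already-namespaced client. A minimal sketch of constructing such a namespaced client is shown below; the connect string, namespace, and retry policy are illustrative placeholders, not Hadoop's actual defaults.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryNTimes;

public class NamespacedClientSketch {
    public static CuratorFramework newNamespacedClient() {
        // Placeholder connection settings; real deployments read these from configuration.
        CuratorFramework client = CuratorFrameworkFactory.builder()
                .connectString("zk1:2181,zk2:2181,zk3:2181")
                .namespace("dt-secret-manager")        // hypothetical namespace
                .retryPolicy(new RetryNTimes(3, 1000)) // 3 retries, 1 s apart
                .build();
        client.start(); // the caller owns the client's lifecycle and must close() it
        return client;
    }
}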

Example 18 with CuratorFramework

Use of org.apache.curator.framework.CuratorFramework in project druid by druid-io.

The class AnnouncerTest, method testSanity:

@Test(timeout = 60_000L)
public void testSanity() throws Exception {
    curator.start();
    curator.blockUntilConnected();
    Announcer announcer = new Announcer(curator, exec);
    final byte[] billy = "billy".getBytes();
    final String testPath1 = "/test1";
    final String testPath2 = "/somewhere/test2";
    announcer.announce(testPath1, billy);
    Assert.assertNull("/test1 does not exists", curator.checkExists().forPath(testPath1));
    Assert.assertNull("/somewhere/test2 does not exists", curator.checkExists().forPath(testPath2));
    announcer.start();
    try {
        Assert.assertArrayEquals("/test1 has data", billy, curator.getData().decompressed().forPath(testPath1));
        Assert.assertNull("/somewhere/test2 still does not exist", curator.checkExists().forPath(testPath2));
        announcer.announce(testPath2, billy);
        Assert.assertArrayEquals("/test1 still has data", billy, curator.getData().decompressed().forPath(testPath1));
        Assert.assertArrayEquals("/somewhere/test2 has data", billy, curator.getData().decompressed().forPath(testPath2));
        final CountDownLatch latch = new CountDownLatch(1);
        curator.getCuratorListenable().addListener(new CuratorListener() {

            @Override
            public void eventReceived(CuratorFramework client, CuratorEvent event) throws Exception {
                if (event.getType() == CuratorEventType.CREATE && event.getPath().equals(testPath1)) {
                    latch.countDown();
                }
            }
        });
        curator.inTransaction().delete().forPath(testPath1).and().commit();
        Assert.assertTrue("Wait for /test1 to be created", timing.forWaiting().awaitLatch(latch));
        Assert.assertArrayEquals("expect /test1 data is restored", billy, curator.getData().decompressed().forPath(testPath1));
        Assert.assertArrayEquals("expect /somewhere/test2 is still there", billy, curator.getData().decompressed().forPath(testPath2));
        announcer.unannounce(testPath1);
        Assert.assertNull("expect /test1 unannounced", curator.checkExists().forPath(testPath1));
        Assert.assertArrayEquals("expect /somewhere/test2 is still still there", billy, curator.getData().decompressed().forPath(testPath2));
    } finally {
        announcer.stop();
    }
    Assert.assertNull("expect /test1 remains unannounced", curator.checkExists().forPath(testPath1));
    Assert.assertNull("expect /somewhere/test2 unannounced", curator.checkExists().forPath(testPath2));
}
Also used : CuratorFramework(org.apache.curator.framework.CuratorFramework) CuratorListener(org.apache.curator.framework.api.CuratorListener) CuratorEvent(org.apache.curator.framework.api.CuratorEvent) CountDownLatch(java.util.concurrent.CountDownLatch) Test(org.junit.Test)

Example 19 with CuratorFramework

Use of org.apache.curator.framework.CuratorFramework in project druid by druid-io.

The class AnnouncerTest, method awaitAnnounce:

private void awaitAnnounce(final Announcer announcer, final String path, final byte[] bytes, boolean removeParentsIfCreated) throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    curator.getCuratorListenable().addListener(new CuratorListener() {

        @Override
        public void eventReceived(CuratorFramework client, CuratorEvent event) throws Exception {
            if (event.getType() == CuratorEventType.CREATE && event.getPath().equals(path)) {
                latch.countDown();
            }
        }
    });
    announcer.announce(path, bytes, removeParentsIfCreated);
    latch.await();
}
Also used : CuratorFramework(org.apache.curator.framework.CuratorFramework) CuratorListener(org.apache.curator.framework.api.CuratorListener) CuratorEvent(org.apache.curator.framework.api.CuratorEvent) CountDownLatch(java.util.concurrent.CountDownLatch)

Example 20 with CuratorFramework

Use of org.apache.curator.framework.CuratorFramework in project druid by druid-io.

The class DruidCoordinatorTest, method testCoordinatorRun:

@Test(timeout = 60_000L)
public void testCoordinatorRun() throws Exception {
    String dataSource = "dataSource1";
    String tier = "hot";
    // Setup MetadataRuleManager
    Rule foreverLoadRule = new ForeverLoadRule(ImmutableMap.of(tier, 2));
    EasyMock.expect(metadataRuleManager.getRulesWithDefault(EasyMock.anyString())).andReturn(ImmutableList.of(foreverLoadRule)).atLeastOnce();
    metadataRuleManager.stop();
    EasyMock.expectLastCall().once();
    EasyMock.replay(metadataRuleManager);
    // Setup MetadataSegmentManager
    DruidDataSource[] druidDataSources = { new DruidDataSource(dataSource, Collections.<String, String>emptyMap()) };
    final DataSegment dataSegment = new DataSegment(dataSource, new Interval("2010-01-01/P1D"), "v1", null, null, null, null, 0x9, 0);
    druidDataSources[0].addSegment("0", dataSegment);
    EasyMock.expect(databaseSegmentManager.isStarted()).andReturn(true).anyTimes();
    EasyMock.expect(databaseSegmentManager.getInventory()).andReturn(ImmutableList.of(druidDataSources[0])).atLeastOnce();
    EasyMock.replay(databaseSegmentManager);
    ImmutableDruidDataSource immutableDruidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
    EasyMock.expect(immutableDruidDataSource.getSegments()).andReturn(ImmutableSet.of(dataSegment)).atLeastOnce();
    EasyMock.replay(immutableDruidDataSource);
    // Setup ServerInventoryView
    druidServer = new DruidServer("server1", "localhost", 5L, "historical", tier, 0);
    loadManagementPeons.put("server1", loadQueuePeon);
    EasyMock.expect(serverInventoryView.getInventory()).andReturn(ImmutableList.of(druidServer)).atLeastOnce();
    EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
    EasyMock.replay(serverInventoryView);
    coordinator.start();
    // Wait for this coordinator to become leader
    leaderAnnouncerLatch.await();
    // This coordinator should be leader by now
    Assert.assertTrue(coordinator.isLeader());
    Assert.assertEquals(druidNode.getHostAndPort(), coordinator.getCurrentLeader());
    final CountDownLatch assignSegmentLatch = new CountDownLatch(1);
    pathChildrenCache.getListenable().addListener(new PathChildrenCacheListener() {

        @Override
        public void childEvent(CuratorFramework curatorFramework, PathChildrenCacheEvent pathChildrenCacheEvent) throws Exception {
            if (pathChildrenCacheEvent.getType().equals(PathChildrenCacheEvent.Type.CHILD_ADDED)) {
                // Coordinator should try to assign the segment to the druidServer historical
                // Simulate the historical loading the segment
                druidServer.addDataSegment(dataSegment.getIdentifier(), dataSegment);
                assignSegmentLatch.countDown();
            }
        }
    });
    pathChildrenCache.start();
    assignSegmentLatch.await();
    Assert.assertEquals(ImmutableMap.of(dataSource, 100.0), coordinator.getLoadStatus());
    curator.delete().guaranteed().forPath(ZKPaths.makePath(LOADPATH, dataSegment.getIdentifier()));
    // Wait for coordinator thread to run so that replication status is updated
    while (coordinator.getSegmentAvailability().snapshot().get(dataSource) != 0) {
        Thread.sleep(50);
    }
    Map segmentAvailability = coordinator.getSegmentAvailability().snapshot();
    Assert.assertEquals(1, segmentAvailability.size());
    Assert.assertEquals(0L, segmentAvailability.get(dataSource));
    while (coordinator.getLoadPendingDatasources().get(dataSource).get() > 0) {
        Thread.sleep(50);
    }
    // Wait for historical data to be updated
    long startMillis = System.currentTimeMillis();
    long coordinatorRunPeriodMillis = druidCoordinatorConfig.getCoordinatorPeriod().getMillis();
    while (System.currentTimeMillis() - startMillis < coordinatorRunPeriodMillis) {
        Thread.sleep(100);
    }
    Map<String, CountingMap<String>> replicationStatus = coordinator.getReplicationStatus();
    Assert.assertNotNull(replicationStatus);
    Assert.assertEquals(1, replicationStatus.entrySet().size());
    CountingMap<String> dataSourceMap = replicationStatus.get(tier);
    Assert.assertNotNull(dataSourceMap);
    Assert.assertEquals(1, dataSourceMap.size());
    Assert.assertNotNull(dataSourceMap.get(dataSource));
    // The load rule asks for 2 replicas, therefore 1 replica should still be pending
    while (dataSourceMap.get(dataSource).get() != 1L) {
        Thread.sleep(50);
    }
    coordinator.stop();
    leaderUnannouncerLatch.await();
    Assert.assertFalse(coordinator.isLeader());
    Assert.assertNull(coordinator.getCurrentLeader());
    EasyMock.verify(serverInventoryView);
    EasyMock.verify(metadataRuleManager);
}
Also used : ImmutableDruidDataSource (io.druid.client.ImmutableDruidDataSource), PathChildrenCacheListener (org.apache.curator.framework.recipes.cache.PathChildrenCacheListener), PathChildrenCacheEvent (org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent), ImmutableDruidServer (io.druid.client.ImmutableDruidServer), DruidServer (io.druid.client.DruidServer), CountDownLatch (java.util.concurrent.CountDownLatch), DruidDataSource (io.druid.client.DruidDataSource), DataSegment (io.druid.timeline.DataSegment), CountingMap (io.druid.collections.CountingMap), CuratorFramework (org.apache.curator.framework.CuratorFramework), ForeverLoadRule (io.druid.server.coordinator.rules.ForeverLoadRule), Rule (io.druid.server.coordinator.rules.Rule), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), HashMap (java.util.HashMap), ConcurrentMap (java.util.concurrent.ConcurrentMap), Interval (org.joda.time.Interval), Test (org.junit.Test)
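
Both the Hadoop and Druid examples wire a PathChildrenCacheListener onto a PathChildrenCache before starting it. A minimal, self-contained sketch of that pattern is shown below; the watched path is a caller-supplied placeholder, not the coordinator's actual load queue path.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCache.StartMode;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener;

public class ChildWatcherSketch {
    // Returns a started cache; the caller is responsible for calling close() on it.
    public static PathChildrenCache watchChildren(CuratorFramework client, String path)
            throws Exception {
        PathChildrenCache cache = new PathChildrenCache(client, path, true /* cache data */);
        cache.getListenable().addListener(new PathChildrenCacheListener() {
            @Override
            public void childEvent(CuratorFramework c, PathChildrenCacheEvent event) {
                String childPath = event.getData() != null ? event.getData().getPath() : "";
                System.out.println(event.getType() + " " + childPath);
            }
        });
        // BUILD_INITIAL_CACHE blocks until the existing children have been loaded.
        cache.start(StartMode.BUILD_INITIAL_CACHE);
        return cache;
    }
}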

Aggregations

CuratorFramework (org.apache.curator.framework.CuratorFramework): 924
Test (org.testng.annotations.Test): 290
RetryOneTime (org.apache.curator.retry.RetryOneTime): 271
Test (org.junit.Test): 199
Timing (org.apache.curator.test.Timing): 147
CountDownLatch (java.util.concurrent.CountDownLatch): 124
ExponentialBackoffRetry (org.apache.curator.retry.ExponentialBackoffRetry): 114
KeeperException (org.apache.zookeeper.KeeperException): 93
IOException (java.io.IOException): 79
ConnectionState (org.apache.curator.framework.state.ConnectionState): 71
CuratorEvent (org.apache.curator.framework.api.CuratorEvent): 58
ExecutorService (java.util.concurrent.ExecutorService): 55
ConnectionStateListener (org.apache.curator.framework.state.ConnectionStateListener): 53
ArrayList (java.util.ArrayList): 51
RetryNTimes (org.apache.curator.retry.RetryNTimes): 51
RetryPolicy (org.apache.curator.RetryPolicy): 41
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 38
Cleanup (lombok.Cleanup): 37
BackgroundCallback (org.apache.curator.framework.api.BackgroundCallback): 37
Stat (org.apache.zookeeper.data.Stat): 36