Use of org.apache.flink.shaded.curator5.org.apache.curator.framework.CuratorFramework in project flink by apache.
The class JobManagerLeaderElectionTest, method createJobManagerProps.
private Props createJobManagerProps(Configuration configuration) throws Exception {
    LeaderElectionService leaderElectionService;
    if (HighAvailabilityMode.fromConfig(configuration) == HighAvailabilityMode.NONE) {
        leaderElectionService = new StandaloneLeaderElectionService();
    } else {
        CuratorFramework client = ZooKeeperUtils.startCuratorFramework(configuration);
        leaderElectionService = ZooKeeperUtils.createLeaderElectionService(client, configuration);
    }
    // We don't need recovery in this test
    SubmittedJobGraphStore submittedJobGraphStore = new StandaloneSubmittedJobGraphStore();
    CheckpointRecoveryFactory checkpointRecoveryFactory = new StandaloneCheckpointRecoveryFactory();
    return Props.create(
        TestingJobManager.class,
        configuration,
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        new InstanceManager(),
        new Scheduler(TestingUtils.defaultExecutionContext()),
        new BlobLibraryCacheManager(new BlobServer(configuration), 10L),
        ActorRef.noSender(),
        new NoRestartStrategy.NoRestartStrategyFactory(),
        AkkaUtils.getDefaultTimeoutAsFiniteDuration(),
        leaderElectionService,
        submittedJobGraphStore,
        checkpointRecoveryFactory,
        AkkaUtils.getDefaultTimeoutAsFiniteDuration(),
        Option.apply(null));
}
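For context, ZooKeeperUtils.startCuratorFramework and createLeaderElectionService wrap plain Curator calls; the internals are not shown above. A rough sketch of the underlying bootstrap using Curator's public API, assuming a local ZooKeeper at 127.0.0.1:2181 and an illustrative /leader-election path (both hypothetical, not Flink's actual defaults):

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.leader.LeaderLatch;
import org.apache.curator.retry.ExponentialBackoffRetry;

public final class LeaderLatchSketch {
    public static void main(String[] args) throws Exception {
        // Connect string and path are illustrative, not Flink configuration values.
        CuratorFramework client = CuratorFrameworkFactory.newClient(
            "127.0.0.1:2181", new ExponentialBackoffRetry(1000, 3));
        client.start();
        try (LeaderLatch latch = new LeaderLatch(client, "/leader-election")) {
            latch.start();
            latch.await(); // blocks until this participant is elected leader
            System.out.println("became leader: " + latch.getId());
        } finally {
            client.close();
        }
    }
}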
Use of org.apache.curator.framework.CuratorFramework in project hadoop by apache.
The class ZKDelegationTokenSecretManager, method startThreads.
@Override
public void startThreads() throws IOException {
    if (!isExternalClient) {
        try {
            zkClient.start();
        } catch (Exception e) {
            throw new IOException("Could not start Curator Framework", e);
        }
    } else {
        // If namespace parents are implicitly created, they won't have ACLs.
        // So, let's explicitly create them.
        CuratorFramework nullNsFw = zkClient.usingNamespace(null);
        EnsurePath ensureNs = nullNsFw.newNamespaceAwareEnsurePath("/" + zkClient.getNamespace());
        try {
            ensureNs.ensure(nullNsFw.getZookeeperClient());
        } catch (Exception e) {
            throw new IOException("Could not create namespace", e);
        }
    }
    listenerThreadPool = Executors.newSingleThreadExecutor();
    try {
        delTokSeqCounter = new SharedCount(zkClient, ZK_DTSM_SEQNUM_ROOT, 0);
        if (delTokSeqCounter != null) {
            delTokSeqCounter.start();
        }
    } catch (Exception e) {
        throw new IOException("Could not start Sequence Counter", e);
    }
    try {
        keyIdSeqCounter = new SharedCount(zkClient, ZK_DTSM_KEYID_ROOT, 0);
        if (keyIdSeqCounter != null) {
            keyIdSeqCounter.start();
        }
    } catch (Exception e) {
        throw new IOException("Could not start KeyId Counter", e);
    }
    try {
        createPersistentNode(ZK_DTSM_MASTER_KEY_ROOT);
        createPersistentNode(ZK_DTSM_TOKENS_ROOT);
    } catch (Exception e) {
        throw new RuntimeException("Could not create ZK paths", e);
    }
    try {
        keyCache = new PathChildrenCache(zkClient, ZK_DTSM_MASTER_KEY_ROOT, true);
        if (keyCache != null) {
            keyCache.start(StartMode.BUILD_INITIAL_CACHE);
            keyCache.getListenable().addListener(new PathChildrenCacheListener() {
                @Override
                public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                    switch (event.getType()) {
                        case CHILD_ADDED:
                            processKeyAddOrUpdate(event.getData().getData());
                            break;
                        case CHILD_UPDATED:
                            processKeyAddOrUpdate(event.getData().getData());
                            break;
                        case CHILD_REMOVED:
                            processKeyRemoved(event.getData().getPath());
                            break;
                        default:
                            break;
                    }
                }
            }, listenerThreadPool);
            loadFromZKCache(false);
        }
    } catch (Exception e) {
        throw new IOException("Could not start PathChildrenCache for keys", e);
    }
    try {
        tokenCache = new PathChildrenCache(zkClient, ZK_DTSM_TOKENS_ROOT, true);
        if (tokenCache != null) {
            tokenCache.start(StartMode.BUILD_INITIAL_CACHE);
            tokenCache.getListenable().addListener(new PathChildrenCacheListener() {
                @Override
                public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception {
                    switch (event.getType()) {
                        case CHILD_ADDED:
                            processTokenAddOrUpdate(event.getData());
                            break;
                        case CHILD_UPDATED:
                            processTokenAddOrUpdate(event.getData());
                            break;
                        case CHILD_REMOVED:
                            processTokenRemoved(event.getData());
                            break;
                        default:
                            break;
                    }
                }
            }, listenerThreadPool);
            loadFromZKCache(true);
        }
    } catch (Exception e) {
        throw new IOException("Could not start PathChildrenCache for tokens", e);
    }
    super.startThreads();
}
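The SharedCount recipe used for delTokSeqCounter and keyIdSeqCounter keeps a cluster-wide integer in a znode. A minimal sketch of its compare-and-set update cycle, assuming a started CuratorFramework named client; the /dtsm/seqnum path is illustrative, not Hadoop's actual constant:

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.shared.SharedCount;
import org.apache.curator.framework.recipes.shared.VersionedValue;

static void incrementOnce(CuratorFramework client) throws Exception {
    SharedCount counter = new SharedCount(client, "/dtsm/seqnum", 0); // path is illustrative
    counter.start();
    try {
        VersionedValue<Integer> current = counter.getVersionedValue();
        // trySetCount is a compare-and-set: it returns false if another
        // process updated the counter first, in which case callers re-read and retry.
        if (!counter.trySetCount(current, current.getValue() + 1)) {
            // retry loop omitted in this sketch
        }
    } finally {
        counter.close();
    }
}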
Use of org.apache.curator.framework.CuratorFramework in project druid by druid-io.
The class AnnouncerTest, method testSanity.
@Test(timeout = 60_000L)
public void testSanity() throws Exception {
    curator.start();
    curator.blockUntilConnected();
    Announcer announcer = new Announcer(curator, exec);
    final byte[] billy = "billy".getBytes();
    final String testPath1 = "/test1";
    final String testPath2 = "/somewhere/test2";
    announcer.announce(testPath1, billy);
    Assert.assertNull("/test1 does not exist yet", curator.checkExists().forPath(testPath1));
    Assert.assertNull("/somewhere/test2 does not exist yet", curator.checkExists().forPath(testPath2));
    announcer.start();
    try {
        Assert.assertArrayEquals("/test1 has data", billy, curator.getData().decompressed().forPath(testPath1));
        Assert.assertNull("/somewhere/test2 still does not exist", curator.checkExists().forPath(testPath2));
        announcer.announce(testPath2, billy);
        Assert.assertArrayEquals("/test1 still has data", billy, curator.getData().decompressed().forPath(testPath1));
        Assert.assertArrayEquals("/somewhere/test2 has data", billy, curator.getData().decompressed().forPath(testPath2));
        final CountDownLatch latch = new CountDownLatch(1);
        curator.getCuratorListenable().addListener(new CuratorListener() {
            @Override
            public void eventReceived(CuratorFramework client, CuratorEvent event) throws Exception {
                if (event.getType() == CuratorEventType.CREATE && event.getPath().equals(testPath1)) {
                    latch.countDown();
                }
            }
        });
        curator.inTransaction().delete().forPath(testPath1).and().commit();
        Assert.assertTrue("Wait for /test1 to be recreated", timing.forWaiting().awaitLatch(latch));
        Assert.assertArrayEquals("expect /test1 data is restored", billy, curator.getData().decompressed().forPath(testPath1));
        Assert.assertArrayEquals("expect /somewhere/test2 is still there", billy, curator.getData().decompressed().forPath(testPath2));
        announcer.unannounce(testPath1);
        Assert.assertNull("expect /test1 unannounced", curator.checkExists().forPath(testPath1));
        Assert.assertArrayEquals("expect /somewhere/test2 is still there", billy, curator.getData().decompressed().forPath(testPath2));
    } finally {
        announcer.stop();
    }
    Assert.assertNull("expect /test1 remains unannounced", curator.checkExists().forPath(testPath1));
    Assert.assertNull("expect /somewhere/test2 unannounced", curator.checkExists().forPath(testPath2));
}
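The behavior the test exercises (data reappearing after the external transactional delete) relies on Announcer watching its own znodes and re-creating them. A rough sketch of that create-and-rewatch idea using plain Curator and ZooKeeper APIs; this is not Druid's actual implementation, and the shutdown and race handling a real announcer needs is omitted:

import org.apache.curator.framework.CuratorFramework;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;

static void announce(CuratorFramework client, String path, byte[] payload) throws Exception {
    // Create an ephemeral node carrying the payload, making parents as needed.
    client.create()
          .creatingParentsIfNeeded()
          .withMode(CreateMode.EPHEMERAL)
          .forPath(path, payload);
    // Watch the node so an external delete triggers a re-announce.
    client.checkExists().usingWatcher((Watcher) event -> {
        if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
            try {
                announce(client, path, payload); // real code must guard against shutdown and races
            } catch (Exception ignored) {
                // sketch only; production code should log and retry
            }
        }
    }).forPath(path);
}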
Use of org.apache.curator.framework.CuratorFramework in project druid by druid-io.
The class AnnouncerTest, method awaitAnnounce.
private void awaitAnnounce(final Announcer announcer, final String path, final byte[] bytes, boolean removeParentsIfCreated) throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    curator.getCuratorListenable().addListener(new CuratorListener() {
        @Override
        public void eventReceived(CuratorFramework client, CuratorEvent event) throws Exception {
            if (event.getType() == CuratorEventType.CREATE && event.getPath().equals(path)) {
                latch.countDown();
            }
        }
    });
    announcer.announce(path, bytes, removeParentsIfCreated);
    latch.await();
}
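For reference, a call site in a test body might look like the following; the path and payload here are illustrative, not taken from the original test:

// Announce and block until the corresponding CREATE event is observed.
awaitAnnounce(announcer, "/somewhere/test3", "payload".getBytes(), false);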
Use of org.apache.curator.framework.CuratorFramework in project druid by druid-io.
The class DruidCoordinatorTest, method testCoordinatorRun.
@Test(timeout = 60_000L)
public void testCoordinatorRun() throws Exception {
    String dataSource = "dataSource1";
    String tier = "hot";
    // Setup MetadataRuleManager
    Rule foreverLoadRule = new ForeverLoadRule(ImmutableMap.of(tier, 2));
    EasyMock.expect(metadataRuleManager.getRulesWithDefault(EasyMock.anyString())).andReturn(ImmutableList.of(foreverLoadRule)).atLeastOnce();
    metadataRuleManager.stop();
    EasyMock.expectLastCall().once();
    EasyMock.replay(metadataRuleManager);
    // Setup MetadataSegmentManager
    DruidDataSource[] druidDataSources = { new DruidDataSource(dataSource, Collections.<String, String>emptyMap()) };
    final DataSegment dataSegment = new DataSegment(dataSource, new Interval("2010-01-01/P1D"), "v1", null, null, null, null, 0x9, 0);
    druidDataSources[0].addSegment("0", dataSegment);
    EasyMock.expect(databaseSegmentManager.isStarted()).andReturn(true).anyTimes();
    EasyMock.expect(databaseSegmentManager.getInventory()).andReturn(ImmutableList.of(druidDataSources[0])).atLeastOnce();
    EasyMock.replay(databaseSegmentManager);
    ImmutableDruidDataSource immutableDruidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
    EasyMock.expect(immutableDruidDataSource.getSegments()).andReturn(ImmutableSet.of(dataSegment)).atLeastOnce();
    EasyMock.replay(immutableDruidDataSource);
    // Setup ServerInventoryView
    druidServer = new DruidServer("server1", "localhost", 5L, "historical", tier, 0);
    loadManagementPeons.put("server1", loadQueuePeon);
    EasyMock.expect(serverInventoryView.getInventory()).andReturn(ImmutableList.of(druidServer)).atLeastOnce();
    EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
    EasyMock.replay(serverInventoryView);
    coordinator.start();
    // Wait for this coordinator to become leader
    leaderAnnouncerLatch.await();
    // This coordinator should be leader by now
    Assert.assertTrue(coordinator.isLeader());
    Assert.assertEquals(druidNode.getHostAndPort(), coordinator.getCurrentLeader());
    final CountDownLatch assignSegmentLatch = new CountDownLatch(1);
    pathChildrenCache.getListenable().addListener(new PathChildrenCacheListener() {
        @Override
        public void childEvent(CuratorFramework curatorFramework, PathChildrenCacheEvent pathChildrenCacheEvent) throws Exception {
            if (pathChildrenCacheEvent.getType().equals(PathChildrenCacheEvent.Type.CHILD_ADDED)) {
                // The coordinator should try to assign the segment to the historical druidServer;
                // simulate the historical loading the segment.
                druidServer.addDataSegment(dataSegment.getIdentifier(), dataSegment);
                assignSegmentLatch.countDown();
            }
        }
    });
    pathChildrenCache.start();
    assignSegmentLatch.await();
    Assert.assertEquals(ImmutableMap.of(dataSource, 100.0), coordinator.getLoadStatus());
    curator.delete().guaranteed().forPath(ZKPaths.makePath(LOADPATH, dataSegment.getIdentifier()));
    // Wait for the coordinator thread to run so that replication status is updated
    while (coordinator.getSegmentAvailability().snapshot().get(dataSource) != 0) {
        Thread.sleep(50);
    }
    Map segmentAvailability = coordinator.getSegmentAvailability().snapshot();
    Assert.assertEquals(1, segmentAvailability.size());
    Assert.assertEquals(0L, segmentAvailability.get(dataSource));
    while (coordinator.getLoadPendingDatasources().get(dataSource).get() > 0) {
        Thread.sleep(50);
    }
    // Wait for the historical's data to be updated
    long startMillis = System.currentTimeMillis();
    long coordinatorRunPeriodMillis = druidCoordinatorConfig.getCoordinatorPeriod().getMillis();
    while (System.currentTimeMillis() - startMillis < coordinatorRunPeriodMillis) {
        Thread.sleep(100);
    }
    Map<String, CountingMap<String>> replicationStatus = coordinator.getReplicationStatus();
    Assert.assertNotNull(replicationStatus);
    Assert.assertEquals(1, replicationStatus.entrySet().size());
    CountingMap<String> dataSourceMap = replicationStatus.get(tier);
    Assert.assertNotNull(dataSourceMap);
    Assert.assertEquals(1, dataSourceMap.size());
    Assert.assertNotNull(dataSourceMap.get(dataSource));
    // The load rule asks for 2 replicas, so 1 replica should still be pending
    while (dataSourceMap.get(dataSource).get() != 1L) {
        Thread.sleep(50);
    }
    coordinator.stop();
    leaderUnannouncerLatch.await();
    Assert.assertFalse(coordinator.isLeader());
    Assert.assertNull(coordinator.getCurrentLeader());
    EasyMock.verify(serverInventoryView);
    EasyMock.verify(metadataRuleManager);
}
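The latch-plus-PathChildrenCache pattern above is a common way to block a test until some ZooKeeper activity is observed. A minimal standalone sketch, assuming a started CuratorFramework named client; the /druid/loadQueue path is illustrative:

import java.util.concurrent.CountDownLatch;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;

static void awaitFirstChild(CuratorFramework client) throws Exception {
    final CountDownLatch seen = new CountDownLatch(1);
    PathChildrenCache cache = new PathChildrenCache(client, "/druid/loadQueue", true); // path illustrative
    cache.getListenable().addListener((c, event) -> {
        if (event.getType() == PathChildrenCacheEvent.Type.CHILD_ADDED) {
            seen.countDown(); // a child znode appeared under the watched path
        }
    });
    cache.start();
    try {
        seen.await(); // block until the listener fires
    } finally {
        cache.close();
    }
}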