Use of org.apache.druid.discovery.DruidNodeDiscovery in project druid by druid-io.
The class K8sAnnouncerAndDiscoveryIntTest, method testAnnouncementAndDiscoveryWorkflow.
@Test(timeout = 30000L)
public void testAnnouncementAndDiscoveryWorkflow() throws Exception {
  K8sApiClient k8sApiClient = new DefaultK8sApiClient(Config.defaultClient(), new DefaultObjectMapper());
  K8sDruidNodeDiscoveryProvider discoveryProvider = new K8sDruidNodeDiscoveryProvider(podInfo, discoveryConfig, k8sApiClient);
  discoveryProvider.start();
  BooleanSupplier nodeInquirer = discoveryProvider.getForNode(testNode.getDruidNode(), NodeRole.ROUTER);
  Assert.assertFalse(nodeInquirer.getAsBoolean());
  DruidNodeDiscovery discovery = discoveryProvider.getForNodeRole(NodeRole.ROUTER);
  CountDownLatch nodeViewInitialized = new CountDownLatch(1);
  CountDownLatch nodeAppeared = new CountDownLatch(1);
  CountDownLatch nodeDisappeared = new CountDownLatch(1);
  discovery.registerListener(new DruidNodeDiscovery.Listener() {

    @Override
    public void nodesAdded(Collection<DiscoveryDruidNode> nodes) {
      Iterator<DiscoveryDruidNode> iter = nodes.iterator();
      if (iter.hasNext() && testNode.getDruidNode().getHostAndPort().equals(iter.next().getDruidNode().getHostAndPort())) {
        nodeAppeared.countDown();
      }
    }

    @Override
    public void nodesRemoved(Collection<DiscoveryDruidNode> nodes) {
      Iterator<DiscoveryDruidNode> iter = nodes.iterator();
      if (iter.hasNext() && testNode.getDruidNode().getHostAndPort().equals(iter.next().getDruidNode().getHostAndPort())) {
        nodeDisappeared.countDown();
      }
    }

    @Override
    public void nodeViewInitialized() {
      nodeViewInitialized.countDown();
    }
  });
  nodeViewInitialized.await();
  K8sDruidNodeAnnouncer announcer = new K8sDruidNodeAnnouncer(podInfo, discoveryConfig, k8sApiClient, jsonMapper);
  announcer.announce(testNode);
  nodeAppeared.await();
  Assert.assertTrue(nodeInquirer.getAsBoolean());
  announcer.unannounce(testNode);
  nodeDisappeared.await();
  Assert.assertFalse(nodeInquirer.getAsBoolean());
  discoveryProvider.stop();
}
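For reference, a minimal standalone sketch of the DruidNodeDiscovery.Listener contract that the test above exercises. The class name SnapshottingListener is hypothetical and not part of Druid; it simply keeps a live view of the nodes reported by the discovery callbacks.

// SnapshottingListener is a hypothetical name; it keeps a live snapshot of discovered nodes.
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.druid.discovery.DiscoveryDruidNode;
import org.apache.druid.discovery.DruidNodeDiscovery;

public class SnapshottingListener implements DruidNodeDiscovery.Listener {
  private final Map<String, DiscoveryDruidNode> nodesByHostAndPort = new ConcurrentHashMap<>();
  private volatile boolean initialized = false;

  @Override
  public void nodesAdded(Collection<DiscoveryDruidNode> nodes) {
    // Receives the current view before nodeViewInitialized() fires, then incremental additions afterwards.
    nodes.forEach(node -> nodesByHostAndPort.put(node.getDruidNode().getHostAndPort(), node));
  }

  @Override
  public void nodesRemoved(Collection<DiscoveryDruidNode> nodes) {
    nodes.forEach(node -> nodesByHostAndPort.remove(node.getDruidNode().getHostAndPort()));
  }

  @Override
  public void nodeViewInitialized() {
    initialized = true;
  }

  public boolean isInitialized() {
    return initialized;
  }

  public Collection<DiscoveryDruidNode> snapshot() {
    return nodesByHostAndPort.values();
  }
}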
Use of org.apache.druid.discovery.DruidNodeDiscovery in project druid by druid-io.
The class ITHighAvailabilityTest, method testDiscoveryAndSelfDiscovery.
@Test
public void testDiscoveryAndSelfDiscovery() {
  ITRetryUtil.retryUntil(() -> {
    try {
      List<DruidNodeDiscovery> disco = ImmutableList.of(
          druidNodeDiscovery.getForNodeRole(NodeRole.COORDINATOR),
          druidNodeDiscovery.getForNodeRole(NodeRole.OVERLORD),
          druidNodeDiscovery.getForNodeRole(NodeRole.HISTORICAL),
          druidNodeDiscovery.getForNodeRole(NodeRole.MIDDLE_MANAGER),
          druidNodeDiscovery.getForNodeRole(NodeRole.INDEXER),
          druidNodeDiscovery.getForNodeRole(NodeRole.BROKER),
          druidNodeDiscovery.getForNodeRole(NodeRole.ROUTER)
      );
      int servicesDiscovered = 0;
      for (DruidNodeDiscovery nodeRole : disco) {
        Collection<DiscoveryDruidNode> nodes = nodeRole.getAllNodes();
        servicesDiscovered += testSelfDiscovery(nodes);
      }
      return servicesDiscovered > 5;
    } catch (Throwable t) {
      return false;
    }
  }, true, RETRY_DELAY, NUM_RETRIES, "Standard services discovered");
}
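The enumeration pattern above can be condensed into a small helper. The sketch below is illustrative and not part of the test class; RoleNodeCounter and countDiscoveredNodes are hypothetical names.

import java.util.Arrays;
import java.util.List;
import org.apache.druid.discovery.DruidNodeDiscoveryProvider;
import org.apache.druid.discovery.NodeRole;

public class RoleNodeCounter {
  public static int countDiscoveredNodes(DruidNodeDiscoveryProvider provider) {
    List<NodeRole> roles = Arrays.asList(
        NodeRole.COORDINATOR,
        NodeRole.OVERLORD,
        NodeRole.HISTORICAL,
        NodeRole.MIDDLE_MANAGER,
        NodeRole.INDEXER,
        NodeRole.BROKER,
        NodeRole.ROUTER
    );
    int count = 0;
    for (NodeRole role : roles) {
      // getAllNodes() returns the currently known nodes for the role; it may be empty until the view initializes.
      count += provider.getForNodeRole(role).getAllNodes().size();
    }
    return count;
  }
}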
Use of org.apache.druid.discovery.DruidNodeDiscovery in project druid by druid-io.
The class HttpRemoteTaskRunnerTest, method testFreshStartAndStop.
/*
Simulates startup of Overlord. Overlord is then stopped and is expected to close down certain things.
*/
@Test(timeout = 60_000L)
public void testFreshStartAndStop() {
  TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
  DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
  EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY)).andReturn(druidNodeDiscovery).times(2);
  ProvisioningStrategy provisioningStrategy = EasyMock.createMock(ProvisioningStrategy.class);
  ProvisioningService provisioningService = EasyMock.createNiceMock(ProvisioningService.class);
  EasyMock.expect(provisioningStrategy.makeProvisioningService(isA(HttpRemoteTaskRunner.class))).andReturn(provisioningService);
  provisioningService.close();
  EasyMock.expectLastCall();
  EasyMock.replay(druidNodeDiscoveryProvider, provisioningStrategy, provisioningService);
  DiscoveryDruidNode druidNode1 = new DiscoveryDruidNode(
      new DruidNode("service", "host1", false, 8080, null, true, false),
      NodeRole.MIDDLE_MANAGER,
      ImmutableMap.of(WorkerNodeService.DISCOVERY_SERVICE_KEY, new WorkerNodeService("ip1", 2, "0", WorkerConfig.DEFAULT_CATEGORY))
  );
  DiscoveryDruidNode druidNode2 = new DiscoveryDruidNode(
      new DruidNode("service", "host2", false, 8080, null, true, false),
      NodeRole.MIDDLE_MANAGER,
      ImmutableMap.of(WorkerNodeService.DISCOVERY_SERVICE_KEY, new WorkerNodeService("ip2", 2, "0", WorkerConfig.DEFAULT_CATEGORY))
  );
  HttpRemoteTaskRunner taskRunner = newHttpTaskRunnerInstance(druidNodeDiscoveryProvider, provisioningStrategy);
  taskRunner.start();
  druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode1, druidNode2));
  ConcurrentMap<String, WorkerHolder> workers = taskRunner.getWorkersForTestingReadOnly();
  Assert.assertEquals(2, workers.size());
  Assert.assertTrue(workers.values().stream().noneMatch(w -> w.getUnderlyingSyncer().isExecutorShutdown()));
  workers.values().iterator().next().stop();
  taskRunner.stop();
  Assert.assertTrue(druidNodeDiscovery.getListeners().isEmpty());
  Assert.assertEquals(2, workers.size());
  Assert.assertTrue(workers.values().stream().allMatch(w -> w.getUnderlyingSyncer().isExecutorShutdown()));
  EasyMock.verify(druidNodeDiscoveryProvider, provisioningStrategy, provisioningService);
}
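TestDruidNodeDiscovery is a helper from Druid's test sources and is not shown here. The sketch below is a plausible minimal stub against the same DruidNodeDiscovery contract; the class name CapturingDruidNodeDiscovery is hypothetical, it only covers listener capture, and the real helper may differ (for example, in how listeners are deregistered when the task runner stops).

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.apache.druid.discovery.DiscoveryDruidNode;
import org.apache.druid.discovery.DruidNodeDiscovery;

public class CapturingDruidNodeDiscovery implements DruidNodeDiscovery {
  private final List<Listener> listeners = new ArrayList<>();

  @Override
  public Collection<DiscoveryDruidNode> getAllNodes() {
    return Collections.emptyList();
  }

  @Override
  public void registerListener(Listener listener) {
    // Capture the listener so a test can push nodesAdded()/nodesRemoved() events by hand,
    // as the test above does with druidNodeDiscovery.getListeners().get(0).nodesAdded(...).
    listeners.add(listener);
  }

  public List<Listener> getListeners() {
    return listeners;
  }
}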
Use of org.apache.druid.discovery.DruidNodeDiscovery in project druid by druid-io.
The class CommonCacheNotifier, method sendUpdate.
private List<ListenableFuture<StatusResponseHolder>> sendUpdate(String updatedAuthenticatorPrefix, byte[] serializedEntity) {
  List<ListenableFuture<StatusResponseHolder>> futures = new ArrayList<>();
  for (NodeRole nodeRole : NODE_TYPES) {
    DruidNodeDiscovery nodeDiscovery = discoveryProvider.getForNodeRole(nodeRole);
    Collection<DiscoveryDruidNode> nodes = nodeDiscovery.getAllNodes();
    for (DiscoveryDruidNode node : nodes) {
      URL listenerURL = getListenerURL(node.getDruidNode(), StringUtils.format(baseUrl, StringUtils.urlEncode(updatedAuthenticatorPrefix)));
      // best effort, if this fails, remote node will poll and pick up the update eventually
      Request req = new Request(HttpMethod.POST, listenerURL);
      req.setContent(MediaType.APPLICATION_JSON, serializedEntity);
      BasicAuthDBConfig itemConfig = itemConfigMap.get(updatedAuthenticatorPrefix);
      ListenableFuture<StatusResponseHolder> future = httpClient.go(req, new ResponseHandler(), Duration.millis(itemConfig.getCacheNotificationTimeout()));
      futures.add(future);
    }
  }
  return futures;
}
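The returned futures are best-effort notifications, but a caller may still want to drain them. The method below is an illustrative sketch, not part of CommonCacheNotifier; waitForUpdates and timeoutMillis are hypothetical names, and it assumes the same ListenableFuture/StatusResponseHolder types (plus java.util.concurrent.TimeUnit) already used by sendUpdate.

private void waitForUpdates(List<ListenableFuture<StatusResponseHolder>> futures, long timeoutMillis) {
  for (ListenableFuture<StatusResponseHolder> future : futures) {
    try {
      // Bound the wait; a slow or failed notification is not fatal because the remote node
      // will poll and pick up the update eventually (see the comment in sendUpdate above).
      future.get(timeoutMillis, TimeUnit.MILLISECONDS);
    }
    catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return;
    }
    catch (Exception e) {
      // Best effort: ignore and continue with the remaining nodes.
    }
  }
}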
Use of org.apache.druid.discovery.DruidNodeDiscovery in project druid by druid-io.
The class HttpRemoteTaskRunner, method startWorkersHandling.
private void startWorkersHandling() throws InterruptedException {
  final CountDownLatch workerViewInitialized = new CountDownLatch(1);
  DruidNodeDiscovery druidNodeDiscovery = druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY);
  this.nodeDiscoveryListener = new DruidNodeDiscovery.Listener() {

    @Override
    public void nodesAdded(Collection<DiscoveryDruidNode> nodes) {
      nodes.forEach(node -> addWorker(toWorker(node)));
    }

    @Override
    public void nodesRemoved(Collection<DiscoveryDruidNode> nodes) {
      nodes.forEach(node -> removeWorker(toWorker(node)));
    }

    @Override
    public void nodeViewInitialized() {
      // CountDownLatch.countDown() does nothing when count has already reached 0.
      workerViewInitialized.countDown();
    }
  };
  druidNodeDiscovery.registerListener(nodeDiscoveryListener);
  long workerDiscoveryStartTime = System.currentTimeMillis();
  while (!workerViewInitialized.await(30, TimeUnit.SECONDS)) {
    if (System.currentTimeMillis() - workerDiscoveryStartTime > TimeUnit.MINUTES.toMillis(5)) {
      throw new ISE("Couldn't discover workers.");
    } else {
      log.info("Waiting for worker discovery...");
    }
  }
  log.info("[%s] Workers are discovered.", workers.size());
  // We would start assigning tasks which are pretty soon going to be reported by discovered workers.
  for (WorkerHolder worker : workers.values()) {
    log.info("Waiting for worker[%s] to sync state...", worker.getWorker().getHost());
    worker.waitForInitialization();
  }
  log.info("Workers have sync'd state successfully.");
}
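The wait loop above follows a common pattern: re-check a latch on a short interval so progress can be logged, while still enforcing a hard overall deadline. Below is a generic sketch of that pattern, independent of HttpRemoteTaskRunner; LatchWaits and awaitLatchWithDeadline are hypothetical names.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public final class LatchWaits {
  private LatchWaits() {}

  public static void awaitLatchWithDeadline(CountDownLatch latch, long logIntervalSec, long deadlineMin) throws InterruptedException {
    long start = System.currentTimeMillis();
    // Re-check the latch every logIntervalSec seconds so a caller can log progress, but give up
    // entirely once the hard deadline has passed.
    while (!latch.await(logIntervalSec, TimeUnit.SECONDS)) {
      if (System.currentTimeMillis() - start > TimeUnit.MINUTES.toMillis(deadlineMin)) {
        throw new IllegalStateException("Deadline exceeded while waiting on latch.");
      }
    }
  }
}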