Use of org.apache.druid.server.initialization.IndexerZkConfig in project druid by druid-io.
From the class WorkerTaskMonitorTest, method setUp:
@Before
public void setUp() throws Exception {
testingCluster = new TestingCluster(1);
testingCluster.start();
cf = CuratorFrameworkFactory.builder()
    .connectString(testingCluster.getConnectString())
    .retryPolicy(new ExponentialBackoffRetry(1, 10))
    .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
    .build();
cf.start();
cf.blockUntilConnected();
cf.create().creatingParentsIfNeeded().forPath(BASE_PATH);
worker = new Worker("http", "worker", "localhost", 3, "0", WorkerConfig.DEFAULT_CATEGORY);
// Only the base ZK path is overridden; the remaining IndexerZkConfig paths are left null so defaults apply.
workerCuratorCoordinator = new WorkerCuratorCoordinator(jsonMapper, new IndexerZkConfig(new ZkPathsConfig() {
@Override
public String getBase() {
return BASE_PATH;
}
}, null, null, null, null), new TestRemoteTaskRunnerConfig(new Period("PT1S")), cf, worker);
workerCuratorCoordinator.start();
// Start a task monitor
workerTaskMonitor = createTaskMonitor();
TestTasks.registerSubtypes(jsonMapper);
jsonMapper.registerSubtypes(new NamedType(TestRealtimeTask.class, "test_realtime"));
workerTaskMonitor.start();
task = TestTasks.immediateSuccess("test");
}
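In the setup above, IndexerZkConfig is built from an anonymous ZkPathsConfig that overrides only the base path, with nulls for the remaining arguments. A minimal sketch (not taken from the Druid tests, and assuming the four trailing constructor arguments are the base, announcements, tasks, and status path overrides, where null falls back to defaults derived from ZkPathsConfig) of passing explicit overrides:
// Hedged sketch; the parameter-order reading and the "/druid/test" paths are assumptions for illustration.
IndexerZkConfig indexerZkConfig = new IndexerZkConfig(
    new ZkPathsConfig() {
      @Override
      public String getBase() {
        return "/druid/test"; // hypothetical base path
      }
    },
    null,                          // base override: null keeps the default
    "/druid/test/announcements",   // hypothetical announcements path override
    null,                          // tasks path: default
    null                           // status path: default
);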
Use of org.apache.druid.server.initialization.IndexerZkConfig in project druid by apache.
From the class WorkerResourceTest, method setUp:
@Before
public void setUp() throws Exception {
testingCluster = new TestingCluster(1);
testingCluster.start();
cf = CuratorFrameworkFactory.builder()
    .connectString(testingCluster.getConnectString())
    .retryPolicy(new ExponentialBackoffRetry(1, 10))
    .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
    .build();
cf.start();
cf.blockUntilConnected();
cf.create().creatingParentsIfNeeded().forPath(BASE_PATH);
worker = new Worker("http", "host", "ip", 3, "v1", WorkerConfig.DEFAULT_CATEGORY);
curatorCoordinator = new WorkerCuratorCoordinator(JSON_MAPPER, new IndexerZkConfig(new ZkPathsConfig() {
@Override
public String getBase() {
return BASE_PATH;
}
}, null, null, null, null), new RemoteTaskRunnerConfig(), cf, worker);
curatorCoordinator.start();
workerResource = new WorkerResource(
    worker,
    () -> curatorCoordinator,
    null,
    EasyMock.createNiceMock(WorkerTaskMonitor.class),
    ZkEnablementConfig.ENABLED
);
}
Use of org.apache.druid.server.initialization.IndexerZkConfig in project druid by apache.
From the class RemoteTaskRunnerTestUtils, method makeRemoteTaskRunner:
public RemoteTaskRunner makeRemoteTaskRunner(RemoteTaskRunnerConfig config, ProvisioningStrategy<WorkerTaskRunner> provisioningStrategy) {
RemoteTaskRunner remoteTaskRunner = new TestableRemoteTaskRunner(jsonMapper, config, new IndexerZkConfig(new ZkPathsConfig() {
@Override
public String getBase() {
return BASE_PATH;
}
}, null, null, null, null),
    cf,
    new PathChildrenCacheFactory.Builder(),
    null,
    DSuppliers.of(new AtomicReference<>(DefaultWorkerBehaviorConfig.defaultConfig())),
    provisioningStrategy);
remoteTaskRunner.start();
return remoteTaskRunner;
}
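A hedged usage sketch for the helper above; the variable name rtrTestUtils and the particular config and strategy pairing are illustrative, not taken from a specific Druid test (TestRemoteTaskRunnerConfig and NoopProvisioningStrategy both appear elsewhere on this page):
// Hypothetical call site for makeRemoteTaskRunner.
RemoteTaskRunner runner = rtrTestUtils.makeRemoteTaskRunner(
    new TestRemoteTaskRunnerConfig(new Period("PT1S")),
    new NoopProvisioningStrategy<>()
);
try {
  // ... exercise the runner ...
} finally {
  runner.stop();
}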
Use of org.apache.druid.server.initialization.IndexerZkConfig in project druid by apache.
From the class HttpRemoteTaskRunnerTest, method testMarkWorkersLazy:
@Test(timeout = 60_000L)
public void testMarkWorkersLazy() throws Exception {
TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY)).andReturn(druidNodeDiscovery);
EasyMock.replay(druidNodeDiscoveryProvider);
Task task1 = NoopTask.create("task-id-1", 0);
Task task2 = NoopTask.create("task-id-2", 0);
String additionalWorkerCategory = "category2";
ConcurrentMap<String, CustomFunction> workerHolders = new ConcurrentHashMap<>();
HttpRemoteTaskRunner taskRunner = new HttpRemoteTaskRunner(
    TestHelper.makeJsonMapper(),
    new HttpRemoteTaskRunnerConfig() {
      @Override
      public int getPendingTasksRunnerNumThreads() {
        return 3;
      }
    },
    EasyMock.createNiceMock(HttpClient.class),
    DSuppliers.of(new AtomicReference<>(DefaultWorkerBehaviorConfig.defaultConfig())),
    new NoopProvisioningStrategy<>(),
    druidNodeDiscoveryProvider,
    EasyMock.createNiceMock(TaskStorage.class),
    EasyMock.createNiceMock(CuratorFramework.class),
    new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null)
) {
@Override
protected WorkerHolder createWorkerHolder(
    ObjectMapper smileMapper,
    HttpClient httpClient,
    HttpRemoteTaskRunnerConfig config,
    ScheduledExecutorService workersSyncExec,
    WorkerHolder.Listener listener,
    Worker worker,
    List<TaskAnnouncement> knownAnnouncements
) {
if (workerHolders.containsKey(worker.getHost())) {
return workerHolders.get(worker.getHost()).apply(smileMapper, httpClient, config, workersSyncExec, listener, worker, knownAnnouncements);
} else {
throw new ISE("No WorkerHolder for [%s].", worker.getHost());
}
}
};
taskRunner.start();
Assert.assertTrue(taskRunner.getTotalTaskSlotCount().isEmpty());
Assert.assertTrue(taskRunner.getIdleTaskSlotCount().isEmpty());
Assert.assertTrue(taskRunner.getUsedTaskSlotCount().isEmpty());
AtomicInteger ticks = new AtomicInteger();
DiscoveryDruidNode druidNode1 = new DiscoveryDruidNode(
    new DruidNode("service", "host1", false, 8080, null, true, false),
    NodeRole.MIDDLE_MANAGER,
    ImmutableMap.of(WorkerNodeService.DISCOVERY_SERVICE_KEY, new WorkerNodeService("ip1", 1, "0", WorkerConfig.DEFAULT_CATEGORY))
);
workerHolders.put(
    "host1:8080",
    (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(
        mapper, httpClient, config, exec, listener, worker, knownAnnouncements,
        ImmutableList.of(),
        ImmutableMap.of(task1, ImmutableList.of(
            TaskAnnouncement.create(task1, TaskStatus.running(task1.getId()), TaskLocation.unknown()),
            TaskAnnouncement.create(task1, TaskStatus.running(task1.getId()), TaskLocation.create("host1", 8080, -1))
        )),
        ticks,
        ImmutableSet.of()
    )
);
druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode1));
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(1, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
taskRunner.run(task1);
while (ticks.get() < 1) {
Thread.sleep(100);
}
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(0, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
DiscoveryDruidNode druidNode2 = new DiscoveryDruidNode(
    new DruidNode("service", "host2", false, 8080, null, true, false),
    NodeRole.MIDDLE_MANAGER,
    ImmutableMap.of(WorkerNodeService.DISCOVERY_SERVICE_KEY, new WorkerNodeService("ip2", 1, "0", additionalWorkerCategory))
);
workerHolders.put(
    "host2:8080",
    (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(
        mapper, httpClient, config, exec, listener, worker, knownAnnouncements,
        ImmutableList.of(),
        ImmutableMap.of(task2, ImmutableList.of()),
        ticks,
        ImmutableSet.of()
    )
);
druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode2));
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(additionalWorkerCategory).longValue());
Assert.assertEquals(0, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(1, taskRunner.getIdleTaskSlotCount().get(additionalWorkerCategory).longValue());
Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(additionalWorkerCategory).longValue());
taskRunner.run(task2);
while (ticks.get() < 2) {
Thread.sleep(100);
}
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(additionalWorkerCategory).longValue());
Assert.assertEquals(0, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertFalse(taskRunner.getIdleTaskSlotCount().containsKey(additionalWorkerCategory));
Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(additionalWorkerCategory).longValue());
DiscoveryDruidNode druidNode3 = new DiscoveryDruidNode(
    new DruidNode("service", "host3", false, 8080, null, true, false),
    NodeRole.MIDDLE_MANAGER,
    ImmutableMap.of(WorkerNodeService.DISCOVERY_SERVICE_KEY, new WorkerNodeService("ip2", 1, "0", WorkerConfig.DEFAULT_CATEGORY))
);
workerHolders.put(
    "host3:8080",
    (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(
        mapper, httpClient, config, exec, listener, worker, knownAnnouncements,
        ImmutableList.of(),
        ImmutableMap.of(),
        new AtomicInteger(),
        ImmutableSet.of()
    )
);
druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode3));
Assert.assertEquals(2, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(additionalWorkerCategory).longValue());
Assert.assertEquals(1, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertFalse(taskRunner.getIdleTaskSlotCount().containsKey(additionalWorkerCategory));
Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(additionalWorkerCategory).longValue());
Assert.assertFalse(taskRunner.getLazyTaskSlotCount().containsKey(WorkerConfig.DEFAULT_CATEGORY));
Assert.assertFalse(taskRunner.getLazyTaskSlotCount().containsKey(additionalWorkerCategory));
Assert.assertEquals(task1.getId(), Iterables.getOnlyElement(taskRunner.getRunningTasks()).getTaskId());
Assert.assertEquals(task2.getId(), Iterables.getOnlyElement(taskRunner.getPendingTasks()).getTaskId());
Assert.assertEquals("host3:8080", Iterables.getOnlyElement(taskRunner.markWorkersLazy(Predicates.alwaysTrue(), Integer.MAX_VALUE)).getHost());
Assert.assertEquals(2, taskRunner.getTotalTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(1, taskRunner.getTotalTaskSlotCount().get(additionalWorkerCategory).longValue());
Assert.assertEquals(0, taskRunner.getIdleTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertFalse(taskRunner.getIdleTaskSlotCount().containsKey(additionalWorkerCategory));
Assert.assertEquals(1, taskRunner.getUsedTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertEquals(0, taskRunner.getUsedTaskSlotCount().get(additionalWorkerCategory).longValue());
Assert.assertEquals(1, taskRunner.getLazyTaskSlotCount().get(WorkerConfig.DEFAULT_CATEGORY).longValue());
Assert.assertFalse(taskRunner.getLazyTaskSlotCount().containsKey(additionalWorkerCategory));
}
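The test above waits on the ticks counter with open-coded sleep loops. A hedged, generic alternative (not part of the Druid test) is a small await helper; the name awaitCondition and the 100 ms poll interval are illustrative choices:
// Hypothetical helper: polls a condition until it holds or a timeout elapses.
private static void awaitCondition(java.util.function.BooleanSupplier condition, long timeoutMillis) throws InterruptedException {
  final long deadline = System.currentTimeMillis() + timeoutMillis;
  while (!condition.getAsBoolean()) {
    if (System.currentTimeMillis() > deadline) {
      throw new IllegalStateException("timed out waiting for condition");
    }
    Thread.sleep(100);
  }
}
// Illustrative use in place of the loops above: awaitCondition(() -> ticks.get() >= 1, 30_000);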
Use of org.apache.druid.server.initialization.IndexerZkConfig in project druid by apache.
From the class HttpRemoteTaskRunnerTest, method testFreshStartAndStopNoProvisioner:
/*
Simulates startup of the Overlord with no provisioner. The Overlord is then stopped and is expected to close down
certain things.
*/
@Test(timeout = 60_000L)
public void testFreshStartAndStopNoProvisioner() {
TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
ProvisioningStrategy provisioningStrategy = EasyMock.createMock(ProvisioningStrategy.class);
DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY)).andReturn(druidNodeDiscovery).times(2);
EasyMock.expect(provisioningStrategy.makeProvisioningService(isA(HttpRemoteTaskRunner.class))).andReturn(null);
EasyMock.expectLastCall();
EasyMock.replay(druidNodeDiscoveryProvider, provisioningStrategy);
HttpRemoteTaskRunner taskRunner = new HttpRemoteTaskRunner(
    TestHelper.makeJsonMapper(),
    new HttpRemoteTaskRunnerConfig() {
      @Override
      public int getPendingTasksRunnerNumThreads() {
        return 3;
      }
    },
    EasyMock.createNiceMock(HttpClient.class),
    DSuppliers.of(new AtomicReference<>(DefaultWorkerBehaviorConfig.defaultConfig())),
    provisioningStrategy,
    druidNodeDiscoveryProvider,
    EasyMock.createNiceMock(TaskStorage.class),
    EasyMock.createNiceMock(CuratorFramework.class),
    new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null)
) {
@Override
protected WorkerHolder createWorkerHolder(
    ObjectMapper smileMapper,
    HttpClient httpClient,
    HttpRemoteTaskRunnerConfig config,
    ScheduledExecutorService workersSyncExec,
    WorkerHolder.Listener listener,
    Worker worker,
    List<TaskAnnouncement> knownAnnouncements
) {
return HttpRemoteTaskRunnerTest.createWorkerHolder(
    smileMapper, httpClient, config, workersSyncExec, listener, worker,
    ImmutableList.of(), ImmutableList.of(), ImmutableMap.of(), new AtomicInteger(), ImmutableSet.of()
);
}
};
taskRunner.start();
taskRunner.stop();
EasyMock.verify(druidNodeDiscoveryProvider, provisioningStrategy);
}