Use of org.apache.druid.discovery.DruidNodeDiscoveryProvider in project druid by druid-io.
From the class SystemSchemaTest, method setUp:
@Before
public void setUp() throws Exception {
  serverView = EasyMock.createNiceMock(TimelineServerView.class);
  client = EasyMock.createMock(DruidLeaderClient.class);
  coordinatorClient = EasyMock.createMock(DruidLeaderClient.class);
  overlordClient = EasyMock.createMock(DruidLeaderClient.class);
  mapper = TestHelper.makeJsonMapper();
  responseHolder = EasyMock.createMock(StringFullResponseHolder.class);
  responseHandler = EasyMock
      .createMockBuilder(BytesAccumulatingResponseHandler.class)
      .withConstructor()
      .addMockedMethod("handleResponse", HttpResponse.class, HttpResponseHandler.TrafficCop.class)
      .addMockedMethod("getStatus")
      .createMock();
  request = EasyMock.createMock(Request.class);
  authMapper = createAuthMapper();

  final File tmpDir = temporaryFolder.newFolder();
  final QueryableIndex index1 = IndexBuilder
      .create()
      .tmpDir(new File(tmpDir, "1"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(
          new IncrementalIndexSchema.Builder()
              .withMetrics(
                  new CountAggregatorFactory("cnt"),
                  new DoubleSumAggregatorFactory("m1", "m1"),
                  new HyperUniquesAggregatorFactory("unique_dim1", "dim1")
              )
              .withRollup(false)
              .build()
      )
      .rows(ROWS1)
      .buildMMappedIndex();
  final QueryableIndex index2 = IndexBuilder
      .create()
      .tmpDir(new File(tmpDir, "2"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(
          new IncrementalIndexSchema.Builder()
              .withMetrics(new LongSumAggregatorFactory("m1", "m1"))
              .withRollup(false)
              .build()
      )
      .rows(ROWS2)
      .buildMMappedIndex();
  final QueryableIndex index3 = IndexBuilder
      .create()
      .tmpDir(new File(tmpDir, "3"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(
          new IncrementalIndexSchema.Builder()
              .withMetrics(new LongSumAggregatorFactory("m1", "m1"))
              .withRollup(false)
              .build()
      )
      .rows(ROWS3)
      .buildMMappedIndex();

  walker = new SpecificSegmentsQuerySegmentWalker(conglomerate)
      .add(segment1, index1)
      .add(segment2, index2)
      .add(segment3, index3);

  druidSchema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      new TestServerInventoryView(walker.getSegments(), realtimeSegments),
      new SegmentManager(EasyMock.createMock(SegmentLoader.class)),
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  );
  druidSchema.start();
  druidSchema.awaitInitialization();

  metadataView = EasyMock.createMock(MetadataSegmentView.class);
  druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
  serverInventoryView = EasyMock.createMock(FilteredServerInventoryView.class);

  schema = new SystemSchema(
      druidSchema,
      metadataView,
      serverView,
      serverInventoryView,
      EasyMock.createStrictMock(AuthorizerMapper.class),
      client,
      client,
      druidNodeDiscoveryProvider,
      mapper
  );
}
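In this setUp the DruidNodeDiscoveryProvider is only created as an EasyMock mock; no expectations are recorded yet, so individual tests have to stub the lookup before querying the server-related system tables. A minimal, hypothetical sketch of such a stub, assuming a broker-only discovery view and reusing the listener pattern shown in TieredBrokerHostSelectorTest further down this page:

// Hypothetical stub (not part of the original test): return an empty broker
// discovery view whenever the mocked provider is asked for broker nodes.
DruidNodeDiscovery brokerDiscovery = new DruidNodeDiscovery() {
  @Override
  public Collection<DiscoveryDruidNode> getAllNodes() {
    return ImmutableSet.of();
  }

  @Override
  public void registerListener(Listener listener) {
    // No nodes to announce; just mark the view as initialized.
    listener.nodeViewInitialized();
  }
};
EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.BROKER))
        .andReturn(brokerDiscovery)
        .anyTimes();
EasyMock.replay(druidNodeDiscoveryProvider);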
Use of org.apache.druid.discovery.DruidNodeDiscoveryProvider in project druid by druid-io.
From the class TieredBrokerHostSelectorTest, method setUp:
@Before
public void setUp() {
  druidNodeDiscoveryProvider = EasyMock.createStrictMock(DruidNodeDiscoveryProvider.class);

  node1 = new DiscoveryDruidNode(
      new DruidNode("hotBroker", "hotHost", false, 8080, null, true, false),
      NodeRole.BROKER,
      ImmutableMap.of()
  );
  node2 = new DiscoveryDruidNode(
      new DruidNode("coldBroker", "coldHost1", false, 8080, null, true, false),
      NodeRole.BROKER,
      ImmutableMap.of()
  );
  node3 = new DiscoveryDruidNode(
      new DruidNode("coldBroker", "coldHost2", false, 8080, null, true, false),
      NodeRole.BROKER,
      ImmutableMap.of()
  );

  druidNodeDiscovery = new DruidNodeDiscovery() {
    @Override
    public Collection<DiscoveryDruidNode> getAllNodes() {
      return ImmutableSet.of(node1, node2, node3);
    }

    @Override
    public void registerListener(Listener listener) {
      listener.nodesAdded(ImmutableList.of(node1, node2, node3));
      listener.nodeViewInitialized();
    }
  };

  EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.BROKER)).andReturn(druidNodeDiscovery);
  EasyMock.replay(druidNodeDiscoveryProvider);

  brokerSelector = new TieredBrokerHostSelector(
      new TestRuleManager(null, null),
      new TieredBrokerConfig() {
        @Override
        public LinkedHashMap<String, String> getTierToBrokerMap() {
          return new LinkedHashMap<String, String>(
              ImmutableMap.of(
                  "hot", "hotBroker",
                  "medium", "mediumBroker",
                  DruidServer.DEFAULT_TIER, "coldBroker"
              )
          );
        }

        @Override
        public String getDefaultBrokerServiceName() {
          return "hotBroker";
        }
      },
      druidNodeDiscoveryProvider,
      Arrays.asList(
          new ManualTieredBrokerSelectorStrategy(null),
          new TimeBoundaryTieredBrokerSelectorStrategy(),
          new PriorityTieredBrokerSelectorStrategy(0, 1)
      )
  );
  brokerSelector.start();
}
Use of org.apache.druid.discovery.DruidNodeDiscoveryProvider in project druid by druid-io.
From the class HttpRemoteTaskRunnerTest, method testFreshStartAndStopNoProvisioner:
/*
Simulates startup of the Overlord with no provisioner. The Overlord is then stopped and is expected to close down
certain things.
*/
@Test(timeout = 60_000L)
public void testFreshStartAndStopNoProvisioner() {
  TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
  ProvisioningStrategy provisioningStrategy = EasyMock.createMock(ProvisioningStrategy.class);
  DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
  EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY))
          .andReturn(druidNodeDiscovery)
          .times(2);
  EasyMock.expect(provisioningStrategy.makeProvisioningService(isA(HttpRemoteTaskRunner.class))).andReturn(null);
  EasyMock.expectLastCall();
  EasyMock.replay(druidNodeDiscoveryProvider, provisioningStrategy);

  HttpRemoteTaskRunner taskRunner = new HttpRemoteTaskRunner(
      TestHelper.makeJsonMapper(),
      new HttpRemoteTaskRunnerConfig() {
        @Override
        public int getPendingTasksRunnerNumThreads() {
          return 3;
        }
      },
      EasyMock.createNiceMock(HttpClient.class),
      DSuppliers.of(new AtomicReference<>(DefaultWorkerBehaviorConfig.defaultConfig())),
      provisioningStrategy,
      druidNodeDiscoveryProvider,
      EasyMock.createNiceMock(TaskStorage.class),
      EasyMock.createNiceMock(CuratorFramework.class),
      new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null)
  ) {
    @Override
    protected WorkerHolder createWorkerHolder(
        ObjectMapper smileMapper,
        HttpClient httpClient,
        HttpRemoteTaskRunnerConfig config,
        ScheduledExecutorService workersSyncExec,
        WorkerHolder.Listener listener,
        Worker worker,
        List<TaskAnnouncement> knownAnnouncements
    ) {
      return HttpRemoteTaskRunnerTest.createWorkerHolder(
          smileMapper,
          httpClient,
          config,
          workersSyncExec,
          listener,
          worker,
          ImmutableList.of(),
          ImmutableList.of(),
          ImmutableMap.of(),
          new AtomicInteger(),
          ImmutableSet.of()
      );
    }
  };

  taskRunner.start();
  taskRunner.stop();

  EasyMock.verify(druidNodeDiscoveryProvider, provisioningStrategy);
}
Use of org.apache.druid.discovery.DruidNodeDiscoveryProvider in project druid by druid-io.
From the class HttpRemoteTaskRunnerTest, method testTimeoutInAssigningTasks:
@Test
public void testTimeoutInAssigningTasks() throws Exception {
  TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
  DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
  EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY))
          .andReturn(druidNodeDiscovery);
  EasyMock.replay(druidNodeDiscoveryProvider);

  HttpRemoteTaskRunner taskRunner = new HttpRemoteTaskRunner(
      TestHelper.makeJsonMapper(),
      new HttpRemoteTaskRunnerConfig() {
        @Override
        public int getPendingTasksRunnerNumThreads() {
          return 1;
        }

        @Override
        public Period getTaskAssignmentTimeout() {
          return new Period("PT1S");
        }
      },
      EasyMock.createNiceMock(HttpClient.class),
      DSuppliers.of(new AtomicReference<>(DefaultWorkerBehaviorConfig.defaultConfig())),
      new NoopProvisioningStrategy<>(),
      druidNodeDiscoveryProvider,
      EasyMock.createNiceMock(TaskStorage.class),
      EasyMock.createNiceMock(CuratorFramework.class),
      new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null)
  ) {
    @Override
    protected WorkerHolder createWorkerHolder(
        ObjectMapper smileMapper,
        HttpClient httpClient,
        HttpRemoteTaskRunnerConfig config,
        ScheduledExecutorService workersSyncExec,
        WorkerHolder.Listener listener,
        Worker worker,
        List<TaskAnnouncement> knownAnnouncements
    ) {
      return new WorkerHolder(smileMapper, httpClient, config, workersSyncExec, listener, worker, ImmutableList.of()) {
        @Override
        public void start() {
          disabled.set(false);
        }

        @Override
        public void stop() {
        }

        @Override
        public boolean isInitialized() {
          return true;
        }

        @Override
        public void waitForInitialization() {
        }

        @Override
        public boolean assignTask(Task task) {
          // Always accepts the assignment but never reports the task as started,
          // so the PT1S assignment timeout fires and the task fails.
          return true;
        }

        @Override
        public void shutdownTask(String taskId) {
        }
      };
    }
  };

  taskRunner.start();

  DiscoveryDruidNode druidNode1 = new DiscoveryDruidNode(
      new DruidNode("service", "host1", false, 8080, null, true, false),
      NodeRole.MIDDLE_MANAGER,
      ImmutableMap.of(
          WorkerNodeService.DISCOVERY_SERVICE_KEY,
          new WorkerNodeService("ip1", 2, "0", WorkerConfig.DEFAULT_CATEGORY)
      )
  );
  druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode1));

  Future<TaskStatus> future = taskRunner.run(NoopTask.create("task-id", 0));
  Assert.assertTrue(future.get().isFailure());
  Assert.assertNotNull(future.get().getErrorMsg());
  Assert.assertTrue(
      future.get().getErrorMsg().startsWith("The worker that this task is assigned did not start it in timeout")
  );
}
Use of org.apache.druid.discovery.DruidNodeDiscoveryProvider in project druid by druid-io.
From the class HttpRemoteTaskRunnerTest, method testTaskRunnerRestart:
/*
Simulates a restart of the Overlord where the taskRunner, on start, discovers workers with preexisting tasks.
*/
@Test(timeout = 60_000L)
public void testTaskRunnerRestart() throws Exception {
  TestDruidNodeDiscovery druidNodeDiscovery = new TestDruidNodeDiscovery();
  DruidNodeDiscoveryProvider druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
  EasyMock.expect(druidNodeDiscoveryProvider.getForService(WorkerNodeService.DISCOVERY_SERVICE_KEY))
          .andReturn(druidNodeDiscovery);
  EasyMock.replay(druidNodeDiscoveryProvider);

  ConcurrentMap<String, CustomFunction> workerHolders = new ConcurrentHashMap<>();

  Task task1 = NoopTask.create("task-id-1", 0);
  Task task2 = NoopTask.create("task-id-2", 0);
  Task task3 = NoopTask.create("task-id-3", 0);
  Task task4 = NoopTask.create("task-id-4", 0);
  Task task5 = NoopTask.create("task-id-5", 0);

  TaskStorage taskStorageMock = EasyMock.createStrictMock(TaskStorage.class);
  EasyMock.expect(taskStorageMock.getStatus(task1.getId())).andReturn(Optional.absent());
  EasyMock.expect(taskStorageMock.getStatus(task2.getId())).andReturn(Optional.absent()).times(2);
  EasyMock.expect(taskStorageMock.getStatus(task3.getId())).andReturn(Optional.of(TaskStatus.running(task3.getId())));
  EasyMock.expect(taskStorageMock.getStatus(task4.getId())).andReturn(Optional.of(TaskStatus.running(task4.getId())));
  EasyMock.expect(taskStorageMock.getStatus(task5.getId())).andReturn(Optional.of(TaskStatus.success(task5.getId())));
  EasyMock.replay(taskStorageMock);

  HttpRemoteTaskRunner taskRunner = new HttpRemoteTaskRunner(
      TestHelper.makeJsonMapper(),
      new HttpRemoteTaskRunnerConfig() {
        @Override
        public int getPendingTasksRunnerNumThreads() {
          return 3;
        }
      },
      EasyMock.createNiceMock(HttpClient.class),
      DSuppliers.of(new AtomicReference<>(DefaultWorkerBehaviorConfig.defaultConfig())),
      new NoopProvisioningStrategy<>(),
      druidNodeDiscoveryProvider,
      taskStorageMock,
      EasyMock.createNiceMock(CuratorFramework.class),
      new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null)
  ) {
    @Override
    protected WorkerHolder createWorkerHolder(
        ObjectMapper smileMapper,
        HttpClient httpClient,
        HttpRemoteTaskRunnerConfig config,
        ScheduledExecutorService workersSyncExec,
        WorkerHolder.Listener listener,
        Worker worker,
        List<TaskAnnouncement> knownAnnouncements
    ) {
      if (workerHolders.containsKey(worker.getHost())) {
        return workerHolders.get(worker.getHost())
                            .apply(smileMapper, httpClient, config, workersSyncExec, listener, worker, knownAnnouncements);
      } else {
        throw new ISE("No WorkerHolder for [%s].", worker.getHost());
      }
    }
  };

  taskRunner.start();

  DiscoveryDruidNode druidNode = new DiscoveryDruidNode(
      new DruidNode("service", "host", false, 1234, null, true, false),
      NodeRole.MIDDLE_MANAGER,
      ImmutableMap.of(
          WorkerNodeService.DISCOVERY_SERVICE_KEY,
          new WorkerNodeService("ip1", 2, "0", WorkerConfig.DEFAULT_CATEGORY)
      )
  );

  AtomicInteger ticks = new AtomicInteger();
  Set<String> taskShutdowns = new HashSet<>();

  workerHolders.put(
      "host:1234",
      (mapper, httpClient, config, exec, listener, worker, knownAnnouncements) -> createWorkerHolder(
          mapper,
          httpClient,
          config,
          exec,
          listener,
          worker,
          knownAnnouncements,
          ImmutableList.of(
              TaskAnnouncement.create(task1, TaskStatus.success(task1.getId()), TaskLocation.create("host", 1234, 1235)),
              TaskAnnouncement.create(task2, TaskStatus.running(task2.getId()), TaskLocation.create("host", 1234, 1235)),
              TaskAnnouncement.create(task2, TaskStatus.success(task2.getId()), TaskLocation.create("host", 1234, 1235)),
              TaskAnnouncement.create(task3, TaskStatus.success(task3.getId()), TaskLocation.create("host", 1234, 1235)),
              TaskAnnouncement.create(task4, TaskStatus.running(task4.getId()), TaskLocation.create("host", 1234, 1235)),
              TaskAnnouncement.create(task5, TaskStatus.running(task5.getId()), TaskLocation.create("host", 1234, 1235))
          ),
          ImmutableMap.of(),
          ticks,
          taskShutdowns
      )
  );

  druidNodeDiscovery.getListeners().get(0).nodesAdded(ImmutableList.of(druidNode));

  while (ticks.get() < 1) {
    Thread.sleep(100);
  }

  EasyMock.verify(taskStorageMock);
  Assert.assertEquals(ImmutableSet.of(task2.getId(), task5.getId()), taskShutdowns);
  Assert.assertTrue(taskRunner.getPendingTasks().isEmpty());

  TaskRunnerWorkItem item = Iterables.getOnlyElement(taskRunner.getRunningTasks());
  Assert.assertEquals(task4.getId(), item.getTaskId());

  Assert.assertTrue(taskRunner.run(task3).get().isSuccess());
  Assert.assertEquals(2, taskRunner.getKnownTasks().size());
}