Use of org.apache.druid.server.DruidNode in project druid by druid-io.
From the class AppenderatorDriverRealtimeIndexTaskTest, method makeToolboxFactory:
private void makeToolboxFactory(final File directory) {
  taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
  publishedSegments = new CopyOnWriteArrayList<>();
  ObjectMapper mapper = new DefaultObjectMapper();
  mapper.registerSubtypes(LinearShardSpec.class);
  mapper.registerSubtypes(NumberedShardSpec.class);
  IndexerSQLMetadataStorageCoordinator mdc = new IndexerSQLMetadataStorageCoordinator(
      mapper,
      derbyConnectorRule.metadataTablesConfigSupplier().get(),
      derbyConnectorRule.getConnector()
  ) {
    @Override
    public Set<DataSegment> announceHistoricalSegments(Set<DataSegment> segments) throws IOException {
      Set<DataSegment> result = super.announceHistoricalSegments(segments);
      Assert.assertFalse("Segment latch not initialized, did you forget to call expectPublishSegments?", segmentLatch == null);
      publishedSegments.addAll(result);
      segments.forEach(s -> segmentLatch.countDown());
      return result;
    }

    @Override
    public SegmentPublishResult announceHistoricalSegments(
        Set<DataSegment> segments,
        Set<DataSegment> segmentsToDrop,
        DataSourceMetadata startMetadata,
        DataSourceMetadata endMetadata
    ) throws IOException {
      SegmentPublishResult result = super.announceHistoricalSegments(segments, segmentsToDrop, startMetadata, endMetadata);
      Assert.assertFalse("Segment latch not initialized, did you forget to call expectPublishSegments?", segmentLatch == null);
      publishedSegments.addAll(result.getSegments());
      result.getSegments().forEach(s -> segmentLatch.countDown());
      return result;
    }
  };
  taskLockbox = new TaskLockbox(taskStorage, mdc);
  final TaskConfig taskConfig = new TaskConfig(
      directory.getPath(), null, null, 50000, null, true, null, null, null, false, false,
      TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(
      taskLockbox, taskStorage, mdc, EMITTER, EasyMock.createMock(SupervisorManager.class)
  );
  final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(
      taskStorage, taskActionToolbox, new TaskAuditLogConfig(false)
  );
  final QueryRunnerFactoryConglomerate conglomerate = new DefaultQueryRunnerFactoryConglomerate(
      ImmutableMap.of(
          TimeseriesQuery.class,
          new TimeseriesQueryRunnerFactory(
              new TimeseriesQueryQueryToolChest(),
              new TimeseriesQueryEngine(),
              (query, future) -> {
                // do nothing
              }
          )
      )
  );
  handOffCallbacks = new ConcurrentHashMap<>();
  final SegmentHandoffNotifierFactory handoffNotifierFactory = dataSource -> new SegmentHandoffNotifier() {
    @Override
    public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable) {
      handOffCallbacks.put(descriptor, new Pair<>(exec, handOffRunnable));
      handoffLatch.countDown();
      return true;
    }

    @Override
    public void start() {
      // Noop
    }

    @Override
    public void close() {
      // Noop
    }
  };
  final TestUtils testUtils = new TestUtils();
  taskToolboxFactory = new TaskToolboxFactory(
      taskConfig,
      new DruidNode("druid/middlemanager", "localhost", false, 8091, null, true, false),
      taskActionClientFactory,
      EMITTER,
      new TestDataSegmentPusher(),
      new TestDataSegmentKiller(),
      // DataSegmentMover
      null,
      // DataSegmentArchiver
      null,
      new TestDataSegmentAnnouncer(),
      EasyMock.createNiceMock(DataSegmentServerAnnouncer.class),
      handoffNotifierFactory,
      () -> conglomerate,
      // queryExecutorService
      DirectQueryProcessingPool.INSTANCE,
      NoopJoinableFactory.INSTANCE,
      () -> EasyMock.createMock(MonitorScheduler.class),
      new SegmentCacheManagerFactory(testUtils.getTestObjectMapper()),
      testUtils.getTestObjectMapper(),
      testUtils.getTestIndexIO(),
      MapCache.create(1024),
      new CacheConfig(),
      new CachePopulatorStats(),
      testUtils.getTestIndexMergerV9(),
      EasyMock.createNiceMock(DruidNodeAnnouncer.class),
      EasyMock.createNiceMock(DruidNode.class),
      new LookupNodeService("tier"),
      new DataNodeService("tier", 1000, ServerType.INDEXER_EXECUTOR, 0),
      new SingleFileTaskReportFileWriter(reportsFile),
      null,
      AuthTestUtils.TEST_AUTHORIZER_MAPPER,
      new NoopChatHandlerProvider(),
      testUtils.getRowIngestionMetersFactory(),
      new TestAppenderatorsManager(),
      new NoopIndexingServiceClient(),
      null,
      null,
      null
  );
}
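The two announceHistoricalSegments overrides exist to synchronize the test with segment publishing: every published segment is recorded in publishedSegments and counts down segmentLatch, and the assertion guards against running a task before the latch is armed. A minimal sketch of the handshake a test would perform, assuming an expectPublishSegments helper (the name comes from the assertion message; the rest is illustrative):

// Hypothetical usage of the latch handshake shown above:
int expectedSegmentCount = 1; // however many segments the task should publish
segmentLatch = new CountDownLatch(expectedSegmentCount); // e.g. inside expectPublishSegments(n)
// ... run the task; it eventually calls announceHistoricalSegments(...) ...
segmentLatch.await(); // unblocks once every expected segment has been published
Assert.assertEquals(expectedSegmentCount, publishedSegments.size());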
Use of org.apache.druid.server.DruidNode in project druid by druid-io.
From the class TaskLifecycleTest, method setUpTaskToolboxFactory:
private TaskToolboxFactory setUpTaskToolboxFactory(
    DataSegmentPusher dataSegmentPusher,
    SegmentHandoffNotifierFactory handoffNotifierFactory,
    TestIndexerMetadataStorageCoordinator mdc,
    AppenderatorsManager appenderatorsManager
) throws IOException {
  Preconditions.checkNotNull(queryRunnerFactoryConglomerate);
  Preconditions.checkNotNull(monitorScheduler);
  Preconditions.checkNotNull(taskStorage);
  Preconditions.checkNotNull(emitter);
  taskLockbox = new TaskLockbox(taskStorage, mdc);
  tac = new LocalTaskActionClientFactory(
      taskStorage,
      new TaskActionToolbox(taskLockbox, taskStorage, mdc, emitter, EasyMock.createMock(SupervisorManager.class)),
      new TaskAuditLogConfig(true)
  );
  File tmpDir = temporaryFolder.newFolder();
  taskConfig = new TaskConfig(
      tmpDir.toString(), null, null, 50000, null, false, null, null, null, false, false,
      TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  return new TaskToolboxFactory(
      taskConfig,
      new DruidNode("druid/middlemanager", "localhost", false, 8091, null, true, false),
      tac,
      emitter,
      dataSegmentPusher,
      new LocalDataSegmentKiller(new LocalDataSegmentPusherConfig()),
      new DataSegmentMover() {
        @Override
        public DataSegment move(DataSegment dataSegment, Map<String, Object> targetLoadSpec) {
          return dataSegment;
        }
      },
      new DataSegmentArchiver() {
        @Override
        public DataSegment archive(DataSegment segment) {
          return segment;
        }

        @Override
        public DataSegment restore(DataSegment segment) {
          return segment;
        }
      },
      new DataSegmentAnnouncer() {
        @Override
        public void announceSegment(DataSegment segment) {
          announcedSinks++;
        }

        @Override
        public void unannounceSegment(DataSegment segment) {
        }

        @Override
        public void announceSegments(Iterable<DataSegment> segments) {
        }

        @Override
        public void unannounceSegments(Iterable<DataSegment> segments) {
        }
      }, // segment announcer
      EasyMock.createNiceMock(DataSegmentServerAnnouncer.class),
      handoffNotifierFactory,
      // query runner factory conglomerate corporation unionized collective
      () -> queryRunnerFactoryConglomerate,
      // query executor service
      DirectQueryProcessingPool.INSTANCE,
      NoopJoinableFactory.INSTANCE,
      // monitor scheduler
      () -> monitorScheduler,
      new SegmentCacheManagerFactory(new DefaultObjectMapper()),
      MAPPER,
      INDEX_IO,
      MapCache.create(0),
      FireDepartmentTest.NO_CACHE_CONFIG,
      new CachePopulatorStats(),
      INDEX_MERGER_V9,
      EasyMock.createNiceMock(DruidNodeAnnouncer.class),
      EasyMock.createNiceMock(DruidNode.class),
      new LookupNodeService("tier"),
      new DataNodeService("tier", 1000, ServerType.INDEXER_EXECUTOR, 0),
      new NoopTestTaskReportFileWriter(),
      null,
      AuthTestUtils.TEST_AUTHORIZER_MAPPER,
      new NoopChatHandlerProvider(),
      TEST_UTILS.getRowIngestionMetersFactory(),
      appenderatorsManager,
      new NoopIndexingServiceClient(),
      null,
      null,
      null
  );
}
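Both toolbox factories construct the same DruidNode for the middleManager service. For readability, here is that call again with each argument labeled; the labels are my reading of the DruidNode constructor (serviceName, host, bindOnHost, plaintextPort, tlsPort, enablePlaintextPort, enableTlsPort) and should be checked against the Druid version in use:

new DruidNode(
    "druid/middlemanager", // serviceName
    "localhost",           // host
    false,                 // bindOnHost
    8091,                  // plaintextPort
    null,                  // tlsPort (TLS is not exercised in these tests)
    true,                  // enablePlaintextPort
    false                  // enableTlsPort
);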
Use of org.apache.druid.server.DruidNode in project druid by druid-io.
From the class ThreadingTaskRunnerTest, method testTaskStatusWhenTaskThrowsExceptionWhileRunning:
@Test
public void testTaskStatusWhenTaskThrowsExceptionWhileRunning() throws ExecutionException, InterruptedException {
  ThreadingTaskRunner runner = new ThreadingTaskRunner(
      mockTaskToolboxFactory(),
      new TaskConfig(
          null, null, null, null, ImmutableList.of(), false, new Period("PT0S"), new Period("PT10S"),
          ImmutableList.of(), false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
      ),
      new WorkerConfig(),
      new NoopTaskLogs(),
      new DefaultObjectMapper(),
      new TestAppenderatorsManager(),
      new MultipleFileTaskReportFileWriter(),
      new DruidNode("middleManager", "host", false, 8091, null, true, false)
  );
  Future<TaskStatus> statusFuture = runner.run(new AbstractTask("id", "datasource", null) {
    @Override
    public String getType() {
      return "test";
    }

    @Override
    public boolean isReady(TaskActionClient taskActionClient) {
      return true;
    }

    @Override
    public void stopGracefully(TaskConfig taskConfig) {
    }

    @Override
    public TaskStatus run(TaskToolbox toolbox) {
      throw new RuntimeException("Task failure test");
    }
  });
  TaskStatus status = statusFuture.get();
  Assert.assertEquals(TaskState.FAILED, status.getStatusCode());
  Assert.assertEquals("Failed with an exception. See indexer logs for more details.", status.getErrorMsg());
}
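Note what the assertions pin down: the RuntimeException thrown from run() is not propagated through statusFuture.get(); ThreadingTaskRunner converts it into a FAILED status with a fixed, generic message, so the original exception text is only available in the task logs. Restated as a sketch:

// The future completes normally even though run() threw:
TaskStatus status = statusFuture.get(); // no ExecutionException
Assert.assertEquals(TaskState.FAILED, status.getStatusCode());
// The thrown message ("Task failure test") is not surfaced here:
Assert.assertNotEquals("Task failure test", status.getErrorMsg());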
Use of org.apache.druid.server.DruidNode in project druid by druid-io.
From the class ForkingTaskRunnerTest, method testTaskStatusWhenTaskProcessSucceedsTaskFails:
@Test
public void testTaskStatusWhenTaskProcessSucceedsTaskFails() throws ExecutionException, InterruptedException {
  ObjectMapper mapper = new DefaultObjectMapper();
  Task task = NoopTask.create();
  ForkingTaskRunner forkingTaskRunner = new ForkingTaskRunner(
      new ForkingTaskRunnerConfig(),
      new TaskConfig(
          null, null, null, null, ImmutableList.of(), false, new Period("PT0S"), new Period("PT10S"),
          ImmutableList.of(), false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
      ),
      new WorkerConfig(),
      new Properties(),
      new NoopTaskLogs(),
      mapper,
      new DruidNode("middleManager", "host", false, 8091, null, true, false),
      new StartupLoggingConfig()
  ) {
    @Override
    ProcessHolder runTaskProcess(List<String> command, File logFile, TaskLocation taskLocation) throws IOException {
      ProcessHolder processHolder = Mockito.mock(ProcessHolder.class);
      Mockito.doNothing().when(processHolder).registerWithCloser(ArgumentMatchers.any());
      Mockito.doNothing().when(processHolder).shutdown();
      // Simulate the forked task process writing its final status file before exiting.
      for (String param : command) {
        if (param.endsWith("status.json")) {
          mapper.writeValue(new File(param), TaskStatus.failure(task.getId(), "task failure test"));
          break;
        }
      }
      return processHolder;
    }

    @Override
    int waitForTaskProcessToComplete(Task task, ProcessHolder processHolder, File logFile, File reportsFile) {
      return 0;
    }
  };
  final TaskStatus status = forkingTaskRunner.run(task).get();
  Assert.assertEquals(TaskState.FAILED, status.getStatusCode());
  Assert.assertEquals("task failure test", status.getErrorMsg());
}
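The runTaskProcess override stands in for the forked peon: it writes a TaskStatus to whichever command-line argument ends in status.json, and ForkingTaskRunner then reads that file back as the task's final status. Since the file is plain Jackson output, it round-trips; a hedged sketch with an illustrative path:

// Hypothetical round trip of the status file (path is illustrative only):
File statusFile = new File("/tmp/task/status.json");
mapper.writeValue(statusFile, TaskStatus.failure("task-id", "task failure test"));
TaskStatus parsed = mapper.readValue(statusFile, TaskStatus.class);
Assert.assertEquals(TaskState.FAILED, parsed.getStatusCode());
Assert.assertEquals("task failure test", parsed.getErrorMsg());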
Use of org.apache.druid.server.DruidNode in project druid by druid-io.
From the class ForkingTaskRunnerTest, method testTaskStatusWhenTaskProcessSucceedsTaskSucceeds:
@Test
public void testTaskStatusWhenTaskProcessSucceedsTaskSucceeds() throws ExecutionException, InterruptedException {
  ObjectMapper mapper = new DefaultObjectMapper();
  Task task = NoopTask.create();
  ForkingTaskRunner forkingTaskRunner = new ForkingTaskRunner(
      new ForkingTaskRunnerConfig(),
      new TaskConfig(
          null, null, null, null, ImmutableList.of(), false, new Period("PT0S"), new Period("PT10S"),
          ImmutableList.of(), false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
      ),
      new WorkerConfig(),
      new Properties(),
      new NoopTaskLogs(),
      mapper,
      new DruidNode("middleManager", "host", false, 8091, null, true, false),
      new StartupLoggingConfig()
  ) {
    @Override
    ProcessHolder runTaskProcess(List<String> command, File logFile, TaskLocation taskLocation) throws IOException {
      ProcessHolder processHolder = Mockito.mock(ProcessHolder.class);
      Mockito.doNothing().when(processHolder).registerWithCloser(ArgumentMatchers.any());
      Mockito.doNothing().when(processHolder).shutdown();
      // Simulate the forked task process writing a success status file before exiting.
      for (String param : command) {
        if (param.endsWith("status.json")) {
          mapper.writeValue(new File(param), TaskStatus.success(task.getId()));
          break;
        }
      }
      return processHolder;
    }

    @Override
    int waitForTaskProcessToComplete(Task task, ProcessHolder processHolder, File logFile, File reportsFile) {
      return 0;
    }
  };
  final TaskStatus status = forkingTaskRunner.run(task).get();
  Assert.assertEquals(TaskState.SUCCESS, status.getStatusCode());
  Assert.assertNull(status.getErrorMsg());
}
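The only differences from the preceding test are the status the fake process writes and the matching assertions: TaskStatus.success(id) carries a SUCCESS state with a null error message, while TaskStatus.failure(id, msg) carries FAILED plus the message. In short:

// Contrast of the two factory methods exercised by this pair of tests:
TaskStatus ok = TaskStatus.success("some-task-id");
Assert.assertEquals(TaskState.SUCCESS, ok.getStatusCode());
Assert.assertNull(ok.getErrorMsg()); // success carries no error message
TaskStatus failed = TaskStatus.failure("some-task-id", "task failure test");
Assert.assertEquals(TaskState.FAILED, failed.getStatusCode());
Assert.assertEquals("task failure test", failed.getErrorMsg());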