Use of org.apache.druid.indexing.common.config.TaskConfig in project druid by druid-io.
From class TaskLifecycleTest, method testSimple:
@Test
public void testSimple() throws Exception {
  final Task task = new AbstractFixedIntervalTask("id1", "id1", new TaskResource("id1", 1), "ds", Intervals.of("2012-01-01/P1D"), null) {
    @Override
    public String getType() {
      return "test";
    }

    @Override
    public void stopGracefully(TaskConfig taskConfig) {
    }

    @Override
    public TaskStatus run(TaskToolbox toolbox) throws Exception {
      final Interval interval = Intervals.of("2012-01-01/P1D");
      final TimeChunkLockTryAcquireAction action = new TimeChunkLockTryAcquireAction(TaskLockType.EXCLUSIVE, interval);
      final TaskLock lock = toolbox.getTaskActionClient().submit(action);
      if (lock == null) {
        throw new ISE("Failed to get a lock");
      }
      final DataSegment segment = DataSegment.builder().dataSource("ds").interval(interval).version(lock.getVersion()).size(0).build();
      toolbox.getTaskActionClient().submit(new SegmentInsertAction(ImmutableSet.of(segment)));
      return TaskStatus.success(getId());
    }
  };

  final TaskStatus status = runTask(task);
  Assert.assertEquals(taskLocation, status.getLocation());
  Assert.assertEquals("statusCode", TaskState.SUCCESS, status.getStatusCode());
  Assert.assertEquals("segments published", 1, mdc.getPublished().size());
  Assert.assertEquals("segments nuked", 0, mdc.getNuked().size());
}
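The stopGracefully(TaskConfig) override above is intentionally a no-op, since this throwaway task has no state worth preserving. In a real task implementation, the TaskConfig passed in is typically consulted to decide whether restorable state should be persisted before stopping. A minimal sketch of that pattern, assuming TaskConfig.isRestoreTasksOnRestart() is available as in recent Druid versions; persistStateForRestore() is a hypothetical helper, not part of the Druid API:

@Override
public void stopGracefully(TaskConfig taskConfig) {
  // Only save restorable state when the service is configured to restore
  // tasks after a restart; otherwise a plain stop is sufficient.
  if (taskConfig.isRestoreTasksOnRestart()) {
    persistStateForRestore(); // hypothetical helper, not a Druid API call
  }
}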
Use of org.apache.druid.indexing.common.config.TaskConfig in project druid by druid-io.
From class ForkingTaskRunnerTest, method testTaskStatusWhenTaskProcessFails:
@Test
public void testTaskStatusWhenTaskProcessFails() throws ExecutionException, InterruptedException {
  ForkingTaskRunner forkingTaskRunner = new ForkingTaskRunner(
      new ForkingTaskRunnerConfig(),
      new TaskConfig(null, null, null, null, ImmutableList.of(), false, new Period("PT0S"), new Period("PT10S"), ImmutableList.of(), false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()),
      new WorkerConfig(),
      new Properties(),
      new NoopTaskLogs(),
      new DefaultObjectMapper(),
      new DruidNode("middleManager", "host", false, 8091, null, true, false),
      new StartupLoggingConfig()
  ) {
    @Override
    ProcessHolder runTaskProcess(List<String> command, File logFile, TaskLocation taskLocation) {
      ProcessHolder processHolder = Mockito.mock(ProcessHolder.class);
      Mockito.doNothing().when(processHolder).registerWithCloser(ArgumentMatchers.any());
      Mockito.doNothing().when(processHolder).shutdown();
      return processHolder;
    }

    @Override
    int waitForTaskProcessToComplete(Task task, ProcessHolder processHolder, File logFile, File reportsFile) {
      // Emulate task process failure
      return 1;
    }
  };

  final TaskStatus status = forkingTaskRunner.run(NoopTask.create()).get();
  Assert.assertEquals(TaskState.FAILED, status.getStatusCode());
  Assert.assertEquals("Task execution process exited unsuccessfully with code[1]. See middleManager logs for more details.", status.getErrorMsg());
}
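The TaskConfig handed to the runner above is built purely positionally, which makes the call hard to scan. The sketch below restates the same construction with one argument per line; the parameter names in the comments are inferred from the constructor order in this Druid branch and should be treated as assumptions rather than documented names:

// Same TaskConfig as above, restated with assumed parameter names for readability.
TaskConfig taskConfig = new TaskConfig(
    null,                                            // baseDir (assumed)
    null,                                            // baseTaskDir (assumed)
    null,                                            // hadoopWorkingPath (assumed)
    null,                                            // defaultRowFlushBoundary (assumed)
    ImmutableList.of(),                              // defaultHadoopCoordinates (assumed)
    false,                                           // restoreTasksOnRestart (assumed)
    new Period("PT0S"),                              // gracefulShutdownTimeout (assumed)
    new Period("PT10S"),                             // directoryLockTimeout (assumed)
    ImmutableList.of(),                              // shuffleDataLocations (assumed)
    false,                                           // ignoreTimestampSpecForDruidInputSource (assumed)
    false,                                           // batchMemoryMappedIndex (assumed)
    TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()  // batchProcessingMode
);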
Use of org.apache.druid.indexing.common.config.TaskConfig in project druid by druid-io.
From class KafkaIndexTaskTest, method makeToolboxFactory:
private void makeToolboxFactory() throws IOException {
  directory = tempFolder.newFolder();
  final TestUtils testUtils = new TestUtils();
  rowIngestionMetersFactory = testUtils.getRowIngestionMetersFactory();
  final ObjectMapper objectMapper = testUtils.getTestObjectMapper();
  for (Module module : new KafkaIndexTaskModule().getJacksonModules()) {
    objectMapper.registerModule(module);
  }
  objectMapper.registerModule(TEST_MODULE);
  final TaskConfig taskConfig = new TaskConfig(new File(directory, "baseDir").getPath(), new File(directory, "baseTaskDir").getPath(), null, 50000, null, true, null, null, null, false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name());
  final TestDerbyConnector derbyConnector = derby.getConnector();
  derbyConnector.createDataSourceTable();
  derbyConnector.createPendingSegmentsTable();
  derbyConnector.createSegmentTable();
  derbyConnector.createRulesTable();
  derbyConnector.createConfigTable();
  derbyConnector.createTaskTables();
  derbyConnector.createAuditTable();
  taskStorage = new MetadataTaskStorage(
      derbyConnector,
      new TaskStorageConfig(null),
      new DerbyMetadataStorageActionHandlerFactory(derbyConnector, derby.metadataTablesConfigSupplier().get(), objectMapper)
  );
  metadataStorageCoordinator = new IndexerSQLMetadataStorageCoordinator(testUtils.getTestObjectMapper(), derby.metadataTablesConfigSupplier().get(), derbyConnector);
  taskLockbox = new TaskLockbox(taskStorage, metadataStorageCoordinator);
  final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(taskLockbox, taskStorage, metadataStorageCoordinator, emitter, new SupervisorManager(null) {
    @Override
    public boolean checkPointDataSourceMetadata(String supervisorId, int taskGroupId, @Nullable DataSourceMetadata previousDataSourceMetadata) {
      log.info("Adding checkpoint hash to the set");
      checkpointRequestsHash.add(Objects.hash(supervisorId, taskGroupId, previousDataSourceMetadata));
      return true;
    }
  });
  final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(taskStorage, taskActionToolbox, new TaskAuditLogConfig(false));
  final SegmentHandoffNotifierFactory handoffNotifierFactory = dataSource -> new SegmentHandoffNotifier() {
    @Override
    public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable) {
      if (doHandoff) {
        // Simulate immediate handoff
        exec.execute(handOffRunnable);
      }
      return true;
    }

    @Override
    public void start() {
      // Noop
    }

    @Override
    public void close() {
      // Noop
    }
  };
  final LocalDataSegmentPusherConfig dataSegmentPusherConfig = new LocalDataSegmentPusherConfig();
  dataSegmentPusherConfig.storageDirectory = getSegmentDirectory();
  final DataSegmentPusher dataSegmentPusher = new LocalDataSegmentPusher(dataSegmentPusherConfig);
  toolboxFactory = new TaskToolboxFactory(
      taskConfig,
      null, // taskExecutorNode
      taskActionClientFactory,
      emitter,
      dataSegmentPusher,
      new TestDataSegmentKiller(),
      null, // DataSegmentMover
      null, // DataSegmentArchiver
      new TestDataSegmentAnnouncer(),
      EasyMock.createNiceMock(DataSegmentServerAnnouncer.class),
      handoffNotifierFactory,
      this::makeTimeseriesAndScanConglomerate,
      DirectQueryProcessingPool.INSTANCE,
      NoopJoinableFactory.INSTANCE,
      () -> EasyMock.createMock(MonitorScheduler.class),
      new SegmentCacheManagerFactory(testUtils.getTestObjectMapper()),
      testUtils.getTestObjectMapper(),
      testUtils.getTestIndexIO(),
      MapCache.create(1024),
      new CacheConfig(),
      new CachePopulatorStats(),
      testUtils.getTestIndexMergerV9(),
      EasyMock.createNiceMock(DruidNodeAnnouncer.class),
      EasyMock.createNiceMock(DruidNode.class),
      new LookupNodeService("tier"),
      new DataNodeService("tier", 1, ServerType.INDEXER_EXECUTOR, 0),
      new SingleFileTaskReportFileWriter(reportsFile),
      null,
      AuthTestUtils.TEST_AUTHORIZER_MAPPER,
      new NoopChatHandlerProvider(),
      testUtils.getRowIngestionMetersFactory(),
      new TestAppenderatorsManager(),
      new NoopIndexingServiceClient(),
      null,
      null,
      null
  );
}
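Once makeToolboxFactory() has run, the Kafka index task tests obtain a per-task toolbox from the factory and execute the task with it. A minimal sketch of that usage, assuming TaskToolboxFactory.build(Task) as in this Druid branch; taskExec stands in for whatever ListeningExecutorService the test uses and is an assumption, not copied from the test:

// Build a toolbox wired with the TaskConfig above and run the task asynchronously.
final TaskToolbox toolbox = toolboxFactory.build(task);
final ListenableFuture<TaskStatus> statusFuture = taskExec.submit(() -> task.run(toolbox)); // taskExec is assumed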
Use of org.apache.druid.indexing.common.config.TaskConfig in project druid by druid-io.
From class WorkerTaskManagerTest, method createWorkerTaskManager:
private WorkerTaskManager createWorkerTaskManager() {
  TaskConfig taskConfig = new TaskConfig(FileUtils.createTempDir().toString(), null, null, 0, null, false, null, null, null, false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name());
  TaskActionClientFactory taskActionClientFactory = EasyMock.createNiceMock(TaskActionClientFactory.class);
  TaskActionClient taskActionClient = EasyMock.createNiceMock(TaskActionClient.class);
  EasyMock.expect(taskActionClientFactory.create(EasyMock.anyObject())).andReturn(taskActionClient).anyTimes();
  SegmentHandoffNotifierFactory notifierFactory = EasyMock.createNiceMock(SegmentHandoffNotifierFactory.class);
  EasyMock.replay(taskActionClientFactory, taskActionClient, notifierFactory);
  return new WorkerTaskManager(
      jsonMapper,
      new TestTaskRunner(
          new TaskToolboxFactory(taskConfig, null, taskActionClientFactory, null, null, null, null, null, null, null, notifierFactory, null, null, NoopJoinableFactory.INSTANCE, null, new SegmentCacheManagerFactory(jsonMapper), jsonMapper, indexIO, null, null, null, indexMergerV9, null, null, null, null, new NoopTestTaskReportFileWriter(), null, AuthTestUtils.TEST_AUTHORIZER_MAPPER, new NoopChatHandlerProvider(), testUtils.getRowIngestionMetersFactory(), new TestAppenderatorsManager(), new NoopIndexingServiceClient(), null, null, null),
          taskConfig,
          location
      ),
      taskConfig,
      EasyMock.createNiceMock(DruidLeaderClient.class)
  ) {
    @Override
    protected void taskStarted(String taskId) {
    }

    @Override
    protected void taskAnnouncementChanged(TaskAnnouncement announcement) {
    }
  };
}
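Note that the same taskConfig instance is shared by the TaskToolboxFactory, the TestTaskRunner, and the WorkerTaskManager, so all three agree on the task base directories. In the test, the manager returned by this helper is started before use and stopped afterwards; a minimal sketch of that lifecycle, assuming WorkerTaskManager exposes start()/stop() lifecycle methods as in this Druid branch (the exception signature below is an assumption):

// Typical lifecycle around createWorkerTaskManager() (sketch).
void exerciseWorkerTaskManager() throws Exception {
  WorkerTaskManager workerTaskManager = createWorkerTaskManager();
  workerTaskManager.start(); // begins tracking assigned/running/completed tasks on disk
  // ... assign tasks and make assertions here ...
  workerTaskManager.stop();  // shuts down the embedded task runner and executors
}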
Use of org.apache.druid.indexing.common.config.TaskConfig in project druid by druid-io.
From class LocalIntermediaryDataManagerAutoCleanupTest, method setup:
@Before
public void setup() throws IOException {
  final WorkerConfig workerConfig = new WorkerConfig() {
    @Override
    public long getIntermediaryPartitionDiscoveryPeriodSec() {
      return 1;
    }

    @Override
    public long getIntermediaryPartitionCleanupPeriodSec() {
      return 2;
    }

    @Override
    public Period getIntermediaryPartitionTimeout() {
      return new Period("PT2S");
    }
  };
  final TaskConfig taskConfig = new TaskConfig(null, null, null, null, null, false, null, null, ImmutableList.of(new StorageLocationConfig(tempDir.newFolder(), null, null)), false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name());
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient() {
    @Override
    public Map<String, TaskStatus> getTaskStatuses(Set<String> taskIds) {
      final Map<String, TaskStatus> result = new HashMap<>();
      for (String taskId : taskIds) {
        result.put(taskId, new TaskStatus(taskId, TaskState.SUCCESS, 10));
      }
      return result;
    }
  };
  intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  intermediaryDataManager.start();
}
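Because setup() starts the manager's periodic partition-discovery and cleanup executors, the test pairs it with a teardown that stops them; a minimal sketch, assuming LocalIntermediaryDataManager exposes a stop() counterpart to start() as in this Druid branch:

@After
public void teardown() throws Exception {
  // Stop the discovery/cleanup threads started by intermediaryDataManager.start().
  intermediaryDataManager.stop();
}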