use of org.apache.druid.indexing.common.actions.TaskActionClientFactory in project druid by druid-io.
the class TaskQueueTest method testManageInternalReleaseLockWhenTaskIsNotReady.
/**
 * This test verifies that all locks of a task are released when the task is not ready to run yet.
 *
 * It combines two sets of APIs, the {@link TaskQueue} APIs and the {@link IngestionTestBase} APIs,
 * to emulate a deadlock scenario. IngestionTestBase provides low-level APIs with which you can
 * manipulate the {@link TaskLockbox} manually. These APIs should be used only to emulate a
 * particular deadlock scenario; all normal tasks should go through the TaskQueue APIs.
 */
@Test
public void testManageInternalReleaseLockWhenTaskIsNotReady() throws Exception {
final TaskActionClientFactory actionClientFactory = createActionClientFactory();
final TaskQueue taskQueue = new TaskQueue(
    new TaskLockConfig(),
    new TaskQueueConfig(null, null, null, null),
    new DefaultTaskConfig(),
    getTaskStorage(),
    new SimpleTaskRunner(actionClientFactory),
    actionClientFactory,
    getLockbox(),
    new NoopServiceEmitter()
);
taskQueue.setActive(true);
// task1 emulates a task that was issued before task2 and acquired locks conflicting
// with task2's interval.
final TestTask task1 = new TestTask("t1", Intervals.of("2021-01/P1M"));
// Acquire locks for task1 manually; task2 cannot become ready while task1 holds them.
prepareTaskForLocking(task1);
Assert.assertTrue(task1.isReady(actionClientFactory.create(task1)));
final TestTask task2 = new TestTask("t2", Intervals.of("2021-01-31/P1M"));
taskQueue.add(task2);
taskQueue.manageInternal();
Assert.assertFalse(task2.isDone());
Assert.assertTrue(getLockbox().findLocksForTask(task2).isEmpty());
// task3 can run because task2, which would otherwise conflict with it, is still blocked
// by task1 and holds no locks.
final TestTask task3 = new TestTask("t3", Intervals.of("2021-02-01/P1M"));
taskQueue.add(task3);
taskQueue.manageInternal();
Assert.assertFalse(task2.isDone());
Assert.assertTrue(task3.isDone());
Assert.assertTrue(getLockbox().findLocksForTask(task2).isEmpty());
// Shut down task1 and task3 and release their locks.
shutdownTask(task1);
taskQueue.shutdown(task3.getId(), "Emulating shutdown of task3");
// Now task2 should run.
taskQueue.manageInternal();
Assert.assertTrue(task2.isDone());
}
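
The deadlock emulation hinges on the IngestionTestBase helpers used above. As a rough sketch of what they boil down to (assuming the lockbox exposes add/remove and the storage exposes insert, as their usage here suggests; exact signatures may differ across Druid versions):

private void prepareTaskForLocking(Task task) throws EntryExistsException {
  // Register the task with the lockbox and storage directly, bypassing the TaskQueue,
  // so it can grab locks without ever being scheduled to run.
  getLockbox().add(task);
  getTaskStorage().insert(task, TaskStatus.running(task.getId()));
}

private void shutdownTask(Task task) {
  // Drop the task from the lockbox, releasing every lock it holds.
  getLockbox().remove(task);
}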
use of org.apache.druid.indexing.common.actions.TaskActionClientFactory in project druid by druid-io.
the class TaskQueueTest method testUserProvidedTaskContextOverrideDefaultLineageBasedSegmentAllocation.
@Test
public void testUserProvidedTaskContextOverrideDefaultLineageBasedSegmentAllocation() throws EntryExistsException {
final TaskActionClientFactory actionClientFactory = createActionClientFactory();
final TaskQueue taskQueue = new TaskQueue(
    new TaskLockConfig(),
    new TaskQueueConfig(null, null, null, null),
    new DefaultTaskConfig(),
    getTaskStorage(),
    new SimpleTaskRunner(actionClientFactory),
    actionClientFactory,
    getLockbox(),
    new NoopServiceEmitter()
);
taskQueue.setActive(true);
final Task task = new TestTask(
    "t1",
    Intervals.of("2021-01-01/P1D"),
    ImmutableMap.of(SinglePhaseParallelIndexTaskRunner.CTX_USE_LINEAGE_BASED_SEGMENT_ALLOCATION_KEY, false)
);
taskQueue.add(task);
final List<Task> tasks = taskQueue.getTasks();
Assert.assertEquals(1, tasks.size());
final Task queuedTask = tasks.get(0);
Assert.assertFalse(queuedTask.getContextValue(SinglePhaseParallelIndexTaskRunner.CTX_USE_LINEAGE_BASED_SEGMENT_ALLOCATION_KEY));
}
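
The override works because default context values are applied "if absent": anything the user already set is left untouched. A minimal, self-contained sketch of that semantics (ContextMergeSketch and mergeContext are illustrative names, not Druid's actual API):

import java.util.HashMap;
import java.util.Map;

final class ContextMergeSketch {
  // User-provided keys win; defaults only fill the gaps.
  static Map<String, Object> mergeContext(Map<String, Object> userContext, Map<String, Object> defaults) {
    final Map<String, Object> merged = new HashMap<>(userContext);
    defaults.forEach(merged::putIfAbsent);
    return merged;
  }
}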
use of org.apache.druid.indexing.common.actions.TaskActionClientFactory in project druid by druid-io.
the class TaskQueueTest method testLockConfigTakePrecedenceThanDefaultTaskContext.
@Test
public void testLockConfigTakePrecedenceThanDefaultTaskContext() throws EntryExistsException {
final TaskActionClientFactory actionClientFactory = createActionClientFactory();
final TaskQueue taskQueue = new TaskQueue(
    new TaskLockConfig(),
    new TaskQueueConfig(null, null, null, null),
    new DefaultTaskConfig() {
      @Override
      public Map<String, Object> getContext() {
        return ImmutableMap.of(Tasks.FORCE_TIME_CHUNK_LOCK_KEY, false);
      }
    },
    getTaskStorage(),
    new SimpleTaskRunner(actionClientFactory),
    actionClientFactory,
    getLockbox(),
    new NoopServiceEmitter()
);
taskQueue.setActive(true);
final Task task = new TestTask("t1", Intervals.of("2021-01-01/P1D"));
taskQueue.add(task);
final List<Task> tasks = taskQueue.getTasks();
Assert.assertEquals(1, tasks.size());
final Task queuedTask = tasks.get(0);
Assert.assertTrue(queuedTask.getContextValue(Tasks.FORCE_TIME_CHUNK_LOCK_KEY));
}
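
The precedence comes from the order in which "if absent" writes are applied when a task is added: the explicit task context is never overwritten, the lock config fills the force-time-chunk-lock key next, and only then do default-context entries fill whatever is still missing. A hedged sketch of that ordering (names and structure illustrative, not a copy of TaskQueue internals):

final Map<String, Object> context = new HashMap<>(task.getContext());
// The lock config is applied before the default context, so the default's
// forceTimeChunkLock=false can no longer displace it.
context.putIfAbsent(Tasks.FORCE_TIME_CHUNK_LOCK_KEY, lockConfig.isForceTimeChunkLock());
defaultTaskConfig.getContext().forEach(context::putIfAbsent);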
use of org.apache.druid.indexing.common.actions.TaskActionClientFactory in project druid by druid-io.
the class TaskQueueTest method testSetUseLineageBasedSegmentAllocationByDefault.
@Test
public void testSetUseLineageBasedSegmentAllocationByDefault() throws EntryExistsException {
final TaskActionClientFactory actionClientFactory = createActionClientFactory();
final TaskQueue taskQueue = new TaskQueue(
    new TaskLockConfig(),
    new TaskQueueConfig(null, null, null, null),
    new DefaultTaskConfig(),
    getTaskStorage(),
    new SimpleTaskRunner(actionClientFactory),
    actionClientFactory,
    getLockbox(),
    new NoopServiceEmitter()
);
taskQueue.setActive(true);
final Task task = new TestTask("t1", Intervals.of("2021-01-01/P1D"));
taskQueue.add(task);
final List<Task> tasks = taskQueue.getTasks();
Assert.assertEquals(1, tasks.size());
final Task queuedTask = tasks.get(0);
Assert.assertTrue(queuedTask.getContextValue(SinglePhaseParallelIndexTaskRunner.CTX_USE_LINEAGE_BASED_SEGMENT_ALLOCATION_KEY));
}
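
Together with the previous context test, this pins down both directions of the defaulting behavior. Reusing the illustrative ContextMergeSketch helper from above (the string key stands in for CTX_USE_LINEAGE_BASED_SEGMENT_ALLOCATION_KEY):

final Map<String, Object> defaults = ImmutableMap.of("useLineageBasedSegmentAllocation", true);
// No user value: the default applies -> {useLineageBasedSegmentAllocation=true}
System.out.println(ContextMergeSketch.mergeContext(ImmutableMap.of(), defaults));
// User supplied false: the user value wins -> {useLineageBasedSegmentAllocation=false}
System.out.println(ContextMergeSketch.mergeContext(
    ImmutableMap.of("useLineageBasedSegmentAllocation", false), defaults));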
use of org.apache.druid.indexing.common.actions.TaskActionClientFactory in project druid by druid-io.
the class KafkaIndexTaskTest method makeToolboxFactory.
private void makeToolboxFactory() throws IOException {
directory = tempFolder.newFolder();
final TestUtils testUtils = new TestUtils();
rowIngestionMetersFactory = testUtils.getRowIngestionMetersFactory();
final ObjectMapper objectMapper = testUtils.getTestObjectMapper();
for (Module module : new KafkaIndexTaskModule().getJacksonModules()) {
objectMapper.registerModule(module);
}
objectMapper.registerModule(TEST_MODULE);
final TaskConfig taskConfig = new TaskConfig(
    new File(directory, "baseDir").getPath(),
    new File(directory, "baseTaskDir").getPath(),
    null,
    50000,
    null,
    true,
    null,
    null,
    null,
    false,
    false,
    TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
);
final TestDerbyConnector derbyConnector = derby.getConnector();
derbyConnector.createDataSourceTable();
derbyConnector.createPendingSegmentsTable();
derbyConnector.createSegmentTable();
derbyConnector.createRulesTable();
derbyConnector.createConfigTable();
derbyConnector.createTaskTables();
derbyConnector.createAuditTable();
taskStorage = new MetadataTaskStorage(
    derbyConnector,
    new TaskStorageConfig(null),
    new DerbyMetadataStorageActionHandlerFactory(derbyConnector, derby.metadataTablesConfigSupplier().get(), objectMapper)
);
metadataStorageCoordinator = new IndexerSQLMetadataStorageCoordinator(
    testUtils.getTestObjectMapper(),
    derby.metadataTablesConfigSupplier().get(),
    derbyConnector
);
taskLockbox = new TaskLockbox(taskStorage, metadataStorageCoordinator);
final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(
    taskLockbox,
    taskStorage,
    metadataStorageCoordinator,
    emitter,
    new SupervisorManager(null) {
@Override
public boolean checkPointDataSourceMetadata(String supervisorId, int taskGroupId, @Nullable DataSourceMetadata previousDataSourceMetadata) {
log.info("Adding checkpoint hash to the set");
checkpointRequestsHash.add(Objects.hash(supervisorId, taskGroupId, previousDataSourceMetadata));
return true;
}
});
final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(
    taskStorage,
    taskActionToolbox,
    new TaskAuditLogConfig(false)
);
final SegmentHandoffNotifierFactory handoffNotifierFactory = dataSource -> new SegmentHandoffNotifier() {
@Override
public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable) {
if (doHandoff) {
// Simulate immediate handoff
exec.execute(handOffRunnable);
}
return true;
}
@Override
public void start() {
// Noop
}
@Override
public void close() {
// Noop
}
};
final LocalDataSegmentPusherConfig dataSegmentPusherConfig = new LocalDataSegmentPusherConfig();
dataSegmentPusherConfig.storageDirectory = getSegmentDirectory();
final DataSegmentPusher dataSegmentPusher = new LocalDataSegmentPusher(dataSegmentPusherConfig);
toolboxFactory = new TaskToolboxFactory(
    taskConfig,
    null, // taskExecutorNode
    taskActionClientFactory,
    emitter,
    dataSegmentPusher,
    new TestDataSegmentKiller(),
    null, // DataSegmentMover
    null, // DataSegmentArchiver
    new TestDataSegmentAnnouncer(),
    EasyMock.createNiceMock(DataSegmentServerAnnouncer.class),
    handoffNotifierFactory,
    this::makeTimeseriesAndScanConglomerate,
    DirectQueryProcessingPool.INSTANCE,
    NoopJoinableFactory.INSTANCE,
    () -> EasyMock.createMock(MonitorScheduler.class),
    new SegmentCacheManagerFactory(testUtils.getTestObjectMapper()),
    testUtils.getTestObjectMapper(),
    testUtils.getTestIndexIO(),
    MapCache.create(1024),
    new CacheConfig(),
    new CachePopulatorStats(),
    testUtils.getTestIndexMergerV9(),
    EasyMock.createNiceMock(DruidNodeAnnouncer.class),
    EasyMock.createNiceMock(DruidNode.class),
    new LookupNodeService("tier"),
    new DataNodeService("tier", 1, ServerType.INDEXER_EXECUTOR, 0),
    new SingleFileTaskReportFileWriter(reportsFile),
    null,
    AuthTestUtils.TEST_AUTHORIZER_MAPPER,
    new NoopChatHandlerProvider(),
    testUtils.getRowIngestionMetersFactory(),
    new TestAppenderatorsManager(),
    new NoopIndexingServiceClient(),
    null,
    null,
    null
);
}
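
With the factory wired up, the test builds a per-task toolbox before running a KafkaIndexTask; the toolbox bundles everything configured above (action client, segment pusher, handoff notifier, caches, and mocks) behind one object:

// In this test class, the task-running helpers obtain a toolbox like so:
final TaskToolbox toolbox = toolboxFactory.build(task);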