Use of org.apache.druid.indexing.common.task.TaskResource in project druid by druid-io.
The class ImmutableWorkerInfoTest, method test_canRunTask:
@Test
public void test_canRunTask() {
  // Worker with total capacity 10, of which 6 slots are already in use (0 by parallel
  // index tasks), and availability groups "grp1" and "grp2" already taken.
  ImmutableWorkerInfo workerInfo = new ImmutableWorkerInfo(
      new Worker("http", "testWorker2", "192.0.0.1", 10, "v1", WorkerConfig.DEFAULT_CATEGORY),
      6,
      0,
      ImmutableSet.of("grp1", "grp2"),
      ImmutableSet.of("task1", "task2"),
      DateTimes.of("2015-01-01T01:01:02Z"));
  // Parallel index task
  TaskResource taskResource0 = mock(TaskResource.class);
  when(taskResource0.getRequiredCapacity()).thenReturn(3);
  Task parallelIndexTask = mock(ParallelIndexSupervisorTask.class);
  when(parallelIndexTask.getType()).thenReturn(ParallelIndexSupervisorTask.TYPE);
  when(parallelIndexTask.getTaskResource()).thenReturn(taskResource0);
  // The task satisfies both the parallel and the total slot constraints, so it can run
  Assert.assertTrue(workerInfo.canRunTask(parallelIndexTask, 0.5));
  // The task fails the parallel slot constraint, so it cannot run (required 3 > floor(10 * 0.1) = 1)
  Assert.assertFalse(workerInfo.canRunTask(parallelIndexTask, 0.1));
  // Some other indexing task
  TaskResource taskResource1 = mock(TaskResource.class);
  when(taskResource1.getRequiredCapacity()).thenReturn(5);
  Task anyOtherTask = mock(IndexTask.class);
  when(anyOtherTask.getType()).thenReturn("index");
  when(anyOtherTask.getTaskResource()).thenReturn(taskResource1);
  // Not a parallel index task -> satisfies the parallel index constraint,
  // but fails the total slot constraint and cannot run (6 used + 5 required = 11 > capacity 10)
  Assert.assertFalse(workerInfo.canRunTask(anyOtherTask, 0.5));
  // Task with an availability conflict ("grp1" is already taken on this worker)
  TaskResource taskResource2 = mock(TaskResource.class);
  when(taskResource2.getRequiredCapacity()).thenReturn(1);
  when(taskResource2.getAvailabilityGroup()).thenReturn("grp1");
  Task grp1Task = mock(IndexTask.class);
  when(grp1Task.getType()).thenReturn("blah");
  when(grp1Task.getTaskResource()).thenReturn(taskResource2);
  // Satisfies the parallel index and total slot constraints, but cannot run due to the availability conflict
  Assert.assertFalse(workerInfo.canRunTask(grp1Task, 0.3));
}
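The three Assert.assertFalse cases above each trip a different check. A minimal sketch of the logic, assuming the constructor arguments are (worker, currCapacityUsed, currParallelIndexCapacityUsed, availabilityGroups, runningTasks, lastCompletedTaskTime); the names and the floor-based rounding are illustrative, not the exact Druid implementation:

boolean canRunTask(Task task, double parallelIndexTaskSlotRatio) {
  int required = task.getTaskResource().getRequiredCapacity();
  // Check 1: parallel index tasks may only occupy a configurable fraction of the worker's slots.
  if (ParallelIndexSupervisorTask.TYPE.equals(task.getType())) {
    int parallelSlots = (int) Math.floor(worker.getCapacity() * parallelIndexTaskSlotRatio);
    if (currParallelIndexCapacityUsed + required > parallelSlots) {
      return false; // e.g. 0 + 3 > floor(10 * 0.1) = 1 in the second assert
    }
  }
  // Check 2: the task must fit into the worker's remaining total capacity.
  if (currCapacityUsed + required > worker.getCapacity()) {
    return false; // e.g. 6 + 5 = 11 > 10 in the third assert
  }
  // Check 3: at most one task per availability group per worker.
  return !availabilityGroups.contains(task.getTaskResource().getAvailabilityGroup());
}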
Use of org.apache.druid.indexing.common.task.TaskResource in project druid by druid-io.
The class KinesisSupervisor, method createIndexTasks:
@Override
protected List<SeekableStreamIndexTask<String, String, ByteEntity>> createIndexTasks(
    int replicas, String baseSequenceName, ObjectMapper sortingMapper,
    TreeMap<Integer, Map<String, String>> sequenceOffsets,
    SeekableStreamIndexTaskIOConfig taskIoConfig,
    SeekableStreamIndexTaskTuningConfig taskTuningConfig,
    RowIngestionMetersFactory rowIngestionMetersFactory) throws JsonProcessingException {
  final String checkpoints = sortingMapper.writerFor(CHECKPOINTS_TYPE_REF).writeValueAsString(sequenceOffsets);
  final Map<String, Object> context = createBaseTaskContexts();
  context.put(CHECKPOINTS_CTX_KEY, checkpoints);
  List<SeekableStreamIndexTask<String, String, ByteEntity>> taskList = new ArrayList<>();
  for (int i = 0; i < replicas; i++) {
    String taskId = IdUtils.getRandomIdWithPrefix(baseSequenceName);
    taskList.add(new KinesisIndexTask(
        taskId,
        new TaskResource(baseSequenceName, 1),
        spec.getDataSchema(),
        (KinesisIndexTaskTuningConfig) taskTuningConfig,
        (KinesisIndexTaskIOConfig) taskIoConfig,
        context,
        awsCredentialsConfig));
  }
  return taskList;
}
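Note the TaskResource(baseSequenceName, 1) passed to every replica: all replicas of a sequence share an availability group named after the base sequence name while each occupies a single task slot, so the overlord will never place two replicas on the same worker. This is exactly the availability-group constraint exercised by ImmutableWorkerInfoTest above.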
Use of org.apache.druid.indexing.common.task.TaskResource in project druid by druid-io.
The class TaskLifecycleTest, method testSimple:
@Test
public void testSimple() throws Exception {
  final Task task = new AbstractFixedIntervalTask(
      "id1", "id1", new TaskResource("id1", 1), "ds", Intervals.of("2012-01-01/P1D"), null)
  {
    @Override
    public String getType() {
      return "test";
    }

    @Override
    public void stopGracefully(TaskConfig taskConfig) {
    }

    @Override
    public TaskStatus run(TaskToolbox toolbox) throws Exception {
      final Interval interval = Intervals.of("2012-01-01/P1D");
      final TimeChunkLockTryAcquireAction action = new TimeChunkLockTryAcquireAction(TaskLockType.EXCLUSIVE, interval);
      final TaskLock lock = toolbox.getTaskActionClient().submit(action);
      if (lock == null) {
        throw new ISE("Failed to get a lock");
      }
      final DataSegment segment = DataSegment.builder()
          .dataSource("ds")
          .interval(interval)
          .version(lock.getVersion())
          .size(0)
          .build();
      toolbox.getTaskActionClient().submit(new SegmentInsertAction(ImmutableSet.of(segment)));
      return TaskStatus.success(getId());
    }
  };
  final TaskStatus status = runTask(task);
  Assert.assertEquals(taskLocation, status.getLocation());
  Assert.assertEquals("statusCode", TaskState.SUCCESS, status.getStatusCode());
  Assert.assertEquals("segments published", 1, mdc.getPublished().size());
  Assert.assertEquals("segments nuked", 0, mdc.getNuked().size());
}
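The anonymous task walks through the whole lifecycle: it acquires an exclusive time-chunk lock via TimeChunkLockTryAcquireAction (which returns null rather than throwing when the lock is unavailable, hence the explicit ISE), builds a segment stamped with the lock's version, and publishes it through SegmentInsertAction; the assertions then confirm exactly one published segment and none nuked.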
Use of org.apache.druid.indexing.common.task.TaskResource in project druid by druid-io.
The class KafkaSupervisor, method createIndexTasks:
@Override
protected List<SeekableStreamIndexTask<Integer, Long, KafkaRecordEntity>> createIndexTasks(
    int replicas, String baseSequenceName, ObjectMapper sortingMapper,
    TreeMap<Integer, Map<Integer, Long>> sequenceOffsets,
    SeekableStreamIndexTaskIOConfig taskIoConfig,
    SeekableStreamIndexTaskTuningConfig taskTuningConfig,
    RowIngestionMetersFactory rowIngestionMetersFactory) throws JsonProcessingException {
  final String checkpoints = sortingMapper.writerFor(CHECKPOINTS_TYPE_REF).writeValueAsString(sequenceOffsets);
  final Map<String, Object> context = createBaseTaskContexts();
  context.put(CHECKPOINTS_CTX_KEY, checkpoints);
  // The Kafka index task always uses incremental handoff since 0.16.0. The flag below exists
  // only for compatibility when downgrading a cluster to a version earlier than 0.16.0:
  // without it, the Kafka index task would pick up LegacyKafkaIndexTaskRunner.
  context.put("IS_INCREMENTAL_HANDOFF_SUPPORTED", true);
  List<SeekableStreamIndexTask<Integer, Long, KafkaRecordEntity>> taskList = new ArrayList<>();
  for (int i = 0; i < replicas; i++) {
    String taskId = IdUtils.getRandomIdWithPrefix(baseSequenceName);
    taskList.add(new KafkaIndexTask(
        taskId,
        new TaskResource(baseSequenceName, 1),
        spec.getDataSchema(),
        (KafkaIndexTaskTuningConfig) taskTuningConfig,
        (KafkaIndexTaskIOConfig) taskIoConfig,
        context,
        sortingMapper));
  }
  return taskList;
}
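Apart from the partition/offset type parameters, the task classes, and the incremental-handoff context flag, this mirrors the Kinesis variant above, including the shared TaskResource(baseSequenceName, 1) that keeps replicas on distinct workers.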
Use of org.apache.druid.indexing.common.task.TaskResource in project druid by druid-io.
The class TaskAnnouncementTest, method testBackwardsCompatibleSerde:
@Test
public void testBackwardsCompatibleSerde() throws Exception {
  final Task task = new RealtimeIndexTask(
      "theid",
      new TaskResource("rofl", 2),
      new FireDepartment(
          new DataSchema("foo", null, new AggregatorFactory[0], null, null, new DefaultObjectMapper()),
          new RealtimeIOConfig(
              new LocalFirehoseFactory(new File("lol"), "rofl", null),
              (schema, config, metrics) -> null),
          null),
      null);
  final TaskStatus status = TaskStatus.running(task.getId());
  final TaskAnnouncement announcement = TaskAnnouncement.create(task, status, TaskLocation.unknown());
  final String statusJson = jsonMapper.writeValueAsString(status);
  final String announcementJson = jsonMapper.writeValueAsString(announcement);
  final TaskStatus statusFromStatus = jsonMapper.readValue(statusJson, TaskStatus.class);
  final TaskStatus statusFromAnnouncement = jsonMapper.readValue(announcementJson, TaskStatus.class);
  final TaskAnnouncement announcementFromStatus = jsonMapper.readValue(statusJson, TaskAnnouncement.class);
  final TaskAnnouncement announcementFromAnnouncement = jsonMapper.readValue(announcementJson, TaskAnnouncement.class);
  Assert.assertEquals("theid", statusFromStatus.getId());
  Assert.assertEquals("theid", statusFromAnnouncement.getId());
  Assert.assertEquals("theid", announcementFromStatus.getTaskStatus().getId());
  Assert.assertEquals("theid", announcementFromAnnouncement.getTaskStatus().getId());
  Assert.assertEquals("theid", announcementFromStatus.getTaskResource().getAvailabilityGroup());
  Assert.assertEquals("rofl", announcementFromAnnouncement.getTaskResource().getAvailabilityGroup());
  Assert.assertEquals(1, announcementFromStatus.getTaskResource().getRequiredCapacity());
  Assert.assertEquals(2, announcementFromAnnouncement.getTaskResource().getRequiredCapacity());
}
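The last four assertions pin down the backward-compatibility contract: a TaskAnnouncement deserialized from plain TaskStatus JSON (which carries no TaskResource field) falls back to a default resource. Judging from the asserts alone, that default behaves like the following sketch (illustrative only; the actual fallback lives inside TaskAnnouncement's deserialization):

// Hypothetical default implied by the assertions: when no TaskResource is present in the
// JSON, the availability group falls back to the task id and the required capacity to 1.
TaskResource fallbackResource = new TaskResource(status.getId(), 1);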