Use of io.druid.indexing.common.task.RealtimeIndexTask in project druid by druid-io.
From the class TaskLifecycleTest, method newRealtimeIndexTask:
private RealtimeIndexTask newRealtimeIndexTask() {
  String taskId = String.format("rt_task_%s", System.currentTimeMillis());
  DataSchema dataSchema = new DataSchema(
      "test_ds", null,
      new AggregatorFactory[]{new LongSumAggregatorFactory("count", "rows")},
      new UniformGranularitySpec(Granularities.DAY, Granularities.NONE, null), mapper
  );
  RealtimeIOConfig realtimeIOConfig = new RealtimeIOConfig(
      new MockFirehoseFactory(true),
      null, // PlumberSchool - RealtimeIndexTask always uses the RealtimePlumber hardcoded in the RealtimeIndexTask class
      null
  );
  RealtimeTuningConfig realtimeTuningConfig = new RealtimeTuningConfig(
      1000,
      new Period("P1Y"),
      null, // window period; null falls back to the default of 10 minutes
      null, // base persist dir is ignored by the realtime index task
      null, null, null, null, null, null, 0, 0, null, null
  );
  FireDepartment fireDepartment = new FireDepartment(dataSchema, realtimeIOConfig, realtimeTuningConfig);
  return new RealtimeIndexTask(taskId, new TaskResource(taskId, 1), fireDepartment, null);
}
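The helper assembles the three parts of a realtime spec (DataSchema, RealtimeIOConfig, RealtimeTuningConfig) into a FireDepartment and wraps it in a task whose TaskResource reuses the task id as its availability group. As a minimal sketch of exercising such a task outside the full lifecycle test, the spec can be round-tripped through Jackson; this is not part of the original test and assumes MockFirehoseFactory is registered with the test's mapper for serde (the mapper setup would mirror the testTaskValidator example further down):

// A minimal sketch, not from the original test.
RealtimeIndexTask task = newRealtimeIndexTask();
String json = mapper.writeValueAsString(task);
// Task is polymorphic, so deserialization goes through the Task interface.
Task roundTripped = mapper.readValue(json, Task.class);
// The generated id ("rt_task_<millis>") should survive serialization.
Assert.assertEquals(task.getId(), roundTripped.getId());
Assert.assertEquals("test_ds", roundTripped.getDataSource());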
Use of io.druid.indexing.common.task.RealtimeIndexTask in project druid by druid-io.
From the class TaskAnnouncementTest, method testBackwardsCompatibleSerde:
@Test
public void testBackwardsCompatibleSerde() throws Exception {
  final Task task = new RealtimeIndexTask(
      "theid",
      new TaskResource("rofl", 2),
      new FireDepartment(
          new DataSchema("foo", null, new AggregatorFactory[0], null, new DefaultObjectMapper()),
          new RealtimeIOConfig(
              new LocalFirehoseFactory(new File("lol"), "rofl", null),
              new PlumberSchool() {
                @Override
                public Plumber findPlumber(DataSchema schema, RealtimeTuningConfig config, FireDepartmentMetrics metrics) {
                  return null;
                }
              },
              null
          ),
          null
      ),
      null
  );
  final TaskStatus status = TaskStatus.running(task.getId());
  final TaskAnnouncement announcement = TaskAnnouncement.create(task, status, TaskLocation.unknown());

  final String statusJson = jsonMapper.writeValueAsString(status);
  final String announcementJson = jsonMapper.writeValueAsString(announcement);

  final TaskStatus statusFromStatus = jsonMapper.readValue(statusJson, TaskStatus.class);
  final TaskStatus statusFromAnnouncement = jsonMapper.readValue(announcementJson, TaskStatus.class);
  final TaskAnnouncement announcementFromStatus = jsonMapper.readValue(statusJson, TaskAnnouncement.class);
  final TaskAnnouncement announcementFromAnnouncement = jsonMapper.readValue(announcementJson, TaskAnnouncement.class);

  // The task id survives all four serde paths.
  Assert.assertEquals("theid", statusFromStatus.getId());
  Assert.assertEquals("theid", statusFromAnnouncement.getId());
  Assert.assertEquals("theid", announcementFromStatus.getTaskStatus().getId());
  Assert.assertEquals("theid", announcementFromAnnouncement.getTaskStatus().getId());
  // An announcement read from old-style status JSON falls back to a default
  // TaskResource (availability group = task id, capacity = 1), while a real
  // announcement keeps the resource it was created with.
  Assert.assertEquals("theid", announcementFromStatus.getTaskResource().getAvailabilityGroup());
  Assert.assertEquals("rofl", announcementFromAnnouncement.getTaskResource().getAvailabilityGroup());
  Assert.assertEquals(1, announcementFromStatus.getTaskResource().getRequiredCapacity());
  Assert.assertEquals(2, announcementFromAnnouncement.getTaskResource().getRequiredCapacity());
}
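The point of the test is that a TaskAnnouncement can be deserialized from a plain old-format TaskStatus blob, with the missing TaskResource filled in by defaults. A minimal sketch of just that upgrade path, derived from the assertions above (the "legacyid" name is illustrative, not from the test):

// A minimal sketch, not from the original test: old-style status JSON with no
// taskResource field still reads as a TaskAnnouncement, with the availability
// group defaulting to the task id and the required capacity defaulting to 1.
String legacyJson = jsonMapper.writeValueAsString(TaskStatus.running("legacyid"));
TaskAnnouncement upgraded = jsonMapper.readValue(legacyJson, TaskAnnouncement.class);
Assert.assertEquals("legacyid", upgraded.getTaskResource().getAvailabilityGroup());
Assert.assertEquals(1, upgraded.getTaskResource().getRequiredCapacity());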
Use of io.druid.indexing.common.task.RealtimeIndexTask in project druid by druid-io.
From the class TaskLifecycleTest, method testRealtimeIndexTask:
@Test(timeout = 60_000L)
public void testRealtimeIndexTask() throws Exception {
  publishCountDown = new CountDownLatch(1);
  monitorScheduler.addMonitor(EasyMock.anyObject(Monitor.class));
  EasyMock.expectLastCall().atLeastOnce();
  monitorScheduler.removeMonitor(EasyMock.anyObject(Monitor.class));
  EasyMock.expectLastCall().anyTimes();
  EasyMock.replay(monitorScheduler, queryRunnerFactoryConglomerate);

  RealtimeIndexTask realtimeIndexTask = newRealtimeIndexTask();
  final String taskId = realtimeIndexTask.getId();
  taskQueue.start();
  taskQueue.add(realtimeIndexTask);

  // Wait for the task to process events and publish the segment.
  publishCountDown.await();

  // The realtime task has published its segment; simulate loading the segment
  // onto a historical node so that the task finishes with a SUCCESS status.
  Assert.assertEquals(1, handOffCallbacks.size());
  Pair<Executor, Runnable> executorRunnablePair = Iterables.getOnlyElement(handOffCallbacks.values());
  executorRunnablePair.lhs.execute(executorRunnablePair.rhs);
  handOffCallbacks.clear();

  // Wait for the realtime index task to handle the callback in the plumber and succeed.
  while (tsqa.getStatus(taskId).get().isRunnable()) {
    Thread.sleep(10);
  }
  Assert.assertTrue("Task should be in Success state", tsqa.getStatus(taskId).get().isSuccess());
  Assert.assertEquals(1, announcedSinks);
  Assert.assertEquals(1, pushedSegments);
  Assert.assertEquals(1, mdc.getPublished().size());

  DataSegment segment = mdc.getPublished().iterator().next();
  Assert.assertEquals("test_ds", segment.getDataSource());
  Assert.assertEquals(ImmutableList.of("dim1", "dim2"), segment.getDimensions());
  Assert.assertEquals(new Interval(now.toString("YYYY-MM-dd") + "/" + now.plusDays(1).toString("YYYY-MM-dd")), segment.getInterval());
  Assert.assertEquals(ImmutableList.of("count"), segment.getMetrics());
  EasyMock.verify(monitorScheduler, queryRunnerFactoryConglomerate);
}
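The test coordinates with the task through two synchronization points: a CountDownLatch that the test fixture releases when the segment is pushed, and a polling loop on the task status. Stripped of Druid types, the latch pattern is just the following self-contained sketch (a generic illustration, not the test fixture itself):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class LatchHandoffSketch {
  public static void main(String[] args) throws InterruptedException {
    // The "publisher" side (a stand-in for the realtime task) counts the latch
    // down once its work is visible; the "test" side blocks on await() and
    // only then starts asserting on the published artifacts.
    final CountDownLatch publishLatch = new CountDownLatch(1);
    ExecutorService exec = Executors.newSingleThreadExecutor();
    exec.submit(() -> {
      // ... push the segment, record metadata ...
      publishLatch.countDown(); // wake the waiting test thread
    });
    publishLatch.await(); // returns only after the publisher has signaled
    exec.shutdown();
  }
}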
Use of io.druid.indexing.common.task.RealtimeIndexTask in project druid by druid-io.
From the class DruidJsonValidatorTest, method testTaskValidator:
@Test
public void testTaskValidator() throws Exception {
  final ObjectMapper jsonMapper = new DefaultObjectMapper();
  for (final Module jacksonModule : new FirehoseModule().getJacksonModules()) {
    jsonMapper.registerModule(jacksonModule);
  }
  final RealtimeIndexTask task = new RealtimeIndexTask(
      null,
      new TaskResource("rofl", 2),
      new FireDepartment(
          new DataSchema(
              "foo", null, new AggregatorFactory[0],
              new UniformGranularitySpec(Granularities.HOUR, Granularities.NONE, null),
              jsonMapper
          ),
          new RealtimeIOConfig(
              new LocalFirehoseFactory(new File("lol"), "rofl", null),
              new PlumberSchool() {
                @Override
                public Plumber findPlumber(DataSchema schema, RealtimeTuningConfig config, FireDepartmentMetrics metrics) {
                  return null;
                }
              },
              null
          ),
          new RealtimeTuningConfig(1, new Period("PT10M"), null, null, null, null, 1, NoneShardSpec.instance(), new IndexSpec(), null, 0, 0, true, null)
      ),
      null
  );
  File tmp = temporaryFolder.newFile("test_task.json");
  jsonMapper.writeValue(tmp, task);
  parseCommand("validator", "-f", tmp.getAbsolutePath(), "-t", "task").run();
}
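parseCommand(...) drives Druid's JSON validator over the file just written, confirming the serialized spec parses as a task. As a follow-up sanity check (an assumption, not part of the original test), the same mapper could read the spec back through the polymorphic Task interface:

// Hypothetical follow-up, not in the original test: read the written spec back.
final Task roundTripped = jsonMapper.readValue(tmp, Task.class);
// The task was built with a null id, so RealtimeIndexTask generates one;
// compare against the instance rather than a literal value.
Assert.assertEquals(task.getId(), roundTripped.getId());
Assert.assertEquals("foo", roundTripped.getDataSource());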
Use of io.druid.indexing.common.task.RealtimeIndexTask in project druid by druid-io.
From the class KafkaSupervisorTest, method testKillIncompatibleTasks:
@Test
public void testKillIncompatibleTasks() throws Exception {
  supervisor = getSupervisor(2, 1, true, "PT1H", null);
  addSomeEvents(1);

  Task id1 = createKafkaIndexTask(
      "id1", DATASOURCE, "index_kafka_testDS__some_other_sequenceName",
      new KafkaPartitions("topic", ImmutableMap.of(0, 0L)),
      new KafkaPartitions("topic", ImmutableMap.of(0, 10L)), null
  );
  Task id2 = createKafkaIndexTask(
      "id2", DATASOURCE, "sequenceName-0",
      new KafkaPartitions("topic", ImmutableMap.of(0, 0L, 1, 0L, 2, 0L)),
      new KafkaPartitions("topic", ImmutableMap.of(0, 333L, 1, 333L, 2, 333L)), null
  );
  Task id3 = createKafkaIndexTask(
      "id3", DATASOURCE, "index_kafka_testDS__some_other_sequenceName",
      new KafkaPartitions("topic", ImmutableMap.of(0, 0L, 1, 0L, 2, 1L)),
      new KafkaPartitions("topic", ImmutableMap.of(0, 333L, 1, 333L, 2, 330L)), null
  );
  Task id4 = createKafkaIndexTask(
      "id4", "other-datasource", "index_kafka_testDS_d927edff33c4b3f",
      new KafkaPartitions("topic", ImmutableMap.of(0, 0L)),
      new KafkaPartitions("topic", ImmutableMap.of(0, 10L)), null
  );
  // Not a KafkaIndexTask, so the supervisor must not kill it.
  Task id5 = new RealtimeIndexTask(
      "id5", null,
      new FireDepartment(dataSchema, new RealtimeIOConfig(null, null, null), null),
      null
  );
  List<Task> existingTasks = ImmutableList.of(id1, id2, id3, id4, id5);

  expect(taskMaster.getTaskQueue()).andReturn(Optional.of(taskQueue)).anyTimes();
  expect(taskMaster.getTaskRunner()).andReturn(Optional.of(taskRunner)).anyTimes();
  expect(taskRunner.getRunningTasks()).andReturn(Collections.EMPTY_LIST).anyTimes();
  expect(taskStorage.getActiveTasks()).andReturn(existingTasks).anyTimes();
  expect(taskStorage.getStatus("id1")).andReturn(Optional.of(TaskStatus.running("id1"))).anyTimes();
  expect(taskStorage.getStatus("id2")).andReturn(Optional.of(TaskStatus.running("id2"))).anyTimes();
  expect(taskStorage.getStatus("id3")).andReturn(Optional.of(TaskStatus.running("id3"))).anyTimes();
  expect(taskStorage.getTask("id1")).andReturn(Optional.of(id1)).anyTimes();
  expect(taskStorage.getTask("id2")).andReturn(Optional.of(id2)).anyTimes();
  expect(taskStorage.getTask("id3")).andReturn(Optional.of(id3)).anyTimes();
  expect(taskClient.getStatusAsync(anyString())).andReturn(Futures.immediateFuture(KafkaIndexTask.Status.NOT_STARTED)).anyTimes();
  expect(taskClient.getStartTimeAsync(anyString())).andReturn(Futures.immediateFuture(DateTime.now())).anyTimes();
  expect(indexerMetadataStorageCoordinator.getDataSourceMetadata(DATASOURCE)).andReturn(new KafkaDataSourceMetadata(null)).anyTimes();
  expect(taskClient.stopAsync("id1", false)).andReturn(Futures.immediateFuture(true));
  expect(taskClient.stopAsync("id3", false)).andReturn(Futures.immediateFuture(false));
  taskRunner.registerListener(anyObject(TaskRunnerListener.class), anyObject(Executor.class));
  // id3's graceful stop returns false, so it is shut down through the task queue instead.
  taskQueue.shutdown("id3");
  expect(taskQueue.add(anyObject(Task.class))).andReturn(true);
  replayAll();

  supervisor.start();
  supervisor.runInternal();
  verifyAll();
}
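The mock expectations encode the supervisor's cleanup policy: id1 and id3 are Kafka tasks for this datasource with incompatible sequence names, so they are stopped (and id3, whose graceful stop fails, is hard shut down); id2 matches the expected sequence name and is kept; id4 belongs to another datasource and id5 is not a Kafka task, so neither is touched. A hypothetical sketch of the candidate filter follows; the helper name and structure are invented for illustration and are not KafkaSupervisor's actual implementation:

// Hypothetical illustration only - not the supervisor's real code.
private boolean isKillCandidate(Task task, String supervisorDataSource) {
  // Non-Kafka tasks (like id5, a RealtimeIndexTask) are never the supervisor's
  // to manage, and tasks for other datasources (like id4) are ignored. Among
  // the remaining candidates, only those whose sequence name does not match
  // the supervisor's expected naming (id1, id3) are killed; id2 stays.
  return task instanceof KafkaIndexTask
      && task.getDataSource().equals(supervisorDataSource);
}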