Use of io.druid.indexing.overlord.TaskRunner in project druid by druid-io: class KafkaSupervisor, method possiblyRegisterListener.
public void possiblyRegisterListener() {
  if (listenerRegistered) {
    return;
  }
  Optional<TaskRunner> taskRunner = taskMaster.getTaskRunner();
  if (taskRunner.isPresent()) {
    taskRunner.get().registerListener(
        new TaskRunnerListener() {
          @Override
          public String getListenerId() {
            return supervisorId;
          }

          @Override
          public void locationChanged(final String taskId, final TaskLocation newLocation) {
            // do nothing
          }

          @Override
          public void statusChanged(String taskId, TaskStatus status) {
            notices.add(new RunNotice());
          }
        },
        MoreExecutors.sameThreadExecutor()
    );
    listenerRegistered = true;
  }
}
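The pattern above is how a Druid component hooks into task lifecycle events: it asks the TaskMaster for the current TaskRunner (present only while this node is leader) and registers a TaskRunnerListener whose statusChanged callback queues a RunNotice so the supervisor re-evaluates its tasks. Below is a minimal sketch of the same registration, reduced to a listener that just logs status changes; the listener id "logging-listener" and the logging body are illustrative assumptions rather than Druid code, while the getTaskRunner, registerListener, and MoreExecutors calls are exactly those used above.

// Sketch only: register a logging listener with the TaskRunner, assuming a TaskMaster field named taskMaster.
// Imports assumed: Guava's Optional and MoreExecutors, plus the Druid TaskRunner/TaskRunnerListener types used above.
Optional<TaskRunner> runner = taskMaster.getTaskRunner();
if (runner.isPresent()) {
  runner.get().registerListener(
      new TaskRunnerListener() {
        @Override
        public String getListenerId() {
          return "logging-listener"; // illustrative id, not a real Druid listener
        }

        @Override
        public void locationChanged(String taskId, TaskLocation newLocation) {
          // not needed for this sketch
        }

        @Override
        public void statusChanged(String taskId, TaskStatus status) {
          System.out.println("task " + taskId + " changed status: " + status);
        }
      },
      MoreExecutors.sameThreadExecutor()
  );
}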
Use of io.druid.indexing.overlord.TaskRunner in project druid by druid-io: class KafkaSupervisorTest, method testLateMessageRejectionPeriod.
@Test
public void testLateMessageRejectionPeriod() throws Exception {
  supervisor = getSupervisor(2, 1, true, "PT1H", new Period("PT1H"));
  addSomeEvents(1);

  Capture<KafkaIndexTask> captured = Capture.newInstance(CaptureType.ALL);
  expect(taskMaster.getTaskQueue()).andReturn(Optional.of(taskQueue)).anyTimes();
  expect(taskMaster.getTaskRunner()).andReturn(Optional.<TaskRunner>absent()).anyTimes();
  expect(taskStorage.getActiveTasks()).andReturn(ImmutableList.<Task>of()).anyTimes();
  expect(indexerMetadataStorageCoordinator.getDataSourceMetadata(DATASOURCE))
      .andReturn(new KafkaDataSourceMetadata(null))
      .anyTimes();
  expect(taskQueue.add(capture(captured))).andReturn(true).times(2);
  replayAll();

  supervisor.start();
  supervisor.runInternal();
  verifyAll();

  KafkaIndexTask task1 = captured.getValues().get(0);
  KafkaIndexTask task2 = captured.getValues().get(1);

  Assert.assertTrue(
      "minimumMessageTime",
      task1.getIOConfig().getMinimumMessageTime().get().plusMinutes(59).isBeforeNow()
  );
  Assert.assertTrue(
      "minimumMessageTime",
      task1.getIOConfig().getMinimumMessageTime().get().plusMinutes(61).isAfterNow()
  );
  Assert.assertEquals(
      task1.getIOConfig().getMinimumMessageTime().get(),
      task2.getIOConfig().getMinimumMessageTime().get()
  );
}
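The assertions above pin the tasks' minimumMessageTime to roughly one hour before task creation, which is what a lateMessageRejectionPeriod of PT1H implies: messages older than that cutoff are rejected, and both replica tasks must share the same cutoff so their output stays identical. A short sketch of how such a cutoff relates to the period, using Joda-Time; the variable names here are illustrative, not KafkaSupervisor's internal fields.

// Sketch only: deriving a late-message cutoff from a PT1H rejection period with Joda-Time.
// Imports assumed: org.joda.time.DateTime, org.joda.time.Period.
Period lateMessageRejectionPeriod = new Period("PT1H");
DateTime minimumMessageTime = DateTime.now().minus(lateMessageRejectionPeriod);

// Restating the test's window check: the cutoff lies within a minute of "one hour ago".
boolean withinWindow = minimumMessageTime.plusMinutes(59).isBeforeNow()
    && minimumMessageTime.plusMinutes(61).isAfterNow();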
Use of io.druid.indexing.overlord.TaskRunner in project druid by druid-io: class KafkaSupervisorTest, method testReplicas.
@Test
public void testReplicas() throws Exception {
  supervisor = getSupervisor(2, 1, true, "PT1H", null);
  addSomeEvents(1);

  Capture<KafkaIndexTask> captured = Capture.newInstance(CaptureType.ALL);
  expect(taskMaster.getTaskQueue()).andReturn(Optional.of(taskQueue)).anyTimes();
  expect(taskMaster.getTaskRunner()).andReturn(Optional.<TaskRunner>absent()).anyTimes();
  expect(taskStorage.getActiveTasks()).andReturn(ImmutableList.<Task>of()).anyTimes();
  expect(indexerMetadataStorageCoordinator.getDataSourceMetadata(DATASOURCE))
      .andReturn(new KafkaDataSourceMetadata(null))
      .anyTimes();
  expect(taskQueue.add(capture(captured))).andReturn(true).times(2);
  replayAll();

  supervisor.start();
  supervisor.runInternal();
  verifyAll();

  KafkaIndexTask task1 = captured.getValues().get(0);
  Assert.assertEquals(3, task1.getIOConfig().getStartPartitions().getPartitionOffsetMap().size());
  Assert.assertEquals(3, task1.getIOConfig().getEndPartitions().getPartitionOffsetMap().size());
  Assert.assertEquals(0L, (long) task1.getIOConfig().getStartPartitions().getPartitionOffsetMap().get(0));
  Assert.assertEquals(0L, (long) task1.getIOConfig().getStartPartitions().getPartitionOffsetMap().get(1));
  Assert.assertEquals(0L, (long) task1.getIOConfig().getStartPartitions().getPartitionOffsetMap().get(2));

  KafkaIndexTask task2 = captured.getValues().get(1);
  Assert.assertEquals(3, task2.getIOConfig().getStartPartitions().getPartitionOffsetMap().size());
  Assert.assertEquals(3, task2.getIOConfig().getEndPartitions().getPartitionOffsetMap().size());
  Assert.assertEquals(0L, (long) task2.getIOConfig().getStartPartitions().getPartitionOffsetMap().get(0));
  Assert.assertEquals(0L, (long) task2.getIOConfig().getStartPartitions().getPartitionOffsetMap().get(1));
  Assert.assertEquals(0L, (long) task2.getIOConfig().getStartPartitions().getPartitionOffsetMap().get(2));
}
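With the supervisor configured for replicas, both captured tasks are expected to cover the same three Kafka partitions starting at offset 0. The per-partition assertions above can be read as comparing each task's start offsets against a single expected map; the compact restatement below is a sketch (the literal-map equality check is an assumption, since the original test only asserts the individual entries).

// Sketch only: the start offsets both replica tasks share in the test above (partitions 0, 1, 2 at offset 0).
// Imports assumed: com.google.common.collect.ImmutableMap, java.util.Map.
Map<Integer, Long> expectedStartOffsets = ImmutableMap.of(0, 0L, 1, 0L, 2, 0L);
Assert.assertEquals(expectedStartOffsets, task1.getIOConfig().getStartPartitions().getPartitionOffsetMap());
Assert.assertEquals(expectedStartOffsets, task2.getIOConfig().getStartPartitions().getPartitionOffsetMap());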
Use of io.druid.indexing.overlord.TaskRunner in project druid by druid-io: class OverlordResourceTest, method setUp.
@Before
public void setUp() throws Exception {
  taskRunner = EasyMock.createMock(TaskRunner.class);
  taskMaster = EasyMock.createStrictMock(TaskMaster.class);
  tsqa = EasyMock.createStrictMock(TaskStorageQueryAdapter.class);
  req = EasyMock.createStrictMock(HttpServletRequest.class);

  EasyMock.expect(taskMaster.getTaskRunner()).andReturn(Optional.of(taskRunner)).anyTimes();

  overlordResource = new OverlordResource(taskMaster, tsqa, null, null, null, new AuthConfig(true));

  EasyMock.expect(req.getAttribute(AuthConfig.DRUID_AUTH_TOKEN)).andReturn(
      new AuthorizationInfo() {
        @Override
        public Access isAuthorized(Resource resource, Action action) {
          if (resource.getName().equals("allow")) {
            return new Access(true);
          } else {
            return new Access(false);
          }
        }
      }
  );
}
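This fixture wires a TaskMaster mock that always hands back the mocked TaskRunner, plus an AuthorizationInfo stub that grants access only to resources named "allow"; AuthConfig(true) switches authorization on so the resource actually consults that stub. A test method built on it might take the general shape sketched below; the specific expectations (empty task lists, a 200 status) are assumptions for illustration, not taken from the real test class.

// Sketch only: exercising OverlordResource.getWaitingTasks against this fixture.
// The expectations below are illustrative; a real test would stub the tasks it cares about.
EasyMock.expect(tsqa.getActiveTasks()).andReturn(ImmutableList.<Task>of());
EasyMock.expect(taskRunner.getKnownTasks()).andReturn(ImmutableList.<TaskRunnerWorkItem>of());
EasyMock.replay(taskRunner, taskMaster, tsqa, req);

Response response = overlordResource.getWaitingTasks(req);
Assert.assertEquals(200, response.getStatus());

EasyMock.verify(taskRunner, taskMaster, tsqa, req);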
Use of io.druid.indexing.overlord.TaskRunner in project druid by druid-io: class OverlordResource, method getWaitingTasks.
@GET
@Path("/waitingTasks")
@Produces(MediaType.APPLICATION_JSON)
public Response getWaitingTasks(@Context final HttpServletRequest req) {
  return workItemsResponse(
      new Function<TaskRunner, Collection<? extends TaskRunnerWorkItem>>() {
        @Override
        public Collection<? extends TaskRunnerWorkItem> apply(TaskRunner taskRunner) {
          // A bit roundabout, but works as a way of figuring out what tasks haven't been handed
          // off to the runner yet:
          final List<Task> allActiveTasks = taskStorageQueryAdapter.getActiveTasks();
          final List<Task> activeTasks;
          if (authConfig.isEnabled()) {
            // This is an experimental feature, see - https://github.com/druid-io/druid/pull/2424
            final Map<Pair<Resource, Action>, Access> resourceAccessMap = new HashMap<>();
            final AuthorizationInfo authorizationInfo =
                (AuthorizationInfo) req.getAttribute(AuthConfig.DRUID_AUTH_TOKEN);
            activeTasks = ImmutableList.copyOf(
                Iterables.filter(
                    allActiveTasks,
                    new Predicate<Task>() {
                      @Override
                      public boolean apply(Task input) {
                        Resource resource = new Resource(input.getDataSource(), ResourceType.DATASOURCE);
                        Action action = Action.READ;
                        Pair<Resource, Action> key = new Pair<>(resource, action);
                        if (resourceAccessMap.containsKey(key)) {
                          return resourceAccessMap.get(key).isAllowed();
                        } else {
                          Access access = authorizationInfo.isAuthorized(key.lhs, key.rhs);
                          resourceAccessMap.put(key, access);
                          return access.isAllowed();
                        }
                      }
                    }
                )
            );
          } else {
            activeTasks = allActiveTasks;
          }
          final Set<String> runnersKnownTasks = Sets.newHashSet(
              Iterables.transform(
                  taskRunner.getKnownTasks(),
                  new Function<TaskRunnerWorkItem, String>() {
                    @Override
                    public String apply(final TaskRunnerWorkItem workItem) {
                      return workItem.getTaskId();
                    }
                  }
              )
          );
          final List<TaskRunnerWorkItem> waitingTasks = Lists.newArrayList();
          for (final Task task : activeTasks) {
            if (!runnersKnownTasks.contains(task.getId())) {
              waitingTasks.add(
                  // Would be nice to include the real created date, but the TaskStorage API doesn't yet allow it.
                  new TaskRunnerWorkItem(task.getId(), SettableFuture.<TaskStatus>create(), new DateTime(0), new DateTime(0)) {
                    @Override
                    public TaskLocation getLocation() {
                      return TaskLocation.unknown();
                    }
                  }
              );
            }
          }
          return waitingTasks;
        }
      }
  );
}
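The method above computes "waiting" tasks as the set difference between active tasks in storage (optionally filtered by the caller's datasource read permissions) and tasks the TaskRunner already knows about, wrapping each leftover in a placeholder TaskRunnerWorkItem with an unknown location. The core computation restated with Java 8 streams, purely for readability; the shipped code uses Guava as shown above, so this is an alternative sketch, not the actual implementation.

// Sketch only: "waiting = active tasks minus tasks the runner knows about", using streams.
// Imports assumed: java.util.stream.Collectors plus the Druid types used above.
Set<String> runnersKnownTaskIds = taskRunner.getKnownTasks().stream()
    .map(TaskRunnerWorkItem::getTaskId)
    .collect(Collectors.toSet());

List<Task> waiting = activeTasks.stream()
    .filter(task -> !runnersKnownTaskIds.contains(task.getId()))
    .collect(Collectors.toList());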