Use of org.apache.druid.indexing.common.task.Task in project druid by druid-io.
From the class SegmentAllocateActionTest, method testAllocateAllGranularity:
@Test
public void testAllocateAllGranularity() {
  final Task task = NoopTask.create();
  taskActionTestKit.getTaskLockbox().add(task);

  // Allocate under two different sequence names; with ALL segment granularity,
  // both allocations must resolve to the eternity interval.
  final SegmentIdWithShardSpec firstId =
      allocate(task, PARTY_TIME, Granularities.MINUTE, Granularities.ALL, "s1", null);
  final SegmentIdWithShardSpec secondId =
      allocate(task, PARTY_TIME, Granularities.MINUTE, Granularities.ALL, "s2", null);

  Assert.assertNotNull(firstId);
  Assert.assertNotNull(secondId);
  Assert.assertEquals(Intervals.ETERNITY, firstId.getInterval());
  Assert.assertEquals(Intervals.ETERNITY, secondId.getInterval());
}
Use of org.apache.druid.indexing.common.task.Task in project druid by druid-io.
From the class WorkerTaskManager, method initAssignedTasks:
/**
 * Restores tasks that were assigned to this worker before a restart by reading their
 * serialized definitions from the assigned-task directory, then re-submits each
 * restored task for execution.
 *
 * <p>Unreadable task files are logged and skipped; a file whose name does not match
 * the task id it contains is treated as corruption and aborts initialization (the
 * {@code ISE} below is not an {@code IOException}, so it propagates out of the loop).
 *
 * @throws IOException if the assigned-task directory cannot be created or listed
 */
private void initAssignedTasks() throws IOException {
  File assignedTaskDir = getAssignedTaskDir();
  log.debug("Looking for any previously assigned tasks on disk[%s].", assignedTaskDir);
  FileUtils.mkdirp(assignedTaskDir);
  // File.listFiles() returns null on an I/O error even when the directory exists;
  // fail with a clear message rather than NPE inside the loop below.
  final File[] taskFiles = assignedTaskDir.listFiles();
  if (taskFiles == null) {
    throw new IOException("Failed to list assigned task files in directory[" + assignedTaskDir + "].");
  }
  for (File taskFile : taskFiles) {
    try {
      String taskId = taskFile.getName();
      Task task = jsonMapper.readValue(taskFile, Task.class);
      if (taskId.equals(task.getId())) {
        assignedTasks.put(taskId, task);
      } else {
        throw new ISE("Corrupted assigned task on disk[%s].", taskFile.getAbsoluteFile());
      }
    } catch (IOException ex) {
      // Best-effort recovery: skip the unreadable file and keep restoring the rest.
      log.noStackTrace().error(ex, "Failed to read assigned task from disk at [%s]. Ignored.", taskFile.getAbsoluteFile());
    }
  }
  if (!assignedTasks.isEmpty()) {
    log.info("Found %,d running tasks from previous run: %s", assignedTasks.size(), assignedTasks.values().stream().map(Task::getId).collect(Collectors.joining(", ")));
  }
  // Re-submit every restored task so the executor picks it back up.
  for (Task task : assignedTasks.values()) {
    submitNoticeToExec(new RunNotice(task));
  }
}
Use of org.apache.druid.indexing.common.task.Task in project druid by druid-io.
From the class WorkerTaskManager, method getChangesSince:
/**
 * Returns the worker-state changes that occurred after the given counter. A negative
 * counter means the caller has no prior snapshot, so a full snapshot of assigned,
 * running, and completed tasks is returned instead of a delta.
 */
public ListenableFuture<ChangeRequestsSnapshot<WorkerHistoryItem>> getChangesSince(ChangeRequestHistory.Counter counter) {
  Preconditions.checkState(lifecycleLock.awaitStarted(1, TimeUnit.SECONDS), "not started");

  // Non-negative counter: the caller already has a snapshot, serve only the delta.
  if (counter.getCounter() >= 0) {
    return changeHistory.getRequestsSince(counter);
  }

  // Negative counter: build a complete snapshot of the worker's state under the lock.
  synchronized (lock) {
    final List<WorkerHistoryItem> snapshot = new ArrayList<>();
    snapshot.add(new WorkerHistoryItem.Metadata(disabled.get()));

    assignedTasks.values().forEach(
        assigned -> snapshot.add(
            new WorkerHistoryItem.TaskUpdate(
                TaskAnnouncement.create(assigned, TaskStatus.running(assigned.getId()), TaskLocation.unknown())
            )
        )
    );
    runningTasks.values().forEach(
        details -> snapshot.add(
            new WorkerHistoryItem.TaskUpdate(TaskAnnouncement.create(details.task, details.status, details.location))
        )
    );
    completedTasks.values().forEach(
        announcement -> snapshot.add(new WorkerHistoryItem.TaskUpdate(announcement))
    );

    final SettableFuture<ChangeRequestsSnapshot<WorkerHistoryItem>> result = SettableFuture.create();
    result.set(ChangeRequestsSnapshot.success(changeHistory.getLastCounter(), Lists.newArrayList(snapshot)));
    return result;
  }
}
Use of org.apache.druid.indexing.common.task.Task in project druid by druid-io.
From the class SegmentInsertActionTest, method testFailBadVersion:
@Test
public void testFailBadVersion() throws Exception {
  final Task task = NoopTask.create();
  final SegmentInsertAction action = new SegmentInsertAction(ImmutableSet.of(SEGMENT3));
  actionTestKit.getTaskLockbox().add(task);
  acquireTimeChunkLock(TaskLockType.EXCLUSIVE, task, INTERVAL, 5000);

  // Inserting SEGMENT3 must fail because its interval is not covered by the lock.
  thrown.expect(IllegalStateException.class);
  thrown.expectMessage(CoreMatchers.containsString("are not covered by locks"));

  final CriticalAction<Set<DataSegment>> criticalAction = CriticalAction
      .<Set<DataSegment>>builder()
      .onValidLocks(() -> action.perform(task, actionTestKit.getTaskActionToolbox()))
      .onInvalidLocks(() -> {
        Assert.fail();
        return null;
      })
      .build();
  final Set<DataSegment> segments = actionTestKit
      .getTaskLockbox()
      .doInCriticalSection(task, Collections.singletonList(INTERVAL), criticalAction);

  // Unreachable when the expected exception fires; documents the intended result.
  Assert.assertEquals(ImmutableSet.of(SEGMENT3), segments);
}
Use of org.apache.druid.indexing.common.task.Task in project druid by druid-io.
From the class TaskLocksTest, method testFindSegmentLocksForSegments:
@Test
public void testFindSegmentLocksForSegments() {
  final Set<DataSegment> segments = createNumberedPartitionedSegments();
  final Interval interval = Intervals.of("2017-01-01/2017-01-02");
  final String version = DateTimes.nowUtc().toString();

  // Acquire one segment lock per partition id 0..4.
  final List<TaskLock> locks = IntStream
      .range(0, 5)
      .mapToObj(partitionId -> {
        final TaskLock lock = trySegmentLock(task, interval, version, partitionId).getTaskLock();
        Assert.assertNotNull(lock);
        return lock;
      })
      .collect(Collectors.toList());
  Assert.assertEquals(5, locks.size());

  // Every expected lock shares the version of the first acquired lock.
  final String lockVersion = locks.get(0).getVersion();
  final List<TaskLock> expected = IntStream
      .range(0, 5)
      .mapToObj(partitionId -> newSegmentLock(interval, lockVersion, partitionId))
      .collect(Collectors.toList());
  Assert.assertEquals(expected, TaskLocks.findLocksForSegments(task, lockbox, segments));
}
Aggregations