Use of io.druid.indexing.common.TaskToolbox in project druid by druid-io.
The class IndexTaskTest, method runTask: a test helper that runs an IndexTask against a stubbed TaskToolbox and collects the segments the task pushes.
private final List<DataSegment> runTask(final IndexTask indexTask) throws Exception
{
  final List<DataSegment> segments = Lists.newArrayList();
  indexTask.run(
      new TaskToolbox(
          null,
          null,
          new TaskActionClient()
          {
            @Override
            public <RetType> RetType submit(TaskAction<RetType> taskAction) throws IOException
            {
              if (taskAction instanceof LockListAction) {
                // Hand back a single dummy lock so the task believes it holds one.
                return (RetType) Arrays.asList(new TaskLock("", "", null, new DateTime().toString()));
              }
              if (taskAction instanceof LockAcquireAction) {
                return (RetType) new TaskLock(
                    "groupId",
                    "test",
                    ((LockAcquireAction) taskAction).getInterval(),
                    new DateTime().toString()
                );
              }
              if (taskAction instanceof SegmentTransactionalInsertAction) {
                // Pretend every transactional publish succeeds.
                return (RetType) new SegmentPublishResult(
                    ((SegmentTransactionalInsertAction) taskAction).getSegments(),
                    true
                );
              }
              if (taskAction instanceof SegmentAllocateAction) {
                SegmentAllocateAction action = (SegmentAllocateAction) taskAction;
                Interval interval = action.getPreferredSegmentGranularity().bucket(action.getTimestamp());
                ShardSpec shardSpec = new NumberedShardSpec(segmentAllocatePartitionCounter++, 0);
                return (RetType) new SegmentIdentifier(action.getDataSource(), interval, "latestVersion", shardSpec);
              }
              return null;
            }
          },
          null,
          new DataSegmentPusher()
          {
            @Deprecated
            @Override
            public String getPathForHadoop(String dataSource)
            {
              return getPathForHadoop();
            }

            @Override
            public String getPathForHadoop()
            {
              return null;
            }

            @Override
            public DataSegment push(File file, DataSegment segment) throws IOException
            {
              // Capture pushed segments instead of writing to deep storage.
              segments.add(segment);
              return segment;
            }
          },
          null, null, null, null, null, null, null, null, null,
          jsonMapper,
          temporaryFolder.newFolder(),
          indexMerger,
          indexIO,
          null,
          null,
          indexMergerV9
      )
  );
  Collections.sort(segments);
  return segments;
}
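The anonymous TaskActionClient above answers only the four actions IndexTask needs. A minimal sketch of the same stubbing pattern, assuming just the submit signature shown above (the recordingActionClient name and its log list are hypothetical, not part of the Druid test):

  // Hypothetical helper: a TaskActionClient that records every submitted action.
  // Useful when a test only needs to verify which actions a task issued.
  private static TaskActionClient recordingActionClient(final List<TaskAction<?>> log)
  {
    return new TaskActionClient()
    {
      @Override
      public <RetType> RetType submit(TaskAction<RetType> taskAction) throws IOException
      {
        log.add(taskAction);
        // Actions that require a real response (locks, allocations) still need
        // explicit stubs like the ones in runTask above.
        return null;
      }
    };
  }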
Use of io.druid.indexing.common.TaskToolbox in project druid by druid-io.
The class TaskLifecycleTest, method testBadInterval: verifies that a task fails when it tries to publish a segment whose interval extends beyond its lock.
@Test
public void testBadInterval() throws Exception
{
  final Task task = new AbstractFixedIntervalTask("id1", "id1", "ds", new Interval("2012-01-01/P1D"), null)
  {
    @Override
    public String getType()
    {
      return "test";
    }

    @Override
    public TaskStatus run(TaskToolbox toolbox) throws Exception
    {
      final TaskLock myLock = Iterables.getOnlyElement(
          toolbox.getTaskActionClient().submit(new LockListAction())
      );
      // Deliberately build a segment whose interval (P2D) exceeds the lock's interval (P1D).
      final DataSegment segment = DataSegment.builder()
                                             .dataSource("ds")
                                             .interval(new Interval("2012-01-01/P2D"))
                                             .version(myLock.getVersion())
                                             .build();
      toolbox.getTaskActionClient().submit(new SegmentInsertAction(ImmutableSet.of(segment)));
      return TaskStatus.success(getId());
    }
  };
  final TaskStatus status = runTask(task);
  Assert.assertEquals("statusCode", TaskStatus.Status.FAILED, status.getStatusCode());
  Assert.assertEquals("segments published", 0, mdc.getPublished().size());
  Assert.assertEquals("segments nuked", 0, mdc.getNuked().size());
}
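The failure comes down to interval containment: the SegmentInsertAction is rejected because the segment's interval is not covered by the task's lock. A quick Joda-Time illustration of the mismatch (a sketch, not part of the test):

  // Sketch: the locked interval does not contain the segment's interval,
  // so the insert is refused and the task ends up FAILED.
  Interval lockInterval = new Interval("2012-01-01/P1D");
  Interval segmentInterval = new Interval("2012-01-01/P2D");
  Assert.assertFalse(lockInterval.contains(segmentInterval));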
Use of io.druid.indexing.common.TaskToolbox in project druid by druid-io.
The class TaskLifecycleTest, method testBadVersion: verifies that a task fails when it tries to publish a segment whose version does not match the version granted by its lock.
@Test
public void testBadVersion() throws Exception
{
  final Task task = new AbstractFixedIntervalTask("id1", "id1", "ds", new Interval("2012-01-01/P1D"), null)
  {
    @Override
    public String getType()
    {
      return "test";
    }

    @Override
    public TaskStatus run(TaskToolbox toolbox) throws Exception
    {
      final TaskLock myLock = Iterables.getOnlyElement(
          toolbox.getTaskActionClient().submit(new LockListAction())
      );
      // Deliberately mangle the version so it no longer matches the lock's version.
      final DataSegment segment = DataSegment.builder()
                                             .dataSource("ds")
                                             .interval(new Interval("2012-01-01/P1D"))
                                             .version(myLock.getVersion() + "1!!!1!!")
                                             .build();
      toolbox.getTaskActionClient().submit(new SegmentInsertAction(ImmutableSet.of(segment)));
      return TaskStatus.success(getId());
    }
  };
  final TaskStatus status = runTask(task);
  Assert.assertEquals("statusCode", TaskStatus.Status.FAILED, status.getStatusCode());
  Assert.assertEquals("segments published", 0, mdc.getPublished().size());
  Assert.assertEquals("segments nuked", 0, mdc.getNuked().size());
}
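Here the segment's interval is fine, but its version string is mangled relative to the one the lock granted, so the insert is again rejected. The check amounts to a version-string comparison (sketch, with an illustrative lock version):

  // Sketch: the attempted version is not the one the lock granted.
  String lockVersion = "2012-01-01T00:00:00.000Z"; // illustrative value only
  String attemptedVersion = lockVersion + "1!!!1!!"; // as built in the test
  Assert.assertNotEquals(lockVersion, attemptedVersion);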
Use of io.druid.indexing.common.TaskToolbox in project druid by druid-io.
The class RealtimeIndexTaskTest, method testReportParseExceptionsOnBadMetric: verifies that, with parse-exception reporting enabled, an unparseable metric value fails the task.
@Test(timeout = 60_000L)
public void testReportParseExceptionsOnBadMetric() throws Exception
{
  final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
  final RealtimeIndexTask task = makeRealtimeTask(null, true);
  final TaskToolbox taskToolbox = makeToolbox(task, mdc, tempFolder.newFolder());
  final ListenableFuture<TaskStatus> statusFuture = runTask(task, taskToolbox);

  // Wait for the firehose to show up; it starts off null.
  while (task.getFirehose() == null) {
    Thread.sleep(50);
  }
  final TestFirehose firehose = (TestFirehose) task.getFirehose();
  // The second and third rows carry a non-numeric value for "met1".
  firehose.addRows(
      ImmutableList.<InputRow>of(
          new MapBasedInputRow(now, ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo", "met1", "1")),
          new MapBasedInputRow(now, ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo", "met1", "foo")),
          new MapBasedInputRow(now.minus(new Period("P1D")), ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo", "met1", "foo")),
          new MapBasedInputRow(now, ImmutableList.of("dim2"), ImmutableMap.<String, Object>of("dim2", "bar", "met1", 2.0))
      )
  );

  // Stop the firehose; this will drain out existing events.
  firehose.close();

  // Wait for the task to finish: it should fail with a ParseException on "met1".
  expectedException.expect(ExecutionException.class);
  expectedException.expectCause(CoreMatchers.<Throwable>instanceOf(ParseException.class));
  expectedException.expectCause(ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString("Encountered parse error for aggregator[met1]")));
  expectedException.expect(
      ThrowableCauseMatcher.hasCause(
          ThrowableCauseMatcher.hasCause(
              CoreMatchers.allOf(
                  CoreMatchers.<Throwable>instanceOf(ParseException.class),
                  ThrowableMessageMatcher.hasMessage(CoreMatchers.containsString("Unable to parse metrics[met1], value[foo]"))
              )
          )
      )
  );
  statusFuture.get();
}
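The second argument to makeRealtimeTask(null, true) appears to enable parse-exception reporting (inferred from the test name, not confirmed by the snippet). Note that the failure only surfaces when the future is resolved: statusFuture.get() rethrows the task's ParseException wrapped in an ExecutionException, which is what the ExpectedException matchers unwrap. An equivalent manual check, as a sketch using Guava's Throwables:

  // Sketch: resolve the future and inspect the cause chain by hand.
  try {
    statusFuture.get();
    Assert.fail("expected the task to fail with a parse error");
  } catch (ExecutionException e) {
    Assert.assertTrue(Throwables.getRootCause(e) instanceof ParseException);
  }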
Use of io.druid.indexing.common.TaskToolbox in project druid by druid-io.
The class RealtimeIndexTaskTest, method testRestoreAfterHandoffAttemptDuringShutdown: verifies that a realtime task stopped gracefully during handoff can be restored under the same task ID and complete the handoff.
@Test(timeout = 60_000L)
public void testRestoreAfterHandoffAttemptDuringShutdown() throws Exception
{
  final TaskStorage taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
  final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
  final File directory = tempFolder.newFolder();
  final RealtimeIndexTask task1 = makeRealtimeTask(null);
  final DataSegment publishedSegment;

  // First run:
  {
    final TaskToolbox taskToolbox = makeToolbox(task1, taskStorage, mdc, directory);
    final ListenableFuture<TaskStatus> statusFuture = runTask(task1, taskToolbox);

    // Wait for the firehose to show up; it starts off null.
    while (task1.getFirehose() == null) {
      Thread.sleep(50);
    }
    final TestFirehose firehose = (TestFirehose) task1.getFirehose();
    firehose.addRows(
        ImmutableList.<InputRow>of(
            new MapBasedInputRow(now, ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo"))
        )
    );

    // Stop the firehose; this will trigger a finishJob.
    firehose.close();

    // Wait for publish.
    while (mdc.getPublished().isEmpty()) {
      Thread.sleep(50);
    }
    publishedSegment = Iterables.getOnlyElement(mdc.getPublished());

    // Do a query.
    Assert.assertEquals(1, sumMetric(task1, "rows"));

    // Trigger graceful shutdown.
    task1.stopGracefully();

    // Wait for the task to finish. The status doesn't really matter.
    while (!statusFuture.isDone()) {
      Thread.sleep(50);
    }
  }

  // Second run:
  {
    // Restore under the same task ID, storage, and working directory.
    final RealtimeIndexTask task2 = makeRealtimeTask(task1.getId());
    final TaskToolbox taskToolbox = makeToolbox(task2, taskStorage, mdc, directory);
    final ListenableFuture<TaskStatus> statusFuture = runTask(task2, taskToolbox);

    // Wait for the firehose to show up; it starts off null.
    while (task2.getFirehose() == null) {
      Thread.sleep(50);
    }

    // Stop the firehose again; this will trigger another finishJob and start another handoff.
    final TestFirehose firehose = (TestFirehose) task2.getFirehose();
    firehose.close();

    // publishedSegment is still published. No reason it shouldn't be.
    Assert.assertEquals(ImmutableSet.of(publishedSegment), mdc.getPublished());

    // Wait for a handoffCallback to show up.
    while (handOffCallbacks.isEmpty()) {
      Thread.sleep(50);
    }

    // Simulate handoff.
    for (Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>> entry : handOffCallbacks.entrySet()) {
      final Pair<Executor, Runnable> executorRunnablePair = entry.getValue();
      Assert.assertEquals(
          new SegmentDescriptor(
              publishedSegment.getInterval(),
              publishedSegment.getVersion(),
              publishedSegment.getShardSpec().getPartitionNum()
          ),
          entry.getKey()
      );
      executorRunnablePair.lhs.execute(executorRunnablePair.rhs);
    }
    handOffCallbacks.clear();

    // Wait for the task to finish.
    final TaskStatus taskStatus = statusFuture.get();
    Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
  }
}
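The test leans on the same sleep-and-poll pattern in several places. If it were being refactored, the busy-wait loops could be folded into a small helper; a sketch, assuming Java 8 is available (the waitFor name is made up here, not part of the Druid tests):

  import java.util.function.BooleanSupplier;

  // Hypothetical helper: poll a condition every 50 ms until it holds.
  private static void waitFor(BooleanSupplier condition) throws InterruptedException
  {
    while (!condition.getAsBoolean()) {
      Thread.sleep(50);
    }
  }

  // Usage, e.g.: waitFor(() -> task1.getFirehose() != null);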