Use of io.druid.indexing.common.TaskToolbox in project druid by druid-io.
Class KafkaIndexTaskTest, method runTask: this helper records the task as running in task storage, builds a TaskToolbox for it, and submits it to an executor, returning a future for the task's terminal status.
private ListenableFuture<TaskStatus> runTask(final Task task) {
  try {
    taskStorage.insert(task, TaskStatus.running(task.getId()));
  } catch (EntryExistsException e) {
    // suppress
  }
  taskLockbox.syncFromStorage();
  final TaskToolbox toolbox = toolboxFactory.build(task);
  synchronized (runningTasks) {
    runningTasks.add(task);
  }
  return taskExec.submit(new Callable<TaskStatus>() {
    @Override
    public TaskStatus call() throws Exception {
      try {
        if (task.isReady(toolbox.getTaskActionClient())) {
          return task.run(toolbox);
        } else {
          throw new ISE("Task is not ready");
        }
      } catch (Exception e) {
        log.warn(e, "Task failed");
        return TaskStatus.failure(task.getId());
      }
    }
  });
}
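Callers block on the returned ListenableFuture for the task's terminal status. A minimal sketch of that pattern (createKafkaIndexTask() is a hypothetical stand-in for the test's real task construction, not part of the original):

// Illustrative usage, not from the original test.
final Task task = createKafkaIndexTask(); // hypothetical factory
final ListenableFuture<TaskStatus> statusFuture = runTask(task);
// ... feed records through Kafka, wait for handoff, etc. ...
final TaskStatus status = statusFuture.get(); // blocks until the task finishes
Assert.assertEquals(TaskStatus.Status.SUCCESS, status.getStatusCode());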
Use of io.druid.indexing.common.TaskToolbox in project druid by druid-io.
Class RealtimeIndexTaskTest, method testStopBeforeStarting: the test calls stopGracefully() before the task has started and verifies that running it afterwards still completes with SUCCESS.
@Test(timeout = 60_000L)
public void testStopBeforeStarting() throws Exception {
  final File directory = tempFolder.newFolder();
  final RealtimeIndexTask task1 = makeRealtimeTask(null);
  task1.stopGracefully();
  final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
  final TaskToolbox taskToolbox = makeToolbox(task1, mdc, directory);
  final ListenableFuture<TaskStatus> statusFuture = runTask(task1, taskToolbox);
  // Wait for the task to finish.
  final TaskStatus taskStatus = statusFuture.get();
  Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
}
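The blocking statusFuture.get() above relies on the @Test(timeout = 60_000L) annotation as its only guard. An equivalent in-line bound, sketched here as an alternative (requires java.util.concurrent.TimeUnit):

// Alternative to the JUnit-level timeout: bound the wait on the future itself.
final TaskStatus boundedStatus = statusFuture.get(60, TimeUnit.SECONDS);
Assert.assertEquals(TaskStatus.Status.SUCCESS, boundedStatus.getStatusCode());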
Use of io.druid.indexing.common.TaskToolbox in project druid by druid-io.
Class RealtimeIndexTaskTest, method testRestore: the test runs a realtime task, stops it gracefully before anything is published, then restarts a task with the same ID against the same directory and verifies that the buffered row is restored, queryable, and eventually published.
@Test(timeout = 60_000L)
public void testRestore() throws Exception {
  final File directory = tempFolder.newFolder();
  final RealtimeIndexTask task1 = makeRealtimeTask(null);
  final DataSegment publishedSegment;

  // First run:
  {
    final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
    final TaskToolbox taskToolbox = makeToolbox(task1, mdc, directory);
    final ListenableFuture<TaskStatus> statusFuture = runTask(task1, taskToolbox);

    // Wait for the firehose to show up; it starts off null.
    while (task1.getFirehose() == null) {
      Thread.sleep(50);
    }

    final TestFirehose firehose = (TestFirehose) task1.getFirehose();
    firehose.addRows(
        ImmutableList.<InputRow>of(
            new MapBasedInputRow(now, ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo"))
        )
    );

    // Trigger graceful shutdown.
    task1.stopGracefully();

    // Wait for the task to finish. The status doesn't really matter, but we'll check it anyway.
    final TaskStatus taskStatus = statusFuture.get();
    Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());

    // Nothing should be published.
    Assert.assertEquals(Sets.newHashSet(), mdc.getPublished());
  }

  // Second run:
  {
    final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
    final RealtimeIndexTask task2 = makeRealtimeTask(task1.getId());
    final TaskToolbox taskToolbox = makeToolbox(task2, mdc, directory);
    final ListenableFuture<TaskStatus> statusFuture = runTask(task2, taskToolbox);

    // Wait for the firehose to show up; it starts off null.
    while (task2.getFirehose() == null) {
      Thread.sleep(50);
    }

    // Do a query; at this point the previous data should be loaded.
    Assert.assertEquals(1, sumMetric(task2, "rows"));

    final TestFirehose firehose = (TestFirehose) task2.getFirehose();
    firehose.addRows(
        ImmutableList.<InputRow>of(
            new MapBasedInputRow(now, ImmutableList.of("dim2"), ImmutableMap.<String, Object>of("dim2", "bar"))
        )
    );

    // Stop the firehose; this will drain out existing events.
    firehose.close();

    // Wait for publish.
    while (mdc.getPublished().isEmpty()) {
      Thread.sleep(50);
    }
    publishedSegment = Iterables.getOnlyElement(mdc.getPublished());

    // Do a query.
    Assert.assertEquals(2, sumMetric(task2, "rows"));

    // Simulate handoff.
    for (Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>> entry : handOffCallbacks.entrySet()) {
      final Pair<Executor, Runnable> executorRunnablePair = entry.getValue();
      Assert.assertEquals(
          new SegmentDescriptor(
              publishedSegment.getInterval(),
              publishedSegment.getVersion(),
              publishedSegment.getShardSpec().getPartitionNum()
          ),
          entry.getKey()
      );
      executorRunnablePair.lhs.execute(executorRunnablePair.rhs);
    }
    handOffCallbacks.clear();

    // Wait for the task to finish.
    final TaskStatus taskStatus = statusFuture.get();
    Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
  }
}
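Both runs above poll with bare Thread.sleep(50) loops. A small helper, sketched here as an illustration (awaitCondition is hypothetical, not part of the original test, and assumes Java 8's java.util.function.BooleanSupplier), expresses the same wait with an explicit timeout:

// Hypothetical helper (not in the original test): poll a condition every 50 ms
// until it holds or the timeout elapses.
private static void awaitCondition(BooleanSupplier condition, long timeoutMillis) throws InterruptedException {
  final long deadline = System.currentTimeMillis() + timeoutMillis;
  while (!condition.getAsBoolean()) {
    if (System.currentTimeMillis() > deadline) {
      throw new IllegalStateException("timed out waiting for condition");
    }
    Thread.sleep(50);
  }
}
// Usage, replacing the raw loops above:
//   awaitCondition(() -> task2.getFirehose() != null, 60_000L);
//   awaitCondition(() -> !mdc.getPublished().isEmpty(), 60_000L);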
Use of io.druid.indexing.common.TaskToolbox in project druid by druid-io.
Class SameIntervalMergeTaskTest, method runTask: the helper drives a SameIntervalMergeTask against stubbed TaskActionClient, DataSegmentPusher, and SegmentLoader implementations, collecting the segments the task pushes.
private List<DataSegment> runTask(final SameIntervalMergeTask mergeTask, final String version) throws Exception {
  boolean isReady = mergeTask.isReady(new TaskActionClient() {
    @Override
    public <RetType> RetType submit(TaskAction<RetType> taskAction) throws IOException {
      if (taskAction instanceof LockTryAcquireAction) {
        // the lock of this interval is required
        Assert.assertEquals(mergeTask.getInterval(), ((LockTryAcquireAction) taskAction).getInterval());
        isRedayCountDown.countDown();
        taskLock = new TaskLock(mergeTask.getGroupId(), mergeTask.getDataSource(), mergeTask.getInterval(), version);
        return (RetType) taskLock;
      }
      return null;
    }
  });
  // ensure LockTryAcquireAction is submitted
  Assert.assertTrue(isReady);

  final List<DataSegment> segments = Lists.newArrayList();
  mergeTask.run(new TaskToolbox(null, null, new TaskActionClient() {
    @Override
    public <RetType> RetType submit(TaskAction<RetType> taskAction) throws IOException {
      if (taskAction instanceof LockListAction) {
        Assert.assertNotNull("taskLock should be acquired before list", taskLock);
        return (RetType) Arrays.asList(taskLock);
      }
      if (taskAction instanceof SegmentListUsedAction) {
        List<DataSegment> segments = ImmutableList.of(
            DataSegment.builder()
                       .dataSource(mergeTask.getDataSource())
                       .interval(new Interval("2010-01-01/PT1H"))
                       .version("oldVersion")
                       .shardSpec(new LinearShardSpec(0))
                       .build(),
            DataSegment.builder()
                       .dataSource(mergeTask.getDataSource())
                       .interval(new Interval("2010-01-01/PT1H"))
                       .version("oldVersion")
                       .shardSpec(new LinearShardSpec(0))
                       .build(),
            DataSegment.builder()
                       .dataSource(mergeTask.getDataSource())
                       .interval(new Interval("2010-01-01/PT2H"))
                       .version("oldVersion")
                       .shardSpec(new LinearShardSpec(0))
                       .build()
        );
        return (RetType) segments;
      }
      if (taskAction instanceof SegmentInsertAction) {
        publishCountDown.countDown();
        return null;
      }
      return null;
    }
  }, new NoopServiceEmitter(), new DataSegmentPusher() {
    @Deprecated
    @Override
    public String getPathForHadoop(String dataSource) {
      return getPathForHadoop();
    }

    @Override
    public String getPathForHadoop() {
      return null;
    }

    @Override
    public DataSegment push(File file, DataSegment segment) throws IOException {
      // the merged segment is pushed to storage
      segments.add(segment);
      return segment;
    }
  }, null, null, null, null, null, null, null, null, new SegmentLoader() {
    @Override
    public boolean isSegmentLoaded(DataSegment segment) throws SegmentLoadingException {
      return false;
    }

    @Override
    public Segment getSegment(DataSegment segment) throws SegmentLoadingException {
      return null;
    }

    @Override
    public File getSegmentFiles(DataSegment segment) throws SegmentLoadingException {
      // dummy file to represent the downloaded segment's dir
      return new File("" + segment.getShardSpec().getPartitionNum());
    }

    @Override
    public void cleanup(DataSegment segment) throws SegmentLoadingException {
    }
  }, jsonMapper, temporaryFolder.newFolder(), EasyMock.createMock(IndexMerger.class), indexIO, null, null,
     EasyMock.createMock(IndexMergerV9.class)));

  return segments;
}
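A test invoking this helper would then assert on the returned list. A minimal sketch, assuming the merge pushes exactly one segment stamped with the supplied lock version (both assumptions, not shown in the original):

// Illustrative follow-up (assumed, not from the original test).
final List<DataSegment> pushed = runTask(mergeTask, "newVersion");
Assert.assertEquals(1, pushed.size());
Assert.assertEquals("newVersion", pushed.get(0).getVersion());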
Use of io.druid.indexing.common.TaskToolbox in project druid by druid-io.
Class HadoopTaskTest, method testBuildClassLoader: the test builds an isolated classloader from a mocked TaskToolbox whose TaskConfig supplies the Hadoop coordinates, then checks that both Hadoop and Druid indexer classes resolve through it.
@Test
public void testBuildClassLoader() throws Exception {
  final HadoopTask task = new HadoopTask("taskId", "dataSource", ImmutableList.<String>of(), ImmutableMap.<String, Object>of()) {
    @Override
    public String getType() {
      return null;
    }

    @Override
    public boolean isReady(TaskActionClient taskActionClient) throws Exception {
      return false;
    }

    @Override
    public TaskStatus run(TaskToolbox toolbox) throws Exception {
      return null;
    }
  };

  final TaskToolbox toolbox = EasyMock.createStrictMock(TaskToolbox.class);
  EasyMock.expect(toolbox.getConfig()).andReturn(
      new TaskConfig(temporaryFolder.newFolder().toString(), null, null, null, ImmutableList.of("something:hadoop:1"), false, null, null)
  ).once();
  EasyMock.replay(toolbox);

  final ClassLoader classLoader = task.buildClassLoader(toolbox);
  assertClassLoaderIsSingular(classLoader);

  final Class<?> hadoopClazz = Class.forName("org.apache.hadoop.fs.FSDataInputStream", false, classLoader);
  assertClassLoaderIsSingular(hadoopClazz.getClassLoader());

  final Class<?> druidHadoopConfigClazz = Class.forName("io.druid.indexer.HadoopDruidIndexerConfig", false, classLoader);
  assertClassLoaderIsSingular(druidHadoopConfigClazz.getClassLoader());
}
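The strict mock above already fails on unexpected calls; if the test also wanted to assert that the recorded getConfig() expectation was actually met, the standard EasyMock record-replay flow ends with a verify step:

// Optional follow-up: fail if the recorded getConfig() expectation went unmet.
EasyMock.verify(toolbox);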