Use of io.druid.query.SegmentDescriptor in project druid by druid-io.
From the class ServerManager, method getQueryRunnerForIntervals:
@Override
public <T> QueryRunner<T> getQueryRunnerForIntervals(Query<T> query, Iterable<Interval> intervals) {
  final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
  if (factory == null) {
    throw new ISE("Unknown query type[%s].", query.getClass());
  }
  final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
  final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = getBuilderFn(toolChest);
  final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);
  DataSource dataSource = query.getDataSource();
  if (!(dataSource instanceof TableDataSource)) {
    throw new UnsupportedOperationException("data source type '" + dataSource.getClass().getName() + "' unsupported");
  }
  String dataSourceName = getDataSourceName(dataSource);
  final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline = dataSources.get(dataSourceName);
  if (timeline == null) {
    return new NoopQueryRunner<T>();
  }
  // Look up the timeline holders overlapping each requested interval, then build one
  // QueryRunner per partition chunk, identified by its SegmentDescriptor.
  FunctionalIterable<QueryRunner<T>> queryRunners = FunctionalIterable
      .create(intervals)
      .transformCat(new Function<Interval, Iterable<TimelineObjectHolder<String, ReferenceCountingSegment>>>() {
        @Override
        public Iterable<TimelineObjectHolder<String, ReferenceCountingSegment>> apply(Interval input) {
          return timeline.lookup(input);
        }
      })
      .transformCat(new Function<TimelineObjectHolder<String, ReferenceCountingSegment>, Iterable<QueryRunner<T>>>() {
        @Override
        public Iterable<QueryRunner<T>> apply(@Nullable final TimelineObjectHolder<String, ReferenceCountingSegment> holder) {
          if (holder == null) {
            return null;
          }
          return FunctionalIterable
              .create(holder.getObject())
              .transform(new Function<PartitionChunk<ReferenceCountingSegment>, QueryRunner<T>>() {
                @Override
                public QueryRunner<T> apply(PartitionChunk<ReferenceCountingSegment> input) {
                  return buildAndDecorateQueryRunner(
                      factory,
                      toolChest,
                      input.getObject(),
                      new SegmentDescriptor(holder.getInterval(), holder.getVersion(), input.getChunkNumber()),
                      builderFn,
                      cpuTimeAccumulator
                  );
                }
              });
        }
      });
  return CPUTimeMetricQueryRunner.safeBuild(
      new FinalizeResultsQueryRunner<T>(toolChest.mergeResults(factory.mergeRunners(exec, queryRunners)), toolChest),
      builderFn,
      emitter,
      cpuTimeAccumulator,
      true
  );
}
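The SegmentDescriptor built in the inner transform is the triple that pins a query to exactly one partition chunk of one segment: the segment's interval, its version, and the chunk's partition number. A minimal standalone sketch of constructing and reading one (the interval, version, and partition values below are hypothetical):

import io.druid.query.SegmentDescriptor;
import org.joda.time.Interval;

public class SegmentDescriptorExample {
  public static void main(String[] args) {
    // Identify partition 0 of segment version "v1" covering one day.
    SegmentDescriptor descriptor = new SegmentDescriptor(
        new Interval("2017-01-01/2017-01-02"), // hypothetical segment interval
        "v1",                                  // hypothetical segment version
        0                                      // partition number
    );
    System.out.println(
        descriptor.getInterval() + " / " + descriptor.getVersion()
        + " / partition " + descriptor.getPartitionNumber()
    );
  }
}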
Use of io.druid.query.SegmentDescriptor in project druid by druid-io.
From the class RealtimeIndexTaskTest, method testRestoreAfterHandoffAttemptDuringShutdown:
@Test(timeout = 60_000L)
public void testRestoreAfterHandoffAttemptDuringShutdown() throws Exception {
  final TaskStorage taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
  final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
  final File directory = tempFolder.newFolder();
  final RealtimeIndexTask task1 = makeRealtimeTask(null);
  final DataSegment publishedSegment;

  // First run:
  {
    final TaskToolbox taskToolbox = makeToolbox(task1, taskStorage, mdc, directory);
    final ListenableFuture<TaskStatus> statusFuture = runTask(task1, taskToolbox);

    // Wait for the firehose to show up; it starts off null.
    while (task1.getFirehose() == null) {
      Thread.sleep(50);
    }
    final TestFirehose firehose = (TestFirehose) task1.getFirehose();
    firehose.addRows(
        ImmutableList.<InputRow>of(
            new MapBasedInputRow(now, ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo"))
        )
    );

    // Stop the firehose; this will trigger a finishJob.
    firehose.close();

    // Wait for publish.
    while (mdc.getPublished().isEmpty()) {
      Thread.sleep(50);
    }
    publishedSegment = Iterables.getOnlyElement(mdc.getPublished());

    // Do a query.
    Assert.assertEquals(1, sumMetric(task1, "rows"));

    // Trigger graceful shutdown.
    task1.stopGracefully();

    // Wait for the task to finish. The status doesn't really matter.
    while (!statusFuture.isDone()) {
      Thread.sleep(50);
    }
  }

  // Second run:
  {
    final RealtimeIndexTask task2 = makeRealtimeTask(task1.getId());
    final TaskToolbox taskToolbox = makeToolbox(task2, taskStorage, mdc, directory);
    final ListenableFuture<TaskStatus> statusFuture = runTask(task2, taskToolbox);

    // Wait for the firehose to show up; it starts off null.
    while (task2.getFirehose() == null) {
      Thread.sleep(50);
    }

    // Stop the firehose again; this will trigger another finishJob and start another handoff.
    final TestFirehose firehose = (TestFirehose) task2.getFirehose();
    firehose.close();

    // publishedSegment is still published. No reason it shouldn't be.
    Assert.assertEquals(ImmutableSet.of(publishedSegment), mdc.getPublished());

    // Wait for a handoffCallback to show up.
    while (handOffCallbacks.isEmpty()) {
      Thread.sleep(50);
    }

    // Simulate handoff.
    for (Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>> entry : handOffCallbacks.entrySet()) {
      final Pair<Executor, Runnable> executorRunnablePair = entry.getValue();
      Assert.assertEquals(
          new SegmentDescriptor(
              publishedSegment.getInterval(),
              publishedSegment.getVersion(),
              publishedSegment.getShardSpec().getPartitionNum()
          ),
          entry.getKey()
      );
      executorRunnablePair.lhs.execute(executorRunnablePair.rhs);
    }
    handOffCallbacks.clear();

    // Wait for the task to finish.
    final TaskStatus taskStatus = statusFuture.get();
    Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
  }
}
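Note how the assertion inside the handoff loop rebuilds the expected descriptor from the published segment's own coordinates. A hypothetical helper capturing that mapping (the class and method names are illustrative, not part of the Druid API):

import io.druid.query.SegmentDescriptor;
import io.druid.timeline.DataSegment;

public final class SegmentDescriptors {
  private SegmentDescriptors() {
  }

  // Mirrors the test's assertion: a segment's handoff descriptor is derived
  // from its interval, its version, and its shard spec's partition number.
  public static SegmentDescriptor fromSegment(DataSegment segment) {
    return new SegmentDescriptor(
        segment.getInterval(),
        segment.getVersion(),
        segment.getShardSpec().getPartitionNum()
    );
  }
}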
Use of io.druid.query.SegmentDescriptor in project druid by druid-io.
From the class RealtimeIndexTaskTest, method testBasics:
@Test(timeout = 60_000L)
public void testBasics() throws Exception {
  final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
  final RealtimeIndexTask task = makeRealtimeTask(null);
  final TaskToolbox taskToolbox = makeToolbox(task, mdc, tempFolder.newFolder());
  final ListenableFuture<TaskStatus> statusFuture = runTask(task, taskToolbox);
  final DataSegment publishedSegment;

  // Wait for firehose to show up, it starts off null.
  while (task.getFirehose() == null) {
    Thread.sleep(50);
  }
  final TestFirehose firehose = (TestFirehose) task.getFirehose();
  firehose.addRows(
      ImmutableList.<InputRow>of(
          new MapBasedInputRow(now, ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo", "met1", "1")),
          new MapBasedInputRow(now.minus(new Period("P1D")), ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo", "met1", 2.0)),
          new MapBasedInputRow(now, ImmutableList.of("dim2"), ImmutableMap.<String, Object>of("dim2", "bar", "met1", 2.0))
      )
  );

  // Stop the firehose, this will drain out existing events.
  firehose.close();

  // Wait for publish.
  while (mdc.getPublished().isEmpty()) {
    Thread.sleep(50);
  }
  publishedSegment = Iterables.getOnlyElement(mdc.getPublished());

  // Check metrics.
  Assert.assertEquals(2, task.getMetrics().processed());
  Assert.assertEquals(1, task.getMetrics().thrownAway());
  Assert.assertEquals(0, task.getMetrics().unparseable());

  // Do some queries.
  Assert.assertEquals(2, sumMetric(task, "rows"));
  Assert.assertEquals(3, sumMetric(task, "met1"));

  // Simulate handoff.
  for (Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>> entry : handOffCallbacks.entrySet()) {
    final Pair<Executor, Runnable> executorRunnablePair = entry.getValue();
    Assert.assertEquals(
        new SegmentDescriptor(
            publishedSegment.getInterval(),
            publishedSegment.getVersion(),
            publishedSegment.getShardSpec().getPartitionNum()
        ),
        entry.getKey()
    );
    executorRunnablePair.lhs.execute(executorRunnablePair.rhs);
  }
  handOffCallbacks.clear();

  // Wait for the task to finish.
  final TaskStatus taskStatus = statusFuture.get();
  Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
}
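Each MapBasedInputRow above bundles an event timestamp, the list of dimension names, and a map of column values; the row stamped a day in the past falls outside the task's window, which is why one row is thrownAway and only two count toward "rows". A minimal standalone sketch (the values are hypothetical):

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import io.druid.data.input.MapBasedInputRow;
import org.joda.time.DateTime;

public class InputRowExample {
  public static void main(String[] args) {
    MapBasedInputRow row = new MapBasedInputRow(
        new DateTime(),           // event timestamp
        ImmutableList.of("dim1"), // dimension names
        ImmutableMap.<String, Object>of("dim1", "foo", "met1", 1.0) // column values
    );
    System.out.println(row.getDimension("dim1") + " @ " + row.getTimestamp());
  }
}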
Use of io.druid.query.SegmentDescriptor in project druid by druid-io.
From the class RealtimeIndexTaskTest, method testNoReportParseExceptions:
@Test(timeout = 60_000L)
public void testNoReportParseExceptions() throws Exception {
  final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
  final RealtimeIndexTask task = makeRealtimeTask(null, false);
  final TaskToolbox taskToolbox = makeToolbox(task, mdc, tempFolder.newFolder());
  final ListenableFuture<TaskStatus> statusFuture = runTask(task, taskToolbox);
  final DataSegment publishedSegment;

  // Wait for firehose to show up, it starts off null.
  while (task.getFirehose() == null) {
    Thread.sleep(50);
  }
  final TestFirehose firehose = (TestFirehose) task.getFirehose();
  firehose.addRows(
      Arrays.<InputRow>asList(
          // Good row- will be processed.
          new MapBasedInputRow(now, ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo", "met1", "1")),
          // Null row- will be unparseable.
          null,
          // Bad metric- will count as processed, but that particular metric won't update.
          new MapBasedInputRow(now, ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo", "met1", "foo")),
          // Bad row- will be unparseable.
          new MapBasedInputRow(now, ImmutableList.of("dim1", FAIL_DIM), ImmutableMap.<String, Object>of("dim1", "foo", "met1", 2.0)),
          // Old row- will be thrownAway.
          new MapBasedInputRow(now.minus(new Period("P1D")), ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo", "met1", 2.0)),
          // Good row- will be processed.
          new MapBasedInputRow(now, ImmutableList.of("dim2"), ImmutableMap.<String, Object>of("dim2", "bar", "met1", 2.0))
      )
  );

  // Stop the firehose, this will drain out existing events.
  firehose.close();

  // Wait for publish.
  while (mdc.getPublished().isEmpty()) {
    Thread.sleep(50);
  }
  publishedSegment = Iterables.getOnlyElement(mdc.getPublished());

  // Check metrics.
  Assert.assertEquals(3, task.getMetrics().processed());
  Assert.assertEquals(1, task.getMetrics().thrownAway());
  Assert.assertEquals(2, task.getMetrics().unparseable());

  // Do some queries.
  Assert.assertEquals(3, sumMetric(task, "rows"));
  Assert.assertEquals(3, sumMetric(task, "met1"));

  // Simulate handoff.
  for (Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>> entry : handOffCallbacks.entrySet()) {
    final Pair<Executor, Runnable> executorRunnablePair = entry.getValue();
    Assert.assertEquals(
        new SegmentDescriptor(
            publishedSegment.getInterval(),
            publishedSegment.getVersion(),
            publishedSegment.getShardSpec().getPartitionNum()
        ),
        entry.getKey()
    );
    executorRunnablePair.lhs.execute(executorRunnablePair.rhs);
  }
  handOffCallbacks.clear();

  // Wait for the task to finish.
  final TaskStatus taskStatus = statusFuture.get();
  Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
}
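Keying handOffCallbacks by SegmentDescriptor works because the descriptor compares by value; the assertEquals against a freshly constructed descriptor in each test relies on the same property. A minimal sketch of the lookup pattern (interval, version, and partition values are hypothetical):

import io.druid.query.SegmentDescriptor;
import org.joda.time.Interval;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class HandoffLookupExample {
  public static void main(String[] args) {
    final Map<SegmentDescriptor, Runnable> callbacks = new ConcurrentHashMap<>();
    final Interval interval = new Interval("2017-01-01/2017-01-02"); // hypothetical

    // Register a callback under one descriptor...
    callbacks.put(
        new SegmentDescriptor(interval, "v1", 0),
        new Runnable() {
          @Override
          public void run() {
            System.out.println("handed off");
          }
        }
    );

    // ...and find it again with a separately constructed, equal descriptor.
    final Runnable callback = callbacks.get(new SegmentDescriptor(interval, "v1", 0));
    if (callback != null) {
      callback.run();
    }
  }
}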
Use of io.druid.query.SegmentDescriptor in project druid by druid-io.
From the class RealtimeIndexTaskTest, method makeToolbox:
private TaskToolbox makeToolbox(final Task task, final TaskStorage taskStorage, final IndexerMetadataStorageCoordinator mdc, final File directory) {
  final TaskConfig taskConfig = new TaskConfig(directory.getPath(), null, null, 50000, null, false, null, null);
  final TaskLockbox taskLockbox = new TaskLockbox(taskStorage);
  try {
    taskStorage.insert(task, TaskStatus.running(task.getId()));
  } catch (EntryExistsException e) {
    // suppress
  }
  taskLockbox.syncFromStorage();
  final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(taskLockbox, mdc, emitter, EasyMock.createMock(SupervisorManager.class));
  final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(taskStorage, taskActionToolbox);
  final QueryRunnerFactoryConglomerate conglomerate = new DefaultQueryRunnerFactoryConglomerate(
      ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>of(
          TimeseriesQuery.class,
          new TimeseriesQueryRunnerFactory(
              new TimeseriesQueryQueryToolChest(
                  new IntervalChunkingQueryRunnerDecorator(null, null, null) {
                    @Override
                    public <T> QueryRunner<T> decorate(QueryRunner<T> delegate, QueryToolChest<T, ? extends Query<T>> toolChest) {
                      return delegate;
                    }
                  }
              ),
              new TimeseriesQueryEngine(),
              new QueryWatcher() {
                @Override
                public void registerQuery(Query query, ListenableFuture future) {
                  // do nothing
                }
              }
          )
      )
  );
  handOffCallbacks = Maps.newConcurrentMap();
  final SegmentHandoffNotifierFactory handoffNotifierFactory = new SegmentHandoffNotifierFactory() {
    @Override
    public SegmentHandoffNotifier createSegmentHandoffNotifier(String dataSource) {
      return new SegmentHandoffNotifier() {
        @Override
        public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable) {
          handOffCallbacks.put(descriptor, new Pair<>(exec, handOffRunnable));
          return true;
        }

        @Override
        public void start() {
          // Noop
        }

        @Override
        public void close() {
          // Noop
        }

        Map<SegmentDescriptor, Pair<Executor, Runnable>> getHandOffCallbacks() {
          return handOffCallbacks;
        }
      };
    }
  };
  final TestUtils testUtils = new TestUtils();
  final TaskToolboxFactory toolboxFactory = new TaskToolboxFactory(
      taskConfig,
      taskActionClientFactory,
      emitter,
      new TestDataSegmentPusher(),
      new TestDataSegmentKiller(),
      null, // DataSegmentMover
      null, // DataSegmentArchiver
      new TestDataSegmentAnnouncer(),
      handoffNotifierFactory,
      conglomerate,
      MoreExecutors.sameThreadExecutor(), // queryExecutorService
      EasyMock.createMock(MonitorScheduler.class),
      new SegmentLoaderFactory(
          new SegmentLoaderLocalCacheManager(
              null,
              new SegmentLoaderConfig() {
                @Override
                public List<StorageLocationConfig> getLocations() {
                  return Lists.newArrayList();
                }
              },
              testUtils.getTestObjectMapper()
          )
      ),
      testUtils.getTestObjectMapper(),
      testUtils.getTestIndexMerger(),
      testUtils.getTestIndexIO(),
      MapCache.create(1024),
      new CacheConfig(),
      testUtils.getTestIndexMergerV9()
  );
  return toolboxFactory.build(task);
}
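The anonymous SegmentHandoffNotifier above only records callbacks so the tests can fire them by hand. A simpler variant for contrast, sketched against the same three-method interface (the import path is assumed from this era of the codebase), would treat every segment as already handed off by running the callback as soon as it is registered:

import io.druid.query.SegmentDescriptor;
import io.druid.segment.realtime.plumber.SegmentHandoffNotifier;
import java.util.concurrent.Executor;

public class ImmediateHandoffNotifier implements SegmentHandoffNotifier {
  @Override
  public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable) {
    // Fire the handoff callback immediately instead of waiting on the coordinator.
    exec.execute(handOffRunnable);
    return true;
  }

  @Override
  public void start() {
    // nothing to start
  }

  @Override
  public void close() {
    // nothing to close
  }
}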