Use of org.apache.druid.query.timeseries.TimeseriesQueryEngine in project druid by druid-io, from the class SpatialFilterTest, method testSpatialQueryWithOtherSpatialDim.
@Test
public void testSpatialQueryWithOtherSpatialDim() {
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource("test")
      .granularity(Granularities.ALL)
      .intervals(Collections.singletonList(Intervals.of("2013-01-01/2013-01-07")))
      .filters(new SpatialDimFilter("spatialIsRad", new RadiusBound(new float[]{0.0f, 0.0f}, 5)))
      .aggregators(Arrays.asList(
          new CountAggregatorFactory("rows"),
          new LongSumAggregatorFactory("val", "val")
      ))
      .build();
  List<Result<TimeseriesResultValue>> expectedResults = Collections.singletonList(
      new Result<TimeseriesResultValue>(
          DateTimes.of("2013-01-01T00:00:00.000Z"),
          new TimeseriesResultValue(
              ImmutableMap.<String, Object>builder()
                  .put("rows", 1L)
                  .put("val", 13L)
                  .build()
          )
      )
  );
  try {
    TimeseriesQueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(
        new TimeseriesQueryQueryToolChest(),
        new TimeseriesQueryEngine(),
        QueryRunnerTestHelper.NOOP_QUERYWATCHER
    );
    QueryRunner runner = new FinalizeResultsQueryRunner(factory.createRunner(segment), factory.getToolchest());
    TestHelper.assertExpectedResults(expectedResults, runner.run(QueryPlus.wrap(query)));
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
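The wiring in the try block generalizes beyond this test. A minimal sketch, assuming segment is any open org.apache.druid.segment.Segment and query is a fully built TimeseriesQuery; the helper name runTimeseries is hypothetical and mirrors the raw-typed style of the test above:

// Hypothetical helper (not part of the Druid test suite): run a timeseries
// query against a single open Segment and materialize the results.
private static List<Result<TimeseriesResultValue>> runTimeseries(TimeseriesQuery query, Segment segment) {
  TimeseriesQueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(
      new TimeseriesQueryQueryToolChest(),
      new TimeseriesQueryEngine(),
      QueryRunnerTestHelper.NOOP_QUERYWATCHER
  );
  // FinalizeResultsQueryRunner applies the tool chest's finalization step to
  // each result before it reaches the caller.
  QueryRunner runner = new FinalizeResultsQueryRunner(factory.createRunner(segment), factory.getToolchest());
  return runner.run(QueryPlus.wrap(query)).toList();
}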
Use of org.apache.druid.query.timeseries.TimeseriesQueryEngine in project druid by druid-io, from the class StringFirstTimeseriesQueryTest, method testTimeseriesQuery.
@Test
public void testTimeseriesQuery() {
  TimeseriesQueryEngine engine = new TimeseriesQueryEngine();
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.ALL_GRAN)
      .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .aggregators(ImmutableList.of(
          new StringFirstAggregatorFactory("nonfolding", CLIENT_TYPE, null, 1024),
          new StringFirstAggregatorFactory("folding", FIRST_CLIENT_TYPE, null, 1024),
          new StringFirstAggregatorFactory("nonexistent", "nonexistent", null, 1024),
          new StringFirstAggregatorFactory("numeric", "cnt", null, 1024)
      ))
      .build();
  List<Result<TimeseriesResultValue>> expectedResults = Collections.singletonList(
      new Result<>(
          TIME1,
          new TimeseriesResultValue(
              ImmutableMap.<String, Object>builder()
                  .put("nonfolding", new SerializablePairLongString(TIME1.getMillis(), "iphone"))
                  .put("folding", new SerializablePairLongString(TIME1.getMillis(), "iphone"))
                  .put("nonexistent", new SerializablePairLongString(DateTimes.MAX.getMillis(), null))
                  .put("numeric", new SerializablePairLongString(DateTimes.MAX.getMillis(), null))
                  .build()
          )
      )
  );
  final Iterable<Result<TimeseriesResultValue>> iiResults =
      engine.process(query, new IncrementalIndexStorageAdapter(incrementalIndex)).toList();
  final Iterable<Result<TimeseriesResultValue>> qiResults =
      engine.process(query, new QueryableIndexStorageAdapter(queryableIndex)).toList();
  TestHelper.assertExpectedResults(expectedResults, iiResults, "incremental index");
  TestHelper.assertExpectedResults(expectedResults, qiResults, "queryable index");
}
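Note the design: the engine never touches the index types directly. Both the in-memory IncrementalIndex and the persisted QueryableIndex are presented through the StorageAdapter abstraction, so one code path serves both. A minimal sketch of driving the engine directly, assuming an already-populated incrementalIndex as in the fixture above:

// The engine is storage-agnostic: it sees only the StorageAdapter interface,
// so in-memory and on-disk segments are processed identically.
TimeseriesQueryEngine engine = new TimeseriesQueryEngine();
StorageAdapter adapter = new IncrementalIndexStorageAdapter(incrementalIndex);
List<Result<TimeseriesResultValue>> results = engine.process(query, adapter).toList();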
Use of org.apache.druid.query.timeseries.TimeseriesQueryEngine in project druid by druid-io, from the class QueryStackTests, method createQueryRunnerFactoryConglomerate.
public static QueryRunnerFactoryConglomerate createQueryRunnerFactoryConglomerate(
    final Closer closer,
    final DruidProcessingConfig processingConfig,
    final Supplier<Integer> minTopNThresholdSupplier
) {
  final CloseableStupidPool<ByteBuffer> stupidPool = new CloseableStupidPool<>(
      "TopNQueryRunnerFactory-bufferPool",
      () -> ByteBuffer.allocate(COMPUTE_BUFFER_SIZE)
  );
  closer.register(stupidPool);
  final Pair<GroupByQueryRunnerFactory, Closer> factoryCloserPair = GroupByQueryRunnerTest.makeQueryRunnerFactory(
      GroupByQueryRunnerTest.DEFAULT_MAPPER,
      new GroupByQueryConfig() {
        @Override
        public String getDefaultStrategy() {
          return GroupByStrategySelector.STRATEGY_V2;
        }
      },
      processingConfig
  );
  final GroupByQueryRunnerFactory groupByQueryRunnerFactory = factoryCloserPair.lhs;
  closer.register(factoryCloserPair.rhs);
  final QueryRunnerFactoryConglomerate conglomerate = new DefaultQueryRunnerFactoryConglomerate(
      ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>builder()
          .put(
              SegmentMetadataQuery.class,
              new SegmentMetadataQueryRunnerFactory(
                  new SegmentMetadataQueryQueryToolChest(new SegmentMetadataQueryConfig("P1W")),
                  QueryRunnerTestHelper.NOOP_QUERYWATCHER
              )
          )
          .put(
              ScanQuery.class,
              new ScanQueryRunnerFactory(
                  new ScanQueryQueryToolChest(new ScanQueryConfig(), new DefaultGenericQueryMetricsFactory()),
                  new ScanQueryEngine(),
                  new ScanQueryConfig()
              )
          )
          .put(
              TimeseriesQuery.class,
              new TimeseriesQueryRunnerFactory(
                  new TimeseriesQueryQueryToolChest(),
                  new TimeseriesQueryEngine(),
                  QueryRunnerTestHelper.NOOP_QUERYWATCHER
              )
          )
          .put(
              TopNQuery.class,
              new TopNQueryRunnerFactory(
                  stupidPool,
                  new TopNQueryQueryToolChest(new TopNQueryConfig() {
                    @Override
                    public int getMinTopNThreshold() {
                      return minTopNThresholdSupplier.get();
                    }
                  }),
                  QueryRunnerTestHelper.NOOP_QUERYWATCHER
              )
          )
          .put(GroupByQuery.class, groupByQueryRunnerFactory)
          .build()
  );
  return conglomerate;
}
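Callers resolve a factory back out of the conglomerate by the query's class. A usage sketch, assuming a processingConfig is at hand and timeseriesQuery is a hypothetical, fully built query:

// Hypothetical usage: build the conglomerate, then look up the factory that
// handles a given query. findFactory dispatches on the query's class.
Closer closer = Closer.create();
QueryRunnerFactoryConglomerate conglomerate =
    QueryStackTests.createQueryRunnerFactoryConglomerate(closer, processingConfig, () -> 1000);
QueryRunnerFactory<Result<TimeseriesResultValue>, TimeseriesQuery> factory =
    conglomerate.findFactory(timeseriesQuery);
// Closing the Closer releases the pooled processing buffers registered above.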
Use of org.apache.druid.query.timeseries.TimeseriesQueryEngine in project druid by druid-io, from the class AppenderatorDriverRealtimeIndexTaskTest, method makeToolboxFactory.
private void makeToolboxFactory(final File directory) {
  taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
  publishedSegments = new CopyOnWriteArrayList<>();
  ObjectMapper mapper = new DefaultObjectMapper();
  mapper.registerSubtypes(LinearShardSpec.class);
  mapper.registerSubtypes(NumberedShardSpec.class);
  IndexerSQLMetadataStorageCoordinator mdc = new IndexerSQLMetadataStorageCoordinator(
      mapper,
      derbyConnectorRule.metadataTablesConfigSupplier().get(),
      derbyConnectorRule.getConnector()
  ) {
    @Override
    public Set<DataSegment> announceHistoricalSegments(Set<DataSegment> segments) throws IOException {
      Set<DataSegment> result = super.announceHistoricalSegments(segments);
      Assert.assertFalse("Segment latch not initialized, did you forget to call expectPublishSegments?", segmentLatch == null);
      publishedSegments.addAll(result);
      segments.forEach(s -> segmentLatch.countDown());
      return result;
    }

    @Override
    public SegmentPublishResult announceHistoricalSegments(
        Set<DataSegment> segments,
        Set<DataSegment> segmentsToDrop,
        DataSourceMetadata startMetadata,
        DataSourceMetadata endMetadata
    ) throws IOException {
      SegmentPublishResult result = super.announceHistoricalSegments(segments, segmentsToDrop, startMetadata, endMetadata);
      Assert.assertFalse("Segment latch not initialized, did you forget to call expectPublishSegments?", segmentLatch == null);
      publishedSegments.addAll(result.getSegments());
      result.getSegments().forEach(s -> segmentLatch.countDown());
      return result;
    }
  };
  taskLockbox = new TaskLockbox(taskStorage, mdc);
  final TaskConfig taskConfig = new TaskConfig(
      directory.getPath(),
      null,
      null,
      50000,
      null,
      true,
      null,
      null,
      null,
      false,
      false,
      TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(
      taskLockbox,
      taskStorage,
      mdc,
      EMITTER,
      EasyMock.createMock(SupervisorManager.class)
  );
  final TaskActionClientFactory taskActionClientFactory =
      new LocalTaskActionClientFactory(taskStorage, taskActionToolbox, new TaskAuditLogConfig(false));
  final QueryRunnerFactoryConglomerate conglomerate = new DefaultQueryRunnerFactoryConglomerate(
      ImmutableMap.of(
          TimeseriesQuery.class,
          new TimeseriesQueryRunnerFactory(
              new TimeseriesQueryQueryToolChest(),
              new TimeseriesQueryEngine(),
              (query, future) -> {
                // do nothing
              }
          )
      )
  );
  handOffCallbacks = new ConcurrentHashMap<>();
  final SegmentHandoffNotifierFactory handoffNotifierFactory = dataSource -> new SegmentHandoffNotifier() {
    @Override
    public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable) {
      handOffCallbacks.put(descriptor, new Pair<>(exec, handOffRunnable));
      handoffLatch.countDown();
      return true;
    }

    @Override
    public void start() {
      // Noop
    }

    @Override
    public void close() {
      // Noop
    }
  };
  final TestUtils testUtils = new TestUtils();
  taskToolboxFactory = new TaskToolboxFactory(
      taskConfig,
      new DruidNode("druid/middlemanager", "localhost", false, 8091, null, true, false),
      taskActionClientFactory,
      EMITTER,
      new TestDataSegmentPusher(),
      new TestDataSegmentKiller(),
      // DataSegmentMover
      null,
      // DataSegmentArchiver
      null,
      new TestDataSegmentAnnouncer(),
      EasyMock.createNiceMock(DataSegmentServerAnnouncer.class),
      handoffNotifierFactory,
      () -> conglomerate,
      // queryExecutorService
      DirectQueryProcessingPool.INSTANCE,
      NoopJoinableFactory.INSTANCE,
      () -> EasyMock.createMock(MonitorScheduler.class),
      new SegmentCacheManagerFactory(testUtils.getTestObjectMapper()),
      testUtils.getTestObjectMapper(),
      testUtils.getTestIndexIO(),
      MapCache.create(1024),
      new CacheConfig(),
      new CachePopulatorStats(),
      testUtils.getTestIndexMergerV9(),
      EasyMock.createNiceMock(DruidNodeAnnouncer.class),
      EasyMock.createNiceMock(DruidNode.class),
      new LookupNodeService("tier"),
      new DataNodeService("tier", 1000, ServerType.INDEXER_EXECUTOR, 0),
      new SingleFileTaskReportFileWriter(reportsFile),
      null,
      AuthTestUtils.TEST_AUTHORIZER_MAPPER,
      new NoopChatHandlerProvider(),
      testUtils.getRowIngestionMetersFactory(),
      new TestAppenderatorsManager(),
      new NoopIndexingServiceClient(),
      null,
      null,
      null
  );
}
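The (query, future) -> {} lambda passed to TimeseriesQueryRunnerFactory above is an inline QueryWatcher. Written out explicitly, the same idea looks like the sketch below (purely illustrative; the inline lambda is equivalent):

// QueryWatcher is a functional interface whose single method receives the
// query and the future tracking its execution; a no-op watcher ignores both,
// which is fine for tests that never cancel or time out queries.
QueryWatcher noopWatcher = (query, future) -> {
  // Intentionally empty: no cancellation or timeout tracking in tests.
};
TimeseriesQueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(
    new TimeseriesQueryQueryToolChest(),
    new TimeseriesQueryEngine(),
    noopWatcher
);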
Use of org.apache.druid.query.timeseries.TimeseriesQueryEngine in project druid by druid-io, from the class CachingClusteredClientBenchmark, method setup.
@Setup(Level.Trial)
public void setup() {
  final String schemaName = "basic";
  parallelCombine = parallelism > 0;
  GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
  Map<DataSegment, QueryableIndex> queryableIndexes = Maps.newHashMapWithExpectedSize(numServers);
  for (int i = 0; i < numServers; i++) {
    final DataSegment dataSegment = DataSegment.builder()
        .dataSource(DATA_SOURCE)
        .interval(schemaInfo.getDataInterval())
        .version("1")
        .shardSpec(new LinearShardSpec(i))
        .size(0)
        .build();
    final SegmentGenerator segmentGenerator = closer.register(new SegmentGenerator());
    LOG.info("Starting benchmark setup using cacheDir[%s], rows[%,d].", segmentGenerator.getCacheDir(), rowsPerSegment);
    final QueryableIndex index = segmentGenerator.generate(dataSegment, schemaInfo, Granularities.NONE, rowsPerSegment);
    queryableIndexes.put(dataSegment, index);
  }
  final DruidProcessingConfig processingConfig = new DruidProcessingConfig() {
    @Override
    public String getFormatString() {
      return null;
    }

    @Override
    public int intermediateComputeSizeBytes() {
      return PROCESSING_BUFFER_SIZE;
    }

    @Override
    public int getNumMergeBuffers() {
      return 1;
    }

    @Override
    public int getNumThreads() {
      return numProcessingThreads;
    }

    @Override
    public boolean useParallelMergePool() {
      return true;
    }
  };
  conglomerate = new DefaultQueryRunnerFactoryConglomerate(
      ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>builder()
          .put(
              TimeseriesQuery.class,
              new TimeseriesQueryRunnerFactory(
                  new TimeseriesQueryQueryToolChest(),
                  new TimeseriesQueryEngine(),
                  QueryRunnerTestHelper.NOOP_QUERYWATCHER
              )
          )
          .put(
              TopNQuery.class,
              new TopNQueryRunnerFactory(
                  new StupidPool<>("TopNQueryRunnerFactory-bufferPool", () -> ByteBuffer.allocate(PROCESSING_BUFFER_SIZE)),
                  new TopNQueryQueryToolChest(new TopNQueryConfig()),
                  QueryRunnerTestHelper.NOOP_QUERYWATCHER
              )
          )
          .put(
              GroupByQuery.class,
              makeGroupByQueryRunnerFactory(
                  GroupByQueryRunnerTest.DEFAULT_MAPPER,
                  new GroupByQueryConfig() {
                    @Override
                    public String getDefaultStrategy() {
                      return GroupByStrategySelector.STRATEGY_V2;
                    }
                  },
                  processingConfig
              )
          )
          .build()
  );
  toolChestWarehouse = new QueryToolChestWarehouse() {
    @Override
    public <T, QueryType extends Query<T>> QueryToolChest<T, QueryType> getToolChest(final QueryType query) {
      return conglomerate.findFactory(query).getToolchest();
    }
  };
  SimpleServerView serverView = new SimpleServerView();
  int serverSuffix = 1;
  for (Entry<DataSegment, QueryableIndex> entry : queryableIndexes.entrySet()) {
    serverView.addServer(createServer(serverSuffix++), entry.getKey(), entry.getValue());
  }
  processingPool = Execs.multiThreaded(processingConfig.getNumThreads(), "caching-clustered-client-benchmark");
  forkJoinPool = new ForkJoinPool(
      (int) Math.ceil(Runtime.getRuntime().availableProcessors() * 0.75),
      ForkJoinPool.defaultForkJoinWorkerThreadFactory,
      null,
      true
  );
  cachingClusteredClient = new CachingClusteredClient(
      toolChestWarehouse,
      serverView,
      MapCache.create(0),
      JSON_MAPPER,
      new ForegroundCachePopulator(JSON_MAPPER, new CachePopulatorStats(), 0),
      new CacheConfig(),
      new DruidHttpClientConfig(),
      processingConfig,
      forkJoinPool,
      QueryStackTests.DEFAULT_NOOP_SCHEDULER,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      new NoopServiceEmitter()
  );
}
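Once setup completes, the client can serve queries through the standard QuerySegmentWalker interface. A minimal sketch, assuming query is a hypothetical TimeseriesQuery over the generated interval; the real benchmark wraps the runner with additional merge and finalize steps:

// CachingClusteredClient implements QuerySegmentWalker, so a runner covering
// the query's own intervals can be obtained directly. `query` is hypothetical.
QueryRunner<Result<TimeseriesResultValue>> runner =
    cachingClusteredClient.getQueryRunnerForIntervals(query, query.getIntervals());
List<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();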