Usage of org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest in the druid project by druid-io.
From the class ApproximateHistogramPostAggregatorTest, method testResultArraySignature.
@Test
public void testResultArraySignature() {
  // Build a timeseries query with one non-finalized approximate-histogram
  // aggregator and one post-aggregator of each histogram post-agg flavor.
  final TimeseriesQuery timeseriesQuery = Druids.newTimeseriesQueryBuilder()
      .dataSource("dummy")
      .intervals("2000/3000")
      .granularity(Granularities.HOUR)
      .aggregators(
          new ApproximateHistogramAggregatorFactory("approxHisto", "col", null, null, null, null, false)
      )
      .postAggregators(
          new BucketsPostAggregator("bucket", "approxHisto", 100, 0),
          new EqualBucketsPostAggregator("equal", "approxHisto", 5),
          new CustomBucketsPostAggregator("custom", "approxHisto", new float[]{1.0f, 20.0f, 75.0f}),
          new MinPostAggregator("min", "approxHisto"),
          new MaxPostAggregator("max", "approxHisto"),
          new QuantilePostAggregator("quantile", "approxHisto", 0.5f),
          new QuantilesPostAggregator("quantiles", "approxHisto", new float[]{0.2f, 0.5f, 0.75f})
      )
      .build();

  // Expected row-array signature: the time column first, then the aggregator
  // column (typed null here, i.e. unknown, for the non-finalized complex agg),
  // then one column per post-aggregator in declaration order.
  final RowSignature expectedSignature = RowSignature.builder()
      .addTimeColumn()
      .add("approxHisto", null)
      .add("bucket", HistogramAggregatorFactory.TYPE)
      .add("equal", HistogramAggregatorFactory.TYPE)
      .add("custom", HistogramAggregatorFactory.TYPE)
      .add("min", ColumnType.DOUBLE)
      .add("max", ColumnType.DOUBLE)
      .add("quantile", ColumnType.FLOAT)
      .add("quantiles", ColumnType.UNKNOWN_COMPLEX)
      .build();

  Assert.assertEquals(
      expectedSignature,
      new TimeseriesQueryQueryToolChest().resultArraySignature(timeseriesQuery)
  );
}
Usage of org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest in the druid project by druid-io.
From the class ApproximateHistogramAggregatorTest, method testResultArraySignature.
@Test
public void testResultArraySignature() {
  // One non-finalizing ("false") and one finalizing ("true") histogram
  // aggregator, each read back via plain and finalizing field access.
  final TimeseriesQuery timeseriesQuery = Druids.newTimeseriesQueryBuilder()
      .dataSource("dummy")
      .intervals("2000/3000")
      .granularity(Granularities.HOUR)
      .aggregators(
          new ApproximateHistogramAggregatorFactory("approxHisto", "col", null, null, null, null, false),
          new ApproximateHistogramAggregatorFactory("approxHistoBin", "col", null, null, null, null, true)
      )
      .postAggregators(
          new FieldAccessPostAggregator("approxHisto-access", "approxHisto"),
          new FinalizingFieldAccessPostAggregator("approxHisto-finalize", "approxHisto"),
          new FieldAccessPostAggregator("approxHistoBin-access", "approxHistoBin"),
          new FinalizingFieldAccessPostAggregator("approxHistoBin-finalize", "approxHistoBin")
      )
      .build();

  // The non-finalized aggregator column reports a null (unknown) type;
  // finalizing access on it yields the finalized HistogramAggregatorFactory
  // type, while the already-finalized variant keeps its complex type throughout.
  final RowSignature expectedSignature = RowSignature.builder()
      .addTimeColumn()
      .add("approxHisto", null)
      .add("approxHistoBin", ApproximateHistogramAggregatorFactory.TYPE)
      .add("approxHisto-access", ApproximateHistogramAggregatorFactory.TYPE)
      .add("approxHisto-finalize", HistogramAggregatorFactory.TYPE)
      .add("approxHistoBin-access", ApproximateHistogramAggregatorFactory.TYPE)
      .add("approxHistoBin-finalize", ApproximateHistogramAggregatorFactory.TYPE)
      .build();

  Assert.assertEquals(
      expectedSignature,
      new TimeseriesQueryQueryToolChest().resultArraySignature(timeseriesQuery)
  );
}
Usage of org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest in the druid project by druid-io.
From the class FixedBucketsHistogramBufferAggregatorTest, method testResultArraySignature.
@Test
public void testResultArraySignature() {
  // One non-finalizing ("false") and one finalizing ("true") fixed-buckets
  // histogram aggregator, each read back via plain and finalizing field access.
  final TimeseriesQuery timeseriesQuery = Druids.newTimeseriesQueryBuilder()
      .dataSource("dummy")
      .intervals("2000/3000")
      .granularity(Granularities.HOUR)
      .aggregators(
          new FixedBucketsHistogramAggregatorFactory("fixedHisto", "col", null, 0, 100, null, false),
          new FixedBucketsHistogramAggregatorFactory("fixedHistoBin", "col", null, 0, 100, null, true)
      )
      .postAggregators(
          new FieldAccessPostAggregator("fixedHisto-access", "fixedHisto"),
          new FinalizingFieldAccessPostAggregator("fixedHisto-finalize", "fixedHisto"),
          new FieldAccessPostAggregator("fixedHistoBin-access", "fixedHistoBin"),
          new FinalizingFieldAccessPostAggregator("fixedHistoBin-finalize", "fixedHistoBin")
      )
      .build();

  // The non-finalized aggregator column reports a null (unknown) type;
  // finalizing access on it yields a STRING, while the already-finalized
  // variant keeps the complex fixed-buckets type throughout.
  final RowSignature expectedSignature = RowSignature.builder()
      .addTimeColumn()
      .add("fixedHisto", null)
      .add("fixedHistoBin", FixedBucketsHistogramAggregator.TYPE)
      .add("fixedHisto-access", FixedBucketsHistogramAggregator.TYPE)
      .add("fixedHisto-finalize", ColumnType.STRING)
      .add("fixedHistoBin-access", FixedBucketsHistogramAggregator.TYPE)
      .add("fixedHistoBin-finalize", FixedBucketsHistogramAggregator.TYPE)
      .build();

  Assert.assertEquals(
      expectedSignature,
      new TimeseriesQueryQueryToolChest().resultArraySignature(timeseriesQuery)
  );
}
Usage of org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest in the druid project by druid-io.
From the class CachingClusteredClientBenchmark, method setup.
// Builds the entire benchmark harness for CachingClusteredClient: generates one
// queryable-index segment per simulated server, wires a query-runner conglomerate
// for timeseries/topN/groupBy, and constructs the client under test.
@Setup(Level.Trial)
public void setup() {
// All generated segments use the "basic" generator schema.
final String schemaName = "basic";
// parallelism <= 0 turns off parallel combining for the benchmarked client.
parallelCombine = parallelism > 0;
GeneratorSchemaInfo schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaName);
// One segment per simulated server, keyed by its DataSegment descriptor.
Map<DataSegment, QueryableIndex> queryableIndexes = Maps.newHashMapWithExpectedSize(numServers);
for (int i = 0; i < numServers; i++) {
// Each segment gets its own linear shard-spec partition number.
final DataSegment dataSegment = DataSegment.builder().dataSource(DATA_SOURCE).interval(schemaInfo.getDataInterval()).version("1").shardSpec(new LinearShardSpec(i)).size(0).build();
// Registered on the closer so generated on-disk data is cleaned up at teardown.
final SegmentGenerator segmentGenerator = closer.register(new SegmentGenerator());
LOG.info("Starting benchmark setup using cacheDir[%s], rows[%,d].", segmentGenerator.getCacheDir(), rowsPerSegment);
final QueryableIndex index = segmentGenerator.generate(dataSegment, schemaInfo, Granularities.NONE, rowsPerSegment);
queryableIndexes.put(dataSegment, index);
}
// Processing config shared by the groupBy factory and the processing pool below.
final DruidProcessingConfig processingConfig = new DruidProcessingConfig() {
@Override
public String getFormatString() {
return null;
}
@Override
public int intermediateComputeSizeBytes() {
return PROCESSING_BUFFER_SIZE;
}
@Override
public int getNumMergeBuffers() {
return 1;
}
@Override
public int getNumThreads() {
// Thread count is a benchmark parameter.
return numProcessingThreads;
}
@Override
public boolean useParallelMergePool() {
return true;
}
};
// Registers a runner factory per supported query type: timeseries, topN
// (with its own result buffer pool), and groupBy pinned to strategy v2.
conglomerate = new DefaultQueryRunnerFactoryConglomerate(ImmutableMap.<Class<? extends Query>, QueryRunnerFactory>builder().put(TimeseriesQuery.class, new TimeseriesQueryRunnerFactory(new TimeseriesQueryQueryToolChest(), new TimeseriesQueryEngine(), QueryRunnerTestHelper.NOOP_QUERYWATCHER)).put(TopNQuery.class, new TopNQueryRunnerFactory(new StupidPool<>("TopNQueryRunnerFactory-bufferPool", () -> ByteBuffer.allocate(PROCESSING_BUFFER_SIZE)), new TopNQueryQueryToolChest(new TopNQueryConfig()), QueryRunnerTestHelper.NOOP_QUERYWATCHER)).put(GroupByQuery.class, makeGroupByQueryRunnerFactory(GroupByQueryRunnerTest.DEFAULT_MAPPER, new GroupByQueryConfig() {
@Override
public String getDefaultStrategy() {
return GroupByStrategySelector.STRATEGY_V2;
}
}, processingConfig)).build());
// Warehouse simply delegates tool-chest lookup to the conglomerate above.
toolChestWarehouse = new QueryToolChestWarehouse() {
@Override
public <T, QueryType extends Query<T>> QueryToolChest<T, QueryType> getToolChest(final QueryType query) {
return conglomerate.findFactory(query).getToolchest();
}
};
// Announce one server per generated segment to the test server view.
SimpleServerView serverView = new SimpleServerView();
// NOTE(review): "serverSuffx" is misspelled ("serverSuffix"); left as-is here.
int serverSuffx = 1;
for (Entry<DataSegment, QueryableIndex> entry : queryableIndexes.entrySet()) {
serverView.addServer(createServer(serverSuffx++), entry.getKey(), entry.getValue());
}
processingPool = Execs.multiThreaded(processingConfig.getNumThreads(), "caching-clustered-client-benchmark");
// Dedicated fork-join pool (75% of available cores, async/FIFO mode per the
// final constructor argument) used for parallel merge work.
forkJoinPool = new ForkJoinPool((int) Math.ceil(Runtime.getRuntime().availableProcessors() * 0.75), ForkJoinPool.defaultForkJoinWorkerThreadFactory, null, true);
// Cache is a zero-sized local map; populator/config are effectively no-ops,
// so the benchmark measures query fan-out/merge rather than caching.
cachingClusteredClient = new CachingClusteredClient(toolChestWarehouse, serverView, MapCache.create(0), JSON_MAPPER, new ForegroundCachePopulator(JSON_MAPPER, new CachePopulatorStats(), 0), new CacheConfig(), new DruidHttpClientConfig(), processingConfig, forkJoinPool, QueryStackTests.DEFAULT_NOOP_SCHEDULER, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), new NoopServiceEmitter());
}
Usage of org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest in the druid project by druid-io.
From the class TimeseriesBenchmark, method setup.
/**
 * Sets up state shared by both the incremental-index and the queryable-index
 * benchmarks: serde registration, query lookup, data generator, and the
 * timeseries query-runner factory.
 */
@Setup
public void setup() {
  log.info("SETUP CALLED AT " + System.currentTimeMillis());

  // Register the complex-metric serde before any segment data is built.
  ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde());
  setupQueries();

  // The benchmark parameter has the form "<schemaName>.<queryName>".
  final String[] parts = schemaAndQuery.split("\\.");
  final String schemaPart = parts[0];
  final String queryPart = parts[1];

  schemaInfo = GeneratorBasicSchemas.SCHEMA_MAP.get(schemaPart);
  query = SCHEMA_QUERY_MAP.get(schemaPart).get(queryPart);

  generator = new DataGenerator(
      schemaInfo.getColumnSchemas(),
      RNG_SEED,
      schemaInfo.getDataInterval(),
      rowsPerSegment
  );
  factory = new TimeseriesQueryRunnerFactory(
      new TimeseriesQueryQueryToolChest(),
      new TimeseriesQueryEngine(),
      QueryBenchmarkUtil.NOOP_QUERYWATCHER
  );
}
Aggregations