Use of io.druid.query.filter.OrDimFilter in project druid by druid-io.
The class FilteredAggregatorBenchmark, method setup().
@Setup
public void setup() throws IOException {
  log.info("SETUP CALLED AT " + System.currentTimeMillis());
  if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
    ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
  }
  schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schema);
  BenchmarkDataGenerator gen = new BenchmarkDataGenerator(
      schemaInfo.getColumnSchemas(),
      RNG_SEED,
      schemaInfo.getDataInterval(),
      rowsPerSegment
  );
  incIndex = makeIncIndex(schemaInfo.getAggsArray());
  // An OR of five filter types (bound, JavaScript, regex, search, in), each
  // constructed so that it matches no rows of the generated data.
  filter = new OrDimFilter(Arrays.asList(
      new BoundDimFilter("dimSequential", "-1", "-1", true, true, null, null, StringComparators.ALPHANUMERIC),
      new JavaScriptDimFilter("dimSequential", "function(x) { return false }", null, JavaScriptConfig.getEnabledInstance()),
      new RegexDimFilter("dimSequential", "X", null),
      new SearchQueryDimFilter("dimSequential", new ContainsSearchQuerySpec("X", false), null),
      new InDimFilter("dimSequential", Arrays.asList("X"), null)
  ));
  filteredMetrics = new AggregatorFactory[1];
  filteredMetrics[0] = new FilteredAggregatorFactory(new CountAggregatorFactory("rows"), filter);
  incIndexFilteredAgg = makeIncIndex(filteredMetrics);
  inputRows = new ArrayList<>();
  for (int j = 0; j < rowsPerSegment; j++) {
    InputRow row = gen.nextRow();
    if (j % 10000 == 0) {
      log.info(j + " rows generated.");
    }
    incIndex.add(row);
    inputRows.add(row);
  }
  tmpDir = Files.createTempDir();
  log.info("Using temp dir: " + tmpDir.getAbsolutePath());
  indexFile = INDEX_MERGER_V9.persist(incIndex, tmpDir, new IndexSpec());
  qIndex = INDEX_IO.loadIndex(indexFile);
  factory = new TimeseriesQueryRunnerFactory(
      new TimeseriesQueryQueryToolChest(QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()),
      new TimeseriesQueryEngine(),
      QueryBenchmarkUtil.NOOP_QUERYWATCHER
  );
  BenchmarkSchemaInfo basicSchema = BenchmarkSchemas.SCHEMA_MAP.get("basic");
  QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));
  List<AggregatorFactory> queryAggs = new ArrayList<>();
  queryAggs.add(filteredMetrics[0]);
  query = Druids.newTimeseriesQueryBuilder()
                .dataSource("blah")
                .granularity(Granularities.ALL)
                .intervals(intervalSpec)
                .aggregators(queryAggs)
                .descending(false)
                .build();
}
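
For orientation: the query built at the end of setup() is what the benchmark methods later run against the persisted index. A minimal sketch of that pattern, assuming the benchmark module's QueryBenchmarkUtil.makeQueryRunner helper; the segment identifier "qIndex" is illustrative, and the exact QueryRunner.run signature (Query vs. QueryPlus) varies across Druid versions:

// Hedged sketch: execute the timeseries query from setup() over the on-disk segment.
QueryRunner<Result<TimeseriesResultValue>> runner = QueryBenchmarkUtil.makeQueryRunner(
    factory,
    "qIndex",
    new QueryableIndexSegment("qIndex", qIndex)
);
Sequence<Result<TimeseriesResultValue>> seq = runner.run(QueryPlus.wrap(query), new HashMap<String, Object>());
List<Result<TimeseriesResultValue>> results =
    Sequences.toList(seq, Lists.<Result<TimeseriesResultValue>>newArrayList());

Because the only aggregator is the FilteredAggregatorFactory built above, this measures how fast the engine can apply the five-way OR filter row by row while aggregating nothing.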
Use of io.druid.query.filter.OrDimFilter in project druid by druid-io.
The class FilterPartitionBenchmark, method readComplexOrFilterCNF().
@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void readComplexOrFilterCNF(Blackhole blackhole) throws Exception {
  DimFilter dimFilter1 = new OrDimFilter(Arrays.<DimFilter>asList(
      new SelectorDimFilter("dimSequential", "199", null),
      new AndDimFilter(Arrays.<DimFilter>asList(
          new NoBitmapSelectorDimFilter("dimMultivalEnumerated2", "Corundum", null),
          new SelectorDimFilter("dimMultivalEnumerated", "Bar", null)))));
  DimFilter dimFilter2 = new OrDimFilter(Arrays.<DimFilter>asList(
      new SelectorDimFilter("dimSequential", "299", null),
      new SelectorDimFilter("dimSequential", "399", null),
      new AndDimFilter(Arrays.<DimFilter>asList(
          new NoBitmapSelectorDimFilter("dimMultivalEnumerated2", "Xylophone", null),
          new SelectorDimFilter("dimMultivalEnumerated", "Foo", null)))));
  // Nest the two OR filters inside a third, yielding an OR of ORs of ANDs.
  DimFilter dimFilter3 = new OrDimFilter(Arrays.<DimFilter>asList(
      dimFilter1,
      dimFilter2,
      new AndDimFilter(Arrays.<DimFilter>asList(
          new NoBitmapSelectorDimFilter("dimMultivalEnumerated2", "Orange", null),
          new SelectorDimFilter("dimMultivalEnumerated", "World", null)))));
  StorageAdapter sa = new QueryableIndexStorageAdapter(qIndex);
  // Normalize to conjunctive normal form before handing the filter to the cursors.
  Sequence<Cursor> cursors = makeCursors(sa, Filters.convertToCNF(dimFilter3.toFilter()));
  Sequence<List<String>> stringListSeq = readCursors(cursors, blackhole);
  List<String> strings = Sequences.toList(Sequences.limit(stringListSeq, 1), Lists.<List<String>>newArrayList()).get(0);
  for (String st : strings) {
    blackhole.consume(st);
  }
}
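
The Filters.convertToCNF call is the variable under test: it rewrites the nested OR-of-ANDs tree into conjunctive normal form (an AND of ORs), which can let each conjunct be evaluated independently but can also enlarge the filter tree considerably. A hedged sketch of the unconverted counterpart, reusing this class's makeCursors/readCursors helpers; the method name readComplexOrFilterRaw and the smaller filter are illustrative, not taken from the project:

@Benchmark
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
public void readComplexOrFilterRaw(Blackhole blackhole) throws Exception {
  // Same shape as dimFilter1 above, but passed through without the CNF rewrite,
  // so the engine evaluates the OR-of-ANDs tree as written.
  DimFilter orOfAnds = new OrDimFilter(Arrays.<DimFilter>asList(
      new SelectorDimFilter("dimSequential", "199", null),
      new AndDimFilter(Arrays.<DimFilter>asList(
          new SelectorDimFilter("dimMultivalEnumerated2", "Corundum", null),
          new SelectorDimFilter("dimMultivalEnumerated", "Bar", null)))));
  StorageAdapter sa = new QueryableIndexStorageAdapter(qIndex);
  Sequence<Cursor> cursors = makeCursors(sa, orOfAnds.toFilter());  // no Filters.convertToCNF
  Sequence<List<String>> stringListSeq = readCursors(cursors, blackhole);
  List<String> strings = Sequences.toList(Sequences.limit(stringListSeq, 1), Lists.<List<String>>newArrayList()).get(0);
  for (String st : strings) {
    blackhole.consume(st);
  }
}

Running both variants side by side isolates the cost, or benefit, of the CNF normalization step on the same data.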
Use of io.druid.query.filter.OrDimFilter in project druid by druid-io.
The class GroupByQueryRunnerTest, method testGroupByTimeExtractionNamedUnderUnderTime().
@Test
public void testGroupByTimeExtractionNamedUnderUnderTime() {
  // Naming a dimension's output "__time" is reserved and must be rejected.
  expectedException.expect(IAE.class);
  expectedException.expectMessage(
      "'__time' cannot be used as an output name for dimensions, aggregators, or post-aggregators."
  );
  GroupByQuery query = GroupByQuery
      .builder()
      .setDataSource(QueryRunnerTestHelper.dataSource)
      .setQuerySegmentSpec(QueryRunnerTestHelper.fullOnInterval)
      .setDimensions(Lists.newArrayList(
          new DefaultDimensionSpec("market", "market"),
          new ExtractionDimensionSpec(
              Column.TIME_COLUMN_NAME,
              Column.TIME_COLUMN_NAME,
              new TimeFormatExtractionFn("EEEE", null, null, null, false))))
      .setAggregatorSpecs(Arrays.asList(QueryRunnerTestHelper.rowsCount, QueryRunnerTestHelper.indexDoubleSum))
      .setPostAggregatorSpecs(Arrays.<PostAggregator>asList(QueryRunnerTestHelper.addRowsIndexConstant))
      .setGranularity(QueryRunnerTestHelper.allGran)
      .setDimFilter(new OrDimFilter(Arrays.<DimFilter>asList(
          new SelectorDimFilter("market", "spot", null),
          new SelectorDimFilter("market", "upfront", null))))
      .setLimitSpec(new DefaultLimitSpec(ImmutableList.<OrderByColumnSpec>of(), 1))
      .build();
}
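
For contrast, the same time extraction is legal whenever its output name is anything other than __time: only the reserved output name trips the validation above, not reading the time column itself. A minimal sketch, with the output name "dayOfWeek" chosen for illustration:

// Hedged sketch: extract day-of-week from __time under a non-reserved output name,
// so the builder validates and no IAE is thrown.
GroupByQuery valid = GroupByQuery
    .builder()
    .setDataSource(QueryRunnerTestHelper.dataSource)
    .setQuerySegmentSpec(QueryRunnerTestHelper.fullOnInterval)
    .setDimensions(Lists.newArrayList(
        new DefaultDimensionSpec("market", "market"),
        new ExtractionDimensionSpec(
            Column.TIME_COLUMN_NAME,
            "dayOfWeek",  // illustrative output name; any name except "__time" passes
            new TimeFormatExtractionFn("EEEE", null, null, null, false))))
    .setAggregatorSpecs(Arrays.asList(QueryRunnerTestHelper.rowsCount, QueryRunnerTestHelper.indexDoubleSum))
    .setGranularity(QueryRunnerTestHelper.allGran)
    .build();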