Use of io.druid.query.timeseries.TimeseriesQueryQueryToolChest in project druid by druid-io.
From the class RetryQueryRunnerTest, method testRunWithMissingSegments:
@Test
public void testRunWithMissingSegments() throws Exception {
    Map<String, Object> context = new MapMaker().makeMap();
    context.put(Result.MISSING_SEGMENTS_KEY, Lists.newArrayList());
    RetryQueryRunner<Result<TimeseriesResultValue>> runner = new RetryQueryRunner<>(new QueryRunner<Result<TimeseriesResultValue>>() {

        @Override
        public Sequence<Result<TimeseriesResultValue>> run(Query<Result<TimeseriesResultValue>> query, Map<String, Object> context) {
            ((List) context.get(Result.MISSING_SEGMENTS_KEY)).add(new SegmentDescriptor(new Interval(178888, 1999999), "test", 1));
            return Sequences.empty();
        }
    }, (QueryToolChest) new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()), new RetryQueryRunnerConfig() {

        @Override
        public int getNumTries() {
            return 0;
        }

        @Override
        public boolean isReturnPartialResults() {
            return true;
        }
    }, jsonMapper);
    Iterable<Result<TimeseriesResultValue>> actualResults = Sequences.toList(runner.run(query, context), Lists.<Result<TimeseriesResultValue>>newArrayList());
    Assert.assertTrue("Should have one entry in the list of missing segments", ((List) context.get(Result.MISSING_SEGMENTS_KEY)).size() == 1);
    Assert.assertTrue("Should return an empty sequence as a result", ((List) actualResults).size() == 0);
}
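The mechanism this test relies on is the mutable response context: the wrapped runner records a SegmentDescriptor under Result.MISSING_SEGMENTS_KEY, and the caller inspects the same map after consuming the sequence. With getNumTries() returning 0 and isReturnPartialResults() returning true, RetryQueryRunner gives up immediately and hands back the (empty) partial result. A minimal, self-contained sketch of that context contract, using only java.util types and an illustrative key name (not Druid's actual constant):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ResponseContextSketch {
    // Illustrative stand-in for Result.MISSING_SEGMENTS_KEY.
    static final String MISSING_SEGMENTS = "missingSegments";

    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        Map<String, Object> context = new HashMap<>();
        context.put(MISSING_SEGMENTS, new ArrayList<String>());

        // The inner runner appends segments it could not serve...
        ((List<String>) context.get(MISSING_SEGMENTS)).add("test_1999-01-01/2000-01-01_v1_1");

        // ...and the caller inspects the same list once the sequence is consumed.
        List<String> missing = (List<String>) context.get(MISSING_SEGMENTS);
        System.out.println("missing segments: " + missing.size()); // prints 1
    }
}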
Use of io.druid.query.timeseries.TimeseriesQueryQueryToolChest in project druid by druid-io.
From the class RetryQueryRunnerTest, method testException:
@Test(expected = SegmentMissingException.class)
public void testException() throws Exception {
    Map<String, Object> context = new MapMaker().makeMap();
    context.put(Result.MISSING_SEGMENTS_KEY, Lists.newArrayList());
    RetryQueryRunner<Result<TimeseriesResultValue>> runner = new RetryQueryRunner<>(new QueryRunner<Result<TimeseriesResultValue>>() {

        @Override
        public Sequence<Result<TimeseriesResultValue>> run(Query<Result<TimeseriesResultValue>> query, Map<String, Object> context) {
            ((List) context.get(Result.MISSING_SEGMENTS_KEY)).add(new SegmentDescriptor(new Interval(178888, 1999999), "test", 1));
            return Sequences.empty();
        }
    }, (QueryToolChest) new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()), new RetryQueryRunnerConfig() {

        private int numTries = 1;

        private boolean returnPartialResults = false;

        @Override
        public int getNumTries() {
            return numTries;
        }

        @Override
        public boolean isReturnPartialResults() {
            return returnPartialResults;
        }
    }, jsonMapper);
    Iterable<Result<TimeseriesResultValue>> actualResults = Sequences.toList(runner.run(query, context), Lists.<Result<TimeseriesResultValue>>newArrayList());
    Assert.assertTrue("Should have one entry in the list of missing segments", ((List) context.get(Result.MISSING_SEGMENTS_KEY)).size() == 1);
}
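Here the wrapped runner reports the segment missing on every attempt, so the single retry cannot recover it; because partial results are disabled, RetryQueryRunner is expected to throw SegmentMissingException once the retry budget is exhausted. A hypothetical sketch of that end-of-retries decision (plain Java, not Druid's implementation; RuntimeException stands in for SegmentMissingException):

import java.util.List;

final class RetryDecisionSketch {
    // Called after the last retry: either abort or accept partial results.
    static <T> List<T> finish(List<T> partialResults, List<String> stillMissing, boolean returnPartialResults) {
        if (!stillMissing.isEmpty() && !returnPartialResults) {
            // Stand-in for throwing io.druid.segment.SegmentMissingException.
            throw new RuntimeException("No results found for segments " + stillMissing);
        }
        return partialResults; // partial results were explicitly allowed
    }
}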
Use of io.druid.query.timeseries.TimeseriesQueryQueryToolChest in project druid by druid-io.
From the class RetryQueryRunnerTest, method testRetry:
@Test
public void testRetry() throws Exception {
    Map<String, Object> context = new MapMaker().makeMap();
    context.put("count", 0);
    context.put(Result.MISSING_SEGMENTS_KEY, Lists.newArrayList());
    RetryQueryRunner<Result<TimeseriesResultValue>> runner = new RetryQueryRunner<>(new QueryRunner<Result<TimeseriesResultValue>>() {

        @Override
        public Sequence<Result<TimeseriesResultValue>> run(Query<Result<TimeseriesResultValue>> query, Map<String, Object> context) {
            if ((int) context.get("count") == 0) {
                ((List) context.get(Result.MISSING_SEGMENTS_KEY)).add(new SegmentDescriptor(new Interval(178888, 1999999), "test", 1));
                context.put("count", 1);
                return Sequences.empty();
            } else {
                return Sequences.simple(Arrays.asList(new Result<>(new DateTime(), new TimeseriesResultValue(Maps.<String, Object>newHashMap()))));
            }
        }
    }, (QueryToolChest) new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()), new RetryQueryRunnerConfig() {

        private int numTries = 1;

        private boolean returnPartialResults = true;

        @Override
        public int getNumTries() {
            return numTries;
        }

        @Override
        public boolean isReturnPartialResults() {
            return returnPartialResults;
        }
    }, jsonMapper);
    Iterable<Result<TimeseriesResultValue>> actualResults = Sequences.toList(runner.run(query, context), Lists.<Result<TimeseriesResultValue>>newArrayList());
    Assert.assertTrue("Should return a list with one element", ((List) actualResults).size() == 1);
    Assert.assertTrue("Should have nothing in missingSegment list", ((List) context.get(Result.MISSING_SEGMENTS_KEY)).size() == 0);
}
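This test is the happy path: the first attempt reports one missing segment, the single allowed retry succeeds, and the missing-segments list ends up empty because it is cleared before the retry. A small stand-alone model of that loop (all names illustrative, not Druid's code):

import java.util.ArrayList;
import java.util.List;

public class RetryLoopSketch {
    public static void main(String[] args) {
        List<String> missing = new ArrayList<>();
        List<String> results = new ArrayList<>();
        int numTries = 1;

        // First attempt: the runner reports a missing segment and returns nothing.
        missing.add("test_v1_1");

        for (int i = 0; i < numTries && !missing.isEmpty(); i++) {
            missing.clear();    // reset the report before retrying
            results.add("row"); // the retry serves the segment successfully
        }

        // Mirrors the test's assertions: one result, no missing segments.
        System.out.println(results.size() + " result(s), " + missing.size() + " missing");
    }
}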
Use of io.druid.query.timeseries.TimeseriesQueryQueryToolChest in project druid by druid-io.
From the class FilteredAggregatorBenchmark, method setup:
@Setup
public void setup() throws IOException {
    log.info("SETUP CALLED AT " + System.currentTimeMillis());
    if (ComplexMetrics.getSerdeForType("hyperUnique") == null) {
        ComplexMetrics.registerSerde("hyperUnique", new HyperUniquesSerde(HyperLogLogHash.getDefault()));
    }
    schemaInfo = BenchmarkSchemas.SCHEMA_MAP.get(schema);
    BenchmarkDataGenerator gen = new BenchmarkDataGenerator(schemaInfo.getColumnSchemas(), RNG_SEED, schemaInfo.getDataInterval(), rowsPerSegment);
    incIndex = makeIncIndex(schemaInfo.getAggsArray());
    filter = new OrDimFilter(Arrays.asList(
        new BoundDimFilter("dimSequential", "-1", "-1", true, true, null, null, StringComparators.ALPHANUMERIC),
        new JavaScriptDimFilter("dimSequential", "function(x) { return false }", null, JavaScriptConfig.getEnabledInstance()),
        new RegexDimFilter("dimSequential", "X", null),
        new SearchQueryDimFilter("dimSequential", new ContainsSearchQuerySpec("X", false), null),
        new InDimFilter("dimSequential", Arrays.asList("X"), null)
    ));
    filteredMetrics = new AggregatorFactory[1];
    filteredMetrics[0] = new FilteredAggregatorFactory(new CountAggregatorFactory("rows"), filter);
    incIndexFilteredAgg = makeIncIndex(filteredMetrics);
    inputRows = new ArrayList<>();
    for (int j = 0; j < rowsPerSegment; j++) {
        InputRow row = gen.nextRow();
        if (j % 10000 == 0) {
            log.info(j + " rows generated.");
        }
        incIndex.add(row);
        inputRows.add(row);
    }
    tmpDir = Files.createTempDir();
    log.info("Using temp dir: " + tmpDir.getAbsolutePath());
    indexFile = INDEX_MERGER_V9.persist(incIndex, tmpDir, new IndexSpec());
    qIndex = INDEX_IO.loadIndex(indexFile);
    factory = new TimeseriesQueryRunnerFactory(new TimeseriesQueryQueryToolChest(QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()), new TimeseriesQueryEngine(), QueryBenchmarkUtil.NOOP_QUERYWATCHER);
    BenchmarkSchemaInfo basicSchema = BenchmarkSchemas.SCHEMA_MAP.get("basic");
    QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Arrays.asList(basicSchema.getDataInterval()));
    List<AggregatorFactory> queryAggs = new ArrayList<>();
    queryAggs.add(filteredMetrics[0]);
    query = Druids.newTimeseriesQueryBuilder().dataSource("blah").granularity(Granularities.ALL).intervals(intervalSpec).aggregators(queryAggs).descending(false).build();
}
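Note that every sub-filter in the OrDimFilter appears deliberately unsatisfiable (an empty strict bound, a JavaScript predicate that always returns false, and regex/contains/in checks for an "X" that never occurs in dimSequential), so the FilteredAggregatorFactory pays full filter-evaluation cost while the wrapped count never fires. A hypothetical sketch of that wrapping pattern, with no Druid types involved:

// Illustrative shape of a filtered aggregator: the base aggregator is only
// consulted for rows that pass the filter, so an unmatchable filter isolates
// the cost of filter evaluation itself.
interface RowPredicate {
    boolean matches(String row);
}

interface Aggregator {
    void aggregate(String row);
}

final class FilteredAggregatorSketch implements Aggregator {
    private final RowPredicate filter;
    private final Aggregator delegate;

    FilteredAggregatorSketch(RowPredicate filter, Aggregator delegate) {
        this.filter = filter;
        this.delegate = delegate;
    }

    @Override
    public void aggregate(String row) {
        if (filter.matches(row)) { // this check is what the benchmark stresses
            delegate.aggregate(row);
        }
    }
}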
Use of io.druid.query.timeseries.TimeseriesQueryQueryToolChest in project druid by druid-io.
From the class CachingClusteredClientTest, method testSingleDimensionPruning:
@Test
public void testSingleDimensionPruning() throws Exception {
    DimFilter filter = Druids.newAndDimFilterBuilder().fields(Arrays.asList(
        Druids.newOrDimFilterBuilder().fields(Arrays.asList(
            new SelectorDimFilter("dim1", "a", null),
            new BoundDimFilter("dim1", "from", "to", false, false, false, null, StringComparators.LEXICOGRAPHIC)
        )).build(),
        Druids.newAndDimFilterBuilder().fields(Arrays.asList(
            new InDimFilter("dim2", Arrays.asList("a", "c", "e", "g"), null),
            new BoundDimFilter("dim2", "aaa", "hi", false, false, false, null, StringComparators.LEXICOGRAPHIC),
            new BoundDimFilter("dim2", "e", "zzz", true, true, false, null, StringComparators.LEXICOGRAPHIC)
        )).build()
    )).build();
    final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder().dataSource(DATA_SOURCE).filters(filter).granularity(GRANULARITY).intervals(SEG_SPEC).context(CONTEXT).intervals("2011-01-05/2011-01-10").aggregators(RENAMED_AGGS).postAggregators(RENAMED_POST_AGGS);
    TimeseriesQuery query = builder.build();
    Map<String, List> context = new HashMap<>();
    final Interval interval1 = new Interval("2011-01-06/2011-01-07");
    final Interval interval2 = new Interval("2011-01-07/2011-01-08");
    final Interval interval3 = new Interval("2011-01-08/2011-01-09");
    QueryRunner runner = new FinalizeResultsQueryRunner(client, new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()));
    final DruidServer lastServer = servers[random.nextInt(servers.length)];
    ServerSelector selector1 = makeMockSingleDimensionSelector(lastServer, "dim1", null, "b", 1);
    ServerSelector selector2 = makeMockSingleDimensionSelector(lastServer, "dim1", "e", "f", 2);
    ServerSelector selector3 = makeMockSingleDimensionSelector(lastServer, "dim1", "hi", "zzz", 3);
    ServerSelector selector4 = makeMockSingleDimensionSelector(lastServer, "dim2", "a", "e", 4);
    ServerSelector selector5 = makeMockSingleDimensionSelector(lastServer, "dim2", null, null, 5);
    ServerSelector selector6 = makeMockSingleDimensionSelector(lastServer, "other", "b", null, 6);
    timeline.add(interval1, "v", new StringPartitionChunk<>(null, "a", 1, selector1));
    timeline.add(interval1, "v", new StringPartitionChunk<>("a", "b", 2, selector2));
    timeline.add(interval1, "v", new StringPartitionChunk<>("b", null, 3, selector3));
    timeline.add(interval2, "v", new StringPartitionChunk<>(null, "d", 4, selector4));
    timeline.add(interval2, "v", new StringPartitionChunk<>("d", null, 5, selector5));
    timeline.add(interval3, "v", new StringPartitionChunk<>(null, null, 6, selector6));
    final Capture<TimeseriesQuery> capture = Capture.newInstance();
    final Capture<Map<String, List>> contextCap = Capture.newInstance();
    QueryRunner mockRunner = EasyMock.createNiceMock(QueryRunner.class);
    EasyMock.expect(mockRunner.run(EasyMock.capture(capture), EasyMock.capture(contextCap))).andReturn(Sequences.empty()).anyTimes();
    EasyMock.expect(serverView.getQueryRunner(lastServer)).andReturn(mockRunner).anyTimes();
    EasyMock.replay(serverView);
    EasyMock.replay(mockRunner);
    List<SegmentDescriptor> descriptors = new ArrayList<>();
    descriptors.add(new SegmentDescriptor(interval1, "v", 1));
    descriptors.add(new SegmentDescriptor(interval1, "v", 3));
    descriptors.add(new SegmentDescriptor(interval2, "v", 5));
    descriptors.add(new SegmentDescriptor(interval3, "v", 6));
    MultipleSpecificSegmentSpec expected = new MultipleSpecificSegmentSpec(descriptors);
    Sequences.toList(runner.run(query, context), Lists.newArrayList());
    Assert.assertEquals(expected, capture.getValue().getQuerySegmentSpec());
}
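The expected descriptors show the pruning at work: shard 2 (dim1 in ["e", "f")) and shard 4 (dim2 in ["a", "e")) are skipped because their value ranges cannot intersect the filter, while shards whose ranges are unbounded or belong to an unfiltered dimension must still be queried. A self-contained sketch of the range-intersection test behind that decision (hypothetical names, not Druid's implementation; null bounds mean unbounded, as in StringPartitionChunk):

public class ShardPruningSketch {
    // A shard covering [shardStart, shardEnd) can be skipped when the query's
    // value range [queryStart, queryEnd] for that dimension cannot intersect it.
    static boolean mayIntersect(String shardStart, String shardEnd, String queryStart, String queryEnd) {
        boolean startsBeforeShardEnds = shardEnd == null || queryStart == null || queryStart.compareTo(shardEnd) < 0;
        boolean endsAtOrAfterShardStarts = shardStart == null || queryEnd == null || queryEnd.compareTo(shardStart) >= 0;
        return startsBeforeShardEnds && endsAtOrAfterShardStarts;
    }

    public static void main(String[] args) {
        // Shard 2 covers dim1 in ["e", "f"); the filter allows dim1 = "a"
        // or dim1 in ["from", "to"] -- neither can touch the shard, so it is pruned.
        System.out.println(mayIntersect("e", "f", "a", "a"));        // false
        System.out.println(mayIntersect("e", "f", "from", "to"));    // false
        // Shard 3 covers dim1 in ["hi", "zzz"), which ["from", "to"] does overlap, so it is queried.
        System.out.println(mayIntersect("hi", "zzz", "from", "to")); // true
    }
}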