use of org.apache.druid.query.topn.TopNResultValue in project druid by druid-io.
the class CachingClusteredClientTest method testOutOfOrderSequenceMerging.
@Test
public void testOutOfOrderSequenceMerging()
{
  List<Sequence<Result<TopNResultValue>>> sequences = ImmutableList.of(
      Sequences.simple(
          makeTopNResultsWithoutRename(
              DateTimes.of("2011-01-07"), "a", 50, 4991, "b", 50, 4990, "c", 50, 4989,
              DateTimes.of("2011-01-08"), "a", 50, 4988, "b", 50, 4987, "c", 50, 4986,
              DateTimes.of("2011-01-09"), "a", 50, 4985, "b", 50, 4984, "c", 50, 4983
          )
      ),
      Sequences.simple(
          makeTopNResultsWithoutRename(
              DateTimes.of("2011-01-06T01"), "a", 50, 4991, "b", 50, 4990, "c", 50, 4989,
              DateTimes.of("2011-01-07T01"), "a", 50, 4991, "b", 50, 4990, "c", 50, 4989,
              DateTimes.of("2011-01-08T01"), "a", 50, 4988, "b", 50, 4987, "c", 50, 4986,
              DateTimes.of("2011-01-09T01"), "a", 50, 4985, "b", 50, 4984, "c", 50, 4983
          )
      )
  );

  TestHelper.assertExpectedResults(
      makeTopNResultsWithoutRename(
          DateTimes.of("2011-01-06T01"), "a", 50, 4991, "b", 50, 4990, "c", 50, 4989,
          DateTimes.of("2011-01-07"), "a", 50, 4991, "b", 50, 4990, "c", 50, 4989,
          DateTimes.of("2011-01-07T01"), "a", 50, 4991, "b", 50, 4990, "c", 50, 4989,
          DateTimes.of("2011-01-08"), "a", 50, 4988, "b", 50, 4987, "c", 50, 4986,
          DateTimes.of("2011-01-08T01"), "a", 50, 4988, "b", 50, 4987, "c", 50, 4986,
          DateTimes.of("2011-01-09"), "a", 50, 4985, "b", 50, 4984, "c", 50, 4983,
          DateTimes.of("2011-01-09T01"), "a", 50, 4985, "b", 50, 4984, "c", 50, 4983
      ),
      mergeSequences(
          new TopNQueryBuilder()
              .dataSource("test")
              .intervals("2011-01-06/2011-01-10")
              .dimension("a")
              .metric("b")
              .threshold(3)
              .aggregators(new CountAggregatorFactory("b"))
              .randomQueryId()
              .build(),
          sequences
      )
  );
}
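The makeTopNResultsWithoutRename helper is not reproduced on this page. A minimal sketch of what it plausibly looks like, assuming it uses the same varargs convention (a DateTime followed by dimension/rows/imps triples) as the makeTopNResults helper shown in the next example; the delegation and the meaning of the flag are assumptions, not verified Druid source:

// Plausible sketch only: assumes the varargs layout matches makeTopNResults below,
// and that "without rename" means the original metric names are kept as-is.
private Iterable<Result<TopNResultValue>> makeTopNResultsWithoutRename(Object... objects)
{
  return makeTopNResults(false, objects);
}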
use of org.apache.druid.query.topn.TopNResultValue in project druid by druid-io.
the class BackgroundCachePopulatorTest method makeTopNResults.
// Builds TopN results from a flat varargs list: each entry is a DateTime
// followed by (dimension value, rows, imps) triples until the next DateTime.
private List<Result> makeTopNResults(boolean cachedResults, Object... objects)
{
  List<Result> retVal = new ArrayList<>();
  int index = 0;
  while (index < objects.length) {
    DateTime timestamp = (DateTime) objects[index++];
    List<Map<String, Object>> values = new ArrayList<>();
    while (index < objects.length && !(objects[index] instanceof DateTime)) {
      if (objects.length - index < 3) {
        throw new ISE("expect 3 values for each entry in the top list, had %d values left.", objects.length - index);
      }
      final double imps = ((Number) objects[index + 2]).doubleValue();
      final double rows = ((Number) objects[index + 1]).doubleValue();
      if (cachedResults) {
        // Cached results omit the derived avg_imps_per_row metric.
        values.add(ImmutableMap.of("top_dim", objects[index], "rows", rows, "imps", imps, "impers", imps));
      } else {
        values.add(ImmutableMap.of("top_dim", objects[index], "rows", rows, "imps", imps, "impers", imps, "avg_imps_per_row", imps / rows));
      }
      index += 3;
    }
    retVal.add(new Result<>(timestamp, new TopNResultValue(values)));
  }
  return retVal;
}
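For illustration, a hypothetical call to this helper; the argument layout (a DateTime followed by dimension-value/rows/imps triples until the next DateTime) follows directly from the parsing loop above, but these literal values are invented:

// Builds two TopN results, one per timestamp, each holding two entries.
List<Result> results = makeTopNResults(
    false,
    DateTimes.of("2011-01-05"), "a", 50, 4994, "b", 50, 4993,
    DateTimes.of("2011-01-06"), "a", 50, 4992, "b", 50, 4991
);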
use of org.apache.druid.query.topn.TopNResultValue in project druid by druid-io.
the class DummyStringVirtualColumnTest method testTopN.
private void testTopN(List<Segment> segments, boolean enableRowBasedMethods, boolean enableColumnBasedMethods)
{
  TopNQuery query = new TopNQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(Granularities.ALL)
      .dimension(VSTRING_DIM)
      .metric(COUNT)
      .threshold(1)
      .aggregators(new CountAggregatorFactory(COUNT))
      .virtualColumns(
          new DummyStringVirtualColumn(
              QueryRunnerTestHelper.MARKET_DIMENSION,
              VSTRING_DIM,
              enableRowBasedMethods,
              enableColumnBasedMethods,
              false,
              true
          )
      )
      .intervals("2000/2030")
      .build();

  List rows = topNTestHelper.runQueryOnSegmentsObjs(segments, query).toList();

  List<Result<TopNResultValue>> expectedRows = Collections.singletonList(
      new Result<>(
          DateTimes.of("2011-01-12T00:00:00.000Z"),
          new TopNResultValue(
              Collections.<Map<String, Object>>singletonList(
                  ImmutableMap.<String, Object>builder()
                      .put(COUNT, 1674L)
                      .put(VSTRING_DIM, "spot")
                      .build()
              )
          )
      )
  );

  TestHelper.assertExpectedResults(expectedRows, (List<Result<TopNResultValue>>) rows, "failed");
}
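The two boolean parameters toggle which DummyStringVirtualColumn code paths the query exercises. The surrounding test class presumably invokes this helper with different flag combinations; the exact calls below are illustrative assumptions, not the verified test source:

// Hypothetical invocations; segment construction is test-specific and omitted.
testTopN(segments, true, false);   // exercise only row-based value selectors
testTopN(segments, false, true);   // exercise only column-based value selectors
testTopN(segments, true, true);    // allow both code paths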
use of org.apache.druid.query.topn.TopNResultValue in project druid by druid-io.
the class SchemalessTestSimpleTest method testFullOnTopN.
// @Test TODO: Handling of null values is inconsistent right now, need to make it all consistent and re-enable test
// TODO: Complain to Eric when you see this. It shouldn't be like this...
@Ignore
@SuppressWarnings("unused")
public void testFullOnTopN()
{
  TopNQuery query = new TopNQueryBuilder()
      .dataSource(dataSource)
      .granularity(ALL_GRAN)
      .dimension(marketDimension)
      .metric(indexMetric)
      .threshold(3)
      .intervals(fullOnInterval)
      .aggregators(
          Lists.newArrayList(
              Iterables.concat(
                  commonAggregators,
                  Lists.newArrayList(
                      new DoubleMaxAggregatorFactory("maxIndex", "index"),
                      new DoubleMinAggregatorFactory("minIndex", "index")
                  )
              )
          )
      )
      .postAggregators(addRowsIndexConstant)
      .build();

  List<Result<TopNResultValue>> expectedResults = Collections.singletonList(
      new Result<>(
          DateTimes.of("2011-01-12T00:00:00.000Z"),
          new TopNResultValue(
              Arrays.asList(
                  new DimensionAndMetricValueExtractor(
                      ImmutableMap.<String, Object>builder()
                          .put("market", "spot")
                          .put("rows", 4L)
                          .put("index", 400.0D)
                          .put("addRowsIndexConstant", 405.0D)
                          .put("uniques", 1.0002442201269182D)
                          .put("maxIndex", 100.0)
                          .put("minIndex", 100.0)
                          .build()
                  ),
                  new DimensionAndMetricValueExtractor(
                      ImmutableMap.<String, Object>builder()
                          .put("market", "")
                          .put("rows", 2L)
                          .put("index", 200.0D)
                          .put("addRowsIndexConstant", 203.0D)
                          .put("uniques", 0.0)
                          .put("maxIndex", 100.0D)
                          .put("minIndex", 100.0D)
                          .build()
                  ),
                  new DimensionAndMetricValueExtractor(
                      ImmutableMap.<String, Object>builder()
                          .put("market", "total_market")
                          .put("rows", 2L)
                          .put("index", 200.0D)
                          .put("addRowsIndexConstant", 203.0D)
                          .put("uniques", 1.0002442201269182D)
                          .put("maxIndex", 100.0D)
                          .put("minIndex", 100.0D)
                          .build()
                  )
              )
          )
      )
  );

  try (CloseableStupidPool<ByteBuffer> pool = TestQueryRunners.createDefaultNonBlockingPool()) {
    QueryRunner runner = TestQueryRunners.makeTopNQueryRunner(segment, pool);
    TestHelper.assertExpectedResults(expectedResults, runner.run(QueryPlus.wrap(query)));
  }
}
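If one wanted to inspect the rows rather than delegate the comparison to TestHelper, the Sequence returned by the runner can be materialized directly; a small sketch under the same setup, using only calls that already appear in the examples on this page:

// Hypothetical variant: runner.run returns a lazy Sequence, so it must be
// consumed (here via toList) before the pool is closed by try-with-resources.
try (CloseableStupidPool<ByteBuffer> pool = TestQueryRunners.createDefaultNonBlockingPool()) {
  QueryRunner runner = TestQueryRunners.makeTopNQueryRunner(segment, pool);
  List<Result<TopNResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();
}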
use of org.apache.druid.query.topn.TopNResultValue in project druid by druid-io.
the class TestHelper method assertTopNResultValue.
private static void assertTopNResultValue(String msg, Result expected, Result actual)
{
  TopNResultValue expectedVal = (TopNResultValue) expected.getValue();
  TopNResultValue actualVal = (TopNResultValue) actual.getValue();

  List<Row> listExpectedRows = expectedVal.getValue()
      .stream()
      .map(dimensionAndMetricValueExtractor -> new MapBasedRow(
          expected.getTimestamp(),
          dimensionAndMetricValueExtractor.getBaseObject()
      ))
      .collect(Collectors.toList());

  List<Row> listActualRows = actualVal.getValue()
      .stream()
      .map(dimensionAndMetricValueExtractor -> new MapBasedRow(
          actual.getTimestamp(),
          dimensionAndMetricValueExtractor.getBaseObject()
      ))
      .collect(Collectors.toList());

  Assert.assertEquals("Size of list must match", listExpectedRows.size(), listActualRows.size());

  IntStream.range(0, listExpectedRows.size())
      .forEach(value -> assertRow(
          StringUtils.format("%s, on value number [%s]", msg, value),
          listExpectedRows.get(value),
          listActualRows.get(value)
      ));
}
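As a usage sketch (necessarily placed inside TestHelper, since the method is private), two results built from identical rows should pass the row-by-row comparison; the literal values here are hypothetical:

// Hypothetical: both results wrap the same single row, so each assertRow call succeeds.
Result expected = new Result<>(
    DateTimes.of("2011-01-12"),
    new TopNResultValue(Collections.singletonList(ImmutableMap.of("market", "spot", "rows", 4L)))
);
Result actual = new Result<>(
    DateTimes.of("2011-01-12"),
    new TopNResultValue(Collections.singletonList(ImmutableMap.of("market", "spot", "rows", 4L)))
);
assertTopNResultValue("topN mismatch", expected, actual);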