Use of org.joda.time.DateTime in project druid by druid-io: class TimeBoundaryQueryQueryToolChestTest, method testCacheStrategy.
@Test
public void testCacheStrategy() throws Exception {
    CacheStrategy<Result<TimeBoundaryResultValue>, Object, TimeBoundaryQuery> strategy =
        new TimeBoundaryQueryQueryToolChest().getCacheStrategy(
            new TimeBoundaryQuery(
                new TableDataSource("dummy"),
                new MultipleIntervalSegmentSpec(ImmutableList.of(new Interval("2015-01-01/2015-01-02"))),
                null, null, null
            )
        );
    final Result<TimeBoundaryResultValue> result = new Result<>(
        new DateTime(123L),
        new TimeBoundaryResultValue(ImmutableMap.of(
            TimeBoundaryQuery.MIN_TIME, new DateTime(0L).toString(),
            TimeBoundaryQuery.MAX_TIME, new DateTime("2015-01-01").toString()
        ))
    );
    // Round-trip the result through the cache representation and JSON,
    // then verify nothing was lost.
    Object preparedValue = strategy.prepareForCache().apply(result);
    ObjectMapper objectMapper = new DefaultObjectMapper();
    Object fromCacheValue = objectMapper.readValue(objectMapper.writeValueAsBytes(preparedValue), strategy.getCacheObjectClazz());
    Result<TimeBoundaryResultValue> fromCacheResult = strategy.pullFromCache().apply(fromCacheValue);
    Assert.assertEquals(result, fromCacheResult);
}
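The test relies on DateTime values surviving a serialization round trip. Independent of Druid's cache format, Joda-Time's DateTime itself round-trips through its ISO-8601 string form; a minimal standalone sketch of that property (plain Joda-Time, not from the Druid source; the class name is ours):

import org.joda.time.DateTime;

public class DateTimeRoundTripSketch {
    public static void main(String[] args) {
        DateTime fromMillis = new DateTime(123L);       // 123 ms after the epoch, default zone
        DateTime fromIso = new DateTime("2015-01-01");  // ISO-8601 form, as used for MAX_TIME above
        // toString() emits ISO-8601 including the zone offset, so parsing it
        // back yields an equal instant; this is what lets the cached form
        // compare equal to the original Result in the assertion.
        DateTime reparsed = new DateTime(fromMillis.toString());
        System.out.println(fromMillis.equals(reparsed)); // true
        System.out.println(fromIso.getMillis());
    }
}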
Use of org.joda.time.DateTime in project druid by druid-io: class TopNBinaryFnBenchmark, method setUp.
@Override
protected void setUp() throws Exception {
    final ConstantPostAggregator constant = new ConstantPostAggregator("const", 1L);
    final FieldAccessPostAggregator rowsPostAgg = new FieldAccessPostAggregator("rows", "rows");
    final FieldAccessPostAggregator indexPostAgg = new FieldAccessPostAggregator("index", "index");
    final List<AggregatorFactory> aggregatorFactories = new ArrayList<>();
    aggregatorFactories.add(new CountAggregatorFactory("rows"));
    aggregatorFactories.add(new LongSumAggregatorFactory("index", "index"));
    for (int i = 1; i < aggCount; i++) {
        aggregatorFactories.add(new CountAggregatorFactory("rows" + i));
    }
    final List<PostAggregator> postAggregators = new ArrayList<>();
    for (int i = 0; i < postAggCount; i++) {
        postAggregators.add(
            new ArithmeticPostAggregator(
                "addrowsindexconstant" + i,
                "+",
                Lists.newArrayList(constant, rowsPostAgg, indexPostAgg)
            )
        );
    }
    final DateTime currTime = new DateTime();
    List<Map<String, Object>> list = new ArrayList<>();
    for (int i = 0; i < threshold; i++) {
        Map<String, Object> res = new HashMap<>();
        res.put("testdim", "" + i);
        res.put("rows", 1L);
        for (int j = 0; j < aggCount; j++) {
            res.put("rows" + j, 1L);
        }
        res.put("index", 1L);
        list.add(res);
    }
    result1 = new Result<>(currTime, new TopNResultValue(list));
    List<Map<String, Object>> list2 = new ArrayList<>();
    for (int i = 0; i < threshold; i++) {
        Map<String, Object> res = new HashMap<>();
        res.put("testdim", "" + i);
        res.put("rows", 2L);
        for (int j = 0; j < aggCount; j++) {
            res.put("rows" + j, 2L);
        }
        res.put("index", 2L);
        list2.add(res);
    }
    result2 = new Result<>(currTime, new TopNResultValue(list2));
    fn = new TopNBinaryFn(
        TopNResultMerger.identity,
        Granularities.ALL,
        new DefaultDimensionSpec("testdim", null),
        new NumericTopNMetricSpec("index"),
        100,
        aggregatorFactories,
        postAggregators
    );
}
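The timed method itself is not shown on this page; presumably it feeds the two prepared results through fn repeatedly. A hedged sketch of what such a body could look like, in the Caliper reps-loop style — the method name timeMerge and the harness are our assumptions; only fn, result1, and result2 come from the setUp() above:

// Hypothetical timed method; not copied from the source.
public void timeMerge(int reps) {
    for (int i = 0; i < reps; i++) {
        // TopNBinaryFn merges two partial results that share a timestamp
        // into a single Result<TopNResultValue>.
        Result<TopNResultValue> merged = fn.apply(result1, result2);
    }
}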
Use of org.joda.time.DateTime in project druid by druid-io: class TopNQueryRunnerTest, method testTopNDimExtractionToOne.
@Test
public void testTopNDimExtractionToOne() throws IOException {
    TopNQuery query = new TopNQueryBuilder()
        .dataSource(QueryRunnerTestHelper.dataSource)
        .granularity(QueryRunnerTestHelper.allGran)
        .dimension(
            new ExtractionDimensionSpec(
                QueryRunnerTestHelper.marketDimension,
                QueryRunnerTestHelper.marketDimension,
                new JavaScriptExtractionFn("function(f) { return \"POTATO\"; }", false, JavaScriptConfig.getEnabledInstance())
            )
        )
        .metric("rows")
        .threshold(10)
        .intervals(QueryRunnerTestHelper.fullOnInterval)
        .aggregators(QueryRunnerTestHelper.commonAggregators)
        .postAggregators(Arrays.<PostAggregator>asList(QueryRunnerTestHelper.addRowsIndexConstant))
        .build();
    Granularity gran = Granularities.DAY;
    // Note: tsQuery is constructed here but never used below.
    TimeseriesQuery tsQuery = Druids.newTimeseriesQueryBuilder()
        .dataSource(QueryRunnerTestHelper.dataSource)
        .granularity(gran)
        .intervals(QueryRunnerTestHelper.fullOnInterval)
        .aggregators(Arrays.asList(
            QueryRunnerTestHelper.rowsCount,
            QueryRunnerTestHelper.indexDoubleSum,
            QueryRunnerTestHelper.qualityUniques
        ))
        .postAggregators(Arrays.<PostAggregator>asList(QueryRunnerTestHelper.addRowsIndexConstant))
        .build();
    List<Result<TopNResultValue>> expectedResults = Arrays.asList(
        new Result<>(
            new DateTime("2011-01-12T00:00:00.000Z"),
            new TopNResultValue(Arrays.<Map<String, Object>>asList(
                ImmutableMap.<String, Object>of(
                    "addRowsIndexConstant", 504542.5071372986D,
                    "index", 503332.5071372986D,
                    QueryRunnerTestHelper.marketDimension, "POTATO",
                    "uniques", QueryRunnerTestHelper.UNIQUES_9,
                    "rows", 1209L
                )
            ))
        )
    );
    List<Result<TopNResultValue>> list = Sequences.toList(runWithMerge(query), new ArrayList<Result<TopNResultValue>>());
    Assert.assertEquals(list.size(), 1);
    Assert.assertEquals("Didn't merge results", list.get(0).getValue().getValue().size(), 1);
    TestHelper.assertExpectedResults(expectedResults, list, "Failed to match");
}
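The single-row outcome follows from the extraction function: every market value is mapped to the constant "POTATO", so all rows land in one dimension bucket before ranking. A plain-Java rendering of the same mapping (illustrative only; the method name is ours):

// Java equivalent of the JavaScript extraction above: every input maps to
// the same constant, so the whole interval merges into one "POTATO" row.
static String extractMarket(String marketValue) {
    return "POTATO";
}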
Use of org.joda.time.DateTime in project druid by druid-io: class TopNQueryRunnerTest, method testNumericDimensionTopNWithNullPreviousStop.
@Test
public void testNumericDimensionTopNWithNullPreviousStop() {
    TopNQuery query = new TopNQueryBuilder()
        .dataSource(QueryRunnerTestHelper.dataSource)
        .granularity(Granularities.ALL)
        .dimension(QueryRunnerTestHelper.marketDimension)
        .metric(new DimensionTopNMetricSpec(null, StringComparators.NUMERIC))
        .threshold(2)
        .intervals(QueryRunnerTestHelper.secondOnly)
        .aggregators(duplicateAggregators(QueryRunnerTestHelper.rowsCount, new CountAggregatorFactory("rows1")))
        .build();
    List<Result<TopNResultValue>> expectedResults = Arrays.asList(
        new Result<>(
            new DateTime("2011-04-02T00:00:00.000Z"),
            new TopNResultValue(withDuplicateResults(
                Arrays.asList(
                    ImmutableMap.<String, Object>of("market", "spot", "rows", 9L),
                    ImmutableMap.<String, Object>of("market", "total_market", "rows", 2L)
                ),
                "rows",
                "rows1"
            ))
        )
    );
    TestHelper.assertExpectedResults(expectedResults, runner.run(query, new HashMap<String, Object>()));
}
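StringComparators.NUMERIC orders dimension values as numbers rather than as code points, and the null previousStop means the scan starts from the first value. A small sketch contrasting the two orderings (our example values; assumes NUMERIC compares parseable strings by numeric value):

// "9" vs "10": numeric order puts 9 first; lexicographic order puts "10" first.
int byNumber = StringComparators.NUMERIC.compare("9", "10"); // negative
int byString = "9".compareTo("10");                          // positive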
Use of org.joda.time.DateTime in project druid by druid-io: class TopNQueryRunnerTest, method testTopNWithExtractionFilter.
@Test
public void testTopNWithExtractionFilter() {
    Map<String, String> extractionMap = new HashMap<>();
    extractionMap.put("spot", "spot0");
    MapLookupExtractor mapLookupExtractor = new MapLookupExtractor(extractionMap, false);
    LookupExtractionFn lookupExtractionFn = new LookupExtractionFn(mapLookupExtractor, false, null, true, false);
    TopNQuery query = new TopNQueryBuilder()
        .dataSource(QueryRunnerTestHelper.dataSource)
        .granularity(QueryRunnerTestHelper.allGran)
        .dimension(QueryRunnerTestHelper.marketDimension)
        .metric("rows")
        .threshold(3)
        .intervals(QueryRunnerTestHelper.firstToThird)
        .aggregators(QueryRunnerTestHelper.commonAggregators)
        .postAggregators(Arrays.<PostAggregator>asList(QueryRunnerTestHelper.addRowsIndexConstant))
        .filters(new ExtractionDimFilter(QueryRunnerTestHelper.marketDimension, "spot0", lookupExtractionFn, null))
        .build();
    List<Result<TopNResultValue>> expectedResults = Arrays.asList(
        new Result<>(
            new DateTime("2011-04-01T00:00:00.000Z"),
            new TopNResultValue(Arrays.<Map<String, Object>>asList(
                ImmutableMap.<String, Object>of(
                    QueryRunnerTestHelper.marketDimension, "spot",
                    "rows", 18L,
                    "index", 2231.8768157958984D,
                    "addRowsIndexConstant", 2250.8768157958984D,
                    "uniques", QueryRunnerTestHelper.UNIQUES_9
                )
            ))
        )
    );
    assertExpectedResults(expectedResults, query);
    // Assert the optimization path as well
    final Sequence<Result<TopNResultValue>> retval = runWithPreMergeAndMerge(query);
    TestHelper.assertExpectedResults(expectedResults, retval);
}
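For intuition about the filter: the lookup maps "spot" to "spot0", and with retainMissingValue=false plus a null replacement every other value maps to null, so only the original "spot" rows match the filter value "spot0". An illustrative sketch reusing the lookupExtractionFn built above (expected values inferred from the constructor arguments, not from the source):

String mapped = lookupExtractionFn.apply("spot");            // "spot0" -> matches the filter
String unmapped = lookupExtractionFn.apply("total_market");  // null   -> row filtered out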