Use of org.apache.druid.query.extraction.MapLookupExtractor in project druid by druid-io.
From class GroupByQueryRunnerTest, method testBySegmentResultsOptimizedDimextraction:
@Test
public void testBySegmentResultsOptimizedDimextraction()
{
  GroupByQuery.Builder builder = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setInterval("2011-04-02/2011-04-04")
      .setDimensions(new ExtractionDimensionSpec(
          "quality",
          "alias",
          new LookupExtractionFn(
              new MapLookupExtractor(ImmutableMap.of("mezzanine", "mezzanine0"), false),
              false, null, true, false
          )
      ))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
      .setGranularity(new PeriodGranularity(new Period("P1M"), null, null))
      .setDimFilter(new SelectorDimFilter("quality", "mezzanine", null))
      .overrideContext(ImmutableMap.of(QueryContexts.BY_SEGMENT_KEY, true));
  final GroupByQuery fullQuery = builder.build();

  // Every one of the 32 simulated segments returns the same single-row result.
  int segmentCount = 32;
  Result<BySegmentResultValue<ResultRow>> singleSegmentResult = new Result<>(
      DateTimes.of("2011-01-12T00:00:00.000Z"),
      new BySegmentResultValueClass<>(
          Collections.singletonList(
              makeRow(fullQuery, "2011-04-01", "alias", "mezzanine0", "rows", 6L, "idx", 4420L)
          ),
          QueryRunnerTestHelper.SEGMENT_ID.toString(),
          Intervals.of("2011-04-02T00:00:00.000Z/2011-04-04T00:00:00.000Z")
      )
  );
  List<Result> bySegmentResults = new ArrayList<>();
  for (int i = 0; i < segmentCount; i++) {
    bySegmentResults.add(singleSegmentResult);
  }

  QueryToolChest toolChest = factory.getToolchest();
  List<QueryRunner<ResultRow>> singleSegmentRunners = new ArrayList<>();
  for (int i = 0; i < segmentCount; i++) {
    singleSegmentRunners.add(toolChest.preMergeQueryDecoration(runner));
  }
  // Merge on an executor we can shut down at the end of the test.
  ExecutorService exec = Executors.newCachedThreadPool();
  QueryRunner theRunner = toolChest.postMergeQueryDecoration(
      new FinalizeResultsQueryRunner<>(
          toolChest.mergeResults(factory.mergeRunners(exec, singleSegmentRunners)),
          toolChest
      )
  );

  TestHelper.assertExpectedObjects(
      bySegmentResults,
      theRunner.run(QueryPlus.wrap(fullQuery)),
      "bySegment-dim-extraction"
  );
  exec.shutdownNow();
}
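The positional booleans in the LookupExtractionFn constructor are easy to misread. The following is a minimal sketch of how the lookup in this test behaves on its own, with each argument labeled; this is a hedged reading based on the usage above, not official documentation:

LookupExtractionFn fn = new LookupExtractionFn(
    new MapLookupExtractor(ImmutableMap.of("mezzanine", "mezzanine0"), false), // lookup map; false = not one-to-one
    false, // retainMissingValue: unmapped inputs are not passed through
    null,  // replaceMissingValueWith: unmapped inputs map to null
    true,  // injective: lets the engine rewrite filters through the lookup
    false  // optimize
);
fn.apply("mezzanine"); // "mezzanine0"
fn.apply("news");      // null: not in the map, not retained, no replacement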
Use of org.apache.druid.query.extraction.MapLookupExtractor in project druid by druid-io.
From class GroupByQueryRunnerTest, method testGroupByWithAllFiltersOnNullDimsWithExtractionFns:
@Test
public void testGroupByWithAllFiltersOnNullDimsWithExtractionFns()
{
  Map<String, String> extractionMap = new HashMap<>();
  extractionMap.put("", "EMPTY");
  extractionMap.put(null, "EMPTY");

  MapLookupExtractor mapLookupExtractor = new MapLookupExtractor(extractionMap, false);
  LookupExtractionFn extractionFn = new LookupExtractionFn(mapLookupExtractor, false, "EMPTY", true, true);
  String jsFn = "function(x) { return(x === 'EMPTY') }";

  List<DimFilter> superFilterList = new ArrayList<>();
  superFilterList.add(new SelectorDimFilter("null_column", "EMPTY", extractionFn));
  superFilterList.add(new InDimFilter("null_column", Arrays.asList("NOT-EMPTY", "FOOBAR", "EMPTY"), extractionFn));
  superFilterList.add(new BoundDimFilter("null_column", "EMPTY", "EMPTY", false, false, true, extractionFn, StringComparators.ALPHANUMERIC));
  superFilterList.add(new RegexDimFilter("null_column", "EMPTY", extractionFn));
  superFilterList.add(new SearchQueryDimFilter("null_column", new ContainsSearchQuerySpec("EMPTY", true), extractionFn));
  superFilterList.add(new JavaScriptDimFilter("null_column", jsFn, extractionFn, JavaScriptConfig.getEnabledInstance()));
  DimFilter superFilter = new AndDimFilter(superFilterList);

  GroupByQuery query = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(new DefaultDimensionSpec("null_column", "alias"))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .setDimFilter(superFilter)
      .build();

  List<ResultRow> expectedResults = Arrays.asList(
      makeRow(query, "2011-04-01", "alias", null, "rows", 13L, "idx", 6619L),
      makeRow(query, "2011-04-02", "alias", null, "rows", 13L, "idx", 5827L)
  );

  Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  TestHelper.assertExpectedObjects(expectedResults, results, "extraction");
}
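All six filters above match every row for the same reason: null_column does not exist in the test data, and the extraction function maps both the missing value and "" to "EMPTY", so each filter's predicate sees "EMPTY". A minimal sketch of that mapping in isolation, assuming a null-tolerant backing map (which is why the test uses HashMap rather than ImmutableMap):

Map<String, String> map = new HashMap<>();
map.put("", "EMPTY");
map.put(null, "EMPTY");
LookupExtractionFn fn = new LookupExtractionFn(new MapLookupExtractor(map, false), false, "EMPTY", true, true);
fn.apply((String) null); // "EMPTY": via the map entry or, failing that, replaceMissingValueWith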
Use of org.apache.druid.query.extraction.MapLookupExtractor in project druid by druid-io.
From class GroupByQueryRunnerTest, method testGroupByWithExtractionDimFilterNullDims:
@Test
public void testGroupByWithExtractionDimFilterNullDims()
{
  Map<String, String> extractionMap = new HashMap<>();
  extractionMap.put("", "EMPTY");

  MapLookupExtractor mapLookupExtractor = new MapLookupExtractor(extractionMap, false);
  LookupExtractionFn lookupExtractionFn;
  if (NullHandling.replaceWithDefault()) {
    // Default-value mode: null is read back as "", so the map entry supplies "EMPTY".
    lookupExtractionFn = new LookupExtractionFn(mapLookupExtractor, false, null, true, true);
  } else {
    // SQL-compatible mode: null misses the map, so replaceMissingValueWith supplies
    // "EMPTY"; the poisoned "" entry proves the map path is never taken.
    extractionMap.put("", "SHOULD_NOT_BE_USED");
    lookupExtractionFn = new LookupExtractionFn(mapLookupExtractor, false, "EMPTY", true, true);
  }

  GroupByQuery query = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(new DefaultDimensionSpec("null_column", "alias"))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .setDimFilter(new ExtractionDimFilter("null_column", "EMPTY", lookupExtractionFn, null))
      .build();

  List<ResultRow> expectedResults = Arrays.asList(
      makeRow(query, "2011-04-01", "alias", null, "rows", 13L, "idx", 6619L),
      makeRow(query, "2011-04-02", "alias", null, "rows", 13L, "idx", 5827L)
  );

  Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  TestHelper.assertExpectedObjects(expectedResults, results, "extraction-dim-filter");
}
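ExtractionDimFilter has long been deprecated in Druid in favor of a SelectorDimFilter that carries the extraction function, as the previous test does. A hedged sketch of the equivalent filter, reusing the variables above:

DimFilter equivalent = new SelectorDimFilter("null_column", "EMPTY", lookupExtractionFn);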
Use of org.apache.druid.query.extraction.MapLookupExtractor in project druid by druid-io.
From class GroupByQueryRunnerTest, method testGroupByWithSimpleRenameRetainMissing:
@Test
public void testGroupByWithSimpleRenameRetainMissing()
{
  Map<String, String> map = new HashMap<>();
  map.put("automotive", "automotive0");
  map.put("business", "business0");
  map.put("entertainment", "entertainment0");
  map.put("health", "health0");
  map.put("mezzanine", "mezzanine0");
  map.put("news", "news0");
  map.put("premium", "premium0");
  map.put("technology", "technology0");
  map.put("travel", "travel0");

  GroupByQuery query = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(new ExtractionDimensionSpec(
          "quality",
          "alias",
          new LookupExtractionFn(new MapLookupExtractor(map, false), true, null, true, false)
      ))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .build();

  List<ResultRow> expectedResults = Arrays.asList(
      makeRow(query, "2011-04-01", "alias", "automotive0", "rows", 1L, "idx", 135L),
      makeRow(query, "2011-04-01", "alias", "business0", "rows", 1L, "idx", 118L),
      makeRow(query, "2011-04-01", "alias", "entertainment0", "rows", 1L, "idx", 158L),
      makeRow(query, "2011-04-01", "alias", "health0", "rows", 1L, "idx", 120L),
      makeRow(query, "2011-04-01", "alias", "mezzanine0", "rows", 3L, "idx", 2870L),
      makeRow(query, "2011-04-01", "alias", "news0", "rows", 1L, "idx", 121L),
      makeRow(query, "2011-04-01", "alias", "premium0", "rows", 3L, "idx", 2900L),
      makeRow(query, "2011-04-01", "alias", "technology0", "rows", 1L, "idx", 78L),
      makeRow(query, "2011-04-01", "alias", "travel0", "rows", 1L, "idx", 119L),
      makeRow(query, "2011-04-02", "alias", "automotive0", "rows", 1L, "idx", 147L),
      makeRow(query, "2011-04-02", "alias", "business0", "rows", 1L, "idx", 112L),
      makeRow(query, "2011-04-02", "alias", "entertainment0", "rows", 1L, "idx", 166L),
      makeRow(query, "2011-04-02", "alias", "health0", "rows", 1L, "idx", 113L),
      makeRow(query, "2011-04-02", "alias", "mezzanine0", "rows", 3L, "idx", 2447L),
      makeRow(query, "2011-04-02", "alias", "news0", "rows", 1L, "idx", 114L),
      makeRow(query, "2011-04-02", "alias", "premium0", "rows", 3L, "idx", 2505L),
      makeRow(query, "2011-04-02", "alias", "technology0", "rows", 1L, "idx", 97L),
      makeRow(query, "2011-04-02", "alias", "travel0", "rows", 1L, "idx", 126L)
  );

  Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  TestHelper.assertExpectedObjects(expectedResults, results, "retain-missing");
}
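Here retainMissingValue (the second constructor argument) is true, which only matters for inputs absent from the map; since the map covers all nine quality values, every row is simply renamed. A minimal sketch of the retain behavior on an unmapped input ("sports" is a hypothetical value, not in the test data):

LookupExtractionFn fn = new LookupExtractionFn(
    new MapLookupExtractor(ImmutableMap.of("health", "health0"), false),
    true,  // retainMissingValue
    null, true, false
);
fn.apply("health"); // "health0"
fn.apply("sports"); // "sports": unmapped inputs pass through unchanged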
Use of org.apache.druid.query.extraction.MapLookupExtractor in project druid by druid-io.
From class GroupByQueryRunnerTest, method testGroupByWithAlphaNumericDimensionOrder:
@Test
public void testGroupByWithAlphaNumericDimensionOrder()
{
  // Cannot vectorize due to extraction dimension spec.
  cannotVectorize();

  Map<String, String> map = new HashMap<>();
  map.put("automotive", "health105");
  map.put("business", "health20");
  map.put("entertainment", "travel47");
  map.put("health", "health55");
  map.put("mezzanine", "health09");
  map.put("news", "health0000");
  map.put("premium", "health999");
  map.put("technology", "travel123");
  map.put("travel", "travel555");

  GroupByQuery query = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(new ExtractionDimensionSpec(
          "quality",
          "alias",
          new LookupExtractionFn(new MapLookupExtractor(map, false), false, null, false, false)
      ))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
      .setLimitSpec(new DefaultLimitSpec(
          Collections.singletonList(new OrderByColumnSpec("alias", null, StringComparators.ALPHANUMERIC)),
          null
      ))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .build();

  List<ResultRow> expectedResults = Arrays.asList(
      makeRow(query, "2011-04-01", "alias", "health0000", "rows", 1L, "idx", 121L),
      makeRow(query, "2011-04-01", "alias", "health09", "rows", 3L, "idx", 2870L),
      makeRow(query, "2011-04-01", "alias", "health20", "rows", 1L, "idx", 118L),
      makeRow(query, "2011-04-01", "alias", "health55", "rows", 1L, "idx", 120L),
      makeRow(query, "2011-04-01", "alias", "health105", "rows", 1L, "idx", 135L),
      makeRow(query, "2011-04-01", "alias", "health999", "rows", 3L, "idx", 2900L),
      makeRow(query, "2011-04-01", "alias", "travel47", "rows", 1L, "idx", 158L),
      makeRow(query, "2011-04-01", "alias", "travel123", "rows", 1L, "idx", 78L),
      makeRow(query, "2011-04-01", "alias", "travel555", "rows", 1L, "idx", 119L),
      makeRow(query, "2011-04-02", "alias", "health0000", "rows", 1L, "idx", 114L),
      makeRow(query, "2011-04-02", "alias", "health09", "rows", 3L, "idx", 2447L),
      makeRow(query, "2011-04-02", "alias", "health20", "rows", 1L, "idx", 112L),
      makeRow(query, "2011-04-02", "alias", "health55", "rows", 1L, "idx", 113L),
      makeRow(query, "2011-04-02", "alias", "health105", "rows", 1L, "idx", 147L),
      makeRow(query, "2011-04-02", "alias", "health999", "rows", 3L, "idx", 2505L),
      makeRow(query, "2011-04-02", "alias", "travel47", "rows", 1L, "idx", 166L),
      makeRow(query, "2011-04-02", "alias", "travel123", "rows", 1L, "idx", 97L),
      makeRow(query, "2011-04-02", "alias", "travel555", "rows", 1L, "idx", 126L)
  );

  Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  TestHelper.assertExpectedObjects(expectedResults, results, "alphanumeric-dimension-order");
}
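The expected ordering looks wrong lexicographically ("health20" before "health105") but is correct for StringComparators.ALPHANUMERIC, which compares embedded digit runs as numbers rather than character by character. A minimal sketch:

// Negative results: the left side sorts first under alphanumeric ordering.
StringComparators.ALPHANUMERIC.compare("health20", "health105");  // 20 < 105
StringComparators.ALPHANUMERIC.compare("health0000", "health09"); // 0 < 9
StringComparators.ALPHANUMERIC.compare("health999", "travel47");  // "health" < "travel"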