Use of org.apache.druid.query.groupby.GroupByQueryConfig in project druid by druid-io.
The class StringColumnAggregationTest, method setup.
@Before
public void setup() throws Exception
{
  List<String> dimensions = ImmutableList.of(singleValue, multiValue);
  List<InputRow> inputRows = new ArrayList<>(n);
  for (int i = 1; i <= n; i++) {
    String val = String.valueOf(i * 1.0d);
    inputRows.add(
        new MapBasedInputRow(
            DateTime.now(DateTimeZone.UTC),
            dimensions,
            ImmutableMap.of(singleValue, val, multiValue, Lists.newArrayList(val, null, val))
        )
    );
  }
  aggregationTestHelper = AggregationTestHelper.createGroupByQueryAggregationTestHelper(
      Collections.emptyList(),
      new GroupByQueryConfig(),
      tempFolder
  );
  IncrementalIndex index = AggregationTestHelper.createIncrementalIndex(
      inputRows.iterator(),
      new NoopInputRowParser(null),
      new AggregatorFactory[]{new CountAggregatorFactory("count")},
      0,
      Granularities.NONE,
      false,
      100,
      false
  );
  this.segments = ImmutableList.of(
      new IncrementalIndexSegment(index, SegmentId.dummy("test")),
      aggregationTestHelper.persistIncrementalIndex(index, null)
  );
  // We ingested an arithmetic progression from 1 to n, so the expected aggregates below
  // can be computed in closed form. All sums are doubled because the query runs over two
  // copies of the same data: the incremental segment and the persisted segment.
  numRows = 2 * n;
  singleValueSum = n * (n + 1);
  multiValueSum = 2 * n * (n + 1);
  singleValueMax = n;
  multiValueMax = n;
  singleValueMin = 1;
  multiValueMin = 1;
}
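The expected sums follow from the closed-form sum of an arithmetic progression. A minimal sketch of the derivation (hedged: n = 10 here is only an illustrative row count; the test's actual n is defined elsewhere in the class):

int n = 10;                                      // illustrative; matches the test's row count field
long sumOneToN = (long) n * (n + 1) / 2;         // 1 + 2 + ... + n
long expectedSingleValueSum = 2 * sumOneToN;     // = n * (n + 1): one value per row, doubled across two segments
long expectedMultiValueSum = 2 * 2 * sumOneToN;  // = 2 * n * (n + 1): each row holds the value twice in the multi-value dimension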
Use of org.apache.druid.query.groupby.GroupByQueryConfig in project druid by druid-io.
The class VectorGroupByEngineIteratorTest, method testCreateOneGrouperAndCloseItWhenClose.
@Test
public void testCreateOneGrouperAndCloseItWhenClose() throws IOException
{
  final Interval interval = TestIndex.DATA_INTERVAL;
  final AggregatorFactory factory = new DoubleSumAggregatorFactory("index", "index");
  final GroupByQuery query = GroupByQuery
      .builder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .setInterval(interval)
      .setDimensions(new DefaultDimensionSpec("market", null, null))
      .setAggregatorSpecs(factory)
      .build();
  final StorageAdapter storageAdapter = new QueryableIndexStorageAdapter(TestIndex.getMMappedTestIndex());
  final ByteBuffer byteBuffer = ByteBuffer.wrap(new byte[4096]);
  final VectorCursor cursor = storageAdapter.makeVectorCursor(
      Filters.toFilter(query.getDimFilter()),
      interval,
      query.getVirtualColumns(),
      false,
      QueryContexts.getVectorSize(query),
      null
  );
  final List<GroupByVectorColumnSelector> dimensions = query
      .getDimensions()
      .stream()
      .map(dimensionSpec -> ColumnProcessors.makeVectorProcessor(
          dimensionSpec,
          GroupByVectorColumnProcessorFactory.instance(),
          cursor.getColumnSelectorFactory()
      ))
      .collect(Collectors.toList());
  final MutableObject<VectorGrouper> grouperCaptor = new MutableObject<>();
  final VectorGroupByEngineIterator iterator = new VectorGroupByEngineIterator(
      query,
      new GroupByQueryConfig(),
      storageAdapter,
      cursor,
      interval,
      dimensions,
      byteBuffer,
      null
  )
  {
    @Override
    VectorGrouper makeGrouper()
    {
      grouperCaptor.setValue(Mockito.spy(super.makeGrouper()));
      return grouperCaptor.getValue();
    }
  };
  iterator.close();
  Mockito.verify(grouperCaptor.getValue()).close();
}
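The capture-and-spy pattern above, stripped to its essentials: wrap the real object in a Mockito spy so calls pass through but remain verifiable, then assert that delegation happened. A minimal sketch using a hypothetical JDK Closeable rather than a VectorGrouper:

Closeable resource = Mockito.spy(new ByteArrayInputStream(new byte[0]));  // real object, observable calls
resource.close();
Mockito.verify(resource).close();  // fails the test if close() was never delegated to the spied instance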
Use of org.apache.druid.query.groupby.GroupByQueryConfig in project druid by druid-io.
The class CachingClusteredClientTestUtils, method createWarehouse.
/**
 * Returns a new {@link QueryToolChestWarehouse} for unit tests, together with a resourceCloser
 * that should be closed at the end of the test.
 */
public static Pair<QueryToolChestWarehouse, Closer> createWarehouse(ObjectMapper objectMapper)
{
  final Pair<GroupByQueryRunnerFactory, Closer> factoryCloserPair =
      GroupByQueryRunnerTest.makeQueryRunnerFactory(new GroupByQueryConfig());
  final GroupByQueryRunnerFactory factory = factoryCloserPair.lhs;
  final Closer resourceCloser = factoryCloserPair.rhs;
  return Pair.of(
      new MapQueryToolChestWarehouse(
          ImmutableMap.<Class<? extends Query>, QueryToolChest>builder()
                      .put(TimeseriesQuery.class, new TimeseriesQueryQueryToolChest())
                      .put(TopNQuery.class, new TopNQueryQueryToolChest(new TopNQueryConfig()))
                      .put(SearchQuery.class, new SearchQueryQueryToolChest(new SearchQueryConfig()))
                      .put(GroupByQuery.class, factory.getToolchest())
                      .put(TimeBoundaryQuery.class, new TimeBoundaryQueryQueryToolChest())
                      .build()
      ),
      resourceCloser
  );
}
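A plausible call site for this helper, sketched under assumptions (the try/finally scaffolding and someGroupByQuery are illustrative, not taken from the class above):

final Pair<QueryToolChestWarehouse, Closer> pair = CachingClusteredClientTestUtils.createWarehouse(objectMapper);
final QueryToolChestWarehouse warehouse = pair.lhs;
final Closer resourceCloser = pair.rhs;
try {
  QueryToolChest toolChest = warehouse.getToolChest(someGroupByQuery);  // someGroupByQuery is hypothetical
  // ... exercise the tool chest in assertions ...
} finally {
  resourceCloser.close();  // releases resources created by the group-by runner factory
}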
Use of org.apache.druid.query.groupby.GroupByQueryConfig in project druid by druid-io.
The class IncrementalIndexStorageAdapterTest, method testFilterByNull.
@Test
public void testFilterByNull() throws Exception
{
  IncrementalIndex index = indexCreator.createIndex();
  index.add(
      new MapBasedInputRow(
          System.currentTimeMillis() - 1,
          Collections.singletonList("billy"),
          ImmutableMap.of("billy", "hi")
      )
  );
  index.add(
      new MapBasedInputRow(
          System.currentTimeMillis() - 1,
          Collections.singletonList("sally"),
          ImmutableMap.of("sally", "bo")
      )
  );
  try (CloseableStupidPool<ByteBuffer> pool = new CloseableStupidPool<>(
      "GroupByQueryEngine-bufferPool",
      () -> ByteBuffer.allocate(50000)
  )) {
    final GroupByQueryEngine engine = new GroupByQueryEngine(
        Suppliers.ofInstance(
            new GroupByQueryConfig()
            {
              @Override
              public int getMaxIntermediateRows()
              {
                return 5;
              }
            }
        ),
        pool
    );
    final Sequence<Row> rows = engine.process(
        GroupByQuery.builder()
                    .setDataSource("test")
                    .setGranularity(Granularities.ALL)
                    .setInterval(new Interval(DateTimes.EPOCH, DateTimes.nowUtc()))
                    .addDimension("billy")
                    .addDimension("sally")
                    .addAggregator(new LongSumAggregatorFactory("cnt", "cnt"))
                    .setDimFilter(DimFilters.dimEquals("sally", (String) null))
                    .build(),
        new IncrementalIndexStorageAdapter(index)
    );
    final List<Row> results = rows.toList();
    Assert.assertEquals(1, results.size());
    MapBasedRow row = (MapBasedRow) results.get(0);
    Assert.assertEquals(ImmutableMap.of("billy", "hi", "cnt", 1L), row.getEvent());
  }
}
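Note the anonymous subclass handed to Suppliers.ofInstance: it pins a single GroupByQueryConfig knob for the test while keeping every other default. The same pattern in isolation:

GroupByQueryConfig tinyBufferConfig = new GroupByQueryConfig()
{
  @Override
  public int getMaxIntermediateRows()
  {
    return 5;  // force the engine to flush intermediate rows early
  }
};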
Use of org.apache.druid.query.groupby.GroupByQueryConfig in project druid by druid-io.
The class IncrementalIndexStorageAdapterTest, method testObjectColumnSelectorOnVaryingColumnSchema.
@Test
public void testObjectColumnSelectorOnVaryingColumnSchema() throws Exception
{
  IncrementalIndex index = indexCreator.createIndex();
  index.add(
      new MapBasedInputRow(
          DateTimes.of("2014-09-01T00:00:00"),
          Collections.singletonList("billy"),
          ImmutableMap.of("billy", "hi")
      )
  );
  index.add(
      new MapBasedInputRow(
          DateTimes.of("2014-09-01T01:00:00"),
          Lists.newArrayList("billy", "sally"),
          ImmutableMap.of("billy", "hip", "sally", "hop")
      )
  );
  try (CloseableStupidPool<ByteBuffer> pool = new CloseableStupidPool<>(
      "GroupByQueryEngine-bufferPool",
      () -> ByteBuffer.allocate(50000)
  )) {
    final GroupByQueryEngine engine = new GroupByQueryEngine(
        Suppliers.ofInstance(
            new GroupByQueryConfig()
            {
              @Override
              public int getMaxIntermediateRows()
              {
                return 5;
              }
            }
        ),
        pool
    );
    final Sequence<Row> rows = engine.process(
        GroupByQuery.builder()
                    .setDataSource("test")
                    .setGranularity(Granularities.ALL)
                    .setInterval(new Interval(DateTimes.EPOCH, DateTimes.nowUtc()))
                    .addDimension("billy")
                    .addDimension("sally")
                    .addAggregator(new LongSumAggregatorFactory("cnt", "cnt"))
                    .addAggregator(
                        new JavaScriptAggregatorFactory(
                            "fieldLength",
                            Arrays.asList("sally", "billy"),
                            "function(current, s, b) { return current + (s == null ? 0 : s.length) + (b == null ? 0 : b.length); }",
                            "function() { return 0; }",
                            "function(a,b) { return a + b; }",
                            JavaScriptConfig.getEnabledInstance()
                        )
                    )
                    .build(),
        new IncrementalIndexStorageAdapter(index)
    );
    final List<Row> results = rows.toList();
    Assert.assertEquals(2, results.size());
    MapBasedRow row = (MapBasedRow) results.get(0);
    Assert.assertEquals(ImmutableMap.of("billy", "hi", "cnt", 1L, "fieldLength", 2.0), row.getEvent());
    row = (MapBasedRow) results.get(1);
    Assert.assertEquals(ImmutableMap.of("billy", "hip", "sally", "hop", "cnt", 1L, "fieldLength", 6.0), row.getEvent());
  }
}
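As a sanity check on the asserted fieldLength values, the JavaScript fold runs once per grouped row: sally is null for the first group, so only "hi" contributes, while both fields contribute for the second group. A hedged restatement of that arithmetic:

double row1FieldLength = "hi".length();                    // 2.0: sally is null, billy = "hi"
double row2FieldLength = "hip".length() + "hop".length();  // 6.0: billy = "hip", sally = "hop"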