Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io.
In class GroupByTimeseriesQueryRunnerTest, method constructorFeeder:
@SuppressWarnings("unchecked")
@Parameterized.Parameters(name = "{0}, vectorize = {1}")
public static Iterable<Object[]> constructorFeeder()
{
  GroupByQueryConfig config = new GroupByQueryConfig();
  config.setMaxIntermediateRows(10000);

  final Pair<GroupByQueryRunnerFactory, Closer> factoryAndCloser =
      GroupByQueryRunnerTest.makeQueryRunnerFactory(config);
  final GroupByQueryRunnerFactory factory = factoryAndCloser.lhs;
  RESOURCE_CLOSER.register(factoryAndCloser.rhs);

  final List<Object[]> constructors = new ArrayList<>();
  for (QueryRunner<ResultRow> runner : QueryRunnerTestHelper.makeQueryRunners(factory)) {
    final QueryRunner modifiedRunner = new QueryRunner()
    {
      @Override
      public Sequence run(QueryPlus queryPlus, ResponseContext responseContext)
      {
        TimeseriesQuery tsQuery = (TimeseriesQuery) queryPlus.getQuery();
        QueryRunner<ResultRow> newRunner =
            factory.mergeRunners(Execs.directExecutor(), ImmutableList.of(runner));
        QueryToolChest toolChest = factory.getToolchest();
        newRunner = new FinalizeResultsQueryRunner<>(
            toolChest.mergeResults(toolChest.preMergeQueryDecoration(newRunner)),
            toolChest
        );

        final String timeDimension = tsQuery.getTimestampResultField();
        final List<VirtualColumn> virtualColumns =
            new ArrayList<>(Arrays.asList(tsQuery.getVirtualColumns().getVirtualColumns()));
        Map<String, Object> theContext = tsQuery.getContext();
        if (timeDimension != null) {
          theContext = new HashMap<>(tsQuery.getContext());
          final PeriodGranularity granularity = (PeriodGranularity) tsQuery.getGranularity();
          virtualColumns.add(
              new ExpressionVirtualColumn(
                  "v0",
                  StringUtils.format("timestamp_floor(__time, '%s')", granularity.getPeriod()),
                  ColumnType.LONG,
                  TestExprMacroTable.INSTANCE
              )
          );
          theContext.put(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD, timeDimension);
          theContext.put(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD_GRANULARITY, granularity);
          theContext.put(GroupByQuery.CTX_TIMESTAMP_RESULT_FIELD_INDEX, 0);
        }

        GroupByQuery newQuery = GroupByQuery
            .builder()
            .setDataSource(tsQuery.getDataSource())
            .setQuerySegmentSpec(tsQuery.getQuerySegmentSpec())
            .setGranularity(tsQuery.getGranularity())
            .setDimFilter(tsQuery.getDimensionsFilter())
            .setDimensions(
                timeDimension == null
                ? ImmutableList.of()
                : ImmutableList.of(new DefaultDimensionSpec("v0", timeDimension, ColumnType.LONG))
            )
            .setAggregatorSpecs(tsQuery.getAggregatorSpecs())
            .setPostAggregatorSpecs(tsQuery.getPostAggregatorSpecs())
            .setVirtualColumns(VirtualColumns.create(virtualColumns))
            .setContext(theContext)
            .build();

        return Sequences.map(
            newRunner.run(queryPlus.withQuery(newQuery), responseContext),
            new Function<ResultRow, Result<TimeseriesResultValue>>()
            {
              @Override
              public Result<TimeseriesResultValue> apply(final ResultRow input)
              {
                final MapBasedRow mapBasedRow = input.toMapBasedRow(newQuery);
                return new Result<>(
                    mapBasedRow.getTimestamp(),
                    new TimeseriesResultValue(mapBasedRow.getEvent())
                );
              }
            }
        );
      }

      @Override
      public String toString()
      {
        return runner.toString();
      }
    };

    for (boolean vectorize : ImmutableList.of(false, true)) {
      // Add vectorization tests for any indexes that support it.
      if (!vectorize || QueryRunnerTestHelper.isTestRunnerVectorizable(runner)) {
        constructors.add(new Object[]{modifiedRunner, vectorize});
      }
    }
  }
  return constructors;
}
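For reference, MapBasedRow is just a timestamp paired with an event map, which is what makes it a convenient bridge from group-by ResultRows back to timeseries results in the runner above. A minimal standalone sketch of that round trip (the "rows" metric name and the timestamp are made-up examples, not values from the test):

    // Hypothetical values; only the MapBasedRow / Result / TimeseriesResultValue
    // API usage mirrors the runner above.
    MapBasedRow row = new MapBasedRow(
        DateTimes.of("2020-01-01"),              // granularity bucket timestamp
        ImmutableMap.of("rows", 42L)             // aggregator name -> value
    );
    DateTime timestamp = row.getTimestamp();     // 2020-01-01T00:00:00.000Z
    Map<String, Object> event = row.getEvent();  // {rows=42}
    Result<TimeseriesResultValue> result =
        new Result<>(timestamp, new TimeseriesResultValue(event));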
Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io.
In class BufferHashGrouperTest, method testGrowingOverflowingInteger:
@Test
public void testGrowingOverflowingInteger()
{
  // This test is only meaningful when NullHandling.replaceWithDefault() is true.
  if (NullHandling.replaceWithDefault()) {
    final TestColumnSelectorFactory columnSelectorFactory = GrouperTestUtil.newColumnSelectorFactory();
    // The buffer size below is chosen to test integer overflow in
    // ByteBufferHashTable.adjustTableWhenFull().
    final Grouper<Integer> grouper = makeGrouper(columnSelectorFactory, 1_900_000_000, 2, 0.3f);
    final int expectedMaxSize = 15323979;

    columnSelectorFactory.setRow(new MapBasedRow(0, ImmutableMap.of("value", 10L)));
    for (int i = 0; i < expectedMaxSize; i++) {
      Assert.assertTrue(String.valueOf(i), grouper.aggregate(i).isOk());
    }
    Assert.assertFalse(grouper.aggregate(expectedMaxSize).isOk());
  }
}
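The oddly specific buffer size exists because growing the table doubles its size, and near the top of the int range that arithmetic can wrap. A sketch of the hazard in plain Java (illustrative numbers, not the actual bucket math inside ByteBufferHashTable):

    // Illustrative only: doubling an int already past Integer.MAX_VALUE / 2
    // wraps to a negative number unless the math is widened to long.
    int buckets = 1_200_000_000;
    int overflowed = buckets * 2;        // -1894967296
    long widened = (long) buckets * 2;   // 2400000000L, the intended value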
Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io.
In class BufferHashGrouperTest, method testNoGrowing:
@Test
public void testNoGrowing()
{
  final TestColumnSelectorFactory columnSelectorFactory = GrouperTestUtil.newColumnSelectorFactory();
  final Grouper<Integer> grouper = makeGrouper(columnSelectorFactory, 10000, Integer.MAX_VALUE, 0.75f);
  final int expectedMaxSize = NullHandling.replaceWithDefault() ? 267 : 258;

  columnSelectorFactory.setRow(new MapBasedRow(0, ImmutableMap.of("value", 10L)));
  for (int i = 0; i < expectedMaxSize; i++) {
    Assert.assertTrue(String.valueOf(i), grouper.aggregate(i).isOk());
  }
  Assert.assertFalse(grouper.aggregate(expectedMaxSize).isOk());

  // Aggregate a slightly different row against the same keys.
  columnSelectorFactory.setRow(new MapBasedRow(0, ImmutableMap.of("value", 11L)));
  for (int i = 0; i < expectedMaxSize; i++) {
    Assert.assertTrue(String.valueOf(i), grouper.aggregate(i).isOk());
  }
  Assert.assertFalse(grouper.aggregate(expectedMaxSize).isOk());

  final List<Grouper.Entry<Integer>> expected = new ArrayList<>();
  for (int i = 0; i < expectedMaxSize; i++) {
    expected.add(new Grouper.Entry<>(i, new Object[]{21L, 2L}));
  }
  Assert.assertEquals(expected, Lists.newArrayList(grouper.iterator(true)));
}
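The expected Object[]{21L, 2L} per key follows from the two passes: each key sees value 10 once and value 11 once, under what looks like a long-sum aggregator plus a row count. A plain-Java sketch of that bookkeeping (the HashMap stand-in is illustrative; the real grouper writes into an off-heap buffer):

    // Sketch: two passes over the same keys, summing "value" and counting rows.
    Map<Integer, long[]> table = new HashMap<>();
    for (long value : new long[]{10L, 11L}) {
      for (int key = 0; key < 267; key++) {
        long[] acc = table.computeIfAbsent(key, k -> new long[]{0L, 0L});
        acc[0] += value;  // sum:   10 + 11 = 21
        acc[1]++;         // count: 2
      }
    }
    // table.get(0) -> [21, 2], matching new Grouper.Entry<>(0, new Object[]{21L, 2L})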
Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io.
In class BufferArrayGrouperTest, method testAggregate:
@Test
public void testAggregate()
{
  final TestColumnSelectorFactory columnSelectorFactory = GrouperTestUtil.newColumnSelectorFactory();
  final IntGrouper grouper = newGrouper(columnSelectorFactory, 32768);

  columnSelectorFactory.setRow(new MapBasedRow(0, ImmutableMap.of("value", 10L)));
  grouper.aggregate(12);
  grouper.aggregate(6);
  grouper.aggregate(10);
  grouper.aggregate(6);
  grouper.aggregate(12);
  grouper.aggregate(6);

  final List<Entry<Integer>> expected = ImmutableList.of(
      new Grouper.Entry<>(6, new Object[]{30L, 3L}),
      new Grouper.Entry<>(10, new Object[]{10L, 1L}),
      new Grouper.Entry<>(12, new Object[]{20L, 2L})
  );
  final List<Entry<Integer>> unsortedEntries = Lists.newArrayList(grouper.iterator(false));
  Assert.assertEquals(
      expected,
      Ordering.from((Comparator<Entry<Integer>>) (o1, o2) -> Ints.compare(o1.getKey(), o2.getKey()))
              .sortedCopy(unsortedEntries)
  );
}
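Unlike the hash groupers above, BufferArrayGrouper can treat the int key itself as the address of its aggregation slot, since keys are bounded by the cardinality passed to newGrouper. A rough sketch of that direct-indexing idea (simplified to on-heap arrays; the real grouper works on a ByteBuffer):

    // Direct-indexed grouping: the key is the slot, no hashing required.
    final int cardinality = 32768;
    long[] sums = new long[cardinality];
    long[] counts = new long[cardinality];
    for (int key : new int[]{12, 6, 10, 6, 12, 6}) {  // same keys as the test
      sums[key] += 10L;                               // the row's "value"
      counts[key]++;
    }
    // sums[6] == 30, counts[6] == 3 -> Entry<>(6, new Object[]{30L, 3L})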
Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io.
In class LimitedBufferHashGrouperTest, method testAggregateAfterIterated:
@Test
public void testAggregateAfterIterated()
{
  expectedException.expect(IllegalStateException.class);
  expectedException.expectMessage("attempted to add offset after grouper was iterated");

  final TestColumnSelectorFactory columnSelectorFactory = GrouperTestUtil.newColumnSelectorFactory();
  final LimitedBufferHashGrouper<Integer> grouper = makeGrouper(columnSelectorFactory, 12120);

  columnSelectorFactory.setRow(new MapBasedRow(0, ImmutableMap.of("value", 10L)));
  for (int i = 0; i < NUM_ROWS; i++) {
    Assert.assertTrue(String.valueOf(i + KEY_BASE), grouper.aggregate(i + KEY_BASE).isOk());
  }

  List<Grouper.Entry<Integer>> iterated = Lists.newArrayList(grouper.iterator(true));
  Assert.assertEquals(LIMIT, iterated.size());

  // An attempt to aggregate with a new key will explode after the grouper has been iterated.
  grouper.aggregate(KEY_BASE + NUM_ROWS + 1);
}
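The contract under test: iterator(true) sorts the grouper's entries in place, after which new keys can no longer be inserted. A sketch of the call order that respects this (grouper, KEY_BASE, and NUM_ROWS as in the test above):

    // Aggregate everything first; iterate exactly once at the end.
    for (int i = 0; i < NUM_ROWS; i++) {
      if (!grouper.aggregate(i + KEY_BASE).isOk()) {
        break;  // grouper is full; stop inserting rather than iterating early
      }
    }
    // Sorted iteration finalizes the grouper; no aggregate() calls after this.
    for (Grouper.Entry<Integer> entry : Lists.newArrayList(grouper.iterator(true))) {
      // consume entry.getKey() / entry.getValues()
    }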