Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io.
In class IncrementalIndexStorageAdapterTest, method testFilterByNull:
@Test
public void testFilterByNull() throws Exception {
  IncrementalIndex index = indexCreator.createIndex();
  index.add(new MapBasedInputRow(System.currentTimeMillis() - 1, Collections.singletonList("billy"), ImmutableMap.of("billy", "hi")));
  index.add(new MapBasedInputRow(System.currentTimeMillis() - 1, Collections.singletonList("sally"), ImmutableMap.of("sally", "bo")));
  try (CloseableStupidPool<ByteBuffer> pool = new CloseableStupidPool<>("GroupByQueryEngine-bufferPool", () -> ByteBuffer.allocate(50000))) {
    final GroupByQueryEngine engine = new GroupByQueryEngine(
        Suppliers.ofInstance(new GroupByQueryConfig() {
          @Override
          public int getMaxIntermediateRows() {
            return 5;
          }
        }),
        pool);
    // Group on both dimensions, keeping only rows whose "sally" dimension is null.
    final Sequence<Row> rows = engine.process(
        GroupByQuery.builder()
                    .setDataSource("test")
                    .setGranularity(Granularities.ALL)
                    .setInterval(new Interval(DateTimes.EPOCH, DateTimes.nowUtc()))
                    .addDimension("billy")
                    .addDimension("sally")
                    .addAggregator(new LongSumAggregatorFactory("cnt", "cnt"))
                    .setDimFilter(DimFilters.dimEquals("sally", (String) null))
                    .build(),
        new IncrementalIndexStorageAdapter(index));
    final List<Row> results = rows.toList();
    // Only the "billy" row lacks a "sally" value.
    Assert.assertEquals(1, results.size());
    MapBasedRow row = (MapBasedRow) results.get(0);
    Assert.assertEquals(ImmutableMap.of("billy", "hi", "cnt", 1L), row.getEvent());
  }
}
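MapBasedRow itself is a thin pairing of a timestamp with an event map. A minimal sketch of that surface (the constructor and accessors are the same ones the assertions above rely on; the literal values here are illustrative only):

MapBasedRow row = new MapBasedRow(
    DateTimes.of("2014-09-01"),
    ImmutableMap.of("billy", "hi", "cnt", 1L));
row.getEvent();     // {billy=hi, cnt=1} -- the same map that was passed in
row.getTimestamp(); // 2014-09-01T00:00:00.000Z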
Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io.
In class IncrementalIndexStorageAdapterTest, method testObjectColumnSelectorOnVaryingColumnSchema:
@Test
public void testObjectColumnSelectorOnVaryingColumnSchema() throws Exception {
  IncrementalIndex index = indexCreator.createIndex();
  // The second row introduces a "sally" column that the first row lacks.
  index.add(new MapBasedInputRow(DateTimes.of("2014-09-01T00:00:00"), Collections.singletonList("billy"), ImmutableMap.of("billy", "hi")));
  index.add(new MapBasedInputRow(DateTimes.of("2014-09-01T01:00:00"), Lists.newArrayList("billy", "sally"), ImmutableMap.of("billy", "hip", "sally", "hop")));
  try (CloseableStupidPool<ByteBuffer> pool = new CloseableStupidPool<>("GroupByQueryEngine-bufferPool", () -> ByteBuffer.allocate(50000))) {
    final GroupByQueryEngine engine = new GroupByQueryEngine(
        Suppliers.ofInstance(new GroupByQueryConfig() {
          @Override
          public int getMaxIntermediateRows() {
            return 5;
          }
        }),
        pool);
    final Sequence<Row> rows = engine.process(
        GroupByQuery.builder()
                    .setDataSource("test")
                    .setGranularity(Granularities.ALL)
                    .setInterval(new Interval(DateTimes.EPOCH, DateTimes.nowUtc()))
                    .addDimension("billy")
                    .addDimension("sally")
                    .addAggregator(new LongSumAggregatorFactory("cnt", "cnt"))
                    .addAggregator(new JavaScriptAggregatorFactory(
                        "fieldLength",
                        Arrays.asList("sally", "billy"),
                        "function(current, s, b) { return current + (s == null ? 0 : s.length) + (b == null ? 0 : b.length); }",
                        "function() { return 0; }",
                        "function(a,b) { return a + b; }",
                        JavaScriptConfig.getEnabledInstance()))
                    .build(),
        new IncrementalIndexStorageAdapter(index));
    final List<Row> results = rows.toList();
    Assert.assertEquals(2, results.size());
    MapBasedRow row = (MapBasedRow) results.get(0);
    Assert.assertEquals(ImmutableMap.of("billy", "hi", "cnt", 1L, "fieldLength", 2.0), row.getEvent());
    row = (MapBasedRow) results.get(1);
    Assert.assertEquals(ImmutableMap.of("billy", "hip", "sally", "hop", "cnt", 1L, "fieldLength", 6.0), row.getEvent());
  }
}
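The fieldLength metric is just the JavaScript fold above applied per row. An illustrative plain-Java equivalent of that fold (a sketch, not the aggregator's actual implementation):

static double fieldLength(String sally, String billy) {
  return (sally == null ? 0 : sally.length()) + (billy == null ? 0 : billy.length());
}
// fieldLength(null, "hi")   == 2.0 -> first result row above
// fieldLength("hop", "hip") == 6.0 -> second result row above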
Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io.
In class IncrementalIndexStorageAdapterTest, method testSanity:
@Test
public void testSanity() throws Exception {
  IncrementalIndex index = indexCreator.createIndex();
  index.add(new MapBasedInputRow(System.currentTimeMillis() - 1, Collections.singletonList("billy"), ImmutableMap.of("billy", "hi")));
  index.add(new MapBasedInputRow(System.currentTimeMillis() - 1, Collections.singletonList("sally"), ImmutableMap.of("sally", "bo")));
  try (CloseableStupidPool<ByteBuffer> pool = new CloseableStupidPool<>("GroupByQueryEngine-bufferPool", () -> ByteBuffer.allocate(50000))) {
    final GroupByQueryEngine engine = new GroupByQueryEngine(
        Suppliers.ofInstance(new GroupByQueryConfig() {
          @Override
          public int getMaxIntermediateRows() {
            return 5;
          }
        }),
        pool);
    final Sequence<Row> rows = engine.process(
        GroupByQuery.builder()
                    .setDataSource("test")
                    .setGranularity(Granularities.ALL)
                    .setInterval(new Interval(DateTimes.EPOCH, DateTimes.nowUtc()))
                    .addDimension("billy")
                    .addDimension("sally")
                    .addAggregator(new LongSumAggregatorFactory("cnt", "cnt"))
                    .build(),
        new IncrementalIndexStorageAdapter(index));
    final List<Row> results = rows.toList();
    Assert.assertEquals(2, results.size());
    MapBasedRow row = (MapBasedRow) results.get(0);
    Assert.assertEquals(ImmutableMap.of("sally", "bo", "cnt", 1L), row.getEvent());
    row = (MapBasedRow) results.get(1);
    Assert.assertEquals(ImmutableMap.of("billy", "hi", "cnt", 1L), row.getEvent());
  }
}
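The assertions above depend on the order in which the grouped rows come back. A hypothetical, order-insensitive variant (not part of the test) would index the results by their event maps instead:

Set<Map<String, Object>> events = results.stream()
    .map(r -> ((MapBasedRow) r).getEvent())
    .collect(Collectors.toSet());
Assert.assertTrue(events.contains(ImmutableMap.of("sally", "bo", "cnt", 1L)));
Assert.assertTrue(events.contains(ImmutableMap.of("billy", "hi", "cnt", 1L)));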
Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io.
In class TimeseriesQueryQueryToolChest, method getNullTimeseriesResultValue:
private Result<TimeseriesResultValue> getNullTimeseriesResultValue(TimeseriesQuery query) {
  List<AggregatorFactory> aggregatorSpecs = query.getAggregatorSpecs();
  Aggregator[] aggregators = new Aggregator[aggregatorSpecs.size()];
  String[] aggregatorNames = new String[aggregatorSpecs.size()];
  RowSignature aggregatorsSignature =
      RowSignature.builder().addAggregators(aggregatorSpecs, RowSignature.Finalization.UNKNOWN).build();
  for (int i = 0; i < aggregatorSpecs.size(); i++) {
    // Factorize each aggregator against a selector backed by an empty (null) row.
    aggregators[i] = aggregatorSpecs.get(i).factorize(
        RowBasedColumnSelectorFactory.create(
            RowAdapters.standardRow(),
            () -> new MapBasedRow(null, null),
            aggregatorsSignature,
            false));
    aggregatorNames[i] = aggregatorSpecs.get(i).getName();
  }
  final DateTime start = query.getIntervals().isEmpty() ? DateTimes.EPOCH : query.getIntervals().get(0).getStart();
  TimeseriesResultBuilder bob = new TimeseriesResultBuilder(start);
  for (int i = 0; i < aggregatorSpecs.size(); i++) {
    // Read each aggregator's initial value without ever aggregating a row.
    bob.addMetric(aggregatorNames[i], aggregators[i].get());
    aggregators[i].close();
  }
  return bob.build();
}
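The () -> new MapBasedRow(null, null) supplier is the heart of this method: every aggregator is factorized against a selector that never sees a real row, so get() yields its initial value. A minimal standalone sketch of the same trick, assuming a count aggregator:

AggregatorFactory factory = new CountAggregatorFactory("rows");
RowSignature signature = RowSignature.builder()
    .addAggregators(Collections.singletonList(factory), RowSignature.Finalization.UNKNOWN)
    .build();
Aggregator agg = factory.factorize(RowBasedColumnSelectorFactory.create(
    RowAdapters.standardRow(), () -> new MapBasedRow(null, null), signature, false));
Object initial = agg.get(); // 0L for a count: aggregate() is never called
agg.close();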
Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io.
In class TestHelper, method assertTopNResultValue:
private static void assertTopNResultValue(String msg, Result expected, Result actual) {
  TopNResultValue expectedVal = (TopNResultValue) expected.getValue();
  TopNResultValue actualVal = (TopNResultValue) actual.getValue();
  // Wrap each TopN entry in a MapBasedRow so the rows can be compared uniformly.
  List<Row> listExpectedRows = expectedVal.getValue().stream()
      .map(extractor -> new MapBasedRow(expected.getTimestamp(), extractor.getBaseObject()))
      .collect(Collectors.toList());
  List<Row> listActualRows = actualVal.getValue().stream()
      .map(extractor -> new MapBasedRow(actual.getTimestamp(), extractor.getBaseObject()))
      .collect(Collectors.toList());
  Assert.assertEquals("Size of list must match", listExpectedRows.size(), listActualRows.size());
  IntStream.range(0, listExpectedRows.size()).forEach(i ->
      assertRow(StringUtils.format("%s, on value number [%s]", msg, i), listExpectedRows.get(i), listActualRows.get(i)));
}
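Converting both sides to MapBasedRow gives assertRow a common shape to compare. As a side note, MapBasedRow also defines value equality over its timestamp and event map (an illustration assuming the stock implementation; assertRow itself may compare fields with numeric tolerance):

DateTime ts = DateTimes.of("2014-09-01");
Row expectedRow = new MapBasedRow(ts, ImmutableMap.of("dim", "a", "cnt", 1L));
Row actualRow = new MapBasedRow(ts, ImmutableMap.of("dim", "a", "cnt", 1L));
Assert.assertEquals(expectedRow, actualRow); // same timestamp, same event map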