Example 11 with MapBasedRow

Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io, from class IncrementalIndexStorageAdapterTest, method testFilterByNull.

@Test
public void testFilterByNull() throws Exception {
    IncrementalIndex index = indexCreator.createIndex();
    index.add(new MapBasedInputRow(System.currentTimeMillis() - 1, Collections.singletonList("billy"), ImmutableMap.of("billy", "hi")));
    index.add(new MapBasedInputRow(System.currentTimeMillis() - 1, Collections.singletonList("sally"), ImmutableMap.of("sally", "bo")));
    try (CloseableStupidPool<ByteBuffer> pool = new CloseableStupidPool<>("GroupByQueryEngine-bufferPool", () -> ByteBuffer.allocate(50000))) {
        final GroupByQueryEngine engine = new GroupByQueryEngine(Suppliers.ofInstance(new GroupByQueryConfig() {

            @Override
            public int getMaxIntermediateRows() {
                return 5;
            }
        }), pool);
        final Sequence<Row> rows = engine.process(
            GroupByQuery.builder()
                .setDataSource("test")
                .setGranularity(Granularities.ALL)
                .setInterval(new Interval(DateTimes.EPOCH, DateTimes.nowUtc()))
                .addDimension("billy")
                .addDimension("sally")
                .addAggregator(new LongSumAggregatorFactory("cnt", "cnt"))
                .setDimFilter(DimFilters.dimEquals("sally", (String) null))
                .build(),
            new IncrementalIndexStorageAdapter(index)
        );
        final List<Row> results = rows.toList();
        Assert.assertEquals(1, results.size());
        MapBasedRow row = (MapBasedRow) results.get(0);
        Assert.assertEquals(ImmutableMap.of("billy", "hi", "cnt", 1L), row.getEvent());
    }
}
Also used : GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) CloseableStupidPool(org.apache.druid.collections.CloseableStupidPool) ByteBuffer(java.nio.ByteBuffer) MapBasedRow(org.apache.druid.data.input.MapBasedRow) MapBasedInputRow(org.apache.druid.data.input.MapBasedInputRow) Row(org.apache.druid.data.input.Row) GroupByQueryEngine(org.apache.druid.query.groupby.GroupByQueryEngine) Interval(org.joda.time.Interval) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
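
For orientation, MapBasedRow is a simple Row implementation that pairs a timestamp with an event map; the assertion above compares directly against that map via getEvent(). A minimal sketch of constructing and inspecting one, using the same imports listed above (the values are illustrative, not taken from the test):

// Illustrative only: a row shaped like the one the test asserts against.
MapBasedRow row = new MapBasedRow(DateTimes.nowUtc(), ImmutableMap.of("billy", "hi", "cnt", 1L));
row.getEvent();        // {billy=hi, cnt=1} -- the backing map the assertion compares against
row.getMetric("cnt");  // 1 -- numeric lookup into the same event map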

Example 12 with MapBasedRow

Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io, from class IncrementalIndexStorageAdapterTest, method testObjectColumnSelectorOnVaryingColumnSchema.

@Test
public void testObjectColumnSelectorOnVaryingColumnSchema() throws Exception {
    IncrementalIndex index = indexCreator.createIndex();
    index.add(new MapBasedInputRow(DateTimes.of("2014-09-01T00:00:00"), Collections.singletonList("billy"), ImmutableMap.of("billy", "hi")));
    index.add(new MapBasedInputRow(DateTimes.of("2014-09-01T01:00:00"), Lists.newArrayList("billy", "sally"), ImmutableMap.of("billy", "hip", "sally", "hop")));
    try (CloseableStupidPool<ByteBuffer> pool = new CloseableStupidPool<>("GroupByQueryEngine-bufferPool", () -> ByteBuffer.allocate(50000))) {
        final GroupByQueryEngine engine = new GroupByQueryEngine(Suppliers.ofInstance(new GroupByQueryConfig() {

            @Override
            public int getMaxIntermediateRows() {
                return 5;
            }
        }), pool);
        final Sequence<Row> rows = engine.process(
            GroupByQuery.builder()
                .setDataSource("test")
                .setGranularity(Granularities.ALL)
                .setInterval(new Interval(DateTimes.EPOCH, DateTimes.nowUtc()))
                .addDimension("billy")
                .addDimension("sally")
                .addAggregator(new LongSumAggregatorFactory("cnt", "cnt"))
                .addAggregator(new JavaScriptAggregatorFactory(
                    "fieldLength",
                    Arrays.asList("sally", "billy"),
                    "function(current, s, b) { return current + (s == null ? 0 : s.length) + (b == null ? 0 : b.length); }",
                    "function() { return 0; }",
                    "function(a,b) { return a + b; }",
                    JavaScriptConfig.getEnabledInstance()
                ))
                .build(),
            new IncrementalIndexStorageAdapter(index)
        );
        final List<Row> results = rows.toList();
        Assert.assertEquals(2, results.size());
        MapBasedRow row = (MapBasedRow) results.get(0);
        Assert.assertEquals(ImmutableMap.of("billy", "hi", "cnt", 1L, "fieldLength", 2.0), row.getEvent());
        row = (MapBasedRow) results.get(1);
        Assert.assertEquals(ImmutableMap.of("billy", "hip", "sally", "hop", "cnt", 1L, "fieldLength", 6.0), row.getEvent());
    }
}
Also used : GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) JavaScriptAggregatorFactory(org.apache.druid.query.aggregation.JavaScriptAggregatorFactory) CloseableStupidPool(org.apache.druid.collections.CloseableStupidPool) ByteBuffer(java.nio.ByteBuffer) MapBasedRow(org.apache.druid.data.input.MapBasedRow) MapBasedInputRow(org.apache.druid.data.input.MapBasedInputRow) Row(org.apache.druid.data.input.Row) GroupByQueryEngine(org.apache.druid.query.groupby.GroupByQueryEngine) Interval(org.joda.time.Interval) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
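
The expected fieldLength values follow directly from the JavaScript aggregate function above; here is the same arithmetic restated in plain Java (illustrative, not part of the test):

// Restates "function(current, s, b) { return current + (s == null ? 0 : s.length) + (b == null ? 0 : b.length); }"
static double fieldLength(String sally, String billy) {
    double current = 0; // "function() { return 0; }" supplies this initial value
    current += (sally == null ? 0 : sally.length());
    current += (billy == null ? 0 : billy.length());
    return current;
}
// First result row:  sally absent, billy = "hi"  -> 0 + 2 = 2.0
// Second result row: sally = "hop", billy = "hip" -> 3 + 3 = 6.0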

Example 13 with MapBasedRow

Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io, from class IncrementalIndexStorageAdapterTest, method testSanity.

@Test
public void testSanity() throws Exception {
    IncrementalIndex index = indexCreator.createIndex();
    index.add(new MapBasedInputRow(System.currentTimeMillis() - 1, Collections.singletonList("billy"), ImmutableMap.of("billy", "hi")));
    index.add(new MapBasedInputRow(System.currentTimeMillis() - 1, Collections.singletonList("sally"), ImmutableMap.of("sally", "bo")));
    try (CloseableStupidPool<ByteBuffer> pool = new CloseableStupidPool<>("GroupByQueryEngine-bufferPool", () -> ByteBuffer.allocate(50000))) {
        final GroupByQueryEngine engine = new GroupByQueryEngine(Suppliers.ofInstance(new GroupByQueryConfig() {

            @Override
            public int getMaxIntermediateRows() {
                return 5;
            }
        }), pool);
        final Sequence<Row> rows = engine.process(
            GroupByQuery.builder()
                .setDataSource("test")
                .setGranularity(Granularities.ALL)
                .setInterval(new Interval(DateTimes.EPOCH, DateTimes.nowUtc()))
                .addDimension("billy")
                .addDimension("sally")
                .addAggregator(new LongSumAggregatorFactory("cnt", "cnt"))
                .build(),
            new IncrementalIndexStorageAdapter(index)
        );
        final List<Row> results = rows.toList();
        Assert.assertEquals(2, results.size());
        MapBasedRow row = (MapBasedRow) results.get(0);
        Assert.assertEquals(ImmutableMap.of("sally", "bo", "cnt", 1L), row.getEvent());
        row = (MapBasedRow) results.get(1);
        Assert.assertEquals(ImmutableMap.of("billy", "hi", "cnt", 1L), row.getEvent());
    }
}
Also used : GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) CloseableStupidPool(org.apache.druid.collections.CloseableStupidPool) ByteBuffer(java.nio.ByteBuffer) MapBasedRow(org.apache.druid.data.input.MapBasedRow) MapBasedInputRow(org.apache.druid.data.input.MapBasedInputRow) Row(org.apache.druid.data.input.Row) GroupByQueryEngine(org.apache.druid.query.groupby.GroupByQueryEngine) Interval(org.joda.time.Interval) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
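
Two row types appear in these tests: MapBasedInputRow, the ingestion-side row handed to index.add(), extends MapBasedRow and additionally carries the list of event keys to treat as dimensions, while the query engine hands back plain MapBasedRows. A minimal sketch of the input side, using the same imports listed above (values illustrative):

// Illustrative only: the same shape as the index.add(...) calls above.
MapBasedInputRow inputRow = new MapBasedInputRow(
    System.currentTimeMillis() - 1,
    Collections.singletonList("billy"),  // event keys to index as dimensions
    ImmutableMap.of("billy", "hi")
);
inputRow.getDimensions();       // ["billy"]
inputRow.getDimension("billy"); // ["hi"] -- dimension values come back as lists of strings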

Example 14 with MapBasedRow

Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io, from class TimeseriesQueryQueryToolChest, method getNullTimeseriesResultValue.

private Result<TimeseriesResultValue> getNullTimeseriesResultValue(TimeseriesQuery query) {
    List<AggregatorFactory> aggregatorSpecs = query.getAggregatorSpecs();
    Aggregator[] aggregators = new Aggregator[aggregatorSpecs.size()];
    String[] aggregatorNames = new String[aggregatorSpecs.size()];
    RowSignature aggregatorsSignature = RowSignature.builder().addAggregators(aggregatorSpecs, RowSignature.Finalization.UNKNOWN).build();
    for (int i = 0; i < aggregatorSpecs.size(); i++) {
        aggregators[i] = aggregatorSpecs.get(i).factorize(
            RowBasedColumnSelectorFactory.create(
                RowAdapters.standardRow(),
                () -> new MapBasedRow(null, null),
                aggregatorsSignature,
                false
            )
        );
        aggregatorNames[i] = aggregatorSpecs.get(i).getName();
    }
    final DateTime start = query.getIntervals().isEmpty() ? DateTimes.EPOCH : query.getIntervals().get(0).getStart();
    TimeseriesResultBuilder bob = new TimeseriesResultBuilder(start);
    for (int i = 0; i < aggregatorSpecs.size(); i++) {
        bob.addMetric(aggregatorNames[i], aggregators[i].get());
        aggregators[i].close();
    }
    return bob.build();
}
Also used : MapBasedRow(org.apache.druid.data.input.MapBasedRow) PostAggregator(org.apache.druid.query.aggregation.PostAggregator) Aggregator(org.apache.druid.query.aggregation.Aggregator) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) RowSignature(org.apache.druid.segment.column.RowSignature) DateTime(org.joda.time.DateTime)
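
The method relies on a small trick: each aggregator is factorized against a selector backed by an empty MapBasedRow, and get() is called without any intervening aggregate() calls, so it returns the aggregator's initial ("empty") value. A reduced sketch of the same calls for one aggregator; it mirrors the method above, with LongSumAggregatorFactory standing in for an arbitrary aggregator spec:

// Sketch only; the resulting value depends on the aggregator type and null-handling mode.
AggregatorFactory factory = new LongSumAggregatorFactory("cnt", "cnt");
RowSignature signature = RowSignature.builder()
    .addAggregators(Collections.singletonList(factory), RowSignature.Finalization.UNKNOWN)
    .build();
Aggregator aggregator = factory.factorize(
    RowBasedColumnSelectorFactory.create(
        RowAdapters.standardRow(),
        () -> new MapBasedRow(null, null),  // an empty row; aggregate() is never invoked
        signature,
        false
    )
);
Object emptyValue = aggregator.get();  // e.g. 0 (or null, depending on null handling) for a long sum
aggregator.close();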

Example 15 with MapBasedRow

Use of org.apache.druid.data.input.MapBasedRow in project druid by druid-io, from class TestHelper, method assertTopNResultValue.

private static void assertTopNResultValue(String msg, Result expected, Result actual) {
    TopNResultValue expectedVal = (TopNResultValue) expected.getValue();
    TopNResultValue actualVal = (TopNResultValue) actual.getValue();
    List<Row> listExpectedRows = expectedVal.getValue()
        .stream()
        .map(dimensionAndMetricValueExtractor ->
            new MapBasedRow(expected.getTimestamp(), dimensionAndMetricValueExtractor.getBaseObject()))
        .collect(Collectors.toList());
    List<Row> listActualRows = actualVal.getValue()
        .stream()
        .map(dimensionAndMetricValueExtractor ->
            new MapBasedRow(actual.getTimestamp(), dimensionAndMetricValueExtractor.getBaseObject()))
        .collect(Collectors.toList());
    Assert.assertEquals("Size of list must match", listExpectedRows.size(), listActualRows.size());
    IntStream.range(0, listExpectedRows.size()).forEach(
        value -> assertRow(
            StringUtils.format("%s, on value number [%s]", msg, value),
            listExpectedRows.get(value),
            listActualRows.get(value)
        )
    );
}
Also used : TopNResultValue(org.apache.druid.query.topn.TopNResultValue) IntStream(java.util.stream.IntStream) InjectableValues(com.fasterxml.jackson.databind.InjectableValues) ComparableList(org.apache.druid.segment.data.ComparableList) AnnotationIntrospector(com.fasterxml.jackson.databind.AnnotationIntrospector) MapBasedRow(org.apache.druid.data.input.MapBasedRow) SegmentWriteOutMediumFactory(org.apache.druid.segment.writeout.SegmentWriteOutMediumFactory) HashMap(java.util.HashMap) TimeseriesResultValue(org.apache.druid.query.timeseries.TimeseriesResultValue) Row(org.apache.druid.data.input.Row) Lists(com.google.common.collect.Lists) Map(java.util.Map) AnnotatedMember(com.fasterxml.jackson.databind.introspect.AnnotatedMember) ComparableStringArray(org.apache.druid.segment.data.ComparableStringArray) PruneSpecsHolder(org.apache.druid.timeline.DataSegment.PruneSpecsHolder) Sequence(org.apache.druid.java.util.common.guava.Sequence) Iterator(java.util.Iterator) ResultRow(org.apache.druid.query.groupby.ResultRow) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DruidSecondaryModule(org.apache.druid.guice.DruidSecondaryModule) StringUtils(org.apache.druid.java.util.common.StringUtils) IOException(java.io.IOException) TestExprMacroTable(org.apache.druid.query.expression.TestExprMacroTable) Collectors(java.util.stream.Collectors) ExprEval(org.apache.druid.math.expr.ExprEval) DefaultObjectMapper(org.apache.druid.jackson.DefaultObjectMapper) ExprMacroTable(org.apache.druid.math.expr.ExprMacroTable) UncheckedIOException(java.io.UncheckedIOException) Result(org.apache.druid.query.Result) List(java.util.List) ColumnConfig(org.apache.druid.segment.column.ColumnConfig) GuiceAnnotationIntrospector(org.apache.druid.guice.GuiceAnnotationIntrospector) Preconditions(com.google.common.base.Preconditions) Assert(org.junit.Assert)
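
The helper's core move is normalizing both TopN result values into List<Row> so a single row-level comparison (assertRow) can be reused. The same conversion pattern on illustrative data, assuming ImmutableMap and DateTimes imports in addition to those listed above:

// Illustrative maps standing in for the TopN entries' getBaseObject() results.
DateTime timestamp = DateTimes.of("2014-09-01");
List<Map<String, Object>> entries = Lists.newArrayList(
    ImmutableMap.of("dim", "a", "cnt", 1L),
    ImmutableMap.of("dim", "b", "cnt", 2L)
);
List<Row> rows = entries.stream()
    .map(event -> new MapBasedRow(timestamp, event))
    .collect(Collectors.toList());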

Aggregations

MapBasedRow (org.apache.druid.data.input.MapBasedRow): 65 uses
Test (org.junit.Test): 50 uses
InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest): 36 uses
ArrayList (java.util.ArrayList): 21 uses
Row (org.apache.druid.data.input.Row): 16 uses
LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory): 16 uses
GroupByQueryRunnerTest (org.apache.druid.query.groupby.GroupByQueryRunnerTest): 16 uses
DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec): 15 uses
HashMap (java.util.HashMap): 13 uses
DimensionSpec (org.apache.druid.query.dimension.DimensionSpec): 12 uses
GroupByQuery (org.apache.druid.query.groupby.GroupByQuery): 10 uses
List (java.util.List): 9 uses
ResultRow (org.apache.druid.query.groupby.ResultRow): 9 uses
LongMeanAveragerFactory (org.apache.druid.query.movingaverage.averagers.LongMeanAveragerFactory): 9 uses
AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory): 8 uses
File (java.io.File): 7 uses
ByteBuffer (java.nio.ByteBuffer): 6 uses
GroupByQueryConfig (org.apache.druid.query.groupby.GroupByQueryConfig): 6 uses
TimeseriesResultValue (org.apache.druid.query.timeseries.TimeseriesResultValue): 6 uses
IOException (java.io.IOException): 5 uses