Search in sources:

Example 1 with IncrementalIndexSegment

Use of org.apache.druid.segment.IncrementalIndexSegment in project druid by druid-io.

Class StringColumnAggregationTest, method setup.

@Before
public void setup() throws Exception {
    List<String> dimensions = ImmutableList.of(singleValue, multiValue);
    List<InputRow> inputRows = new ArrayList<>(n);
    for (int i = 1; i <= n; i++) {
        String val = String.valueOf(i * 1.0d);
        // The multi-value dimension carries each value twice; the null entry is dropped at ingestion.
        inputRows.add(new MapBasedInputRow(
            DateTime.now(DateTimeZone.UTC),
            dimensions,
            ImmutableMap.of(singleValue, val, multiValue, Lists.newArrayList(val, null, val))
        ));
    }
    aggregationTestHelper = AggregationTestHelper.createGroupByQueryAggregationTestHelper(
        Collections.emptyList(), new GroupByQueryConfig(), tempFolder);
    IncrementalIndex index = AggregationTestHelper.createIncrementalIndex(
        inputRows.iterator(),
        new NoopInputRowParser(null),
        new AggregatorFactory[] { new CountAggregatorFactory("count") },
        0, Granularities.NONE, false, 100, false);
    // Query both the in-memory incremental segment and its persisted copy, so every row is counted twice.
    this.segments = ImmutableList.of(
        new IncrementalIndexSegment(index, SegmentId.dummy("test")),
        aggregationTestHelper.persistIncrementalIndex(index, null));
    // We ingested the arithmetic progression 1..n (n = 10 here), so the expected values follow from
    // 1 + 2 + ... + n = n * (n + 1) / 2, doubled because the query runs over two copies of the
    // same data (see the worked check after this example).
    numRows = 2 * n;
    singleValueSum = n * (n + 1);
    multiValueSum = 2 * n * (n + 1);
    singleValueMax = n;
    multiValueMax = n;
    singleValueMin = 1;
    multiValueMin = 1;
}
Also used: GroupByQueryConfig(org.apache.druid.query.groupby.GroupByQueryConfig) IncrementalIndex(org.apache.druid.segment.incremental.IncrementalIndex) IncrementalIndexSegment(org.apache.druid.segment.IncrementalIndexSegment) InputRow(org.apache.druid.data.input.InputRow) MapBasedInputRow(org.apache.druid.data.input.MapBasedInputRow) ArrayList(java.util.ArrayList) NoopInputRowParser(org.apache.druid.data.input.impl.NoopInputRowParser) Before(org.junit.Before)
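
A quick worked check of those expected values, as a standalone sketch; the names n, numRows, singleValueSum, and multiValueSum mirror the test's fields (n = 10 per the original comment), and nothing here touches Druid:

public class ExpectedSumsSketch {
    public static void main(String[] args) {
        int n = 10; // the test ingests the progression 1..n once per segment
        long progressionSum = (long) n * (n + 1) / 2; // 1 + 2 + ... + 10 = 55
        // Two segments (in-memory + persisted) hold the same rows, so totals double.
        long numRows = 2L * n;                    // 20
        long singleValueSum = 2 * progressionSum; // n * (n + 1) = 110
        // Each multi-value row carries its value twice (the null entry is dropped).
        long multiValueSum = 4 * progressionSum;  // 2 * n * (n + 1) = 220
        System.out.println(numRows + " " + singleValueSum + " " + multiValueSum); // 20 110 220
    }
}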

Example 2 with IncrementalIndexSegment

Use of org.apache.druid.segment.IncrementalIndexSegment in project druid by druid-io.

Class FireHydrantTest, method setup.

@Before
public void setup() {
    incrementalIndexSegment = new IncrementalIndexSegment(TestIndex.getIncrementalTestIndex(), SegmentId.dummy("test"));
    queryableIndexSegment = new QueryableIndexSegment(TestIndex.getMMappedTestIndex(), SegmentId.dummy("test"));
    // The hydrant starts out with the incremental segment loaded.
    hydrant = new FireHydrant(incrementalIndexSegment, 0);
}
Also used: QueryableIndexSegment(org.apache.druid.segment.QueryableIndexSegment) IncrementalIndexSegment(org.apache.druid.segment.IncrementalIndexSegment) Before(org.junit.Before)
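
For context on this fixture: a FireHydrant begins life wrapping the live incremental segment and is later handed the persisted, queryable form. A minimal sketch of that handoff, assuming FireHydrant's swapSegment method performs the replacement (our reading of what this test exercises, not shown in the snippet):

// Begin with the realtime segment, then swap in the persisted form.
FireHydrant hydrant = new FireHydrant(
    new IncrementalIndexSegment(TestIndex.getIncrementalTestIndex(), SegmentId.dummy("test")), 0);
hydrant.swapSegment( // assumed swap API
    new QueryableIndexSegment(TestIndex.getMMappedTestIndex(), SegmentId.dummy("test")));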

Example 3 with IncrementalIndexSegment

Use of org.apache.druid.segment.IncrementalIndexSegment in project druid by druid-io.

Class SegmentAnalyzerTest, method testAnalyzingSegmentWithNonExistentAggregator.

/**
 * Verifies that if a segment was created using an unknown/invalid aggregator
 * (which can happen when an aggregator is removed in a later version), analyzing
 * the segment does not fail; instead, the analysis of the affected complex column
 * is reported as an error.
 *
 * @throws IOException if persisting or loading the segment fails
 */
@Test
public void testAnalyzingSegmentWithNonExistentAggregator() throws IOException {
    final URL resource = SegmentAnalyzerTest.class.getClassLoader().getResource("druid.sample.numeric.tsv");
    CharSource source = Resources.asByteSource(resource).asCharSource(StandardCharsets.UTF_8);
    String invalid_aggregator = "invalid_aggregator";
    AggregatorFactory[] metrics = new AggregatorFactory[] {
        new DoubleSumAggregatorFactory(TestIndex.DOUBLE_METRICS[0], "index"),
        new HyperUniquesAggregatorFactory("quality_uniques", "quality"),
        // Deliberately include a factory whose serde will later be unloaded.
        new InvalidAggregatorFactory(invalid_aggregator, "quality")
    };
    final IncrementalIndexSchema schema = new IncrementalIndexSchema.Builder()
        .withMinTimestamp(DateTimes.of("2011-01-12T00:00:00.000Z").getMillis())
        .withTimestampSpec(new TimestampSpec("ds", "auto", null))
        .withDimensionsSpec(TestIndex.DIMENSIONS_SPEC)
        .withMetrics(metrics).withRollup(true).build();
    final IncrementalIndex retVal =
        new OnheapIncrementalIndex.Builder().setIndexSchema(schema).setMaxRowCount(10000).build();
    IncrementalIndex incrementalIndex = TestIndex.loadIncrementalIndex(retVal, source);
    // Analyze the in-memory segment.
    {
        SegmentAnalyzer analyzer = new SegmentAnalyzer(EnumSet.of(SegmentMetadataQuery.AnalysisType.SIZE));
        IncrementalIndexSegment segment = new IncrementalIndexSegment(incrementalIndex, SegmentId.dummy("ds"));
        Map<String, ColumnAnalysis> analyses = analyzer.analyze(segment);
        ColumnAnalysis columnAnalysis = analyses.get(invalid_aggregator);
        Assert.assertFalse(columnAnalysis.isError());
        Assert.assertEquals("invalid_complex_column_type", columnAnalysis.getType());
        Assert.assertEquals(ColumnType.ofComplex("invalid_complex_column_type"), columnAnalysis.getTypeSignature());
    }
    // Persist the index.
    final File segmentFile = TestIndex.INDEX_MERGER.persist(incrementalIndex, temporaryFolder.newFolder(), TestIndex.INDEX_SPEC, null);
    // Unload the complex serde, then analyze the persisted segment.
    ComplexMetrics.unregisterSerde(InvalidAggregatorFactory.TYPE);
    {
        SegmentAnalyzer analyzer = new SegmentAnalyzer(EnumSet.of(SegmentMetadataQuery.AnalysisType.SIZE));
        QueryableIndexSegment segment = new QueryableIndexSegment(TestIndex.INDEX_IO.loadIndex(segmentFile), SegmentId.dummy("ds"));
        Map<String, ColumnAnalysis> analyses = analyzer.analyze(segment);
        ColumnAnalysis invalidColumnAnalysis = analyses.get(invalid_aggregator);
        Assert.assertTrue(invalidColumnAnalysis.isError());
        Assert.assertEquals("error:unknown_complex_invalid_complex_column_type", invalidColumnAnalysis.getErrorMessage());
        // Also run a segment metadata query to verify it doesn't break.
        final List<SegmentAnalysis> results = getSegmentAnalysises(segment, EnumSet.of(SegmentMetadataQuery.AnalysisType.SIZE));
        for (SegmentAnalysis result : results) {
            Assert.assertTrue(result.getColumns().get(invalid_aggregator).isError());
        }
    }
}
Also used: QueryableIndexSegment(org.apache.druid.segment.QueryableIndexSegment) CharSource(com.google.common.io.CharSource) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) IncrementalIndex(org.apache.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(org.apache.druid.segment.incremental.OnheapIncrementalIndex) IncrementalIndexSegment(org.apache.druid.segment.IncrementalIndexSegment) ColumnBuilder(org.apache.druid.segment.column.ColumnBuilder) HyperUniquesAggregatorFactory(org.apache.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) URL(java.net.URL) TimestampSpec(org.apache.druid.data.input.impl.TimestampSpec) ColumnAnalysis(org.apache.druid.query.metadata.metadata.ColumnAnalysis) List(java.util.List) SegmentAnalysis(org.apache.druid.query.metadata.metadata.SegmentAnalysis) Map(java.util.Map) File(java.io.File) IncrementalIndexSchema(org.apache.druid.segment.incremental.IncrementalIndexSchema) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
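
The error-checking pattern this test exercises is reusable as-is: analyze a segment, then inspect each ColumnAnalysis. A minimal sketch built only from the calls visible above; the segment variable is assumed to be constructed as in the test:

SegmentAnalyzer analyzer = new SegmentAnalyzer(EnumSet.of(SegmentMetadataQuery.AnalysisType.SIZE));
Map<String, ColumnAnalysis> analyses = analyzer.analyze(segment);
analyses.forEach((column, analysis) -> {
    if (analysis.isError()) {
        // e.g. "error:unknown_complex_invalid_complex_column_type" when the serde is unregistered
        System.out.println(column + " -> " + analysis.getErrorMessage());
    }
});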

Example 4 with IncrementalIndexSegment

Use of org.apache.druid.segment.IncrementalIndexSegment in project druid by druid-io.

Class SegmentAnalyzerTest, method testIncrementalWorksHelper.

private void testIncrementalWorksHelper(EnumSet<SegmentMetadataQuery.AnalysisType> analyses) {
    final List<SegmentAnalysis> results = getSegmentAnalysises(
        new IncrementalIndexSegment(TestIndex.getIncrementalTestIndex(), SegmentId.dummy("ds")),
        analyses
    );
    Assert.assertEquals(1, results.size());
    final SegmentAnalysis analysis = results.get(0);
    Assert.assertEquals(SegmentId.dummy("ds").toString(), analysis.getId());
    final Map<String, ColumnAnalysis> columns = analysis.getColumns();
    Assert.assertEquals(TestIndex.COLUMNS.length + 3, columns.size());
    for (DimensionSchema schema : TestIndex.DIMENSION_SCHEMAS) {
        final String dimension = schema.getName();
        final ColumnAnalysis columnAnalysis = columns.get(dimension);
        final boolean isString = schema.getColumnType().is(ValueType.STRING);
        Assert.assertEquals(dimension, schema.getColumnType().toString(), columnAnalysis.getType());
        Assert.assertEquals(dimension, 0, columnAnalysis.getSize());
        if (isString) {
            if (analyses == null) {
                Assert.assertTrue(dimension, columnAnalysis.getCardinality() > 0);
            } else {
                Assert.assertEquals(dimension, 0, columnAnalysis.getCardinality().longValue());
            }
        } else {
            Assert.assertNull(dimension, columnAnalysis.getCardinality());
        }
    }
    for (String metric : TestIndex.DOUBLE_METRICS) {
        final ColumnAnalysis columnAnalysis = columns.get(metric);
        Assert.assertEquals(metric, ValueType.DOUBLE.name(), columnAnalysis.getType());
        Assert.assertEquals(metric, 0, columnAnalysis.getSize());
        Assert.assertNull(metric, columnAnalysis.getCardinality());
    }
    for (String metric : TestIndex.FLOAT_METRICS) {
        final ColumnAnalysis columnAnalysis = columns.get(metric);
        Assert.assertEquals(metric, ValueType.FLOAT.name(), columnAnalysis.getType());
        Assert.assertEquals(metric, 0, columnAnalysis.getSize());
        Assert.assertNull(metric, columnAnalysis.getCardinality());
    }
}
Also used: IncrementalIndexSegment(org.apache.druid.segment.IncrementalIndexSegment) ColumnAnalysis(org.apache.druid.query.metadata.metadata.ColumnAnalysis) SegmentAnalysis(org.apache.druid.query.metadata.metadata.SegmentAnalysis) DimensionSchema(org.apache.druid.data.input.impl.DimensionSchema)
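
The helper's branches encode a rule worth stating plainly: cardinality is reported only for string dimensions, while numeric metric columns return null. A minimal sketch checking that directly, reusing only APIs shown in these examples (enabling it via AnalysisType.CARDINALITY is our assumption):

SegmentAnalyzer analyzer =
    new SegmentAnalyzer(EnumSet.of(SegmentMetadataQuery.AnalysisType.CARDINALITY));
Map<String, ColumnAnalysis> columns = analyzer.analyze(
    new IncrementalIndexSegment(TestIndex.getIncrementalTestIndex(), SegmentId.dummy("ds")));
columns.forEach((name, analysis) -> {
    // Non-null for string dimensions, null for numeric metrics.
    System.out.println(name + " cardinality=" + analysis.getCardinality());
});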

Example 5 with IncrementalIndexSegment

Use of org.apache.druid.segment.IncrementalIndexSegment in project druid by druid-io.

Class TimeBoundaryQueryRunnerTest, method getCustomRunner.

private QueryRunner getCustomRunner() throws IOException {
    CharSource v_0112 = CharSource.wrap(StringUtils.join(V_0112, "\n"));
    CharSource v_0113 = CharSource.wrap(StringUtils.join(V_0113, "\n"));
    IncrementalIndex index0 = TestIndex.loadIncrementalIndex(newIndex("2011-01-12T00:00:00.000Z"), v_0112);
    IncrementalIndex index1 = TestIndex.loadIncrementalIndex(newIndex("2011-01-14T00:00:00.000Z"), v_0113);
    segment0 = new IncrementalIndexSegment(index0, makeIdentifier(index0, "v1"));
    segment1 = new IncrementalIndexSegment(index1, makeIdentifier(index1, "v1"));
    // Register both segments under version "v1" so the filtering runner can resolve them by interval.
    VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline =
        new VersionedIntervalTimeline<>(StringComparators.LEXICOGRAPHIC);
    timeline.add(
        index0.getInterval(),
        "v1",
        new SingleElementPartitionChunk<>(ReferenceCountingSegment.wrapRootGenerationSegment(segment0))
    );
    timeline.add(
        index1.getInterval(),
        "v1",
        new SingleElementPartitionChunk<>(ReferenceCountingSegment.wrapRootGenerationSegment(segment1))
    );
    return QueryRunnerTestHelper.makeFilteringQueryRunner(timeline, FACTORY);
}
Also used: ReferenceCountingSegment(org.apache.druid.segment.ReferenceCountingSegment) CharSource(com.google.common.io.CharSource) IncrementalIndex(org.apache.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(org.apache.druid.segment.incremental.OnheapIncrementalIndex) IncrementalIndexSegment(org.apache.druid.segment.IncrementalIndexSegment) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline)
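
Once populated, the timeline resolves which segment versions cover a given interval. A short lookup sketch over the timeline built above; timeline.lookup and Intervals.of are standard Druid APIs, but the interval and output handling here are illustrative:

// Ask the timeline which registered chunks cover mid-January 2011.
for (TimelineObjectHolder<String, ReferenceCountingSegment> holder :
        timeline.lookup(Intervals.of("2011-01-12/2011-01-15"))) {
    System.out.println(holder.getInterval() + " -> version " + holder.getVersion());
}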

Aggregations

IncrementalIndexSegment (org.apache.druid.segment.IncrementalIndexSegment) 46
QueryableIndexSegment (org.apache.druid.segment.QueryableIndexSegment) 28
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory) 27
Test (org.junit.Test) 27
InitializedNullHandlingTest (org.apache.druid.testing.InitializedNullHandlingTest) 25
DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec) 21
GroupByQuery (org.apache.druid.query.groupby.GroupByQuery) 21
GroupByQueryRunnerTest (org.apache.druid.query.groupby.GroupByQueryRunnerTest) 21
ResultRow (org.apache.druid.query.groupby.ResultRow) 20
LegacySegmentSpec (org.apache.druid.query.spec.LegacySegmentSpec) 20
ExpressionVirtualColumn (org.apache.druid.segment.virtual.ExpressionVirtualColumn) 15
IncrementalIndex (org.apache.druid.segment.incremental.IncrementalIndex) 14
Result (org.apache.druid.query.Result) 10
OnheapIncrementalIndex (org.apache.druid.segment.incremental.OnheapIncrementalIndex) 10
ArrayList (java.util.ArrayList) 6
QueryRunnerFactory (org.apache.druid.query.QueryRunnerFactory) 6
Benchmark (org.openjdk.jmh.annotations.Benchmark) 6
BenchmarkMode (org.openjdk.jmh.annotations.BenchmarkMode) 6
OutputTimeUnit (org.openjdk.jmh.annotations.OutputTimeUnit) 6
AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory) 4