
Example 36 with TimestampSpec

Use of io.druid.data.input.impl.TimestampSpec in project druid by druid-io.

Class FlattenJSONBenchmarkUtil, method getForcedPathParser.

public Parser getForcedPathParser() {
    List<JSONPathFieldSpec> fields = new ArrayList<>();
    fields.add(JSONPathFieldSpec.createNestedField("ts", "$['ts']"));
    fields.add(JSONPathFieldSpec.createNestedField("d1", "$['d1']"));
    fields.add(JSONPathFieldSpec.createNestedField("d2", "$['d2']"));
    fields.add(JSONPathFieldSpec.createNestedField("e1.d1", "$['e1.d1']"));
    fields.add(JSONPathFieldSpec.createNestedField("e1.d2", "$['e1.d2']"));
    fields.add(JSONPathFieldSpec.createNestedField("e2.d3", "$['e2.d3']"));
    fields.add(JSONPathFieldSpec.createNestedField("e2.d4", "$['e2.d4']"));
    fields.add(JSONPathFieldSpec.createNestedField("e2.d5", "$['e2.d5']"));
    fields.add(JSONPathFieldSpec.createNestedField("e2.d6", "$['e2.d6']"));
    fields.add(JSONPathFieldSpec.createNestedField("e2.ad1[0]", "$['e2.ad1[0]']"));
    fields.add(JSONPathFieldSpec.createNestedField("e2.ad1[1]", "$['e2.ad1[1]']"));
    fields.add(JSONPathFieldSpec.createNestedField("e2.ad1[2]", "$['e2.ad1[2]']"));
    fields.add(JSONPathFieldSpec.createNestedField("ae1[0].d1", "$['ae1[0].d1']"));
    fields.add(JSONPathFieldSpec.createNestedField("ae1[1].d1", "$['ae1[1].d1']"));
    fields.add(JSONPathFieldSpec.createNestedField("ae1[2].e1.d2", "$['ae1[2].e1.d2']"));
    fields.add(JSONPathFieldSpec.createNestedField("m3", "$['m3']"));
    fields.add(JSONPathFieldSpec.createNestedField("m4", "$['m4']"));
    fields.add(JSONPathFieldSpec.createNestedField("e3.m1", "$['e3.m1']"));
    fields.add(JSONPathFieldSpec.createNestedField("e3.m2", "$['e3.m2']"));
    fields.add(JSONPathFieldSpec.createNestedField("e3.m3", "$['e3.m3']"));
    fields.add(JSONPathFieldSpec.createNestedField("e3.m4", "$['e3.m4']"));
    fields.add(JSONPathFieldSpec.createNestedField("e3.am1[0]", "$['e3.am1[0]']"));
    fields.add(JSONPathFieldSpec.createNestedField("e3.am1[1]", "$['e3.am1[1]']"));
    fields.add(JSONPathFieldSpec.createNestedField("e3.am1[2]", "$['e3.am1[2]']"));
    fields.add(JSONPathFieldSpec.createNestedField("e3.am1[3]", "$['e3.am1[3]']"));
    fields.add(JSONPathFieldSpec.createNestedField("e4.e4.m4", "$['e4.e4.m4']"));
    JSONPathSpec flattenSpec = new JSONPathSpec(false, fields);
    JSONParseSpec spec = new JSONParseSpec(new TimestampSpec("ts", "iso", null), new DimensionsSpec(null, null, null), flattenSpec, null);
    return spec.makeParser();
}
Also used : ArrayList(java.util.ArrayList) TimestampSpec(io.druid.data.input.impl.TimestampSpec) JSONPathSpec(io.druid.data.input.impl.JSONPathSpec) DimensionsSpec(io.druid.data.input.impl.DimensionsSpec) JSONPathFieldSpec(io.druid.data.input.impl.JSONPathFieldSpec) JSONParseSpec(io.druid.data.input.impl.JSONParseSpec)
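The bracketed paths above (for example $['e1.d1']) each look up a literal top-level key rather than descending into nested objects, and the false passed to JSONPathSpec turns field discovery off, so only the listed fields survive. A minimal usage sketch, not taken from the page, assuming FlattenJSONBenchmarkUtil has a no-argument constructor and that the Parser returned by makeParser() lives at io.druid.java.util.common.parsers.Parser and exposes a parse(String) method in this Druid version:

import io.druid.java.util.common.parsers.Parser;

import java.util.Map;

public class ForcedPathParserSketch {
    public static void main(String[] args) {
        // Hypothetical pre-flattened event: keys such as "e1.d1" exist literally at the top level,
        // matching the bracket-notation paths like $['e1.d1'] above.
        String event = "{\"ts\":\"2017-01-01T00:00:00.000Z\",\"d1\":\"v1\",\"e1.d1\":\"nested-value\"}";
        // Assumes this sketch compiles alongside FlattenJSONBenchmarkUtil (its package import omitted).
        Parser parser = new FlattenJSONBenchmarkUtil().getForcedPathParser();
        Map<String, Object> row = parser.parse(event);
        // Only the fields named by the JSONPathFieldSpec list are extracted (field discovery is off);
        // "ts" is later interpreted by the TimestampSpec("ts", "iso", null) in the parse spec.
        System.out.println(row.get("ts") + " / " + row.get("e1.d1"));
    }
}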

Example 37 with TimestampSpec

Use of io.druid.data.input.impl.TimestampSpec in project druid by druid-io.

Class IngestSegmentFirehoseFactoryTest, method constructorFeeder.

@Parameterized.Parameters(name = "{1}")
public static Collection<Object[]> constructorFeeder() throws IOException {
    final IndexSpec indexSpec = new IndexSpec();
    final HeapMemoryTaskStorage ts = new HeapMemoryTaskStorage(new TaskStorageConfig(null) {
    });
    final IncrementalIndexSchema schema = new IncrementalIndexSchema.Builder()
        .withQueryGranularity(Granularities.NONE)
        .withMinTimestamp(JodaUtils.MIN_INSTANT)
        .withDimensionsSpec(ROW_PARSER)
        .withMetrics(new AggregatorFactory[] {
            new LongSumAggregatorFactory(METRIC_LONG_NAME, DIM_LONG_NAME),
            new DoubleSumAggregatorFactory(METRIC_FLOAT_NAME, DIM_FLOAT_NAME)
        })
        .build();
    final OnheapIncrementalIndex index = new OnheapIncrementalIndex(schema, true, MAX_ROWS * MAX_SHARD_NUMBER);
    for (Integer i = 0; i < MAX_ROWS; ++i) {
        index.add(ROW_PARSER.parse(buildRow(i.longValue())));
    }
    if (!persistDir.mkdirs() && !persistDir.exists()) {
        throw new IOException(String.format("Could not create directory at [%s]", persistDir.getAbsolutePath()));
    }
    INDEX_MERGER.persist(index, persistDir, indexSpec);
    final TaskLockbox tl = new TaskLockbox(ts);
    final IndexerSQLMetadataStorageCoordinator mdc = new IndexerSQLMetadataStorageCoordinator(null, null, null) {

        private final Set<DataSegment> published = Sets.newHashSet();

        private final Set<DataSegment> nuked = Sets.newHashSet();

        @Override
        public List<DataSegment> getUsedSegmentsForInterval(String dataSource, Interval interval) throws IOException {
            return ImmutableList.copyOf(segmentSet);
        }

        @Override
        public List<DataSegment> getUsedSegmentsForIntervals(String dataSource, List<Interval> interval) throws IOException {
            return ImmutableList.copyOf(segmentSet);
        }

        @Override
        public List<DataSegment> getUnusedSegmentsForInterval(String dataSource, Interval interval) {
            return ImmutableList.of();
        }

        @Override
        public Set<DataSegment> announceHistoricalSegments(Set<DataSegment> segments) {
            Set<DataSegment> added = Sets.newHashSet();
            for (final DataSegment segment : segments) {
                if (published.add(segment)) {
                    added.add(segment);
                }
            }
            return ImmutableSet.copyOf(added);
        }

        @Override
        public void deleteSegments(Set<DataSegment> segments) {
            nuked.addAll(segments);
        }
    };
    final LocalTaskActionClientFactory tac = new LocalTaskActionClientFactory(ts, new TaskActionToolbox(tl, mdc, newMockEmitter(), EasyMock.createMock(SupervisorManager.class)));
    SegmentHandoffNotifierFactory notifierFactory = EasyMock.createNiceMock(SegmentHandoffNotifierFactory.class);
    EasyMock.replay(notifierFactory);
    final TaskToolboxFactory taskToolboxFactory = new TaskToolboxFactory(new TaskConfig(tmpDir.getAbsolutePath(), null, null, 50000, null, false, null, null), tac, newMockEmitter(), new DataSegmentPusher() {

        @Deprecated
        @Override
        public String getPathForHadoop(String dataSource) {
            return getPathForHadoop();
        }

        @Override
        public String getPathForHadoop() {
            throw new UnsupportedOperationException();
        }

        @Override
        public DataSegment push(File file, DataSegment segment) throws IOException {
            return segment;
        }
    }, new DataSegmentKiller() {

        @Override
        public void kill(DataSegment segments) throws SegmentLoadingException {
        }

        @Override
        public void killAll() throws IOException {
            throw new UnsupportedOperationException("not implemented");
        }
    }, new DataSegmentMover() {

        @Override
        public DataSegment move(DataSegment dataSegment, Map<String, Object> targetLoadSpec) throws SegmentLoadingException {
            return dataSegment;
        }
    }, new DataSegmentArchiver() {

        @Override
        public DataSegment archive(DataSegment segment) throws SegmentLoadingException {
            return segment;
        }

        @Override
        public DataSegment restore(DataSegment segment) throws SegmentLoadingException {
            return segment;
        }
    },
    // segment announcer
    null,
    notifierFactory,
    // query runner factory conglomerate corporation unionized collective
    null,
    // query executor service
    null,
    // monitor scheduler
    null,
    new SegmentLoaderFactory(new SegmentLoaderLocalCacheManager(null, new SegmentLoaderConfig() {

        @Override
        public List<StorageLocationConfig> getLocations() {
            return Lists.newArrayList();
        }
    }, MAPPER)), MAPPER, INDEX_MERGER, INDEX_IO, null, null, INDEX_MERGER_V9);
    Collection<Object[]> values = new LinkedList<>();
    for (InputRowParser parser : Arrays.<InputRowParser>asList(
        ROW_PARSER,
        new MapInputRowParser(
            new JSONParseSpec(
                new TimestampSpec(TIME_COLUMN, "auto", null),
                new DimensionsSpec(
                    DimensionsSpec.getDefaultSchemas(ImmutableList.<String>of()),
                    ImmutableList.of(DIM_FLOAT_NAME, DIM_LONG_NAME),
                    ImmutableList.<SpatialDimensionSchema>of()
                ),
                null,
                null
            )
        )
    )) {
        for (List<String> dim_names : Arrays.<List<String>>asList(null, ImmutableList.of(DIM_NAME))) {
            for (List<String> metric_names : Arrays.<List<String>>asList(null, ImmutableList.of(METRIC_LONG_NAME, METRIC_FLOAT_NAME))) {
                values.add(new Object[] { new IngestSegmentFirehoseFactory(DATA_SOURCE_NAME, FOREVER, new SelectorDimFilter(DIM_NAME, DIM_VALUE, null), dim_names, metric_names, Guice.createInjector(new Module() {

                    @Override
                    public void configure(Binder binder) {
                        binder.bind(TaskToolboxFactory.class).toInstance(taskToolboxFactory);
                    }
                }), INDEX_IO), String.format("DimNames[%s]MetricNames[%s]ParserDimNames[%s]", dim_names == null ? "null" : "dims", metric_names == null ? "null" : "metrics", parser == ROW_PARSER ? "dims" : "null"), parser });
            }
        }
    }
    return values;
}
Also used : DataSegmentArchiver(io.druid.segment.loading.DataSegmentArchiver) OnheapIncrementalIndex(io.druid.segment.incremental.OnheapIncrementalIndex) DataSegmentMover(io.druid.segment.loading.DataSegmentMover) List(java.util.List) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) LinkedList(java.util.LinkedList) LocalTaskActionClientFactory(io.druid.indexing.common.actions.LocalTaskActionClientFactory) SegmentLoaderConfig(io.druid.segment.loading.SegmentLoaderConfig) SegmentLoaderFactory(io.druid.indexing.common.SegmentLoaderFactory) SegmentLoaderLocalCacheManager(io.druid.segment.loading.SegmentLoaderLocalCacheManager) JSONParseSpec(io.druid.data.input.impl.JSONParseSpec) IndexerSQLMetadataStorageCoordinator(io.druid.metadata.IndexerSQLMetadataStorageCoordinator) DoubleSumAggregatorFactory(io.druid.query.aggregation.DoubleSumAggregatorFactory) HeapMemoryTaskStorage(io.druid.indexing.overlord.HeapMemoryTaskStorage) LinkedList(java.util.LinkedList) MapInputRowParser(io.druid.data.input.impl.MapInputRowParser) InputRowParser(io.druid.data.input.impl.InputRowParser) Module(com.google.inject.Module) SimpleModule(com.fasterxml.jackson.databind.module.SimpleModule) File(java.io.File) Interval(org.joda.time.Interval) IndexSpec(io.druid.segment.IndexSpec) DataSegmentPusher(io.druid.segment.loading.DataSegmentPusher) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) MapInputRowParser(io.druid.data.input.impl.MapInputRowParser) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) DataSegmentKiller(io.druid.segment.loading.DataSegmentKiller) TaskConfig(io.druid.indexing.common.config.TaskConfig) DataSegment(io.druid.timeline.DataSegment) Binder(com.google.inject.Binder) TaskToolboxFactory(io.druid.indexing.common.TaskToolboxFactory) SelectorDimFilter(io.druid.query.filter.SelectorDimFilter) TaskActionToolbox(io.druid.indexing.common.actions.TaskActionToolbox) TimestampSpec(io.druid.data.input.impl.TimestampSpec) IncrementalIndexSchema(io.druid.segment.incremental.IncrementalIndexSchema) SegmentLoadingException(io.druid.segment.loading.SegmentLoadingException) TaskStorageConfig(io.druid.indexing.common.config.TaskStorageConfig) IOException(java.io.IOException) DoubleSumAggregatorFactory(io.druid.query.aggregation.DoubleSumAggregatorFactory) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) SegmentHandoffNotifierFactory(io.druid.segment.realtime.plumber.SegmentHandoffNotifierFactory) SpatialDimensionSchema(io.druid.data.input.impl.SpatialDimensionSchema) TaskLockbox(io.druid.indexing.overlord.TaskLockbox) DimensionsSpec(io.druid.data.input.impl.DimensionsSpec)
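Among the parsers fed to the parameterized test above, the MapInputRowParser pairs a TimestampSpec with the "auto" format, which accepts either ISO strings or millisecond epochs for the time column. A minimal standalone sketch of that combination, with hypothetical column names and values rather than the test's constants:

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import io.druid.data.input.InputRow;
import io.druid.data.input.impl.DimensionsSpec;
import io.druid.data.input.impl.JSONParseSpec;
import io.druid.data.input.impl.MapInputRowParser;
import io.druid.data.input.impl.TimestampSpec;

public class MapParserSketch {
    public static void main(String[] args) {
        // Mirror of the parse spec built inside constructorFeeder(), with literal names instead of constants.
        MapInputRowParser parser = new MapInputRowParser(
            new JSONParseSpec(
                new TimestampSpec("t", "auto", null),
                new DimensionsSpec(DimensionsSpec.getDefaultSchemas(ImmutableList.of("dim1")), null, null),
                null,
                null
            )
        );
        // "auto" lets the time column be an ISO string here; an epoch-millis long would also work.
        InputRow row = parser.parse(ImmutableMap.<String, Object>of("t", "2014-10-20T00:00:00.000Z", "dim1", "x", "val", 1L));
        System.out.println(row.getTimestamp() + " " + row.getDimension("dim1"));
    }
}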

Example 38 with TimestampSpec

Use of io.druid.data.input.impl.TimestampSpec in project druid by druid-io.

Class SegmentMetadataQueryTest, method testSegmentMetadataQueryWithTimestampSpecMerge.

@Test
public void testSegmentMetadataQueryWithTimestampSpecMerge() {
    SegmentAnalysis mergedSegmentAnalysis = new SegmentAnalysis(differentIds ? "merged" : "testSegment", null, ImmutableMap.of("placement", new ColumnAnalysis(ValueType.STRING.toString(), false, 0, 0, null, null, null)), 0, expectedSegmentAnalysis1.getNumRows() + expectedSegmentAnalysis2.getNumRows(), null, new TimestampSpec("ds", "auto", null), null, null);
    QueryToolChest toolChest = FACTORY.getToolchest();
    ExecutorService exec = Executors.newCachedThreadPool();
    QueryRunner myRunner = new FinalizeResultsQueryRunner<>(toolChest.mergeResults(FACTORY.mergeRunners(MoreExecutors.sameThreadExecutor(), Lists.<QueryRunner<SegmentAnalysis>>newArrayList(toolChest.preMergeQueryDecoration(runner1), toolChest.preMergeQueryDecoration(runner2)))), toolChest);
    TestHelper.assertExpectedObjects(ImmutableList.of(mergedSegmentAnalysis), myRunner.run(Druids.newSegmentMetadataQueryBuilder().dataSource("testing").intervals("2013/2014").toInclude(new ListColumnIncluderator(Arrays.asList("placement"))).analysisTypes(SegmentMetadataQuery.AnalysisType.TIMESTAMPSPEC).merge(true).build(), Maps.newHashMap()), "failed SegmentMetadata merging query");
    exec.shutdownNow();
}
Also used : FinalizeResultsQueryRunner(io.druid.query.FinalizeResultsQueryRunner) ListColumnIncluderator(io.druid.query.metadata.metadata.ListColumnIncluderator) ColumnAnalysis(io.druid.query.metadata.metadata.ColumnAnalysis) TimestampSpec(io.druid.data.input.impl.TimestampSpec) ExecutorService(java.util.concurrent.ExecutorService) SegmentAnalysis(io.druid.query.metadata.metadata.SegmentAnalysis) QueryToolChest(io.druid.query.QueryToolChest) FinalizeResultsQueryRunner(io.druid.query.FinalizeResultsQueryRunner) QueryRunner(io.druid.query.QueryRunner) Test(org.junit.Test)

Example 39 with TimestampSpec

Use of io.druid.data.input.impl.TimestampSpec in project druid by druid-io.

Class SegmentMetadataQueryQueryToolChest, method mergeAnalyses.

@VisibleForTesting
public static SegmentAnalysis mergeAnalyses(final SegmentAnalysis arg1, final SegmentAnalysis arg2, boolean lenientAggregatorMerge) {
    if (arg1 == null) {
        return arg2;
    }
    if (arg2 == null) {
        return arg1;
    }
    List<Interval> newIntervals = null;
    if (arg1.getIntervals() != null) {
        newIntervals = Lists.newArrayList();
        newIntervals.addAll(arg1.getIntervals());
    }
    if (arg2.getIntervals() != null) {
        if (newIntervals == null) {
            newIntervals = Lists.newArrayList();
        }
        newIntervals.addAll(arg2.getIntervals());
    }
    final Map<String, ColumnAnalysis> leftColumns = arg1.getColumns();
    final Map<String, ColumnAnalysis> rightColumns = arg2.getColumns();
    Map<String, ColumnAnalysis> columns = Maps.newTreeMap();
    Set<String> rightColumnNames = Sets.newHashSet(rightColumns.keySet());
    for (Map.Entry<String, ColumnAnalysis> entry : leftColumns.entrySet()) {
        final String columnName = entry.getKey();
        columns.put(columnName, entry.getValue().fold(rightColumns.get(columnName)));
        rightColumnNames.remove(columnName);
    }
    for (String columnName : rightColumnNames) {
        columns.put(columnName, rightColumns.get(columnName));
    }
    final Map<String, AggregatorFactory> aggregators = Maps.newHashMap();
    if (lenientAggregatorMerge) {
        // Merge each aggregator individually, ignoring nulls
        for (SegmentAnalysis analysis : ImmutableList.of(arg1, arg2)) {
            if (analysis.getAggregators() != null) {
                for (Map.Entry<String, AggregatorFactory> entry : analysis.getAggregators().entrySet()) {
                    final String aggregatorName = entry.getKey();
                    final AggregatorFactory aggregator = entry.getValue();
                    AggregatorFactory merged = aggregators.get(aggregatorName);
                    if (merged != null) {
                        try {
                            merged = merged.getMergingFactory(aggregator);
                        } catch (AggregatorFactoryNotMergeableException e) {
                            merged = null;
                        }
                    } else {
                        merged = aggregator;
                    }
                    aggregators.put(aggregatorName, merged);
                }
            }
        }
    } else {
        final AggregatorFactory[] aggs1 = arg1.getAggregators() != null ? arg1.getAggregators().values().toArray(new AggregatorFactory[arg1.getAggregators().size()]) : null;
        final AggregatorFactory[] aggs2 = arg2.getAggregators() != null ? arg2.getAggregators().values().toArray(new AggregatorFactory[arg2.getAggregators().size()]) : null;
        final AggregatorFactory[] merged = AggregatorFactory.mergeAggregators(Arrays.asList(aggs1, aggs2));
        if (merged != null) {
            for (AggregatorFactory aggregator : merged) {
                aggregators.put(aggregator.getName(), aggregator);
            }
        }
    }
    final TimestampSpec timestampSpec = TimestampSpec.mergeTimestampSpec(Lists.newArrayList(arg1.getTimestampSpec(), arg2.getTimestampSpec()));
    final Granularity queryGranularity = Granularity.mergeGranularities(Lists.newArrayList(arg1.getQueryGranularity(), arg2.getQueryGranularity()));
    final String mergedId;
    if (arg1.getId() != null && arg2.getId() != null && arg1.getId().equals(arg2.getId())) {
        mergedId = arg1.getId();
    } else {
        mergedId = "merged";
    }
    final Boolean rollup;
    if (arg1.isRollup() != null && arg2.isRollup() != null && arg1.isRollup().equals(arg2.isRollup())) {
        rollup = arg1.isRollup();
    } else {
        rollup = null;
    }
    return new SegmentAnalysis(mergedId, newIntervals, columns, arg1.getSize() + arg2.getSize(), arg1.getNumRows() + arg2.getNumRows(), aggregators.isEmpty() ? null : aggregators, timestampSpec, queryGranularity, rollup);
}
Also used : AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) Granularity(io.druid.java.util.common.granularity.Granularity) AggregatorFactoryNotMergeableException(io.druid.query.aggregation.AggregatorFactoryNotMergeableException) ColumnAnalysis(io.druid.query.metadata.metadata.ColumnAnalysis) TimestampSpec(io.druid.data.input.impl.TimestampSpec) SegmentAnalysis(io.druid.query.metadata.metadata.SegmentAnalysis) Map(java.util.Map) Interval(org.joda.time.Interval) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
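TimestampSpec.mergeTimestampSpec collapses the two per-segment specs into one for the merged SegmentAnalysis. A small sketch of the expected behavior, stated as an assumption rather than a quote of the implementation: agreeing specs merge to that spec, while conflicting specs are expected to merge to null, analogous to how Granularity.mergeGranularities is used on the following line.

import com.google.common.collect.Lists;
import io.druid.data.input.impl.TimestampSpec;

public class MergeTimestampSpecSketch {
    public static void main(String[] args) {
        TimestampSpec a = new TimestampSpec("ds", "auto", null);
        TimestampSpec b = new TimestampSpec("ds", "auto", null);
        TimestampSpec c = new TimestampSpec("ts", "iso", null);

        // Identical specs are expected to merge to an equal spec.
        System.out.println(TimestampSpec.mergeTimestampSpec(Lists.newArrayList(a, b)));

        // Conflicting specs are expected to merge to null ("unknown"), which mergeAnalyses
        // would then carry into the merged SegmentAnalysis.
        System.out.println(TimestampSpec.mergeTimestampSpec(Lists.newArrayList(a, c)));
    }
}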

Example 40 with TimestampSpec

Use of io.druid.data.input.impl.TimestampSpec in project druid by druid-io.

Class SegmentMetadataQueryRunnerFactory, method createRunner.

@Override
public QueryRunner<SegmentAnalysis> createRunner(final Segment segment) {
    return new QueryRunner<SegmentAnalysis>() {

        @Override
        public Sequence<SegmentAnalysis> run(Query<SegmentAnalysis> inQ, Map<String, Object> responseContext) {
            SegmentMetadataQuery query = (SegmentMetadataQuery) inQ;
            final SegmentAnalyzer analyzer = new SegmentAnalyzer(query.getAnalysisTypes());
            final Map<String, ColumnAnalysis> analyzedColumns = analyzer.analyze(segment);
            final long numRows = analyzer.numRows(segment);
            long totalSize = 0;
            if (analyzer.analyzingSize()) {
                // Initialize with the size of the whitespace: roughly 1 byte per column per row
                totalSize = analyzedColumns.size() * numRows;
            }
            Map<String, ColumnAnalysis> columns = Maps.newTreeMap();
            ColumnIncluderator includerator = query.getToInclude();
            for (Map.Entry<String, ColumnAnalysis> entry : analyzedColumns.entrySet()) {
                final String columnName = entry.getKey();
                final ColumnAnalysis column = entry.getValue();
                if (!column.isError()) {
                    totalSize += column.getSize();
                }
                if (includerator.include(columnName)) {
                    columns.put(columnName, column);
                }
            }
            List<Interval> retIntervals = query.analyzingInterval() ? Arrays.asList(segment.getDataInterval()) : null;
            final Map<String, AggregatorFactory> aggregators;
            Metadata metadata = null;
            if (query.hasAggregators()) {
                metadata = segment.asStorageAdapter().getMetadata();
                if (metadata != null && metadata.getAggregators() != null) {
                    aggregators = Maps.newHashMap();
                    for (AggregatorFactory aggregator : metadata.getAggregators()) {
                        aggregators.put(aggregator.getName(), aggregator);
                    }
                } else {
                    aggregators = null;
                }
            } else {
                aggregators = null;
            }
            final TimestampSpec timestampSpec;
            if (query.hasTimestampSpec()) {
                if (metadata == null) {
                    metadata = segment.asStorageAdapter().getMetadata();
                }
                timestampSpec = metadata != null ? metadata.getTimestampSpec() : null;
            } else {
                timestampSpec = null;
            }
            final Granularity queryGranularity;
            if (query.hasQueryGranularity()) {
                if (metadata == null) {
                    metadata = segment.asStorageAdapter().getMetadata();
                }
                queryGranularity = metadata != null ? metadata.getQueryGranularity() : null;
            } else {
                queryGranularity = null;
            }
            Boolean rollup = null;
            if (query.hasRollup()) {
                if (metadata == null) {
                    metadata = segment.asStorageAdapter().getMetadata();
                }
                rollup = metadata != null ? metadata.isRollup() : null;
                if (rollup == null) {
                    // in this case, the segment was built before the no-rollup feature existed,
                    // so it was necessarily built with rollup
                    rollup = Boolean.TRUE;
                }
            }
            return Sequences.simple(Arrays.asList(new SegmentAnalysis(segment.getIdentifier(), retIntervals, columns, totalSize, numRows, aggregators, timestampSpec, queryGranularity, rollup)));
        }
    };
}
Also used : BaseQuery(io.druid.query.BaseQuery) SegmentMetadataQuery(io.druid.query.metadata.metadata.SegmentMetadataQuery) Query(io.druid.query.Query) Metadata(io.druid.segment.Metadata) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) Granularity(io.druid.java.util.common.granularity.Granularity) ColumnIncluderator(io.druid.query.metadata.metadata.ColumnIncluderator) QueryRunner(io.druid.query.QueryRunner) ConcatQueryRunner(io.druid.query.ConcatQueryRunner) SegmentMetadataQuery(io.druid.query.metadata.metadata.SegmentMetadataQuery) ColumnAnalysis(io.druid.query.metadata.metadata.ColumnAnalysis) TimestampSpec(io.druid.data.input.impl.TimestampSpec) SegmentAnalysis(io.druid.query.metadata.metadata.SegmentAnalysis) Map(java.util.Map) Interval(org.joda.time.Interval)
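createRunner only reads the pieces of the segment's stored Metadata that the query asked for, so whether a timestampSpec comes back depends on the analysis types set on the query. A brief sketch using the same Druids builder seen in Example 38; the dataSource and interval are placeholders:

import io.druid.query.Druids;
import io.druid.query.metadata.metadata.SegmentMetadataQuery;

public class TimestampSpecAnalysisSketch {
    public static void main(String[] args) {
        SegmentMetadataQuery query = Druids.newSegmentMetadataQueryBuilder()
            .dataSource("testing")
            .intervals("2013/2014")
            .analysisTypes(SegmentMetadataQuery.AnalysisType.TIMESTAMPSPEC)
            .build();

        // With only TIMESTAMPSPEC requested, the runner above reads metadata.getTimestampSpec()
        // but leaves aggregators and queryGranularity null in the resulting SegmentAnalysis.
        System.out.println(query.hasTimestampSpec());  // expected: true
        System.out.println(query.hasAggregators());    // expected: false
    }
}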

Aggregations

TimestampSpec (io.druid.data.input.impl.TimestampSpec): 40 usages
DimensionsSpec (io.druid.data.input.impl.DimensionsSpec): 31 usages
JSONParseSpec (io.druid.data.input.impl.JSONParseSpec): 16 usages
StringInputRowParser (io.druid.data.input.impl.StringInputRowParser): 16 usages
Test (org.junit.Test): 15 usages
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 12 usages
Map (java.util.Map): 11 usages
DataSchema (io.druid.segment.indexing.DataSchema): 10 usages
UniformGranularitySpec (io.druid.segment.indexing.granularity.UniformGranularitySpec): 10 usages
LongSumAggregatorFactory (io.druid.query.aggregation.LongSumAggregatorFactory): 9 usages
DoubleSumAggregatorFactory (io.druid.query.aggregation.DoubleSumAggregatorFactory): 8 usages
DateTime (org.joda.time.DateTime): 8 usages
CountAggregatorFactory (io.druid.query.aggregation.CountAggregatorFactory): 7 usages
ArrayList (java.util.ArrayList): 7 usages
CSVParseSpec (io.druid.data.input.impl.CSVParseSpec): 6 usages
StringDimensionSchema (io.druid.data.input.impl.StringDimensionSchema): 6 usages
TimeAndDimsParseSpec (io.druid.data.input.impl.TimeAndDimsParseSpec): 6 usages
OnheapIncrementalIndex (io.druid.segment.incremental.OnheapIncrementalIndex): 6 usages
InputRowParser (io.druid.data.input.impl.InputRowParser): 5 usages
MapInputRowParser (io.druid.data.input.impl.MapInputRowParser): 5 usages