
Example 11 with StringInputRowParser

Use of io.druid.data.input.impl.StringInputRowParser in project druid by druid-io.

From the class BatchDeltaIngestionTest, method makeHadoopDruidIndexerConfig:

private HadoopDruidIndexerConfig makeHadoopDruidIndexerConfig(Map<String, Object> inputSpec, File tmpDir) throws Exception {
    HadoopDruidIndexerConfig config = new HadoopDruidIndexerConfig(
        new HadoopIngestionSpec(
            new DataSchema(
                "website",
                // The parser is held in the schema as a Map, hence the convertValue() round-trip.
                MAPPER.convertValue(
                    new StringInputRowParser(
                        new CSVParseSpec(
                            new TimestampSpec("timestamp", "yyyyMMddHH", null),
                            new DimensionsSpec(DimensionsSpec.getDefaultSchemas(ImmutableList.of("host")), null, null),
                            null,
                            ImmutableList.of("timestamp", "host", "host2", "visited_num")
                        ),
                        null
                    ),
                    Map.class
                ),
                new AggregatorFactory[] {
                    new LongSumAggregatorFactory("visited_sum", "visited_num"),
                    new HyperUniquesAggregatorFactory("unique_hosts", "host2")
                },
                new UniformGranularitySpec(Granularities.DAY, Granularities.NONE, ImmutableList.of(INTERVAL_FULL)),
                MAPPER
            ),
            new HadoopIOConfig(inputSpec, null, tmpDir.getCanonicalPath()),
            // The null/false arguments take the HadoopTuningConfig defaults.
            new HadoopTuningConfig(
                tmpDir.getCanonicalPath(),
                null, null, null, null, null,
                false, false, false, false,
                null, false, false,
                null, null, null,
                false, false
            )
        )
    );
    config.setShardSpecs(
        ImmutableMap.<Long, List<HadoopyShardSpec>>of(
            INTERVAL_FULL.getStartMillis(),
            ImmutableList.of(
                // A single hash-based shard covering the full interval.
                new HadoopyShardSpec(new HashBasedNumberedShardSpec(0, 1, null, HadoopDruidIndexerConfig.JSON_MAPPER), 0)
            )
        )
    );
    config = HadoopDruidIndexerConfig.fromSpec(config.getSchema());
    return config;
}
Also used: HashBasedNumberedShardSpec(io.druid.timeline.partition.HashBasedNumberedShardSpec) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) DataSchema(io.druid.segment.indexing.DataSchema) UniformGranularitySpec(io.druid.segment.indexing.granularity.UniformGranularitySpec) CSVParseSpec(io.druid.data.input.impl.CSVParseSpec) StringInputRowParser(io.druid.data.input.impl.StringInputRowParser) TimestampSpec(io.druid.data.input.impl.TimestampSpec) HyperUniquesAggregatorFactory(io.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) DimensionsSpec(io.druid.data.input.impl.DimensionsSpec) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap)
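
The CSV parse spec buried in that constructor is easy to exercise on its own. Below is a minimal sketch of what it does to a single input line; the sample row and the expected values in the comments are illustrative assumptions, not fixtures from the test:

    StringInputRowParser parser = new StringInputRowParser(
        new CSVParseSpec(
            new TimestampSpec("timestamp", "yyyyMMddHH", null),
            new DimensionsSpec(DimensionsSpec.getDefaultSchemas(ImmutableList.of("host")), null, null),
            null,
            ImmutableList.of("timestamp", "host", "host2", "visited_num")
        ),
        null
    );
    // "2014102200" is read with the yyyyMMddHH format; "host" becomes the only
    // declared dimension, while "host2" and "visited_num" remain available as
    // inputs to the hyperUnique and longSum aggregators above.
    InputRow row = parser.parse("2014102200,host1,host2a,25");
    // row.getTimestamp()       -> 2014-10-22T00:00:00.000Z (assuming UTC)
    // row.getDimension("host") -> ["host1"]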

Example 12 with StringInputRowParser

Use of io.druid.data.input.impl.StringInputRowParser in project druid by druid-io.

From the class AggregationTestHelper, method createIndex:

public void createIndex(Iterator rows, InputRowParser parser, final AggregatorFactory[] metrics, File outDir, long minTimestamp, Granularity gran, boolean deserializeComplexMetrics, int maxRowCount) throws Exception {
    IncrementalIndex index = null;
    List<File> toMerge = new ArrayList<>();
    try {
        index = new OnheapIncrementalIndex(minTimestamp, gran, metrics, deserializeComplexMetrics, true, true, maxRowCount);
        while (rows.hasNext()) {
            Object row = rows.next();
            if (!index.canAppendRow()) {
                File tmp = tempFolder.newFolder();
                toMerge.add(tmp);
                indexMerger.persist(index, tmp, new IndexSpec());
                index.close();
                index = new OnheapIncrementalIndex(minTimestamp, gran, metrics, deserializeComplexMetrics, true, true, maxRowCount);
            }
            if (row instanceof String && parser instanceof StringInputRowParser) {
                // Note: this cast is required because StringInputRowParser is InputRowParser<ByteBuffer>,
                // as opposed to InputRowParser<String>.
                index.add(((StringInputRowParser) parser).parse((String) row));
            } else {
                index.add(parser.parse(row));
            }
        }
        if (toMerge.size() > 0) {
            File tmp = tempFolder.newFolder();
            toMerge.add(tmp);
            indexMerger.persist(index, tmp, new IndexSpec());
            List<QueryableIndex> indexes = new ArrayList<>(toMerge.size());
            for (File file : toMerge) {
                indexes.add(indexIO.loadIndex(file));
            }
            indexMerger.mergeQueryableIndex(indexes, true, metrics, outDir, new IndexSpec());
            for (QueryableIndex qi : indexes) {
                qi.close();
            }
        } else {
            indexMerger.persist(index, outDir, new IndexSpec());
        }
    } finally {
        if (index != null) {
            index.close();
        }
    }
}
Also used: IndexSpec(io.druid.segment.IndexSpec) IncrementalIndex(io.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(io.druid.segment.incremental.OnheapIncrementalIndex) QueryableIndex(io.druid.segment.QueryableIndex) ArrayList(java.util.ArrayList) StringInputRowParser(io.druid.data.input.impl.StringInputRowParser) File(java.io.File)
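
The instanceof branch above exists because StringInputRowParser implements InputRowParser<ByteBuffer>: its generic parse() entry point takes a ByteBuffer, so raw String rows have to be routed to the String overload explicitly. Here is a sketch of how a caller might drive this method with raw CSV lines; `helper`, `parser`, and `outDir` are assumed to already exist in the surrounding test:

    List<String> lines = ImmutableList.of(
        "2014102200,host1,10",
        "2014102200,host2,20"
    );
    helper.createIndex(
        lines.iterator(),   // raw String rows
        parser,             // a StringInputRowParser, so the String branch is taken
        new AggregatorFactory[] { new LongSumAggregatorFactory("visited_sum", "visited") },
        outDir,
        0L,                 // minTimestamp
        Granularities.NONE, // index granularity
        true,               // deserializeComplexMetrics
        5000                // maxRowCount before spilling to a new incremental index
    );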

Example 13 with StringInputRowParser

Use of io.druid.data.input.impl.StringInputRowParser in project druid by druid-io.

From the class IngestSegmentFirehoseTest, method createTestIndex:

private void createTestIndex(File segmentDir) throws Exception {
    List<String> rows = Lists.newArrayList(
        "2014102200,host1,10", "2014102200,host2,20", "2014102200,host3,30",
        "2014102201,host1,10", "2014102201,host2,20", "2014102201,host3,30",
        "2014102202,host1,10", "2014102202,host2,20", "2014102202,host3,30"
    );
    StringInputRowParser parser = new StringInputRowParser(
        new CSVParseSpec(
            new TimestampSpec("timestamp", "yyyyMMddHH", null),
            new DimensionsSpec(DimensionsSpec.getDefaultSchemas(ImmutableList.of("host")), null, null),
            null,
            ImmutableList.of("timestamp", "host", "visited")
        ),
        Charsets.UTF_8.toString()
    );
    AggregatorFactory[] aggregators = new AggregatorFactory[] {
        new LongSumAggregatorFactory("visited_sum", "visited")
    };
    IncrementalIndex index = null;
    try {
        index = new OnheapIncrementalIndex(0, Granularities.NONE, aggregators, true, true, true, 5000);
        for (String line : rows) {
            index.add(parser.parse(line));
        }
        indexMerger.persist(index, segmentDir, new IndexSpec());
    } finally {
        if (index != null) {
            index.close();
        }
    }
}
Also used: IndexSpec(io.druid.segment.IndexSpec) IncrementalIndex(io.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(io.druid.segment.incremental.OnheapIncrementalIndex) CSVParseSpec(io.druid.data.input.impl.CSVParseSpec) StringInputRowParser(io.druid.data.input.impl.StringInputRowParser) TimestampSpec(io.druid.data.input.impl.TimestampSpec) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) DimensionsSpec(io.druid.data.input.impl.DimensionsSpec) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory)
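
Once persist() returns, segmentDir holds a complete segment that the firehose test can load back. A minimal sketch of reading it, assuming the same indexIO instance is in scope and that StorageAdapter exposes getNumRows() as in this Druid version; since all nine CSV rows have distinct (timestamp, host) pairs, rollup should leave nine rows in the segment:

    QueryableIndex qi = indexIO.loadIndex(segmentDir);
    StorageAdapter adapter = new QueryableIndexStorageAdapter(qi);
    Assert.assertEquals(9, adapter.getNumRows());
    qi.close();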

Example 14 with StringInputRowParser

Use of io.druid.data.input.impl.StringInputRowParser in project druid by druid-io.

From the class FireDepartmentTest, method testSerde:

@Test
public void testSerde() throws Exception {
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.setInjectableValues(new InjectableValues.Std().addValue(ObjectMapper.class, jsonMapper));
    FireDepartment schema = new FireDepartment(
        new DataSchema(
            "foo",
            jsonMapper.convertValue(
                new StringInputRowParser(
                    new JSONParseSpec(
                        new TimestampSpec("timestamp", "auto", null),
                        new DimensionsSpec(DimensionsSpec.getDefaultSchemas(Arrays.asList("dim1", "dim2")), null, null),
                        null,
                        null
                    ),
                    null
                ),
                Map.class
            ),
            new AggregatorFactory[] { new CountAggregatorFactory("count") },
            new UniformGranularitySpec(Granularities.HOUR, Granularities.MINUTE, null),
            jsonMapper
        ),
        new RealtimeIOConfig(
            null,
            // Collaborators that are irrelevant to serde are left null.
            new RealtimePlumberSchool(
                null, null, null, null, null, null, null,
                TestHelper.getTestIndexMerger(),
                TestHelper.getTestIndexMergerV9(),
                TestHelper.getTestIndexIO(),
                MapCache.create(0),
                NO_CACHE_CONFIG,
                TestHelper.getObjectMapper()
            ),
            null
        ),
        RealtimeTuningConfig.makeDefaultTuningConfig(new File("/tmp/nonexistent"))
    );
    String json = jsonMapper.writeValueAsString(schema);
    FireDepartment newSchema = jsonMapper.readValue(json, FireDepartment.class);
    Assert.assertEquals(schema.getDataSchema().getDataSource(), newSchema.getDataSchema().getDataSource());
    Assert.assertEquals("/tmp/nonexistent", schema.getTuningConfig().getBasePersistDirectory().toString());
}
Also used: RealtimeIOConfig(io.druid.segment.indexing.RealtimeIOConfig) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) DataSchema(io.druid.segment.indexing.DataSchema) UniformGranularitySpec(io.druid.segment.indexing.granularity.UniformGranularitySpec) RealtimePlumberSchool(io.druid.segment.realtime.plumber.RealtimePlumberSchool) StringInputRowParser(io.druid.data.input.impl.StringInputRowParser) TimestampSpec(io.druid.data.input.impl.TimestampSpec) DimensionsSpec(io.druid.data.input.impl.DimensionsSpec) JSONParseSpec(io.druid.data.input.impl.JSONParseSpec) File(java.io.File) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) Test(org.junit.Test)
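
The setInjectableValues() call on the second line is what makes the round-trip work: some of the deserialized classes declare @JacksonInject ObjectMapper fields, and readValue() fails unless the mapper registers itself as that injectable value. The idiom in isolation, as a general-purpose sketch rather than additional test code:

    ObjectMapper jsonMapper = new DefaultObjectMapper();
    // Any @JacksonInject ObjectMapper field encountered during readValue()
    // now resolves to jsonMapper itself.
    jsonMapper.setInjectableValues(
        new InjectableValues.Std().addValue(ObjectMapper.class, jsonMapper)
    );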

Example 15 with StringInputRowParser

Use of io.druid.data.input.impl.StringInputRowParser in project druid by druid-io.

From the class DataSchemaTest, method testDefaultExclusions:

@Test
public void testDefaultExclusions() throws Exception {
    Map<String, Object> parser = jsonMapper.convertValue(
        new StringInputRowParser(
            new JSONParseSpec(
                new TimestampSpec("time", "auto", null),
                new DimensionsSpec(DimensionsSpec.getDefaultSchemas(ImmutableList.of("dimB", "dimA")), null, null),
                null,
                null
            ),
            null
        ),
        new TypeReference<Map<String, Object>>() {
        }
    );
    DataSchema schema = new DataSchema(
        "test",
        parser,
        new AggregatorFactory[] {
            new DoubleSumAggregatorFactory("metric1", "col1"),
            new DoubleSumAggregatorFactory("metric2", "col2")
        },
        new ArbitraryGranularitySpec(Granularities.DAY, ImmutableList.of(Interval.parse("2014/2015"))),
        jsonMapper
    );
    Assert.assertEquals(
        ImmutableSet.of("time", "col1", "col2", "metric1", "metric2"),
        schema.getParser().getParseSpec().getDimensionsSpec().getDimensionExclusions()
    );
}
Also used: DoubleSumAggregatorFactory(io.druid.query.aggregation.DoubleSumAggregatorFactory) StringInputRowParser(io.druid.data.input.impl.StringInputRowParser) TimestampSpec(io.druid.data.input.impl.TimestampSpec) DimensionsSpec(io.druid.data.input.impl.DimensionsSpec) JSONParseSpec(io.druid.data.input.impl.JSONParseSpec) Map(java.util.Map) ArbitraryGranularitySpec(io.druid.segment.indexing.granularity.ArbitraryGranularitySpec) Test(org.junit.Test)
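
The expected exclusion set follows a simple rule: the timestamp column plus every aggregator's input fields and output name are excluded from the dimensions, leaving dimB and dimA as the only dimensions. An illustrative reconstruction of that set; this loop is a sketch of the rule, not DataSchema's actual implementation:

    Set<String> exclusions = new HashSet<>();
    exclusions.add("time"); // the timestamp column
    for (AggregatorFactory agg : new AggregatorFactory[] {
        new DoubleSumAggregatorFactory("metric1", "col1"),
        new DoubleSumAggregatorFactory("metric2", "col2")
    }) {
        exclusions.add(agg.getName());           // output names: metric1, metric2
        exclusions.addAll(agg.requiredFields()); // input columns: col1, col2
    }
    // exclusions is now {"time", "metric1", "col1", "metric2", "col2"}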

Aggregations

StringInputRowParser (io.druid.data.input.impl.StringInputRowParser): 19 usages
DimensionsSpec (io.druid.data.input.impl.DimensionsSpec): 16 usages
TimestampSpec (io.druid.data.input.impl.TimestampSpec): 16 usages
JSONParseSpec (io.druid.data.input.impl.JSONParseSpec): 8 usages
Map (java.util.Map): 8 usages
DataSchema (io.druid.segment.indexing.DataSchema): 7 usages
UniformGranularitySpec (io.druid.segment.indexing.granularity.UniformGranularitySpec): 7 usages
CSVParseSpec (io.druid.data.input.impl.CSVParseSpec): 6 usages
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 5 usages
CountAggregatorFactory (io.druid.query.aggregation.CountAggregatorFactory): 5 usages
DoubleSumAggregatorFactory (io.druid.query.aggregation.DoubleSumAggregatorFactory): 5 usages
Test (org.junit.Test): 5 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 4 usages
LongSumAggregatorFactory (io.druid.query.aggregation.LongSumAggregatorFactory): 4 usages
OnheapIncrementalIndex (io.druid.segment.incremental.OnheapIncrementalIndex): 4 usages
ArbitraryGranularitySpec (io.druid.segment.indexing.granularity.ArbitraryGranularitySpec): 4 usages
Before (org.junit.Before): 4 usages
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 3 usages
DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper): 3 usages
HyperUniquesAggregatorFactory (io.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory): 3 usages