
Example 6 with InputRowParser

use of org.apache.druid.data.input.impl.InputRowParser in project hive by apache.

the class DruidStorageHandler method updateKafkaIngestion.

private void updateKafkaIngestion(Table table) {
    final String overlordAddress = HiveConf.getVar(getConf(), HiveConf.ConfVars.HIVE_DRUID_OVERLORD_DEFAULT_ADDRESS);
    final String dataSourceName = Preconditions.checkNotNull(DruidStorageHandlerUtils.getTableProperty(table, Constants.DRUID_DATA_SOURCE), "Druid datasource name is null");
    final String kafkaTopic = Preconditions.checkNotNull(DruidStorageHandlerUtils.getTableProperty(table, DruidConstants.KAFKA_TOPIC), "kafka topic is null");
    final String kafkaServers = Preconditions.checkNotNull(DruidStorageHandlerUtils.getTableProperty(table, DruidConstants.KAFKA_BOOTSTRAP_SERVERS), "kafka connect string is null");
    Properties tableProperties = new Properties();
    tableProperties.putAll(table.getParameters());
    final GranularitySpec granularitySpec = DruidStorageHandlerUtils.getGranularitySpec(getConf(), tableProperties);
    List<FieldSchema> columns = table.getSd().getCols();
    List<String> columnNames = new ArrayList<>(columns.size());
    List<TypeInfo> columnTypes = new ArrayList<>(columns.size());
    for (FieldSchema schema : columns) {
        columnNames.add(schema.getName());
        columnTypes.add(TypeInfoUtils.getTypeInfoFromTypeString(schema.getType()));
    }
    Pair<List<DimensionSchema>, AggregatorFactory[]> dimensionsAndAggregates = DruidStorageHandlerUtils.getDimensionsAndAggregates(columnNames, columnTypes);
    if (!columnNames.contains(DruidConstants.DEFAULT_TIMESTAMP_COLUMN)) {
        throw new IllegalStateException("Timestamp column ('" + DruidConstants.DEFAULT_TIMESTAMP_COLUMN + "') not specified in create table; list of columns is: " + columnNames);
    }
    DimensionsSpec dimensionsSpec = new DimensionsSpec(dimensionsAndAggregates.lhs, null, null);
    String timestampFormat = DruidStorageHandlerUtils.getTableProperty(table, DruidConstants.DRUID_TIMESTAMP_FORMAT);
    String timestampColumnName = DruidStorageHandlerUtils.getTableProperty(table, DruidConstants.DRUID_TIMESTAMP_COLUMN);
    if (timestampColumnName == null) {
        timestampColumnName = DruidConstants.DEFAULT_TIMESTAMP_COLUMN;
    }
    final TimestampSpec timestampSpec = new TimestampSpec(timestampColumnName, timestampFormat, null);
    final InputRowParser inputRowParser = DruidKafkaUtils.getInputRowParser(table, timestampSpec, dimensionsSpec);
    final Map<String, Object> inputParser = JSON_MAPPER.convertValue(inputRowParser, new TypeReference<Map<String, Object>>() {
    });
    final DataSchema dataSchema = new DataSchema(dataSourceName, inputParser, dimensionsAndAggregates.rhs, granularitySpec, null, DruidStorageHandlerUtils.JSON_MAPPER);
    IndexSpec indexSpec = DruidStorageHandlerUtils.getIndexSpec(getConf());
    KafkaSupervisorSpec spec = DruidKafkaUtils.createKafkaSupervisorSpec(table, kafkaTopic, kafkaServers, dataSchema, indexSpec);
    // Fetch existing Ingestion Spec from Druid, if any
    KafkaSupervisorSpec existingSpec = fetchKafkaIngestionSpec(table);
    String targetState = DruidStorageHandlerUtils.getTableProperty(table, DruidConstants.DRUID_KAFKA_INGESTION);
    if (targetState == null) {
        // Case when user has not specified any ingestion state in the current command
        // If a Kafka supervisor is already running, keep its last known state (START); otherwise STOP.
        targetState = existingSpec == null ? "STOP" : "START";
    }
    if ("STOP".equalsIgnoreCase(targetState)) {
        if (existingSpec != null) {
            stopKafkaIngestion(overlordAddress, dataSourceName);
        }
    } else if ("START".equalsIgnoreCase(targetState)) {
        if (existingSpec == null || !existingSpec.equals(spec)) {
            DruidKafkaUtils.updateKafkaIngestionSpec(overlordAddress, spec);
        }
    } else if ("RESET".equalsIgnoreCase(targetState)) {
        // Case when there are changes in multiple table properties.
        if (existingSpec != null && !existingSpec.equals(spec)) {
            DruidKafkaUtils.updateKafkaIngestionSpec(overlordAddress, spec);
        }
        resetKafkaIngestion(overlordAddress, dataSourceName);
    } else {
        throw new IllegalArgumentException(String.format("Invalid value for property [%s]; valid values are [START, STOP, RESET]", DruidConstants.DRUID_KAFKA_INGESTION));
    }
    // We do not want to keep state in two separate places so remove from hive table properties.
    table.getParameters().remove(DruidConstants.DRUID_KAFKA_INGESTION);
}
Also used : IndexSpec(org.apache.druid.segment.IndexSpec) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) Properties(java.util.Properties) TypeInfo(org.apache.hadoop.hive.serde2.typeinfo.TypeInfo) DataSchema(org.apache.druid.segment.indexing.DataSchema) GranularitySpec(org.apache.druid.segment.indexing.granularity.GranularitySpec) TimestampSpec(org.apache.druid.data.input.impl.TimestampSpec) DimensionsSpec(org.apache.druid.data.input.impl.DimensionsSpec) ArrayList(java.util.ArrayList) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) InputRowParser(org.apache.druid.data.input.impl.InputRowParser) Map(java.util.Map) KafkaSupervisorSpec(org.apache.hadoop.hive.druid.json.KafkaSupervisorSpec)
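
The convertValue call above is what turns the parser into a plain Map so it can be embedded in the DataSchema. A minimal sketch of that pattern outside the Hive handler (the mapper choice, column names, and values are illustrative assumptions, not taken from the handler code):

// Sketch only: serialize an InputRowParser to a Map via Jackson, as updateKafkaIngestion does above.
// Assumes org.apache.druid.jackson.DefaultObjectMapper and com.fasterxml.jackson.core.type.TypeReference are available.
ObjectMapper mapper = new DefaultObjectMapper();
InputRowParser sketchParser = new MapInputRowParser(
    new TimeAndDimsParseSpec(
        new TimestampSpec("__time", "auto", null),
        new DimensionsSpec(DimensionsSpec.getDefaultSchemas(ImmutableList.of("page", "user")), null, null)));
Map<String, Object> parserMap = mapper.convertValue(sketchParser, new TypeReference<Map<String, Object>>() {
});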

Example 7 with InputRowParser

use of org.apache.druid.data.input.impl.InputRowParser in project hive by apache.

the class DruidOutputFormat method getHiveRecordWriter.

@Override
public FileSinkOperator.RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath, Class<? extends Writable> valueClass, boolean isCompressed, Properties tableProperties, Progressable progress) throws IOException {
    final int targetNumShardsPerGranularity = Integer.parseUnsignedInt(tableProperties.getProperty(Constants.DRUID_TARGET_SHARDS_PER_GRANULARITY, "0"));
    final int maxPartitionSize = targetNumShardsPerGranularity > 0 ? -1 : HiveConf.getIntVar(jc, HiveConf.ConfVars.HIVE_DRUID_MAX_PARTITION_SIZE);
    // If datasource is in the table properties, it is an INSERT/INSERT OVERWRITE as the datasource
    // name was already persisted. Otherwise, it is a CT/CTAS and we need to get the name from the
    // job properties that are set by configureOutputJobProperties in the DruidStorageHandler
    final String dataSource = tableProperties.getProperty(Constants.DRUID_DATA_SOURCE) == null ? jc.get(Constants.DRUID_DATA_SOURCE) : tableProperties.getProperty(Constants.DRUID_DATA_SOURCE);
    final String segmentDirectory = jc.get(DruidConstants.DRUID_SEGMENT_INTERMEDIATE_DIRECTORY);
    final GranularitySpec granularitySpec = DruidStorageHandlerUtils.getGranularitySpec(jc, tableProperties);
    final String columnNameProperty = tableProperties.getProperty(serdeConstants.LIST_COLUMNS);
    final String columnTypeProperty = tableProperties.getProperty(serdeConstants.LIST_COLUMN_TYPES);
    if (StringUtils.isEmpty(columnNameProperty) || StringUtils.isEmpty(columnTypeProperty)) {
        throw new IllegalStateException(String.format("List of column names [%s] or column types [%s] is not present", columnNameProperty, columnTypeProperty));
    }
    ArrayList<String> columnNames = Lists.newArrayList(columnNameProperty.split(","));
    if (!columnNames.contains(DruidConstants.DEFAULT_TIMESTAMP_COLUMN)) {
        throw new IllegalStateException("Timestamp column ('" + DruidConstants.DEFAULT_TIMESTAMP_COLUMN + "') not specified in create table; list of columns is: " + tableProperties.getProperty(serdeConstants.LIST_COLUMNS));
    }
    ArrayList<TypeInfo> columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
    Pair<List<DimensionSchema>, AggregatorFactory[]> dimensionsAndAggregates = DruidStorageHandlerUtils.getDimensionsAndAggregates(columnNames, columnTypes);
    final InputRowParser inputRowParser = new MapInputRowParser(
        new TimeAndDimsParseSpec(
            new TimestampSpec(DruidConstants.DEFAULT_TIMESTAMP_COLUMN, "auto", null),
            new DimensionsSpec(
                dimensionsAndAggregates.lhs,
                Lists.newArrayList(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME, Constants.DRUID_SHARD_KEY_COL_NAME),
                null)));
    Map<String, Object> inputParser = DruidStorageHandlerUtils.JSON_MAPPER.convertValue(inputRowParser, new TypeReference<Map<String, Object>>() {
    });
    final DataSchema dataSchema = new DataSchema(Preconditions.checkNotNull(dataSource, "Data source name is null"), inputParser, dimensionsAndAggregates.rhs, granularitySpec, null, DruidStorageHandlerUtils.JSON_MAPPER);
    final String workingPath = jc.get(DruidConstants.DRUID_JOB_WORKING_DIRECTORY);
    final String version = jc.get(DruidConstants.DRUID_SEGMENT_VERSION);
    String basePersistDirectory = HiveConf.getVar(jc, HiveConf.ConfVars.HIVE_DRUID_BASE_PERSIST_DIRECTORY);
    if (Strings.isNullOrEmpty(basePersistDirectory)) {
        basePersistDirectory = System.getProperty("java.io.tmpdir");
    }
    Integer maxRowInMemory = HiveConf.getIntVar(jc, HiveConf.ConfVars.HIVE_DRUID_MAX_ROW_IN_MEMORY);
    IndexSpec indexSpec = DruidStorageHandlerUtils.getIndexSpec(jc);
    RealtimeTuningConfig realtimeTuningConfig =
        new RealtimeTuningConfig(maxRowInMemory, null, null, null,
            new File(basePersistDirectory, dataSource), new CustomVersioningPolicy(version),
            null, null, null, indexSpec, null, true, 0, 0, true, null, 0L, null, null);
    LOG.debug(String.format("running with Data schema [%s] ", dataSchema));
    return new DruidRecordWriter(dataSchema, realtimeTuningConfig, DruidStorageHandlerUtils.createSegmentPusherForDirectory(segmentDirectory, jc), maxPartitionSize, new Path(workingPath, SEGMENTS_DESCRIPTOR_DIR_NAME), finalOutPath.getFileSystem(jc));
}
Also used : IndexSpec(org.apache.druid.segment.IndexSpec) MapInputRowParser(org.apache.druid.data.input.impl.MapInputRowParser) TimeAndDimsParseSpec(org.apache.druid.data.input.impl.TimeAndDimsParseSpec) TimestampSpec(org.apache.druid.data.input.impl.TimestampSpec) ArrayList(java.util.ArrayList) List(java.util.List) Path(org.apache.hadoop.fs.Path) RealtimeTuningConfig(org.apache.druid.segment.indexing.RealtimeTuningConfig) TypeInfo(org.apache.hadoop.hive.serde2.typeinfo.TypeInfo) DataSchema(org.apache.druid.segment.indexing.DataSchema) GranularitySpec(org.apache.druid.segment.indexing.granularity.GranularitySpec) DimensionsSpec(org.apache.druid.data.input.impl.DimensionsSpec) MapInputRowParser(org.apache.druid.data.input.impl.MapInputRowParser) InputRowParser(org.apache.druid.data.input.impl.InputRowParser) CustomVersioningPolicy(org.apache.druid.segment.realtime.plumber.CustomVersioningPolicy) Map(java.util.Map) File(java.io.File)
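
For context, the MapInputRowParser built above consumes one column-name-to-value map per row. A small sketch of that behaviour (row values are illustrative; it assumes parseBatch is available on the InputRowParser in this Druid version):

// Sketch only: feed a hand-built row map through a MapInputRowParser like the one above.
InputRowParser<Map<String, Object>> rowParser = new MapInputRowParser(
    new TimeAndDimsParseSpec(
        new TimestampSpec(DruidConstants.DEFAULT_TIMESTAMP_COLUMN, "auto", null),
        new DimensionsSpec(null, null, null)));
InputRow parsed = rowParser.parseBatch(
    ImmutableMap.of(DruidConstants.DEFAULT_TIMESTAMP_COLUMN, "2019-01-01T00:00:00Z", "page", "index")).get(0);
// parsed.getTimestamp() now carries the __time value and "page" is exposed as a dimension.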

Example 8 with InputRowParser

use of org.apache.druid.data.input.impl.InputRowParser in project druid by druid-io.

the class IngestSegmentFirehoseFactoryTimelineTest method testSplit.

private void testSplit() throws Exception {
    Assert.assertTrue(factory.isSplittable());
    final int numSplits = factory.getNumSplits(null);
    // We set maxInputSegmentBytesPerSplit to 2 so each segment should become a split.
    Assert.assertEquals(segmentCount, numSplits);
    final List<InputSplit<List<WindowedSegmentId>>> splits = factory.getSplits(null).collect(Collectors.toList());
    Assert.assertEquals(numSplits, splits.size());
    int count = 0;
    long sum = 0;
    for (InputSplit<List<WindowedSegmentId>> split : splits) {
        final FiniteFirehoseFactory<InputRowParser, List<WindowedSegmentId>> splitFactory = factory.withSplit(split);
        try (final Firehose firehose = splitFactory.connect(ROW_PARSER, tmpDir)) {
            while (firehose.hasMore()) {
                final InputRow row = firehose.nextRow();
                count++;
                sum += row.getMetric(METRICS[0]).longValue();
            }
        }
    }
    Assert.assertEquals("count", expectedCount, count);
    Assert.assertEquals("sum", expectedSum, sum);
}
Also used : Firehose(org.apache.druid.data.input.Firehose) InputRow(org.apache.druid.data.input.InputRow) MapBasedInputRow(org.apache.druid.data.input.MapBasedInputRow) List(java.util.List) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) InputRowParser(org.apache.druid.data.input.impl.InputRowParser) MapInputRowParser(org.apache.druid.data.input.impl.MapInputRowParser) InputSplit(org.apache.druid.data.input.InputSplit)

Example 9 with InputRowParser

use of org.apache.druid.data.input.impl.InputRowParser in project druid by druid-io.

the class BloomFilterSqlAggregatorTest method createQuerySegmentWalker.

@Override
public SpecificSegmentsQuerySegmentWalker createQuerySegmentWalker() throws IOException {
    InputRowParser parser = new MapInputRowParser(
        new TimeAndDimsParseSpec(
            new TimestampSpec("t", "iso", null),
            new DimensionsSpec(
                ImmutableList.<DimensionSchema>builder()
                    .addAll(DimensionsSpec.getDefaultSchemas(ImmutableList.of("dim1", "dim2", "dim3")))
                    .add(new DoubleDimensionSchema("d1"))
                    .add(new FloatDimensionSchema("f1"))
                    .add(new LongDimensionSchema("l1"))
                    .build())));
    final QueryableIndex index = IndexBuilder.create()
        .tmpDir(temporaryFolder.newFolder())
        .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
        .schema(new IncrementalIndexSchema.Builder()
            .withMetrics(new CountAggregatorFactory("cnt"), new DoubleSumAggregatorFactory("m1", "m1"))
            .withDimensionsSpec(parser)
            .withRollup(false)
            .build())
        .rows(CalciteTests.ROWS1_WITH_NUMERIC_DIMS)
        .buildMMappedIndex();
    return new SpecificSegmentsQuerySegmentWalker(conglomerate).add(
        DataSegment.builder()
            .dataSource(DATA_SOURCE)
            .interval(index.getDataInterval())
            .version("1")
            .shardSpec(new LinearShardSpec(0))
            .size(0)
            .build(),
        index);
}
Also used : MapInputRowParser(org.apache.druid.data.input.impl.MapInputRowParser) DoubleSumAggregatorFactory(org.apache.druid.query.aggregation.DoubleSumAggregatorFactory) LongDimensionSchema(org.apache.druid.data.input.impl.LongDimensionSchema) LinearShardSpec(org.apache.druid.timeline.partition.LinearShardSpec) FloatDimensionSchema(org.apache.druid.data.input.impl.FloatDimensionSchema) DoubleDimensionSchema(org.apache.druid.data.input.impl.DoubleDimensionSchema) LongDimensionSchema(org.apache.druid.data.input.impl.LongDimensionSchema) FloatDimensionSchema(org.apache.druid.data.input.impl.FloatDimensionSchema) DimensionSchema(org.apache.druid.data.input.impl.DimensionSchema) TimeAndDimsParseSpec(org.apache.druid.data.input.impl.TimeAndDimsParseSpec) DoubleDimensionSchema(org.apache.druid.data.input.impl.DoubleDimensionSchema) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) SpecificSegmentsQuerySegmentWalker(org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker) QueryableIndex(org.apache.druid.segment.QueryableIndex) TimestampSpec(org.apache.druid.data.input.impl.TimestampSpec) DimensionsSpec(org.apache.druid.data.input.impl.DimensionsSpec) MapInputRowParser(org.apache.druid.data.input.impl.MapInputRowParser) InputRowParser(org.apache.druid.data.input.impl.InputRowParser) IncrementalIndexSchema(org.apache.druid.segment.incremental.IncrementalIndexSchema)
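
A hedged note on the builder call above (my reading, not part of the test): withDimensionsSpec(parser) only needs the DimensionsSpec, which every InputRowParser exposes through its ParseSpec, so the schema could equally be built from the spec directly:

// Sketch only: pull the DimensionsSpec out of the parser and hand it to the schema builder.
DimensionsSpec dimsFromParser = parser.getParseSpec().getDimensionsSpec();
IncrementalIndexSchema schema = new IncrementalIndexSchema.Builder()
    .withDimensionsSpec(dimsFromParser)
    .withMetrics(new CountAggregatorFactory("cnt"), new DoubleSumAggregatorFactory("m1", "m1"))
    .withRollup(false)
    .build();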

Example 10 with InputRowParser

use of org.apache.druid.data.input.impl.InputRowParser in project druid by druid-io.

the class HadoopyStringInputRowParserTest method testSerde.

@Test
public void testSerde() throws Exception {
    String jsonStr = "{" + "\"type\":\"hadoopyString\"," + "\"parseSpec\":{\"format\":\"json\",\"timestampSpec\":{\"column\":\"xXx\"},\"dimensionsSpec\":{}}" + "}";
    ObjectMapper jsonMapper = HadoopDruidIndexerConfig.JSON_MAPPER;
    InputRowParser parser = jsonMapper.readValue(jsonMapper.writeValueAsString(jsonMapper.readValue(jsonStr, InputRowParser.class)), InputRowParser.class);
    Assert.assertTrue(parser instanceof HadoopyStringInputRowParser);
    Assert.assertEquals("xXx", parser.getParseSpec().getTimestampSpec().getTimestampColumn());
}
Also used : InputRowParser(org.apache.druid.data.input.impl.InputRowParser) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test)
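
The same round-trip works for the other registered parser types. A hedged sketch for the plain "map" type (illustrative values; it assumes the "map" and "timeAndDims" subtypes are registered on the mapper in use, as they are on Druid's default mappers):

// Sketch only: deserialize a "map"-type parser spec and check its timestamp column.
String mapParserJson = "{" + "\"type\":\"map\"," + "\"parseSpec\":{\"format\":\"timeAndDims\",\"timestampSpec\":{\"column\":\"ts\"},\"dimensionsSpec\":{}}" + "}";
InputRowParser mapParser = jsonMapper.readValue(mapParserJson, InputRowParser.class);
Assert.assertTrue(mapParser instanceof MapInputRowParser);
Assert.assertEquals("ts", mapParser.getParseSpec().getTimestampSpec().getTimestampColumn());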

Aggregations

InputRowParser (org.apache.druid.data.input.impl.InputRowParser)11 ArrayList (java.util.ArrayList)8 List (java.util.List)7 MapInputRowParser (org.apache.druid.data.input.impl.MapInputRowParser)7 TimestampSpec (org.apache.druid.data.input.impl.TimestampSpec)7 ImmutableList (com.google.common.collect.ImmutableList)6 File (java.io.File)6 InputRow (org.apache.druid.data.input.InputRow)6 DimensionsSpec (org.apache.druid.data.input.impl.DimensionsSpec)6 Map (java.util.Map)5 TimeAndDimsParseSpec (org.apache.druid.data.input.impl.TimeAndDimsParseSpec)5 ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper)4 IndexSpec (org.apache.druid.segment.IndexSpec)4 Test (org.junit.Test)4 TypeReference (com.fasterxml.jackson.core.type.TypeReference)3 ImmutableMap (com.google.common.collect.ImmutableMap)3 IOException (java.io.IOException)3 Collectors (java.util.stream.Collectors)3 Configuration (org.apache.hadoop.conf.Configuration)3 Path (org.apache.hadoop.fs.Path)3