
Example 1 with TableBucketAssigner

A use of org.apache.flink.connector.file.table.FileSystemTableSink.TableBucketAssigner in the project flink by apache.

From the class HiveTableSink, the method createStreamSink:

private DataStreamSink<?> createStreamSink(
        ProviderContext providerContext,
        DataStream<RowData> dataStream,
        StorageDescriptor sd,
        Properties tableProps,
        HiveWriterFactory recordWriterFactory,
        OutputFileConfig.OutputFileConfigBuilder fileNamingBuilder,
        final int parallelism) {
    // Collect the catalog table's options into a Flink Configuration.
    org.apache.flink.configuration.Configuration conf = new org.apache.flink.configuration.Configuration();
    catalogTable.getOptions().forEach(conf::setString);
    // Streaming writes to a partitioned table require a partition commit policy.
    String commitPolicies = conf.getString(FileSystemConnectorOptions.SINK_PARTITION_COMMIT_POLICY_KIND);
    if (!getPartitionKeys().isEmpty() && StringUtils.isNullOrWhitespaceOnly(commitPolicies)) {
        throw new FlinkHiveException(
                String.format(
                        "Streaming write to partitioned hive table %s without providing a commit policy. "
                                + "Make sure to set a proper value for %s",
                        identifier, FileSystemConnectorOptions.SINK_PARTITION_COMMIT_POLICY_KIND.key()));
    }
    // The partition computer extracts partition values from each row; the
    // TableBucketAssigner maps them to a bucket id, i.e. a partition directory.
    HiveRowDataPartitionComputer partComputer =
            new HiveRowDataPartitionComputer(
                    hiveShim,
                    JobConfUtils.getDefaultPartitionName(jobConf),
                    tableSchema.getFieldNames(),
                    tableSchema.getFieldDataTypes(),
                    getPartitionKeyArray());
    TableBucketAssigner assigner = new TableBucketAssigner(partComputer);
    // Roll in-progress files by size, rollover interval, or inactivity.
    HiveRollingPolicy rollingPolicy =
            new HiveRollingPolicy(
                    conf.get(SINK_ROLLING_POLICY_FILE_SIZE).getBytes(),
                    conf.get(SINK_ROLLING_POLICY_ROLLOVER_INTERVAL).toMillis(),
                    conf.get(SINK_ROLLING_POLICY_INACTIVITY_INTERVAL).toMillis());
    boolean autoCompaction = conf.getBoolean(FileSystemConnectorOptions.AUTO_COMPACTION);
    if (autoCompaction) {
        // Files written before compaction get an "uncompacted" part prefix.
        fileNamingBuilder.withPartPrefix(convertToUncompacted(fileNamingBuilder.build().getPartPrefix()));
    }
    OutputFileConfig outputFileConfig = fileNamingBuilder.build();
    org.apache.flink.core.fs.Path path = new org.apache.flink.core.fs.Path(sd.getLocation());
    // Choose between the MapReduce RecordWriter and Flink's native bulk writers.
    BucketsBuilder<RowData, String, ? extends BucketsBuilder<RowData, ?, ?>> builder;
    if (flinkConf.get(HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_WRITER)) {
        builder = bucketsBuilderForMRWriter(recordWriterFactory, sd, assigner, rollingPolicy, outputFileConfig);
        LOG.info("Hive streaming sink: Use MapReduce RecordWriter writer.");
    } else {
        Optional<BulkWriter.Factory<RowData>> bulkFactory = createBulkWriterFactory(getPartitionKeyArray(), sd);
        if (bulkFactory.isPresent()) {
            builder = StreamingFileSink.forBulkFormat(
                            path, new FileSystemTableSink.ProjectionBulkFactory(bulkFactory.get(), partComputer))
                    .withBucketAssigner(assigner)
                    .withRollingPolicy(rollingPolicy)
                    .withOutputFileConfig(outputFileConfig);
            LOG.info("Hive streaming sink: Use native parquet&orc writer.");
        } else {
            builder = bucketsBuilderForMRWriter(recordWriterFactory, sd, assigner, rollingPolicy, outputFileConfig);
            LOG.info("Hive streaming sink: Use MapReduce RecordWriter writer because BulkWriter Factory not available.");
        }
    }
    long bucketCheckInterval = conf.get(SINK_ROLLING_POLICY_CHECK_INTERVAL).toMillis();
    DataStream<PartitionCommitInfo> writerStream;
    if (autoCompaction) {
        // Compaction targets COMPACTION_FILE_SIZE, falling back to the rolling file size.
        long compactionSize =
                conf.getOptional(FileSystemConnectorOptions.COMPACTION_FILE_SIZE)
                        .orElse(conf.get(SINK_ROLLING_POLICY_FILE_SIZE))
                        .getBytes();
        writerStream = StreamingSink.compactionWriter(
                providerContext, dataStream, bucketCheckInterval, builder, fsFactory(), path,
                createCompactReaderFactory(sd, tableProps), compactionSize, parallelism);
    } else {
        writerStream = StreamingSink.writer(
                providerContext, dataStream, bucketCheckInterval, builder, parallelism, getPartitionKeys(), conf);
    }
    // Attach the partition-commit sink that finalizes partitions downstream.
    return StreamingSink.sink(
            providerContext, writerStream, path, identifier, getPartitionKeys(), msFactory(), fsFactory(), conf);
}
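
The TableBucketAssigner is what ties the partition computer to the file sink: each row's bucket id is the generated partition path (for example dt=2021-08-01), so the sink writes every partition into its own sub-directory. As a rough, simplified sketch of that idea (not the Flink source; the class name PartitionDirBucketAssigner and the single string partition column are assumptions), a custom assigner against Flink's BucketAssigner interface could look like this:

import org.apache.flink.core.io.SimpleVersionedSerializer;
import org.apache.flink.streaming.api.functions.sink.filesystem.BucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.SimpleVersionedStringSerializer;
import org.apache.flink.table.data.RowData;

// Hypothetical, simplified stand-in for TableBucketAssigner: a single string
// partition column whose value becomes the partition sub-directory.
public class PartitionDirBucketAssigner implements BucketAssigner<RowData, String> {

    private final String partitionField; // e.g. "dt"
    private final int partitionIndex;    // position of the partition column in the row

    public PartitionDirBucketAssigner(String partitionField, int partitionIndex) {
        this.partitionField = partitionField;
        this.partitionIndex = partitionIndex;
    }

    @Override
    public String getBucketId(RowData element, Context context) {
        // The bucket id doubles as the directory name, e.g. "dt=2021-08-01".
        return partitionField + "=" + element.getString(partitionIndex).toString();
    }

    @Override
    public SimpleVersionedSerializer<String> getSerializer() {
        return SimpleVersionedStringSerializer.INSTANCE;
    }
}

The real TableBucketAssigner generalizes this by delegating to the supplied PartitionComputer (here HiveRowDataPartitionComputer), which supports multiple partition keys and Hive's default partition name for null values.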
Also used:

TableBucketAssigner (org.apache.flink.connector.file.table.FileSystemTableSink.TableBucketAssigner)
Configuration (org.apache.hadoop.conf.Configuration)
ThreadLocalClassLoaderConfiguration (org.apache.flink.orc.writer.ThreadLocalClassLoaderConfiguration)
HiveMetastoreClientFactory (org.apache.flink.table.catalog.hive.client.HiveMetastoreClientFactory)
HiveBulkWriterFactory (org.apache.flink.connectors.hive.write.HiveBulkWriterFactory)
LoggerFactory (org.slf4j.LoggerFactory)
HiveWriterFactory (org.apache.flink.connectors.hive.write.HiveWriterFactory)
HiveCompactReaderFactory (org.apache.flink.connectors.hive.read.HiveCompactReaderFactory)
HiveOutputFormatFactory (org.apache.flink.connectors.hive.write.HiveOutputFormatFactory)
OutputFileConfig (org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig)
RowData (org.apache.flink.table.data.RowData)
Path (org.apache.hadoop.fs.Path)
PartitionCommitInfo (org.apache.flink.connector.file.table.stream.PartitionCommitInfo)
FileSystemTableSink (org.apache.flink.connector.file.table.FileSystemTableSink)
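
All of the options read in createStreamSink come from the catalog table's properties, copied into conf at the top of the method. Below is a hedged sketch of the relevant keys with illustrative values; the key strings are the ones defined by FileSystemConnectorOptions in Flink's filesystem connector, while the values and the class name HiveSinkOptionsExample are made up for this example:

import java.util.HashMap;
import java.util.Map;

public class HiveSinkOptionsExample {
    public static void main(String[] args) {
        // Illustrative table options; the keys are the FileSystemConnectorOptions
        // keys the method above reads, the values are placeholders.
        Map<String, String> options = new HashMap<>();
        // Required for streaming writes to a partitioned table:
        options.put("sink.partition-commit.policy.kind", "metastore,success-file");
        // Rolling policy (the SINK_ROLLING_POLICY_* options):
        options.put("sink.rolling-policy.file-size", "128MB");
        options.put("sink.rolling-policy.rollover-interval", "30 min");
        options.put("sink.rolling-policy.inactivity-interval", "30 min");
        options.put("sink.rolling-policy.check-interval", "1 min");
        // Compaction: enables the compactionWriter branch; the target size
        // falls back to the rolling file size when unset.
        options.put("auto-compaction", "true");
        options.put("compaction.file-size", "128MB");
        options.forEach((k, v) -> System.out.println(k + " = " + v));
    }
}

If sink.partition-commit.policy.kind is left unset for a partitioned table, the method above fails fast with the FlinkHiveException shown in the code.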
