
Example 21 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.

From the class KafkaConnectorOptionsUtil, method createKeyFormatProjection:

/**
 * Creates an array of indices that determine which physical fields of the table schema to
 * include in the key format and the order that those fields have in the key format.
 *
 * <p>See {@link KafkaConnectorOptions#KEY_FORMAT}, {@link KafkaConnectorOptions#KEY_FIELDS},
 * and {@link KafkaConnectorOptions#KEY_FIELDS_PREFIX} for more information.
 */
public static int[] createKeyFormatProjection(ReadableConfig options, DataType physicalDataType) {
    final LogicalType physicalType = physicalDataType.getLogicalType();
    Preconditions.checkArgument(physicalType.is(LogicalTypeRoot.ROW), "Row data type expected.");
    final Optional<String> optionalKeyFormat = options.getOptional(KEY_FORMAT);
    final Optional<List<String>> optionalKeyFields = options.getOptional(KEY_FIELDS);
    if (!optionalKeyFormat.isPresent() && optionalKeyFields.isPresent()) {
        throw new ValidationException(
                String.format(
                        "The option '%s' can only be declared if a key format is defined using '%s'.",
                        KEY_FIELDS.key(), KEY_FORMAT.key()));
    } else if (optionalKeyFormat.isPresent()
            && (!optionalKeyFields.isPresent() || optionalKeyFields.get().isEmpty())) {
        throw new ValidationException(
                String.format(
                        "A key format '%s' requires the declaration of one or more key fields using '%s'.",
                        KEY_FORMAT.key(), KEY_FIELDS.key()));
    }
    if (!optionalKeyFormat.isPresent()) {
        return new int[0];
    }
    final String keyPrefix = options.getOptional(KEY_FIELDS_PREFIX).orElse("");
    final List<String> keyFields = optionalKeyFields.get();
    final List<String> physicalFields = LogicalTypeChecks.getFieldNames(physicalType);
    return keyFields.stream().mapToInt(keyField -> {
        final int pos = physicalFields.indexOf(keyField);
        // check that field name exists
        if (pos < 0) {
            throw new ValidationException(String.format("Could not find the field '%s' in the table schema for usage in the key format. " + "A key field must be a regular, physical column. " + "The following columns can be selected in the '%s' option:\n" + "%s", keyField, KEY_FIELDS.key(), physicalFields));
        }
        // check that field name is prefixed correctly
        if (!keyField.startsWith(keyPrefix)) {
            throw new ValidationException(String.format("All fields in '%s' must be prefixed with '%s' when option '%s' " + "is set but field '%s' is not prefixed.", KEY_FIELDS.key(), keyPrefix, KEY_FIELDS_PREFIX.key(), keyField));
        }
        return pos;
    }).toArray();
}
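
A minimal usage sketch follows (not from the Flink sources): the option values, the 'k_' prefix, and the three-column schema are assumptions chosen to show how the projection is derived.

import org.apache.flink.configuration.Configuration;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;

// Hypothetical options; 'key.fields' is a semicolon-separated list in Flink's config syntax.
Configuration options = new Configuration();
options.setString("key.format", "json");
options.setString("key.fields", "k_id;k_name");
options.setString("key.fields.prefix", "k_");

// Hypothetical physical schema: two prefixed key columns and one value column.
DataType physicalDataType = DataTypes.ROW(
        DataTypes.FIELD("k_id", DataTypes.BIGINT()),
        DataTypes.FIELD("k_name", DataTypes.STRING()),
        DataTypes.FIELD("payload", DataTypes.STRING()));

// Expected result: {0, 1} -- the positions of 'k_id' and 'k_name' in the physical row.
int[] keyProjection = KafkaConnectorOptionsUtil.createKeyFormatProjection(options, physicalDataType);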

Example 22 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.

From the class JdbcDynamicTableFactory, method createDynamicTableSink:

@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
    final ReadableConfig config = helper.getOptions();
    helper.validate();
    validateConfigOptions(config);
    validateDataTypeWithJdbcDialect(context.getPhysicalRowDataType(), config.get(URL));
    JdbcConnectorOptions jdbcOptions = getJdbcOptions(config);
    return new JdbcDynamicTableSink(
            jdbcOptions,
            getJdbcExecutionOptions(config),
            getJdbcDmlOptions(
                    jdbcOptions, context.getPhysicalRowDataType(), context.getPrimaryKeyIndexes()),
            context.getPhysicalRowDataType());
}
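
For context, a hedged sketch of how this factory method gets exercised: registering a table with the 'jdbc' connector and planning an INSERT against it leads the planner to call createDynamicTableSink. The table name, URL, and schema below are illustrative only.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
// Illustrative DDL; the 'jdbc' connector routes to JdbcDynamicTableFactory.
tEnv.executeSql(
        "CREATE TABLE orders_sink ("
                + "  order_id BIGINT,"
                + "  amount DECIMAL(10, 2),"
                + "  PRIMARY KEY (order_id) NOT ENFORCED"
                + ") WITH ("
                + "  'connector' = 'jdbc',"
                + "  'url' = 'jdbc:mysql://localhost:3306/shop',"
                + "  'table-name' = 'orders'"
                + ")");
// An "INSERT INTO orders_sink ..." statement would then trigger createDynamicTableSink.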

Example 23 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.

From the class UpsertKafkaDynamicTableFactory, method createDynamicTableSource:

@Override
public DynamicTableSource createDynamicTableSource(Context context) {
    FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
    ReadableConfig tableOptions = helper.getOptions();
    DecodingFormat<DeserializationSchema<RowData>> keyDecodingFormat = helper.discoverDecodingFormat(DeserializationFormatFactory.class, KEY_FORMAT);
    DecodingFormat<DeserializationSchema<RowData>> valueDecodingFormat = helper.discoverDecodingFormat(DeserializationFormatFactory.class, VALUE_FORMAT);
    // Validate the option data type.
    helper.validateExcept(PROPERTIES_PREFIX);
    validateSource(tableOptions, keyDecodingFormat, valueDecodingFormat, context.getPrimaryKeyIndexes());
    Tuple2<int[], int[]> keyValueProjections = createKeyValueProjections(context.getCatalogTable());
    String keyPrefix = tableOptions.getOptional(KEY_FIELDS_PREFIX).orElse(null);
    Properties properties = getKafkaProperties(context.getCatalogTable().getOptions());
    // Always start from the earliest offsets so the upsert changelog stays complete.
    StartupMode earliest = StartupMode.EARLIEST;
    return new KafkaDynamicSource(
            context.getPhysicalRowDataType(),
            keyDecodingFormat,
            new DecodingFormatWrapper(valueDecodingFormat),
            keyValueProjections.f0,
            keyValueProjections.f1,
            keyPrefix,
            getSourceTopics(tableOptions),
            getSourceTopicPattern(tableOptions),
            properties,
            earliest,
            Collections.emptyMap(),
            0,
            true,
            context.getObjectIdentifier().asSummaryString());
}
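
For orientation, a hedged sketch of a table definition that reaches this method (reusing the tEnv from the previous sketch; topic, server address, and schema are illustrative). The PRIMARY KEY drives the key projection, and, as hardcoded above, reading always begins at the earliest offsets.

// Illustrative DDL; the 'upsert-kafka' connector routes to UpsertKafkaDynamicTableFactory.
tEnv.executeSql(
        "CREATE TABLE users ("
                + "  user_id STRING,"
                + "  name STRING,"
                + "  PRIMARY KEY (user_id) NOT ENFORCED"
                + ") WITH ("
                + "  'connector' = 'upsert-kafka',"
                + "  'topic' = 'users',"
                + "  'properties.bootstrap.servers' = 'localhost:9092',"
                + "  'key.format' = 'json',"
                + "  'value.format' = 'json'"
                + ")");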

Example 24 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.

From the class UpsertKafkaDynamicTableFactory, method createDynamicTableSink:

@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, autoCompleteSchemaRegistrySubject(context));
    final ReadableConfig tableOptions = helper.getOptions();
    EncodingFormat<SerializationSchema<RowData>> keyEncodingFormat = helper.discoverEncodingFormat(SerializationFormatFactory.class, KEY_FORMAT);
    EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat = helper.discoverEncodingFormat(SerializationFormatFactory.class, VALUE_FORMAT);
    // Validate the option data type.
    helper.validateExcept(PROPERTIES_PREFIX);
    validateSink(tableOptions, keyEncodingFormat, valueEncodingFormat, context.getPrimaryKeyIndexes());
    Tuple2<int[], int[]> keyValueProjections = createKeyValueProjections(context.getCatalogTable());
    final String keyPrefix = tableOptions.getOptional(KEY_FIELDS_PREFIX).orElse(null);
    final Properties properties = getKafkaProperties(context.getCatalogTable().getOptions());
    Integer parallelism = tableOptions.get(SINK_PARALLELISM);
    int batchSize = tableOptions.get(SINK_BUFFER_FLUSH_MAX_ROWS);
    Duration batchInterval = tableOptions.get(SINK_BUFFER_FLUSH_INTERVAL);
    SinkBufferFlushMode flushMode = new SinkBufferFlushMode(batchSize, batchInterval.toMillis());
    // Hash-partitions records when a key is defined; otherwise writes round-robin.
    return new KafkaDynamicSink(
            context.getPhysicalRowDataType(),
            context.getPhysicalRowDataType(),
            keyEncodingFormat,
            new EncodingFormatWrapper(valueEncodingFormat),
            keyValueProjections.f0,
            keyValueProjections.f1,
            keyPrefix,
            tableOptions.get(TOPIC).get(0),
            properties,
            null,
            DeliveryGuarantee.AT_LEAST_ONCE,
            true,
            flushMode,
            parallelism,
            tableOptions.get(TRANSACTIONAL_ID_PREFIX));
}
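
A hedged sketch of the sink-side buffering options read above (again reusing tEnv; all values are illustrative): 'sink.buffer-flush.max-rows' and 'sink.buffer-flush.interval' feed the SinkBufferFlushMode constructed in the method.

// Illustrative sink DDL; the flush options below map to batchSize and batchInterval above.
tEnv.executeSql(
        "CREATE TABLE users_sink ("
                + "  user_id STRING,"
                + "  name STRING,"
                + "  PRIMARY KEY (user_id) NOT ENFORCED"
                + ") WITH ("
                + "  'connector' = 'upsert-kafka',"
                + "  'topic' = 'users-compacted',"
                + "  'properties.bootstrap.servers' = 'localhost:9092',"
                + "  'key.format' = 'json',"
                + "  'value.format' = 'json',"
                + "  'sink.buffer-flush.max-rows' = '1000',"
                + "  'sink.buffer-flush.interval' = '1 s'"
                + ")");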

Example 25 with ReadableConfig

Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.

From the class HBase2DynamicTableFactory, method createDynamicTableSink:

@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    TableFactoryHelper helper = createTableFactoryHelper(this, context);
    helper.validateExcept(PROPERTIES_PREFIX);
    final ReadableConfig tableOptions = helper.getOptions();
    validatePrimaryKey(context.getPhysicalRowDataType(), context.getPrimaryKeyIndexes());
    String tableName = tableOptions.get(TABLE_NAME);
    Configuration hbaseConf = getHBaseConfiguration(tableOptions);
    HBaseWriteOptions hBaseWriteOptions = getHBaseWriteOptions(tableOptions);
    String nullStringLiteral = tableOptions.get(NULL_STRING_LITERAL);
    HBaseTableSchema hbaseSchema = HBaseTableSchema.fromDataType(context.getPhysicalRowDataType());
    return new HBaseDynamicTableSink(tableName, hbaseSchema, hbaseConf, hBaseWriteOptions, nullStringLiteral);
}
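
A hedged sketch of a table definition this factory would serve (reusing tEnv; table name and quorum address are illustrative): nested ROW columns map to HBase column families, and the remaining atomic column acts as the row key.

// Illustrative DDL; the 'hbase-2.2' connector routes to HBase2DynamicTableFactory.
tEnv.executeSql(
        "CREATE TABLE hbase_sink ("
                + "  rowkey STRING,"
                + "  cf ROW<name STRING, age INT>,"
                + "  PRIMARY KEY (rowkey) NOT ENFORCED"
                + ") WITH ("
                + "  'connector' = 'hbase-2.2',"
                + "  'table-name' = 'users',"
                + "  'zookeeper.quorum' = 'localhost:2181'"
                + ")");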
