Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.
Class KafkaConnectorOptionsUtil, method createKeyFormatProjection.
/**
* Creates an array of indices that determine which physical fields of the table schema to
* include in the key format and the order that those fields have in the key format.
*
* <p>See {@link KafkaConnectorOptions#KEY_FORMAT}, {@link KafkaConnectorOptions#KEY_FIELDS},
* and {@link KafkaConnectorOptions#KEY_FIELDS_PREFIX} for more information.
*/
public static int[] createKeyFormatProjection(
        ReadableConfig options, DataType physicalDataType) {
    final LogicalType physicalType = physicalDataType.getLogicalType();
    Preconditions.checkArgument(
            physicalType.is(LogicalTypeRoot.ROW), "Row data type expected.");
    final Optional<String> optionalKeyFormat = options.getOptional(KEY_FORMAT);
    final Optional<List<String>> optionalKeyFields = options.getOptional(KEY_FIELDS);
    if (!optionalKeyFormat.isPresent() && optionalKeyFields.isPresent()) {
        throw new ValidationException(
                String.format(
                        "The option '%s' can only be declared if a key format is defined using '%s'.",
                        KEY_FIELDS.key(), KEY_FORMAT.key()));
    } else if (optionalKeyFormat.isPresent()
            && (!optionalKeyFields.isPresent() || optionalKeyFields.get().size() == 0)) {
        throw new ValidationException(
                String.format(
                        "A key format '%s' requires the declaration of one or more of key fields using '%s'.",
                        KEY_FORMAT.key(), KEY_FIELDS.key()));
    }
    if (!optionalKeyFormat.isPresent()) {
        return new int[0];
    }
    final String keyPrefix = options.getOptional(KEY_FIELDS_PREFIX).orElse("");
    final List<String> keyFields = optionalKeyFields.get();
    final List<String> physicalFields = LogicalTypeChecks.getFieldNames(physicalType);
    return keyFields.stream()
            .mapToInt(
                    keyField -> {
                        final int pos = physicalFields.indexOf(keyField);
                        // check that field name exists
                        if (pos < 0) {
                            throw new ValidationException(
                                    String.format(
                                            "Could not find the field '%s' in the table schema for usage in the key format. "
                                                    + "A key field must be a regular, physical column. "
                                                    + "The following columns can be selected in the '%s' option:\n"
                                                    + "%s",
                                            keyField, KEY_FIELDS.key(), physicalFields));
                        }
                        // check that field name is prefixed correctly
                        if (!keyField.startsWith(keyPrefix)) {
                            throw new ValidationException(
                                    String.format(
                                            "All fields in '%s' must be prefixed with '%s' when option '%s' "
                                                    + "is set but field '%s' is not prefixed.",
                                            KEY_FIELDS.key(), keyPrefix, KEY_FIELDS_PREFIX.key(), keyField));
                        }
                        return pos;
                    })
            .toArray();
}
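For illustration, here is a minimal sketch of how a ReadableConfig carrying these key options behaves. The KEY_FORMAT and KEY_FIELDS constants below are hypothetical stand-ins for the real options in KafkaConnectorOptions; Configuration is used because it implements ReadableConfig and can be populated directly.

import java.util.Arrays;
import java.util.List;
import java.util.Optional;

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.ReadableConfig;

public class KeyFormatOptionsSketch {

    // Hypothetical stand-ins for KafkaConnectorOptions.KEY_FORMAT and KEY_FIELDS.
    static final ConfigOption<String> KEY_FORMAT =
            ConfigOptions.key("key.format").stringType().noDefaultValue();
    static final ConfigOption<List<String>> KEY_FIELDS =
            ConfigOptions.key("key.fields").stringType().asList().noDefaultValue();

    public static void main(String[] args) {
        Configuration config = new Configuration();
        config.set(KEY_FORMAT, "json");
        config.set(KEY_FIELDS, Arrays.asList("user_id", "item_id"));

        // ReadableConfig is the read-only view that createKeyFormatProjection receives.
        ReadableConfig options = config;
        Optional<String> keyFormat = options.getOptional(KEY_FORMAT);
        Optional<List<String>> keyFields = options.getOptional(KEY_FIELDS);

        // Mirrors the validation at the top of createKeyFormatProjection:
        // key fields without a key format (or the reverse) would be rejected there.
        System.out.println(keyFormat.isPresent() && keyFields.isPresent());
    }
}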
Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.
Class JdbcDynamicTableFactory, method createDynamicTableSink.
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    final FactoryUtil.TableFactoryHelper helper =
            FactoryUtil.createTableFactoryHelper(this, context);
    final ReadableConfig config = helper.getOptions();
    helper.validate();
    validateConfigOptions(config);
    validateDataTypeWithJdbcDialect(context.getPhysicalRowDataType(), config.get(URL));
    JdbcConnectorOptions jdbcOptions = getJdbcOptions(config);
    return new JdbcDynamicTableSink(
            jdbcOptions,
            getJdbcExecutionOptions(config),
            getJdbcDmlOptions(
                    jdbcOptions, context.getPhysicalRowDataType(), context.getPrimaryKeyIndexes()),
            context.getPhysicalRowDataType());
}
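The helper pattern above (createTableFactoryHelper, validate, getOptions) is the same for any DynamicTableFactory. Below is a minimal sketch of a hypothetical sink factory using that pattern; the "example" identifier, the URL and TABLE_NAME options, and the unsupported-operation stub are assumptions for illustration, not part of the JDBC connector.

import java.util.HashSet;
import java.util.Set;

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.factories.DynamicTableSinkFactory;
import org.apache.flink.table.factories.FactoryUtil;

/** Hypothetical factory illustrating the helper/ReadableConfig pattern shown above. */
public class ExampleSinkFactory implements DynamicTableSinkFactory {

    // Hypothetical options, analogous to the JDBC connector's URL and TABLE_NAME.
    static final ConfigOption<String> URL =
            ConfigOptions.key("url").stringType().noDefaultValue();
    static final ConfigOption<String> TABLE_NAME =
            ConfigOptions.key("table-name").stringType().noDefaultValue();

    @Override
    public String factoryIdentifier() {
        return "example";
    }

    @Override
    public Set<ConfigOption<?>> requiredOptions() {
        Set<ConfigOption<?>> options = new HashSet<>();
        options.add(URL);
        options.add(TABLE_NAME);
        return options;
    }

    @Override
    public Set<ConfigOption<?>> optionalOptions() {
        return new HashSet<>();
    }

    @Override
    public DynamicTableSink createDynamicTableSink(Context context) {
        // Same pattern as JdbcDynamicTableFactory: validate first, then read options.
        FactoryUtil.TableFactoryHelper helper =
                FactoryUtil.createTableFactoryHelper(this, context);
        helper.validate();
        ReadableConfig config = helper.getOptions();
        String url = config.get(URL);
        String tableName = config.get(TABLE_NAME);
        // Sketch only: a real factory would construct and return its DynamicTableSink here.
        throw new UnsupportedOperationException(
                "Sketch only: would build a sink for " + url + "/" + tableName);
    }
}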
Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.
Class UpsertKafkaDynamicTableFactory, method createDynamicTableSource.
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
    FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
    ReadableConfig tableOptions = helper.getOptions();
    DecodingFormat<DeserializationSchema<RowData>> keyDecodingFormat =
            helper.discoverDecodingFormat(DeserializationFormatFactory.class, KEY_FORMAT);
    DecodingFormat<DeserializationSchema<RowData>> valueDecodingFormat =
            helper.discoverDecodingFormat(DeserializationFormatFactory.class, VALUE_FORMAT);
    // Validate the option data type.
    helper.validateExcept(PROPERTIES_PREFIX);
    validateSource(
            tableOptions, keyDecodingFormat, valueDecodingFormat, context.getPrimaryKeyIndexes());
    Tuple2<int[], int[]> keyValueProjections = createKeyValueProjections(context.getCatalogTable());
    String keyPrefix = tableOptions.getOptional(KEY_FIELDS_PREFIX).orElse(null);
    Properties properties = getKafkaProperties(context.getCatalogTable().getOptions());
    // always use earliest to keep data integrity
    StartupMode earliest = StartupMode.EARLIEST;
    return new KafkaDynamicSource(
            context.getPhysicalRowDataType(),
            keyDecodingFormat,
            new DecodingFormatWrapper(valueDecodingFormat),
            keyValueProjections.f0, keyValueProjections.f1, keyPrefix,
            getSourceTopics(tableOptions), getSourceTopicPattern(tableOptions),
            properties, earliest, Collections.emptyMap(), 0, true,
            context.getObjectIdentifier().asSummaryString());
}
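A small sketch of the getOptional(...).orElse(null) pattern used above for the key prefix, plus reading a list-typed option; the TOPIC and KEY_FIELDS_PREFIX constants are hypothetical stand-ins for the connector's real options.

import java.util.Arrays;
import java.util.List;

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.ReadableConfig;

public class UpsertSourceOptionsSketch {

    // Hypothetical stand-ins for the connector's TOPIC and KEY_FIELDS_PREFIX options.
    static final ConfigOption<List<String>> TOPIC =
            ConfigOptions.key("topic").stringType().asList().noDefaultValue();
    static final ConfigOption<String> KEY_FIELDS_PREFIX =
            ConfigOptions.key("key.fields-prefix").stringType().noDefaultValue();

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(TOPIC, Arrays.asList("orders"));

        ReadableConfig tableOptions = conf;

        // As in createDynamicTableSource: an absent prefix becomes null,
        // which the source treats as "no prefix configured".
        String keyPrefix = tableOptions.getOptional(KEY_FIELDS_PREFIX).orElse(null);
        List<String> topics = tableOptions.get(TOPIC);

        System.out.println("prefix=" + keyPrefix + ", topics=" + topics);
    }
}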
Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.
Class UpsertKafkaDynamicTableFactory, method createDynamicTableSink.
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    FactoryUtil.TableFactoryHelper helper =
            FactoryUtil.createTableFactoryHelper(this, autoCompleteSchemaRegistrySubject(context));
    final ReadableConfig tableOptions = helper.getOptions();
    EncodingFormat<SerializationSchema<RowData>> keyEncodingFormat =
            helper.discoverEncodingFormat(SerializationFormatFactory.class, KEY_FORMAT);
    EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat =
            helper.discoverEncodingFormat(SerializationFormatFactory.class, VALUE_FORMAT);
    // Validate the option data type.
    helper.validateExcept(PROPERTIES_PREFIX);
    validateSink(
            tableOptions, keyEncodingFormat, valueEncodingFormat, context.getPrimaryKeyIndexes());
    Tuple2<int[], int[]> keyValueProjections = createKeyValueProjections(context.getCatalogTable());
    final String keyPrefix = tableOptions.getOptional(KEY_FIELDS_PREFIX).orElse(null);
    final Properties properties = getKafkaProperties(context.getCatalogTable().getOptions());
    Integer parallelism = tableOptions.get(SINK_PARALLELISM);
    int batchSize = tableOptions.get(SINK_BUFFER_FLUSH_MAX_ROWS);
    Duration batchInterval = tableOptions.get(SINK_BUFFER_FLUSH_INTERVAL);
    SinkBufferFlushMode flushMode = new SinkBufferFlushMode(batchSize, batchInterval.toMillis());
    // uses hash partitioning when a key is set, otherwise round-robin
    return new KafkaDynamicSink(
            context.getPhysicalRowDataType(),
            context.getPhysicalRowDataType(),
            keyEncodingFormat,
            new EncodingFormatWrapper(valueEncodingFormat),
            keyValueProjections.f0, keyValueProjections.f1, keyPrefix,
            tableOptions.get(TOPIC).get(0),
            properties, null, DeliveryGuarantee.AT_LEAST_ONCE, true,
            flushMode, parallelism, tableOptions.get(TRANSACTIONAL_ID_PREFIX));
}
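A sketch of how the buffer-flush options read above can combine into a flush decision; the option constants are hypothetical stand-ins, and the "enabled when both values are positive" rule is only an approximation of SinkBufferFlushMode's behaviour.

import java.time.Duration;

import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.ReadableConfig;

public class SinkBufferOptionsSketch {

    // Hypothetical stand-ins for the upsert-kafka buffering options read above.
    static final ConfigOption<Integer> SINK_BUFFER_FLUSH_MAX_ROWS =
            ConfigOptions.key("sink.buffer-flush.max-rows").intType().defaultValue(0);
    static final ConfigOption<Duration> SINK_BUFFER_FLUSH_INTERVAL =
            ConfigOptions.key("sink.buffer-flush.interval")
                    .durationType()
                    .defaultValue(Duration.ZERO);

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(SINK_BUFFER_FLUSH_MAX_ROWS, 1000);
        conf.set(SINK_BUFFER_FLUSH_INTERVAL, Duration.ofSeconds(1));

        ReadableConfig tableOptions = conf;

        // Mirrors how createDynamicTableSink derives its flush settings.
        int batchSize = tableOptions.get(SINK_BUFFER_FLUSH_MAX_ROWS);
        long intervalMillis = tableOptions.get(SINK_BUFFER_FLUSH_INTERVAL).toMillis();
        boolean bufferingEnabled = batchSize > 0 && intervalMillis > 0;

        System.out.println("buffering enabled: " + bufferingEnabled);
    }
}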
Use of org.apache.flink.configuration.ReadableConfig in project flink by apache.
Class HBase2DynamicTableFactory, method createDynamicTableSink.
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    TableFactoryHelper helper = createTableFactoryHelper(this, context);
    helper.validateExcept(PROPERTIES_PREFIX);
    final ReadableConfig tableOptions = helper.getOptions();
    validatePrimaryKey(context.getPhysicalRowDataType(), context.getPrimaryKeyIndexes());
    String tableName = tableOptions.get(TABLE_NAME);
    Configuration hbaseConf = getHBaseConfiguration(tableOptions);
    HBaseWriteOptions hBaseWriteOptions = getHBaseWriteOptions(tableOptions);
    String nullStringLiteral = tableOptions.get(NULL_STRING_LITERAL);
    HBaseTableSchema hbaseSchema = HBaseTableSchema.fromDataType(context.getPhysicalRowDataType());
    return new HBaseDynamicTableSink(
            tableName, hbaseSchema, hbaseConf, hBaseWriteOptions, nullStringLiteral);
}
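Both the Kafka and HBase factories also pass raw, prefixed table options through to the underlying client (getKafkaProperties, getHBaseConfiguration). The following hypothetical helper sketches that pass-through pattern; the method name and prefix constant are assumptions for illustration.

import java.util.Map;
import java.util.Properties;

/** Hypothetical helper showing the "properties.*" pass-through pattern used above. */
public class PrefixedOptionsSketch {

    static final String PROPERTIES_PREFIX = "properties.";

    // Copies every "properties.xyz" table option into a Properties object with the
    // prefix stripped; roughly what the Kafka and HBase helpers do with the catalog
    // table's raw options before handing them to the client.
    static Properties extractPrefixedOptions(Map<String, String> tableOptions) {
        Properties props = new Properties();
        tableOptions.forEach(
                (key, value) -> {
                    if (key.startsWith(PROPERTIES_PREFIX)) {
                        props.setProperty(key.substring(PROPERTIES_PREFIX.length()), value);
                    }
                });
        return props;
    }
}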