Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class DebeziumAvroFormatFactory, method createEncodingFormat:
@Override
public EncodingFormat<SerializationSchema<RowData>> createEncodingFormat(
        DynamicTableFactory.Context context, ReadableConfig formatOptions) {
    FactoryUtil.validateFactoryOptions(this, formatOptions);
    String schemaRegistryURL = formatOptions.get(URL);
    Optional<String> subject = formatOptions.getOptional(SUBJECT);
    Map<String, ?> optionalPropertiesMap = buildOptionalPropertiesMap(formatOptions);
    if (!subject.isPresent()) {
        throw new ValidationException(String.format(
                "Option '%s.%s' is required for serialization", IDENTIFIER, SUBJECT.key()));
    }
    return new EncodingFormat<SerializationSchema<RowData>>() {

        @Override
        public ChangelogMode getChangelogMode() {
            return ChangelogMode.newBuilder()
                    .addContainedKind(RowKind.INSERT)
                    .addContainedKind(RowKind.UPDATE_BEFORE)
                    .addContainedKind(RowKind.UPDATE_AFTER)
                    .addContainedKind(RowKind.DELETE)
                    .build();
        }

        @Override
        public SerializationSchema<RowData> createRuntimeEncoder(
                DynamicTableSink.Context context, DataType consumedDataType) {
            final RowType rowType = (RowType) consumedDataType.getLogicalType();
            return new DebeziumAvroSerializationSchema(
                    rowType, schemaRegistryURL, subject.get(), optionalPropertiesMap);
        }
    };
}
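This factory is usually reached through SQL DDL rather than called directly. A minimal sketch of a table definition that exercises this code path, assuming a Kafka broker and a Confluent Schema Registry running locally (the topic, subject, and addresses are illustrative placeholders):

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class DebeziumAvroSinkExample {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());
        // 'debezium-avro-confluent.subject' must be set when the table is used
        // as a sink; otherwise createEncodingFormat throws the
        // ValidationException shown above.
        tEnv.executeSql(
                "CREATE TABLE orders_sink (\n"
                        + "  order_id BIGINT,\n"
                        + "  amount DECIMAL(10, 2)\n"
                        + ") WITH (\n"
                        + "  'connector' = 'kafka',\n"
                        + "  'topic' = 'orders',\n"
                        + "  'properties.bootstrap.servers' = 'localhost:9092',\n"
                        + "  'format' = 'debezium-avro-confluent',\n"
                        + "  'debezium-avro-confluent.url' = 'http://localhost:8081',\n"
                        + "  'debezium-avro-confluent.subject' = 'orders-value'\n"
                        + ")");
    }
}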
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class DebeziumAvroFormatFactory, method createDecodingFormat:
@Override
public DecodingFormat<DeserializationSchema<RowData>> createDecodingFormat(
        DynamicTableFactory.Context context, ReadableConfig formatOptions) {
    FactoryUtil.validateFactoryOptions(this, formatOptions);
    String schemaRegistryURL = formatOptions.get(URL);
    Map<String, ?> optionalPropertiesMap = buildOptionalPropertiesMap(formatOptions);
    return new ProjectableDecodingFormat<DeserializationSchema<RowData>>() {

        @Override
        public DeserializationSchema<RowData> createRuntimeDecoder(
                DynamicTableSource.Context context,
                DataType producedDataType,
                int[][] projections) {
            producedDataType = Projection.of(projections).project(producedDataType);
            final RowType rowType = (RowType) producedDataType.getLogicalType();
            final TypeInformation<RowData> producedTypeInfo =
                    context.createTypeInformation(producedDataType);
            return new DebeziumAvroDeserializationSchema(
                    rowType, producedTypeInfo, schemaRegistryURL, optionalPropertiesMap);
        }

        @Override
        public ChangelogMode getChangelogMode() {
            return ChangelogMode.newBuilder()
                    .addContainedKind(RowKind.INSERT)
                    .addContainedKind(RowKind.UPDATE_BEFORE)
                    .addContainedKind(RowKind.UPDATE_AFTER)
                    .addContainedKind(RowKind.DELETE)
                    .build();
        }
    };
}
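The ProjectableDecodingFormat returned here lets the planner push a field projection down into the deserializer. A small sketch of what Projection.of(projections).project(...) does to a DataType, using a made-up schema:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.Projection;
import org.apache.flink.table.types.DataType;

public class ProjectionExample {
    public static void main(String[] args) {
        DataType produced = DataTypes.ROW(
                DataTypes.FIELD("id", DataTypes.BIGINT()),
                DataTypes.FIELD("name", DataTypes.STRING()),
                DataTypes.FIELD("ts", DataTypes.TIMESTAMP(3)));
        // Each inner array is a field path; here the planner kept
        // only the top-level fields 0 and 2.
        int[][] projections = {{0}, {2}};
        DataType projected = Projection.of(projections).project(produced);
        // Prints ROW<`id` BIGINT, `ts` TIMESTAMP(3)>: the runtime decoder
        // only has to produce the projected fields.
        System.out.println(projected);
    }
}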
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class KafkaDynamicTableFactory, method createDynamicTableSink:
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    final TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(
            this, autoCompleteSchemaRegistrySubject(context));
    final Optional<EncodingFormat<SerializationSchema<RowData>>> keyEncodingFormat =
            getKeyEncodingFormat(helper);
    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat =
            getValueEncodingFormat(helper);
    helper.validateExcept(PROPERTIES_PREFIX);
    final ReadableConfig tableOptions = helper.getOptions();
    final DeliveryGuarantee deliveryGuarantee = validateDeprecatedSemantic(tableOptions);
    validateTableSinkOptions(tableOptions);
    KafkaConnectorOptionsUtil.validateDeliveryGuarantee(tableOptions);
    validatePKConstraints(
            context.getObjectIdentifier(), context.getPrimaryKeyIndexes(),
            context.getCatalogTable().getOptions(), valueEncodingFormat);
    final DataType physicalDataType = context.getPhysicalRowDataType();
    final int[] keyProjection = createKeyFormatProjection(tableOptions, physicalDataType);
    final int[] valueProjection = createValueFormatProjection(tableOptions, physicalDataType);
    final String keyPrefix = tableOptions.getOptional(KEY_FIELDS_PREFIX).orElse(null);
    final Integer parallelism = tableOptions.getOptional(SINK_PARALLELISM).orElse(null);
    return createKafkaTableSink(
            physicalDataType,
            keyEncodingFormat.orElse(null),
            valueEncodingFormat,
            keyProjection,
            valueProjection,
            keyPrefix,
            tableOptions.get(TOPIC).get(0),
            getKafkaProperties(context.getCatalogTable().getOptions()),
            getFlinkKafkaPartitioner(tableOptions, context.getClassLoader()).orElse(null),
            deliveryGuarantee,
            parallelism,
            tableOptions.get(TRANSACTIONAL_ID_PREFIX));
}
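The key and value projections computed above come entirely from table options. A hedged sink example where the physical schema is split between a key format and a value format (all names are placeholders; the TableEnvironment setup is as in the earlier sketch):

tEnv.executeSql(
        "CREATE TABLE clicks_sink (\n"
                + "  k_user_id BIGINT,\n"
                + "  url STRING,\n"
                + "  clicked_at TIMESTAMP(3)\n"
                + ") WITH (\n"
                + "  'connector' = 'kafka',\n"
                + "  'topic' = 'clicks',\n"
                + "  'properties.bootstrap.servers' = 'localhost:9092',\n"
                + "  'key.format' = 'json',\n"
                + "  'key.fields' = 'k_user_id',\n"
                + "  'key.fields-prefix' = 'k_',\n"
                + "  'value.format' = 'json',\n"
                + "  'value.fields-include' = 'EXCEPT_KEY',\n"
                + "  'sink.parallelism' = '4'\n"
                + ")");

With these options, createKeyFormatProjection routes k_user_id to the key format (the k_ prefix is stripped before serialization), and createValueFormatProjection hands url and clicked_at to the value format.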
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class KafkaDynamicTableFactory, method createDynamicTableSource:
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
    final TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
    final Optional<DecodingFormat<DeserializationSchema<RowData>>> keyDecodingFormat =
            getKeyDecodingFormat(helper);
    final DecodingFormat<DeserializationSchema<RowData>> valueDecodingFormat =
            getValueDecodingFormat(helper);
    helper.validateExcept(PROPERTIES_PREFIX);
    final ReadableConfig tableOptions = helper.getOptions();
    validateTableSourceOptions(tableOptions);
    validatePKConstraints(
            context.getObjectIdentifier(), context.getPrimaryKeyIndexes(),
            context.getCatalogTable().getOptions(), valueDecodingFormat);
    final StartupOptions startupOptions = getStartupOptions(tableOptions);
    final Properties properties = getKafkaProperties(context.getCatalogTable().getOptions());
    // add topic-partition discovery
    final Optional<Long> partitionDiscoveryInterval =
            tableOptions.getOptional(SCAN_TOPIC_PARTITION_DISCOVERY).map(Duration::toMillis);
    properties.setProperty(
            KafkaSourceOptions.PARTITION_DISCOVERY_INTERVAL_MS.key(),
            partitionDiscoveryInterval.orElse(-1L).toString());
    final DataType physicalDataType = context.getPhysicalRowDataType();
    final int[] keyProjection = createKeyFormatProjection(tableOptions, physicalDataType);
    final int[] valueProjection = createValueFormatProjection(tableOptions, physicalDataType);
    final String keyPrefix = tableOptions.getOptional(KEY_FIELDS_PREFIX).orElse(null);
    return createKafkaTableSource(
            physicalDataType,
            keyDecodingFormat.orElse(null),
            valueDecodingFormat,
            keyProjection,
            valueProjection,
            keyPrefix,
            getSourceTopics(tableOptions),
            getSourceTopicPattern(tableOptions),
            properties,
            startupOptions.startupMode,
            startupOptions.specificOffsets,
            startupOptions.startupTimestampMillis,
            context.getObjectIdentifier().asSummaryString());
}
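On the source side, the partition-discovery wiring above is controlled by 'scan.topic-partition-discovery.interval'; when the option is absent, this code sets partition.discovery.interval.ms to -1, disabling discovery. A sketch of a source table that enables it along with a startup mode (values are illustrative; setup as in the earlier sketch):

tEnv.executeSql(
        "CREATE TABLE clicks_source (\n"
                + "  user_id BIGINT,\n"
                + "  url STRING\n"
                + ") WITH (\n"
                + "  'connector' = 'kafka',\n"
                + "  'topic' = 'clicks',\n"
                + "  'properties.bootstrap.servers' = 'localhost:9092',\n"
                + "  'properties.group.id' = 'clicks-demo',\n"
                + "  'format' = 'json',\n"
                + "  'scan.startup.mode' = 'earliest-offset',\n"
                + "  'scan.topic-partition-discovery.interval' = '60 s'\n"
                + ")");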
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class KafkaConnectorOptionsUtil, method createValueFormatProjection:
/**
* Creates an array of indices that determine which physical fields of the table schema to
* include in the value format.
*
* <p>See {@link KafkaConnectorOptions#VALUE_FORMAT}, {@link
* KafkaConnectorOptions#VALUE_FIELDS_INCLUDE}, and {@link
* KafkaConnectorOptions#KEY_FIELDS_PREFIX} for more information.
*/
public static int[] createValueFormatProjection(
        ReadableConfig options, DataType physicalDataType) {
    final LogicalType physicalType = physicalDataType.getLogicalType();
    Preconditions.checkArgument(physicalType.is(LogicalTypeRoot.ROW), "Row data type expected.");
    final int physicalFieldCount = LogicalTypeChecks.getFieldCount(physicalType);
    final IntStream physicalFields = IntStream.range(0, physicalFieldCount);
    final String keyPrefix = options.getOptional(KEY_FIELDS_PREFIX).orElse("");
    final ValueFieldsStrategy strategy = options.get(VALUE_FIELDS_INCLUDE);
    if (strategy == ValueFieldsStrategy.ALL) {
        if (keyPrefix.length() > 0) {
            throw new ValidationException(String.format(
                    "A key prefix is not allowed when option '%s' is set to '%s'. "
                            + "Set it to '%s' instead to avoid field overlaps.",
                    VALUE_FIELDS_INCLUDE.key(),
                    ValueFieldsStrategy.ALL,
                    ValueFieldsStrategy.EXCEPT_KEY));
        }
        return physicalFields.toArray();
    } else if (strategy == ValueFieldsStrategy.EXCEPT_KEY) {
        final int[] keyProjection = createKeyFormatProjection(options, physicalDataType);
        return physicalFields
                .filter(pos -> IntStream.of(keyProjection).noneMatch(k -> k == pos))
                .toArray();
    }
    throw new TableException("Unknown value fields strategy: " + strategy);
}
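A worked illustration of the two strategies on a hypothetical three-field schema. Calling the method directly requires the internal KafkaConnectorOptionsUtil class, whose package has moved between Flink releases, so treat the import and option wiring below as assumptions:

import java.util.Arrays;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;
// Assumed package; it differs between Flink versions:
import org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptionsUtil;

public class ValueProjectionDemo {
    public static void main(String[] args) {
        // Physical schema: ROW<id BIGINT, name STRING, ts TIMESTAMP(3)>
        DataType physical = DataTypes.ROW(
                DataTypes.FIELD("id", DataTypes.BIGINT()),
                DataTypes.FIELD("name", DataTypes.STRING()),
                DataTypes.FIELD("ts", DataTypes.TIMESTAMP(3)));
        Configuration options = new Configuration();
        // key.fields requires a key format to be declared as well.
        options.setString("key.format", "json");
        options.setString("key.fields", "id");
        options.setString("value.fields-include", "EXCEPT_KEY");
        // EXCEPT_KEY: the key field "id" (index 0) is filtered out.
        int[] valueProjection =
                KafkaConnectorOptionsUtil.createValueFormatProjection(options, physical);
        System.out.println(Arrays.toString(valueProjection)); // [1, 2]
        // With 'value.fields-include' = 'ALL' (and no key prefix),
        // the projection would be [0, 1, 2] instead.
    }
}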