Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
From the class UpsertKafkaDynamicTableFactoryTest, method verifyEncoderSubject:
private void verifyEncoderSubject(
        Consumer<Map<String, String>> optionModifier,
        String expectedValueSubject,
        String expectedKeySubject) {
    Map<String, String> options = new HashMap<>();
    // Kafka specific options.
    options.put("connector", UpsertKafkaDynamicTableFactory.IDENTIFIER);
    options.put("topic", SINK_TOPIC);
    options.put("properties.group.id", "dummy");
    options.put("properties.bootstrap.servers", "dummy");
    optionModifier.accept(options);

    final RowType rowType = (RowType) SINK_SCHEMA.toSinkRowDataType().getLogicalType();
    final String valueFormat =
            options.getOrDefault(
                    FactoryUtil.FORMAT.key(),
                    options.get(KafkaConnectorOptions.VALUE_FORMAT.key()));
    final String keyFormat = options.get(KafkaConnectorOptions.KEY_FORMAT.key());

    KafkaDynamicSink sink = (KafkaDynamicSink) createTableSink(SINK_SCHEMA, options);

    if (AVRO_CONFLUENT.equals(valueFormat)) {
        SerializationSchema<RowData> actualValueEncoder =
                sink.valueEncodingFormat.createRuntimeEncoder(
                        new SinkRuntimeProviderContext(false),
                        SINK_SCHEMA.toSinkRowDataType());
        assertEquals(
                createConfluentAvroSerSchema(rowType, expectedValueSubject),
                actualValueEncoder);
    }
    if (AVRO_CONFLUENT.equals(keyFormat)) {
        assert sink.keyEncodingFormat != null;
        SerializationSchema<RowData> actualKeyEncoder =
                sink.keyEncodingFormat.createRuntimeEncoder(
                        new SinkRuntimeProviderContext(false),
                        SINK_SCHEMA.toSinkRowDataType());
        assertEquals(
                createConfluentAvroSerSchema(rowType, expectedKeySubject),
                actualKeyEncoder);
    }
}
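The recurring pattern in this test, and in every snippet below, is the downcast from a ROW-typed DataType to RowType via getLogicalType(). A minimal, self-contained sketch of just that cast (the two-field schema here is illustrative, not taken from the test fixtures):

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.RowType;

public class RowTypeExtractionSketch {
    public static void main(String[] args) {
        // Any ROW-typed DataType carries a RowType as its logical type.
        DataType sinkRowDataType =
                DataTypes.ROW(
                        DataTypes.FIELD("id", DataTypes.BIGINT().notNull()),
                        DataTypes.FIELD("name", DataTypes.STRING()));
        RowType rowType = (RowType) sinkRowDataType.getLogicalType();
        System.out.println(rowType.getFieldNames()); // prints [id, name]
    }
}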
Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
From the class CanalJsonDeserializationSchema, method createJsonRowType:
// --------------------------------------------------------------------------------------------
private static RowType createJsonRowType(
        DataType physicalDataType, List<ReadableMetadata> readableMetadata) {
    // Canal JSON contains other information, e.g. "ts", "sql", but we don't need them
    DataType root =
            DataTypes.ROW(
                    DataTypes.FIELD("data", DataTypes.ARRAY(physicalDataType)),
                    DataTypes.FIELD("old", DataTypes.ARRAY(physicalDataType)),
                    DataTypes.FIELD("type", DataTypes.STRING()),
                    ReadableMetadata.DATABASE.requiredJsonField,
                    ReadableMetadata.TABLE.requiredJsonField);
    // append fields that are required for reading metadata in the root
    final List<DataTypes.Field> rootMetadataFields =
            readableMetadata.stream()
                    .filter(m -> m != ReadableMetadata.DATABASE && m != ReadableMetadata.TABLE)
                    .map(m -> m.requiredJsonField)
                    .distinct()
                    .collect(Collectors.toList());
    return (RowType) DataTypeUtils.appendRowFields(root, rootMetadataFields).getLogicalType();
}
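To make the resulting structure concrete, here is an illustrative sketch (toy one-column physical schema, metadata fields omitted) of how the Canal JSON root type nests the physical row type inside the "data" and "old" arrays:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.RowType;

public class CanalEnvelopeSketch {
    public static void main(String[] args) {
        DataType physical = DataTypes.ROW(DataTypes.FIELD("id", DataTypes.INT()));
        // Mirrors the fixed part of the envelope built in createJsonRowType.
        DataType envelope =
                DataTypes.ROW(
                        DataTypes.FIELD("data", DataTypes.ARRAY(physical)),
                        DataTypes.FIELD("old", DataTypes.ARRAY(physical)),
                        DataTypes.FIELD("type", DataTypes.STRING()));
        RowType rowType = (RowType) envelope.getLogicalType();
        System.out.println(rowType.getFieldNames()); // prints [data, old, type]
    }
}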
Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
From the class DebeziumJsonDeserializationSchema, method createJsonRowType:
// --------------------------------------------------------------------------------------------
private static RowType createJsonRowType(
        DataType physicalDataType,
        List<ReadableMetadata> readableMetadata,
        boolean schemaInclude) {
    DataType payload =
            DataTypes.ROW(
                    DataTypes.FIELD("before", physicalDataType),
                    DataTypes.FIELD("after", physicalDataType),
                    DataTypes.FIELD("op", DataTypes.STRING()));
    // append fields that are required for reading metadata in the payload
    final List<DataTypes.Field> payloadMetadataFields =
            readableMetadata.stream()
                    .filter(m -> m.isJsonPayload)
                    .map(m -> m.requiredJsonField)
                    .distinct()
                    .collect(Collectors.toList());
    payload = DataTypeUtils.appendRowFields(payload, payloadMetadataFields);

    DataType root = payload;
    if (schemaInclude) {
        // when Debezium Kafka connect enables "value.converter.schemas.enable",
        // the JSON will contain "schema" information and we need to extract data from
        // "payload".
        root = DataTypes.ROW(DataTypes.FIELD("payload", payload));
    }
    // append fields that are required for reading metadata in the root
    final List<DataTypes.Field> rootMetadataFields =
            readableMetadata.stream()
                    .filter(m -> !m.isJsonPayload)
                    .map(m -> m.requiredJsonField)
                    .distinct()
                    .collect(Collectors.toList());
    root = DataTypeUtils.appendRowFields(root, rootMetadataFields);
    return (RowType) root.getLogicalType();
}
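The schemaInclude branch is the subtle part: when the "schema" section is present, the change event sits one level deeper, under "payload". A sketch of just that wrapping, using the same toy schema as above:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.RowType;

public class DebeziumEnvelopeSketch {
    public static void main(String[] args) {
        DataType physical = DataTypes.ROW(DataTypes.FIELD("id", DataTypes.INT()));
        DataType payload =
                DataTypes.ROW(
                        DataTypes.FIELD("before", physical),
                        DataTypes.FIELD("after", physical),
                        DataTypes.FIELD("op", DataTypes.STRING()));
        boolean schemaInclude = true; // mirrors the method's flag
        DataType root =
                schemaInclude
                        ? DataTypes.ROW(DataTypes.FIELD("payload", payload))
                        : payload;
        RowType rowType = (RowType) root.getLogicalType();
        System.out.println(rowType.getFieldNames()); // prints [payload]
    }
}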
Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
From the class MaxwellJsonFormatFactory, method createEncodingFormat:
@Override
public EncodingFormat<SerializationSchema<RowData>> createEncodingFormat(
        DynamicTableFactory.Context context, ReadableConfig formatOptions) {
    FactoryUtil.validateFactoryOptions(this, formatOptions);
    validateEncodingFormatOptions(formatOptions);
    TimestampFormat timestampFormat = JsonFormatOptionsUtil.getTimestampFormat(formatOptions);
    JsonFormatOptions.MapNullKeyMode mapNullKeyMode =
            JsonFormatOptionsUtil.getMapNullKeyMode(formatOptions);
    String mapNullKeyLiteral = formatOptions.get(JSON_MAP_NULL_KEY_LITERAL);
    final boolean encodeDecimalAsPlainNumber =
            formatOptions.get(ENCODE_DECIMAL_AS_PLAIN_NUMBER);
    return new EncodingFormat<SerializationSchema<RowData>>() {

        @Override
        public ChangelogMode getChangelogMode() {
            return ChangelogMode.newBuilder()
                    .addContainedKind(RowKind.INSERT)
                    .addContainedKind(RowKind.UPDATE_BEFORE)
                    .addContainedKind(RowKind.UPDATE_AFTER)
                    .addContainedKind(RowKind.DELETE)
                    .build();
        }

        @Override
        public SerializationSchema<RowData> createRuntimeEncoder(
                DynamicTableSink.Context context, DataType consumedDataType) {
            final RowType rowType = (RowType) consumedDataType.getLogicalType();
            return new MaxwellJsonSerializationSchema(
                    rowType,
                    timestampFormat,
                    mapNullKeyMode,
                    mapNullKeyLiteral,
                    encodeDecimalAsPlainNumber);
        }
    };
}
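Note the contrast with the insert-only JSON format below: Maxwell JSON advertises all four row kinds because it encodes updates and deletes. A standalone sketch of the ChangelogMode builder used in getChangelogMode above:

import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.types.RowKind;

public class ChangelogModeSketch {
    public static void main(String[] args) {
        // Declares a changelog that may contain inserts, both update sides, and deletes.
        ChangelogMode allKinds =
                ChangelogMode.newBuilder()
                        .addContainedKind(RowKind.INSERT)
                        .addContainedKind(RowKind.UPDATE_BEFORE)
                        .addContainedKind(RowKind.UPDATE_AFTER)
                        .addContainedKind(RowKind.DELETE)
                        .build();
        System.out.println(allKinds.contains(RowKind.DELETE)); // prints true
    }
}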
Use of org.apache.flink.table.types.logical.RowType in project flink by apache.
From the class JsonFormatFactory, method createEncodingFormat:
@Override
public EncodingFormat<SerializationSchema<RowData>> createEncodingFormat(
        DynamicTableFactory.Context context, ReadableConfig formatOptions) {
    FactoryUtil.validateFactoryOptions(this, formatOptions);
    JsonFormatOptionsUtil.validateEncodingFormatOptions(formatOptions);
    TimestampFormat timestampOption = JsonFormatOptionsUtil.getTimestampFormat(formatOptions);
    JsonFormatOptions.MapNullKeyMode mapNullKeyMode =
            JsonFormatOptionsUtil.getMapNullKeyMode(formatOptions);
    String mapNullKeyLiteral = formatOptions.get(MAP_NULL_KEY_LITERAL);
    final boolean encodeDecimalAsPlainNumber =
            formatOptions.get(ENCODE_DECIMAL_AS_PLAIN_NUMBER);
    return new EncodingFormat<SerializationSchema<RowData>>() {

        @Override
        public SerializationSchema<RowData> createRuntimeEncoder(
                DynamicTableSink.Context context, DataType consumedDataType) {
            final RowType rowType = (RowType) consumedDataType.getLogicalType();
            return new JsonRowDataSerializationSchema(
                    rowType,
                    timestampOption,
                    mapNullKeyMode,
                    mapNullKeyLiteral,
                    encodeDecimalAsPlainNumber);
        }

        @Override
        public ChangelogMode getChangelogMode() {
            return ChangelogMode.insertOnly();
        }
    };
}
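To see how such an EncodingFormat is consumed, here is a hedged sketch of the wiring the test at the top performs: the caller hands the format a sink context and the consumed data type and gets back the runtime SerializationSchema. SinkRuntimeProviderContext is the internal context class used in that test; the buildEncoder helper and its one-field schema are hypothetical:

import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.connector.format.EncodingFormat;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext;
import org.apache.flink.table.types.DataType;

public class EncoderWiringSketch {
    // Hypothetical helper: turns an EncodingFormat into a ready-to-use encoder.
    static SerializationSchema<RowData> buildEncoder(
            EncodingFormat<SerializationSchema<RowData>> format) {
        DataType consumed = DataTypes.ROW(DataTypes.FIELD("name", DataTypes.STRING()));
        // isBounded = false, matching the streaming case in the test above.
        return format.createRuntimeEncoder(new SinkRuntimeProviderContext(false), consumed);
    }
}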