Use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.
From class RawFormatFactoryTest, method createSerializationSchema:
private static SerializationSchema<RowData> createSerializationSchema(
        ResolvedSchema schema, Map<String, String> options) {
    final DynamicTableSink actualSink = createTableSink(schema, options);
    assertThat(actualSink, instanceOf(TestDynamicTableFactory.DynamicTableSinkMock.class));
    TestDynamicTableFactory.DynamicTableSinkMock sinkMock =
            (TestDynamicTableFactory.DynamicTableSinkMock) actualSink;
    return sinkMock.valueFormat.createRuntimeEncoder(
            new SinkRuntimeProviderContext(false), schema.toPhysicalRowDataType());
}
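SinkRuntimeProviderContext implements DynamicTableSink.Context, and its single constructor argument is the isBounded flag, so new SinkRuntimeProviderContext(false) emulates an unbounded (streaming) sink. A minimal sketch of calling this helper follows; the schema and option values are illustrative assumptions, not values taken from the original test class:

// Hypothetical invocation of the helper above (schema and options are made up).
ResolvedSchema schema = ResolvedSchema.of(Column.physical("field1", DataTypes.STRING()));
Map<String, String> options = new HashMap<>();
options.put("connector", TestDynamicTableFactory.IDENTIFIER);
options.put("format", "raw");
// The returned serializer was created against an unbounded context,
// because the helper passes isBounded = false.
SerializationSchema<RowData> serializer = createSerializationSchema(schema, options);
assertThat(serializer, notNullValue());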
Use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.
From class KafkaDynamicTableFactoryTest, method verifyEncoderSubject:
private void verifyEncoderSubject(
        Consumer<Map<String, String>> optionModifier,
        String expectedValueSubject,
        String expectedKeySubject) {
    Map<String, String> options = new HashMap<>();
    // Kafka specific options.
    options.put("connector", KafkaDynamicTableFactory.IDENTIFIER);
    options.put("topic", TOPIC);
    options.put("properties.group.id", "dummy");
    options.put("properties.bootstrap.servers", "dummy");
    optionModifier.accept(options);

    // Determine the configured key and value formats.
    final RowType rowType = (RowType) SCHEMA_DATA_TYPE.getLogicalType();
    final String valueFormat =
            options.getOrDefault(
                    FactoryUtil.FORMAT.key(),
                    options.get(KafkaConnectorOptions.VALUE_FORMAT.key()));
    final String keyFormat = options.get(KafkaConnectorOptions.KEY_FORMAT.key());

    KafkaDynamicSink sink = (KafkaDynamicSink) createTableSink(SCHEMA, options);

    // Only the Confluent-registry Avro formats carry a subject to verify.
    final Set<String> avroFormats = new HashSet<>();
    avroFormats.add(AVRO_CONFLUENT);
    avroFormats.add(DEBEZIUM_AVRO_CONFLUENT);

    if (avroFormats.contains(valueFormat)) {
        SerializationSchema<RowData> actualValueEncoder =
                sink.valueEncodingFormat.createRuntimeEncoder(
                        new SinkRuntimeProviderContext(false), SCHEMA_DATA_TYPE);
        final SerializationSchema<RowData> expectedValueEncoder;
        if (AVRO_CONFLUENT.equals(valueFormat)) {
            expectedValueEncoder = createConfluentAvroSerSchema(rowType, expectedValueSubject);
        } else {
            expectedValueEncoder = createDebeziumAvroSerSchema(rowType, expectedValueSubject);
        }
        assertThat(actualValueEncoder).isEqualTo(expectedValueEncoder);
    }

    if (avroFormats.contains(keyFormat)) {
        assert sink.keyEncodingFormat != null;
        SerializationSchema<RowData> actualKeyEncoder =
                sink.keyEncodingFormat.createRuntimeEncoder(
                        new SinkRuntimeProviderContext(false), SCHEMA_DATA_TYPE);
        final SerializationSchema<RowData> expectedKeyEncoder;
        if (AVRO_CONFLUENT.equals(keyFormat)) {
            expectedKeyEncoder = createConfluentAvroSerSchema(rowType, expectedKeySubject);
        } else {
            expectedKeyEncoder = createDebeziumAvroSerSchema(rowType, expectedKeySubject);
        }
        assertThat(actualKeyEncoder).isEqualTo(expectedKeyEncoder);
    }
}
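A hedged sketch of how such a helper might be invoked; the schema-registry option key, the URL, and the TopicNameStrategy-style subject names below are assumptions for illustration, not copied from the original test class:

// Illustrative caller: no key format is configured, so only the value-subject
// branch of verifyEncoderSubject runs and the key subject argument is unused.
@Test
public void testValueSubjectDerivedFromTopic() {
    verifyEncoderSubject(
            options -> {
                options.put("format", AVRO_CONFLUENT);
                options.put("avro-confluent.url", "http://localhost:8081");
            },
            TOPIC + "-value",
            "N/A");
}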
Use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.
From class KafkaDynamicTableFactoryTest, method testTableSink:
@Test
public void testTableSink() {
    final Map<String, String> modifiedOptions =
            getModifiedOptions(
                    getBasicSinkOptions(),
                    options -> {
                        options.put("sink.delivery-guarantee", "exactly-once");
                        options.put("sink.transactional-id-prefix", "kafka-sink");
                    });
    final DynamicTableSink actualSink = createTableSink(SCHEMA, modifiedOptions);

    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat =
            new EncodingFormatMock(",");
    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SCHEMA_DATA_TYPE,
                    null,
                    valueEncodingFormat,
                    new int[0],
                    new int[] {0, 1, 2},
                    null,
                    TOPIC,
                    KAFKA_SINK_PROPERTIES,
                    new FlinkFixedPartitioner<>(),
                    DeliveryGuarantee.EXACTLY_ONCE,
                    null,
                    "kafka-sink");
    assertThat(actualSink).isEqualTo(expectedSink);

    // Test kafka producer.
    final KafkaDynamicSink actualKafkaSink = (KafkaDynamicSink) actualSink;
    DynamicTableSink.SinkRuntimeProvider provider =
            actualKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider).isInstanceOf(SinkV2Provider.class);
    final SinkV2Provider sinkProvider = (SinkV2Provider) provider;
    final Sink<RowData> sinkFunction = sinkProvider.createSink();
    assertThat(sinkFunction).isInstanceOf(KafkaSink.class);
}
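SinkV2Provider also extends ParallelismProvider, so a companion check could assert that no explicit sink parallelism was configured. This extra assertion is an illustrative addition under that assumption, not part of the original test:

// No "sink.parallelism" option was set above, so the provider should
// report no explicit parallelism (Optional.empty()).
assertThat(sinkProvider.getParallelism()).isEmpty();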
Use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.
From class UpsertKafkaDynamicTableFactoryTest, method testTableSink:
@Test
public void testTableSink() {
    // Construct table sink using options and table sink factory.
    final DynamicTableSink actualSink = createTableSink(SINK_SCHEMA, getFullSinkOptions());
    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SINK_SCHEMA.toPhysicalRowDataType(),
                    keyEncodingFormat,
                    valueEncodingFormat,
                    SINK_KEY_FIELDS,
                    SINK_VALUE_FIELDS,
                    null,
                    SINK_TOPIC,
                    UPSERT_KAFKA_SINK_PROPERTIES,
                    DeliveryGuarantee.AT_LEAST_ONCE,
                    SinkBufferFlushMode.DISABLED,
                    null);

    // Test sink format.
    final KafkaDynamicSink actualUpsertKafkaSink = (KafkaDynamicSink) actualSink;
    assertEquals(expectedSink, actualSink);

    // Test kafka producer.
    DynamicTableSink.SinkRuntimeProvider provider =
            actualUpsertKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider, instanceOf(SinkV2Provider.class));
    final SinkV2Provider sinkFunctionProvider = (SinkV2Provider) provider;
    final Sink<RowData> sink = sinkFunctionProvider.createSink();
    assertThat(sink, instanceOf(KafkaSink.class));
}
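The boolean handed to SinkRuntimeProviderContext is the isBounded flag of DynamicTableSink.Context, so each of these tests simulates an unbounded stream. A minimal illustration of the flag itself (not taken from the test class):

// isBounded = false models a streaming query; true models a bounded (batch) one.
DynamicTableSink.Context streamingContext = new SinkRuntimeProviderContext(false);
DynamicTableSink.Context batchContext = new SinkRuntimeProviderContext(true);
assertThat(streamingContext.isBounded(), is(false));
assertThat(batchContext.isBounded(), is(true));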
Use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.
From class DebeziumJsonFormatFactoryTest, method testSeDeSchema:
@Test
public void testSeDeSchema() {
    // Source side: the factory should produce this deserialization schema.
    final DebeziumJsonDeserializationSchema expectedDeser =
            new DebeziumJsonDeserializationSchema(
                    PHYSICAL_DATA_TYPE,
                    Collections.emptyList(),
                    InternalTypeInfo.of(PHYSICAL_TYPE),
                    false,
                    true,
                    TimestampFormat.ISO_8601);
    final Map<String, String> options = getAllOptions();
    final DynamicTableSource actualSource = createTableSource(SCHEMA, options);
    assert actualSource instanceof TestDynamicTableFactory.DynamicTableSourceMock;
    TestDynamicTableFactory.DynamicTableSourceMock scanSourceMock =
            (TestDynamicTableFactory.DynamicTableSourceMock) actualSource;
    DeserializationSchema<RowData> actualDeser =
            scanSourceMock.valueFormat.createRuntimeDecoder(
                    ScanRuntimeProviderContext.INSTANCE, PHYSICAL_DATA_TYPE);
    assertEquals(expectedDeser, actualDeser);

    // Sink side: the factory should produce this serialization schema.
    final DebeziumJsonSerializationSchema expectedSer =
            new DebeziumJsonSerializationSchema(
                    (RowType) PHYSICAL_DATA_TYPE.getLogicalType(),
                    TimestampFormat.ISO_8601,
                    JsonFormatOptions.MapNullKeyMode.LITERAL,
                    "null",
                    true);
    final DynamicTableSink actualSink = createTableSink(SCHEMA, options);
    assert actualSink instanceof TestDynamicTableFactory.DynamicTableSinkMock;
    TestDynamicTableFactory.DynamicTableSinkMock sinkMock =
            (TestDynamicTableFactory.DynamicTableSinkMock) actualSink;
    SerializationSchema<RowData> actualSer =
            sinkMock.valueFormat.createRuntimeEncoder(
                    new SinkRuntimeProviderContext(false), PHYSICAL_DATA_TYPE);
    assertEquals(expectedSer, actualSer);
}
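Beyond the encoder itself, a test could also sanity-check the changelog mode the format advertises; debezium-json is a full changelog format, so its mode should include every row kind. This follow-up assertion is an illustrative addition, not part of the original test:

// Hypothetical extra check on the mock's value format.
ChangelogMode changelogMode = sinkMock.valueFormat.getChangelogMode();
assertTrue(changelogMode.contains(RowKind.INSERT));
assertTrue(changelogMode.contains(RowKind.UPDATE_BEFORE));
assertTrue(changelogMode.contains(RowKind.UPDATE_AFTER));
assertTrue(changelogMode.contains(RowKind.DELETE));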