Example 16 with SinkRuntimeProviderContext

use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.

the class RawFormatFactoryTest, method createSerializationSchema:

private static SerializationSchema<RowData> createSerializationSchema(ResolvedSchema schema, Map<String, String> options) {
    final DynamicTableSink actualSink = createTableSink(schema, options);
    assertThat(actualSink, instanceOf(TestDynamicTableFactory.DynamicTableSinkMock.class));
    TestDynamicTableFactory.DynamicTableSinkMock sinkMock = (TestDynamicTableFactory.DynamicTableSinkMock) actualSink;
    return sinkMock.valueFormat.createRuntimeEncoder(new SinkRuntimeProviderContext(false), schema.toPhysicalRowDataType());
}
Also used : SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) TestDynamicTableFactory(org.apache.flink.table.factories.TestDynamicTableFactory)
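
The createTableSink(schema, options) helper that these tests share is not part of the excerpt. A plausible reconstruction, assuming Flink's standard FactoryUtil discovery API (the object identifier default.default.t1 and the comment string are illustrative placeholders, not values taken from the real test harness):

import java.util.Collections;
import java.util.Map;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.catalog.ResolvedCatalogTable;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.factories.FactoryUtil;

final class FactoryMocksSketch {

    // Resolve schema + options into a catalog table and let FactoryUtil
    // discover the DynamicTableSinkFactory matching the 'connector' option.
    static DynamicTableSink createTableSink(ResolvedSchema schema, Map<String, String> options) {
        final ResolvedCatalogTable resolvedTable =
                new ResolvedCatalogTable(
                        CatalogTable.of(
                                Schema.newBuilder().fromResolvedSchema(schema).build(),
                                "test table", // placeholder comment
                                Collections.emptyList(),
                                options),
                        schema);
        return FactoryUtil.createTableSink(
                null, // no catalog involved
                ObjectIdentifier.of("default", "default", "t1"),
                resolvedTable,
                new Configuration(),
                Thread.currentThread().getContextClassLoader(),
                false); // not a temporary table
    }
}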

Example 17 with SinkRuntimeProviderContext

use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.

the class KafkaDynamicTableFactoryTest, method verifyEncoderSubject:

private void verifyEncoderSubject(Consumer<Map<String, String>> optionModifier, String expectedValueSubject, String expectedKeySubject) {
    Map<String, String> options = new HashMap<>();
    // Kafka specific options.
    options.put("connector", KafkaDynamicTableFactory.IDENTIFIER);
    options.put("topic", TOPIC);
    options.put("properties.group.id", "dummy");
    options.put("properties.bootstrap.servers", "dummy");
    optionModifier.accept(options);
    final RowType rowType = (RowType) SCHEMA_DATA_TYPE.getLogicalType();
    final String valueFormat = options.getOrDefault(FactoryUtil.FORMAT.key(), options.get(KafkaConnectorOptions.VALUE_FORMAT.key()));
    final String keyFormat = options.get(KafkaConnectorOptions.KEY_FORMAT.key());
    KafkaDynamicSink sink = (KafkaDynamicSink) createTableSink(SCHEMA, options);
    final Set<String> avroFormats = new HashSet<>();
    avroFormats.add(AVRO_CONFLUENT);
    avroFormats.add(DEBEZIUM_AVRO_CONFLUENT);
    if (avroFormats.contains(valueFormat)) {
        SerializationSchema<RowData> actualValueEncoder = sink.valueEncodingFormat.createRuntimeEncoder(new SinkRuntimeProviderContext(false), SCHEMA_DATA_TYPE);
        final SerializationSchema<RowData> expectedValueEncoder;
        if (AVRO_CONFLUENT.equals(valueFormat)) {
            expectedValueEncoder = createConfluentAvroSerSchema(rowType, expectedValueSubject);
        } else {
            expectedValueEncoder = createDebeziumAvroSerSchema(rowType, expectedValueSubject);
        }
        assertThat(actualValueEncoder).isEqualTo(expectedValueEncoder);
    }
    if (avroFormats.contains(keyFormat)) {
        assert sink.keyEncodingFormat != null;
        SerializationSchema<RowData> actualKeyEncoder = sink.keyEncodingFormat.createRuntimeEncoder(new SinkRuntimeProviderContext(false), SCHEMA_DATA_TYPE);
        final SerializationSchema<RowData> expectedKeyEncoder;
        if (AVRO_CONFLUENT.equals(keyFormat)) {
            expectedKeyEncoder = createConfluentAvroSerSchema(rowType, expectedKeySubject);
        } else {
            expectedKeyEncoder = createDebeziumAvroSerSchema(rowType, expectedKeySubject);
        }
        assertThat(actualKeyEncoder).isEqualTo(expectedKeyEncoder);
    }
}
Also used : SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) RowData(org.apache.flink.table.data.RowData) HashMap(java.util.HashMap) RowType(org.apache.flink.table.types.logical.RowType) HashSet(java.util.HashSet)
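
The createConfluentAvroSerSchema(...) helper asserted against above is not shown in this excerpt. One plausible shape for it, assuming Flink's Avro and Confluent registry format APIs; the registryUrl parameter is a stand-in for the registry constant the real test would use:

import org.apache.flink.formats.avro.AvroRowDataSerializationSchema;
import org.apache.flink.formats.avro.RowDataToAvroConverters;
import org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroSerializationSchema;
import org.apache.flink.formats.avro.typeutils.AvroSchemaConverter;
import org.apache.flink.table.types.logical.RowType;

final class AvroSerSchemaSketch {

    // Serialize RowData as Avro generic records whose schema is registered
    // under the given subject at a Confluent Schema Registry.
    static AvroRowDataSerializationSchema createConfluentAvroSerSchema(
            RowType rowType, String subject, String registryUrl) {
        return new AvroRowDataSerializationSchema(
                rowType,
                ConfluentRegistryAvroSerializationSchema.forGeneric(
                        subject, AvroSchemaConverter.convertToSchema(rowType), registryUrl),
                RowDataToAvroConverters.createConverter(rowType));
    }
}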

Example 18 with SinkRuntimeProviderContext

use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.

the class KafkaDynamicTableFactoryTest, method testTableSink:

@Test
public void testTableSink() {
    final Map<String, String> modifiedOptions = getModifiedOptions(getBasicSinkOptions(), options -> {
        options.put("sink.delivery-guarantee", "exactly-once");
        options.put("sink.transactional-id-prefix", "kafka-sink");
    });
    final DynamicTableSink actualSink = createTableSink(SCHEMA, modifiedOptions);
    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat = new EncodingFormatMock(",");
    final DynamicTableSink expectedSink = createExpectedSink(SCHEMA_DATA_TYPE, null, valueEncodingFormat, new int[0], new int[] { 0, 1, 2 }, null, TOPIC, KAFKA_SINK_PROPERTIES, new FlinkFixedPartitioner<>(), DeliveryGuarantee.EXACTLY_ONCE, null, "kafka-sink");
    assertThat(actualSink).isEqualTo(expectedSink);
    // Test kafka producer.
    final KafkaDynamicSink actualKafkaSink = (KafkaDynamicSink) actualSink;
    DynamicTableSink.SinkRuntimeProvider provider = actualKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider).isInstanceOf(SinkV2Provider.class);
    final SinkV2Provider sinkProvider = (SinkV2Provider) provider;
    final Sink<RowData> sinkFunction = sinkProvider.createSink();
    assertThat(sinkFunction).isInstanceOf(KafkaSink.class);
}
Also used : SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) EncodingFormatMock(org.apache.flink.table.factories.TestFormatFactory.EncodingFormatMock) ConfluentRegistryAvroSerializationSchema(org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroSerializationSchema) AvroRowDataSerializationSchema(org.apache.flink.formats.avro.AvroRowDataSerializationSchema) SerializationSchema(org.apache.flink.api.common.serialization.SerializationSchema) DebeziumAvroSerializationSchema(org.apache.flink.formats.avro.registry.confluent.debezium.DebeziumAvroSerializationSchema) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) RowData(org.apache.flink.table.data.RowData) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
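
The assertions above walk the provider chain: table options -> DynamicTableSink -> SinkRuntimeProvider -> Sink. A minimal sketch of the producing side, assuming the Sink V2 APIs these tests exercise; the wrapped sink would be a KafkaSink in the real connector, but here it is constructor-injected to keep the sketch self-contained:

import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkV2Provider;
import org.apache.flink.table.data.RowData;

// Minimal DynamicTableSink that hands the planner a Sink V2 implementation.
class SketchDynamicSink implements DynamicTableSink {

    private final Sink<RowData> sink; // e.g. a KafkaSink built elsewhere

    SketchDynamicSink(Sink<RowData> sink) {
        this.sink = sink;
    }

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        return ChangelogMode.insertOnly();
    }

    @Override
    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
        // context.isBounded() tells the sink whether the input is finite;
        // the tests above pass `false` to simulate a streaming query.
        return SinkV2Provider.of(sink);
    }

    @Override
    public DynamicTableSink copy() {
        return new SketchDynamicSink(sink);
    }

    @Override
    public String asSummaryString() {
        return "sketch-sink";
    }
}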

Example 19 with SinkRuntimeProviderContext

use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.

the class UpsertKafkaDynamicTableFactoryTest, method testTableSink:

@Test
public void testTableSink() {
    // Construct table sink using options and table sink factory.
    final DynamicTableSink actualSink = createTableSink(SINK_SCHEMA, getFullSinkOptions());
    final DynamicTableSink expectedSink = createExpectedSink(SINK_SCHEMA.toPhysicalRowDataType(), keyEncodingFormat, valueEncodingFormat, SINK_KEY_FIELDS, SINK_VALUE_FIELDS, null, SINK_TOPIC, UPSERT_KAFKA_SINK_PROPERTIES, DeliveryGuarantee.AT_LEAST_ONCE, SinkBufferFlushMode.DISABLED, null);
    // Test sink format.
    final KafkaDynamicSink actualUpsertKafkaSink = (KafkaDynamicSink) actualSink;
    assertEquals(expectedSink, actualSink);
    // Test kafka producer.
    DynamicTableSink.SinkRuntimeProvider provider = actualUpsertKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider, instanceOf(SinkV2Provider.class));
    final SinkV2Provider sinkFunctionProvider = (SinkV2Provider) provider;
    final Sink<RowData> sink = sinkFunctionProvider.createSink();
    assertThat(sink, instanceOf(KafkaSink.class));
}
Also used : SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) RowData(org.apache.flink.table.data.RowData) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) KafkaSink(org.apache.flink.connector.kafka.sink.KafkaSink) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) Test(org.junit.Test)
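
In both Kafka tests the provider is requested with new SinkRuntimeProviderContext(false); the constructor argument is the isBounded flag, so the tests simulate an unbounded (streaming) query. A minimal sketch of how that flag surfaces through the DynamicTableSink.Context interface:

import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext;

final class BoundednessSketch {
    public static void main(String[] args) {
        // SinkRuntimeProviderContext implements DynamicTableSink.Context.
        DynamicTableSink.Context streaming = new SinkRuntimeProviderContext(false);
        DynamicTableSink.Context batch = new SinkRuntimeProviderContext(true);
        System.out.println(streaming.isBounded()); // false
        System.out.println(batch.isBounded());     // true
    }
}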

Example 20 with SinkRuntimeProviderContext

use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.

the class DebeziumJsonFormatFactoryTest, method testSeDeSchema:

@Test
public void testSeDeSchema() {
    final DebeziumJsonDeserializationSchema expectedDeser = new DebeziumJsonDeserializationSchema(PHYSICAL_DATA_TYPE, Collections.emptyList(), InternalTypeInfo.of(PHYSICAL_TYPE), false, true, TimestampFormat.ISO_8601);
    final Map<String, String> options = getAllOptions();
    final DynamicTableSource actualSource = createTableSource(SCHEMA, options);
    assert actualSource instanceof TestDynamicTableFactory.DynamicTableSourceMock;
    TestDynamicTableFactory.DynamicTableSourceMock scanSourceMock = (TestDynamicTableFactory.DynamicTableSourceMock) actualSource;
    DeserializationSchema<RowData> actualDeser = scanSourceMock.valueFormat.createRuntimeDecoder(ScanRuntimeProviderContext.INSTANCE, PHYSICAL_DATA_TYPE);
    assertEquals(expectedDeser, actualDeser);
    final DebeziumJsonSerializationSchema expectedSer = new DebeziumJsonSerializationSchema((RowType) PHYSICAL_DATA_TYPE.getLogicalType(), TimestampFormat.ISO_8601, JsonFormatOptions.MapNullKeyMode.LITERAL, "null", true);
    final DynamicTableSink actualSink = createTableSink(SCHEMA, options);
    assert actualSink instanceof TestDynamicTableFactory.DynamicTableSinkMock;
    TestDynamicTableFactory.DynamicTableSinkMock sinkMock = (TestDynamicTableFactory.DynamicTableSinkMock) actualSink;
    SerializationSchema<RowData> actualSer = sinkMock.valueFormat.createRuntimeEncoder(new SinkRuntimeProviderContext(false), PHYSICAL_DATA_TYPE);
    assertEquals(expectedSer, actualSer);
}
Also used : SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) TestDynamicTableFactory(org.apache.flink.table.factories.TestDynamicTableFactory) RowData(org.apache.flink.table.data.RowData) DynamicTableSource(org.apache.flink.table.connector.source.DynamicTableSource) Test(org.junit.Test)
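
This last example pairs the two runtime contexts: the ScanRuntimeProviderContext.INSTANCE singleton for the decoder and a fresh SinkRuntimeProviderContext for the encoder. A minimal sketch of that symmetric pattern against the generic format interfaces, assuming the same Flink APIs the test uses:

import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.table.connector.format.DecodingFormat;
import org.apache.flink.table.connector.format.EncodingFormat;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext;
import org.apache.flink.table.runtime.connector.source.ScanRuntimeProviderContext;
import org.apache.flink.table.types.DataType;

// The two context classes are the test-side counterparts of what the planner
// passes at runtime: a singleton for scans, a boundedness flag for sinks.
final class FormatRoundTripSketch {

    static DeserializationSchema<RowData> decoder(
            DecodingFormat<DeserializationSchema<RowData>> format, DataType physicalType) {
        return format.createRuntimeDecoder(ScanRuntimeProviderContext.INSTANCE, physicalType);
    }

    static SerializationSchema<RowData> encoder(
            EncodingFormat<SerializationSchema<RowData>> format, DataType physicalType) {
        return format.createRuntimeEncoder(new SinkRuntimeProviderContext(false), physicalType);
    }
}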

Aggregations

SinkRuntimeProviderContext (org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) 27
DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink) 23
RowData (org.apache.flink.table.data.RowData) 19
Test (org.junit.Test) 17
ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema) 12
SinkV2Provider (org.apache.flink.table.connector.sink.SinkV2Provider) 11
TestDynamicTableFactory (org.apache.flink.table.factories.TestDynamicTableFactory) 7
HashMap (java.util.HashMap) 5
RowType (org.apache.flink.table.types.logical.RowType) 5
SinkFunctionProvider (org.apache.flink.table.connector.sink.SinkFunctionProvider) 4
SerializationSchema (org.apache.flink.api.common.serialization.SerializationSchema) 3
AvroRowDataSerializationSchema (org.apache.flink.formats.avro.AvroRowDataSerializationSchema) 3
ConfluentRegistryAvroSerializationSchema (org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroSerializationSchema) 3
DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource) 3
BinaryRowData (org.apache.flink.table.data.binary.BinaryRowData) 3
EncodingFormatMock (org.apache.flink.table.factories.TestFormatFactory.EncodingFormatMock) 3
Test (org.junit.jupiter.api.Test) 3
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest) 3
Transformation (org.apache.flink.api.dag.Transformation) 2
Configuration (org.apache.flink.configuration.Configuration) 2