
Example 6 with SinkV2Provider

Use of org.apache.flink.table.connector.sink.SinkV2Provider in the apache/flink project.

From class ElasticsearchDynamicSinkFactoryBaseTest, method testSinkParallelism.

@Test
public void testSinkParallelism() {
    ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory();
    // Build a sink with an explicit "sink.parallelism" of 2.
    DynamicTableSink sink = sinkFactory.createDynamicTableSink(createPrefilledTestContext().withOption(SINK_PARALLELISM.key(), "2").build());
    assertThat(sink).isInstanceOf(ElasticsearchDynamicSink.class);
    ElasticsearchDynamicSink esSink = (ElasticsearchDynamicSink) sink;
    SinkV2Provider provider = (SinkV2Provider) esSink.getSinkRuntimeProvider(new ElasticsearchUtil.MockContext());
    // The configured parallelism must surface through the runtime provider.
    assertThat(provider.getParallelism().get()).isEqualTo(2);
}
Also used : SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) Test(org.junit.jupiter.api.Test)
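
These tests all unwrap a SinkV2Provider from DynamicTableSink#getSinkRuntimeProvider. For context, here is a minimal sketch of the connector-side pattern they exercise, assuming Flink's public table connector API; the class name and fields are hypothetical, while SinkV2Provider.of, createSink, and getParallelism are the real entry points.

import javax.annotation.Nullable;
import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkV2Provider;
import org.apache.flink.table.data.RowData;

// Hypothetical sink, for illustration only: shows how SinkV2Provider carries
// an optional parallelism from the connector to the planner.
public class ParallelismAwareSink implements DynamicTableSink {

    private final Sink<RowData> sink;
    // null means "inherit the parallelism of the upstream operator".
    @Nullable private final Integer parallelism;

    public ParallelismAwareSink(Sink<RowData> sink, @Nullable Integer parallelism) {
        this.sink = sink;
        this.parallelism = parallelism;
    }

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        return ChangelogMode.insertOnly();
    }

    @Override
    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
        // SinkV2Provider.of(sink, parallelism) is what makes
        // provider.getParallelism() return Optional.of(parallelism) in the tests.
        return parallelism == null
                ? SinkV2Provider.of(sink)
                : SinkV2Provider.of(sink, parallelism);
    }

    @Override
    public DynamicTableSink copy() {
        return new ParallelismAwareSink(sink, parallelism);
    }

    @Override
    public String asSummaryString() {
        return "ParallelismAwareSink";
    }
}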

Example 7 with SinkV2Provider

Use of org.apache.flink.table.connector.sink.SinkV2Provider in the apache/flink project.

From class UpsertKafkaDynamicTableFactoryTest, method testTableSinkWithParallelism.

@Test
public void testTableSinkWithParallelism() {
    // Override the base sink options with an explicit "sink.parallelism" of 100.
    final Map<String, String> modifiedOptions = getModifiedOptions(getFullSinkOptions(), options -> options.put("sink.parallelism", "100"));
    final DynamicTableSink actualSink = createTableSink(SINK_SCHEMA, modifiedOptions);
    final DynamicTableSink expectedSink = createExpectedSink(SINK_SCHEMA.toPhysicalRowDataType(), keyEncodingFormat, valueEncodingFormat, SINK_KEY_FIELDS, SINK_VALUE_FIELDS, null, SINK_TOPIC, UPSERT_KAFKA_SINK_PROPERTIES, DeliveryGuarantee.AT_LEAST_ONCE, SinkBufferFlushMode.DISABLED, 100);
    assertEquals(expectedSink, actualSink);
    // The configured parallelism must be exposed by the runtime provider.
    final DynamicTableSink.SinkRuntimeProvider provider = actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider, instanceOf(SinkV2Provider.class));
    final SinkV2Provider sinkProvider = (SinkV2Provider) provider;
    assertTrue(sinkProvider.getParallelism().isPresent());
    assertEquals(100, (long) sinkProvider.getParallelism().get());
}
Also used : SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) Test(org.junit.Test)
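
The "sink.parallelism" value these tests inject is read inside the factory through Flink's built-in FactoryUtil.SINK_PARALLELISM option (key "sink.parallelism"). A minimal sketch of that lookup follows; the helper class is hypothetical, while the option and the getOptional call are real Flink API.

import javax.annotation.Nullable;
import org.apache.flink.configuration.ReadableConfig;
import org.apache.flink.table.factories.FactoryUtil;

final class SinkParallelismOptions {

    // Returns null when "sink.parallelism" is unset; that null becomes an
    // empty Optional in SinkV2Provider#getParallelism.
    @Nullable
    static Integer readSinkParallelism(ReadableConfig options) {
        return options.getOptional(FactoryUtil.SINK_PARALLELISM).orElse(null);
    }
}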

Example 8 with SinkV2Provider

Use of org.apache.flink.table.connector.sink.SinkV2Provider in the apache/flink project.

From class KafkaDynamicTableFactoryTest, method testTableSinkWithParallelism.

@Test
public void testTableSinkWithParallelism() {
    // Override the base sink options with an explicit "sink.parallelism" of 100.
    final Map<String, String> modifiedOptions = getModifiedOptions(getBasicSinkOptions(), options -> options.put("sink.parallelism", "100"));
    KafkaDynamicSink actualSink = (KafkaDynamicSink) createTableSink(SCHEMA, modifiedOptions);
    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat = new EncodingFormatMock(",");
    final DynamicTableSink expectedSink = createExpectedSink(SCHEMA_DATA_TYPE, null, valueEncodingFormat, new int[0], new int[] { 0, 1, 2 }, null, TOPIC, KAFKA_SINK_PROPERTIES, new FlinkFixedPartitioner<>(), DeliveryGuarantee.EXACTLY_ONCE, 100, "kafka-sink");
    assertThat(actualSink).isEqualTo(expectedSink);
    // The configured parallelism must be exposed by the runtime provider.
    final DynamicTableSink.SinkRuntimeProvider provider = actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider).isInstanceOf(SinkV2Provider.class);
    final SinkV2Provider sinkProvider = (SinkV2Provider) provider;
    assertThat(sinkProvider.getParallelism()).hasValue(100);
}
Also used : SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) EncodingFormatMock(org.apache.flink.table.factories.TestFormatFactory.EncodingFormatMock) ConfluentRegistryAvroSerializationSchema(org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroSerializationSchema) AvroRowDataSerializationSchema(org.apache.flink.formats.avro.AvroRowDataSerializationSchema) SerializationSchema(org.apache.flink.api.common.serialization.SerializationSchema) DebeziumAvroSerializationSchema(org.apache.flink.formats.avro.registry.confluent.debezium.DebeziumAvroSerializationSchema) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
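
getModifiedOptions, used in Examples 7 through 9, is test infrastructure from the Flink repository. A plausible minimal equivalent looks like this; the actual implementation in the Flink tests may differ.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

final class TestOptionUtils {

    // Copies the base options and lets the caller override individual keys,
    // so each test can tweak a single setting such as "sink.parallelism".
    static Map<String, String> getModifiedOptions(
            Map<String, String> baseOptions, Consumer<Map<String, String>> modifier) {
        final Map<String, String> copy = new HashMap<>(baseOptions);
        modifier.accept(copy);
        return copy;
    }
}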

Example 9 with SinkV2Provider

Use of org.apache.flink.table.connector.sink.SinkV2Provider in the apache/flink project.

From class KafkaDynamicTableFactoryTest, method testTableSink.

@Test
public void testTableSink() {
    final Map<String, String> modifiedOptions = getModifiedOptions(getBasicSinkOptions(), options -> {
        options.put("sink.delivery-guarantee", "exactly-once");
        options.put("sink.transactional-id-prefix", "kafka-sink");
    });
    final DynamicTableSink actualSink = createTableSink(SCHEMA, modifiedOptions);
    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat = new EncodingFormatMock(",");
    final DynamicTableSink expectedSink = createExpectedSink(SCHEMA_DATA_TYPE, null, valueEncodingFormat, new int[0], new int[] { 0, 1, 2 }, null, TOPIC, KAFKA_SINK_PROPERTIES, new FlinkFixedPartitioner<>(), DeliveryGuarantee.EXACTLY_ONCE, null, "kafka-sink");
    assertThat(actualSink).isEqualTo(expectedSink);
    // Test kafka producer.
    final KafkaDynamicSink actualKafkaSink = (KafkaDynamicSink) actualSink;
    DynamicTableSink.SinkRuntimeProvider provider = actualKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider).isInstanceOf(SinkV2Provider.class);
    final SinkV2Provider sinkProvider = (SinkV2Provider) provider;
    final Sink<RowData> sink = sinkProvider.createSink();
    assertThat(sink).isInstanceOf(KafkaSink.class);
}
Also used : SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) EncodingFormatMock(org.apache.flink.table.factories.TestFormatFactory.EncodingFormatMock) ConfluentRegistryAvroSerializationSchema(org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroSerializationSchema) AvroRowDataSerializationSchema(org.apache.flink.formats.avro.AvroRowDataSerializationSchema) SerializationSchema(org.apache.flink.api.common.serialization.SerializationSchema) DebeziumAvroSerializationSchema(org.apache.flink.formats.avro.registry.confluent.debezium.DebeziumAvroSerializationSchema) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) RowData(org.apache.flink.table.data.RowData) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
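
The test above asserts that createSink() yields a KafkaSink configured for exactly-once delivery. For reference, here is a sketch of how such a sink is assembled with KafkaSink.builder(); the bootstrap servers, topic, and serializer are illustrative placeholders, not values taken from the test.

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;

final class KafkaSinkSketch {

    static KafkaSink<String> exactlyOnceSink() {
        return KafkaSink.<String>builder()
                .setBootstrapServers("localhost:9092") // placeholder broker address
                .setRecordSerializer(
                        KafkaRecordSerializationSchema.builder()
                                .setTopic("example-topic") // placeholder topic
                                .setValueSerializationSchema(new SimpleStringSchema())
                                .build())
                // Mirrors "sink.delivery-guarantee" = "exactly-once" from the test options.
                .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                // Mirrors "sink.transactional-id-prefix" = "kafka-sink".
                .setTransactionalIdPrefix("kafka-sink")
                .build();
    }
}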

Example 10 with SinkV2Provider

Use of org.apache.flink.table.connector.sink.SinkV2Provider in the apache/flink project.

From class UpsertKafkaDynamicTableFactoryTest, method testTableSink.

@Test
public void testTableSink() {
    // Construct table sink using options and table sink factory.
    final DynamicTableSink actualSink = createTableSink(SINK_SCHEMA, getFullSinkOptions());
    final DynamicTableSink expectedSink = createExpectedSink(SINK_SCHEMA.toPhysicalRowDataType(), keyEncodingFormat, valueEncodingFormat, SINK_KEY_FIELDS, SINK_VALUE_FIELDS, null, SINK_TOPIC, UPSERT_KAFKA_SINK_PROPERTIES, DeliveryGuarantee.AT_LEAST_ONCE, SinkBufferFlushMode.DISABLED, null);
    // The factory-produced sink must match the expected configuration.
    final KafkaDynamicSink actualUpsertKafkaSink = (KafkaDynamicSink) actualSink;
    assertEquals(expectedSink, actualSink);
    // Test kafka producer.
    DynamicTableSink.SinkRuntimeProvider provider = actualUpsertKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider, instanceOf(SinkV2Provider.class));
    final SinkV2Provider sinkProvider = (SinkV2Provider) provider;
    final Sink<RowData> sink = sinkProvider.createSink();
    assertThat(sink, instanceOf(KafkaSink.class));
}
Also used : SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) RowData(org.apache.flink.table.data.RowData) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) KafkaSink(org.apache.flink.connector.kafka.sink.KafkaSink) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) Test(org.junit.Test)
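
All five examples share the same unwrapping sequence: obtain the SinkRuntimeProvider, check that it is a SinkV2Provider, then inspect createSink() or getParallelism(). A hypothetical helper distilling that sequence, using only the public API shown above:

import java.util.Optional;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkV2Provider;
import org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext;

final class SinkV2ProviderAssertions {

    // Resolves the parallelism a DynamicTableSink advertises; empty when the
    // connector did not configure "sink.parallelism".
    static Optional<Integer> resolveParallelism(DynamicTableSink sink) {
        final DynamicTableSink.SinkRuntimeProvider provider =
                sink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
        if (!(provider instanceof SinkV2Provider)) {
            throw new IllegalStateException("Expected a SinkV2Provider, got: " + provider);
        }
        return ((SinkV2Provider) provider).getParallelism();
    }
}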

Aggregations

SinkV2Provider (org.apache.flink.table.connector.sink.SinkV2Provider): 13 usages
DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink): 12 usages
SinkRuntimeProviderContext (org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext): 11 usages
RowData (org.apache.flink.table.data.RowData): 10 usages
Test (org.junit.Test): 8 usages
ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema): 7 usages
Test (org.junit.jupiter.api.Test): 4 usages
SerializationSchema (org.apache.flink.api.common.serialization.SerializationSchema): 2 usages
AvroRowDataSerializationSchema (org.apache.flink.formats.avro.AvroRowDataSerializationSchema): 2 usages
ConfluentRegistryAvroSerializationSchema (org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroSerializationSchema): 2 usages
DebeziumAvroSerializationSchema (org.apache.flink.formats.avro.registry.confluent.debezium.DebeziumAvroSerializationSchema): 2 usages
EncodingFormatMock (org.apache.flink.table.factories.TestFormatFactory.EncodingFormatMock): 2 usages
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 2 usages
HashMap (java.util.HashMap): 1 usage
OutputFormat (org.apache.flink.api.common.io.OutputFormat): 1 usage
Transformation (org.apache.flink.api.dag.Transformation): 1 usage
ElasticsearchUtil (org.apache.flink.connector.elasticsearch.ElasticsearchUtil): 1 usage
KafkaSink (org.apache.flink.connector.kafka.sink.KafkaSink): 1 usage
DataStream (org.apache.flink.streaming.api.datastream.DataStream): 1 usage
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment): 1 usage