Example usage of org.apache.flink.table.connector.sink.SinkV2Provider in the Apache Flink project, taken from the testSinkParallelism method of the ElasticsearchDynamicSinkFactoryBaseTest class.
@Test
public void testSinkParallelism() {
    ElasticsearchDynamicSinkFactoryBase sinkFactory = createSinkFactory();
    // Create a sink with an explicit "sink.parallelism" of 2.
    DynamicTableSink sink =
            sinkFactory.createDynamicTableSink(
                    createPrefilledTestContext()
                            .withOption(SINK_PARALLELISM.key(), "2")
                            .build());
    assertThat(sink).isInstanceOf(ElasticsearchDynamicSink.class);
    ElasticsearchDynamicSink esSink = (ElasticsearchDynamicSink) sink;
    SinkV2Provider provider =
            (SinkV2Provider) esSink.getSinkRuntimeProvider(new ElasticsearchUtil.MockContext());
    // AssertJ convention is assertThat(actual).isEqualTo(expected); the original had the
    // operands reversed (assertThat(2).isEqualTo(actual)), which passes/fails the same
    // way but yields a misleading "expected vs actual" failure message. hasValue(2)
    // additionally asserts presence, avoiding an unchecked Optional.get().
    assertThat(provider.getParallelism()).hasValue(2);
}
Example usage of org.apache.flink.table.connector.sink.SinkV2Provider in the Apache Flink project, taken from the testTableSinkWithParallelism method of the UpsertKafkaDynamicTableFactoryTest class.
@Test
public void testTableSinkWithParallelism() {
    // Build the full sink options with an explicit parallelism of 100.
    final Map<String, String> sinkOptions =
            getModifiedOptions(
                    getFullSinkOptions(), options -> options.put("sink.parallelism", "100"));
    final DynamicTableSink actualSink = createTableSink(SINK_SCHEMA, sinkOptions);

    // The factory output must equal a sink constructed directly with parallelism 100.
    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SINK_SCHEMA.toPhysicalRowDataType(),
                    keyEncodingFormat,
                    valueEncodingFormat,
                    SINK_KEY_FIELDS,
                    SINK_VALUE_FIELDS,
                    null,
                    SINK_TOPIC,
                    UPSERT_KAFKA_SINK_PROPERTIES,
                    DeliveryGuarantee.AT_LEAST_ONCE,
                    SinkBufferFlushMode.DISABLED,
                    100);
    assertEquals(expectedSink, actualSink);

    // The runtime provider must be a SinkV2Provider that carries the configured parallelism.
    final DynamicTableSink.SinkRuntimeProvider runtimeProvider =
            actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(runtimeProvider, instanceOf(SinkV2Provider.class));
    final SinkV2Provider sinkV2Provider = (SinkV2Provider) runtimeProvider;
    assertTrue(sinkV2Provider.getParallelism().isPresent());
    assertEquals(100, (long) sinkV2Provider.getParallelism().get());
}
Example usage of org.apache.flink.table.connector.sink.SinkV2Provider in the Apache Flink project, taken from the testTableSinkWithParallelism method of the KafkaDynamicTableFactoryTest class.
@Test
public void testTableSinkWithParallelism() {
    // Build the basic sink options with an explicit parallelism of 100.
    final Map<String, String> modifiedOptions =
            getModifiedOptions(
                    getBasicSinkOptions(), options -> options.put("sink.parallelism", "100"));
    KafkaDynamicSink actualSink = (KafkaDynamicSink) createTableSink(SCHEMA, modifiedOptions);

    // The factory output must equal a sink constructed directly with parallelism 100.
    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat =
            new EncodingFormatMock(",");
    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SCHEMA_DATA_TYPE,
                    null,
                    valueEncodingFormat,
                    new int[0],
                    new int[] {0, 1, 2},
                    null,
                    TOPIC,
                    KAFKA_SINK_PROPERTIES,
                    new FlinkFixedPartitioner<>(),
                    DeliveryGuarantee.EXACTLY_ONCE,
                    100,
                    "kafka-sink");
    assertThat(actualSink).isEqualTo(expectedSink);

    // The runtime provider must be a SinkV2Provider carrying that parallelism.
    final DynamicTableSink.SinkRuntimeProvider provider =
            actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider).isInstanceOf(SinkV2Provider.class);
    final SinkV2Provider sinkProvider = (SinkV2Provider) provider;
    // AssertJ's OptionalAssert checks presence and value in one step, replacing the
    // isPresent()/get() pair (and its unchecked Optional access plus long cast).
    assertThat(sinkProvider.getParallelism()).hasValue(100);
}
Example usage of org.apache.flink.table.connector.sink.SinkV2Provider in the Apache Flink project, taken from the testTableSink method of the KafkaDynamicTableFactoryTest class.
@Test
public void testTableSink() {
    // Request exactly-once delivery with a transactional-id prefix.
    final Map<String, String> sinkOptions =
            getModifiedOptions(
                    getBasicSinkOptions(),
                    options -> {
                        options.put("sink.delivery-guarantee", "exactly-once");
                        options.put("sink.transactional-id-prefix", "kafka-sink");
                    });
    final DynamicTableSink actualSink = createTableSink(SCHEMA, sinkOptions);

    // The factory output must equal a sink constructed directly with the same settings.
    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat =
            new EncodingFormatMock(",");
    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SCHEMA_DATA_TYPE,
                    null,
                    valueEncodingFormat,
                    new int[0],
                    new int[] {0, 1, 2},
                    null,
                    TOPIC,
                    KAFKA_SINK_PROPERTIES,
                    new FlinkFixedPartitioner<>(),
                    DeliveryGuarantee.EXACTLY_ONCE,
                    null,
                    "kafka-sink");
    assertThat(actualSink).isEqualTo(expectedSink);

    // Test kafka producer: the runtime provider must create a KafkaSink.
    final KafkaDynamicSink actualKafkaSink = (KafkaDynamicSink) actualSink;
    DynamicTableSink.SinkRuntimeProvider runtimeProvider =
            actualKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(runtimeProvider).isInstanceOf(SinkV2Provider.class);
    final SinkV2Provider sinkV2Provider = (SinkV2Provider) runtimeProvider;
    final Sink<RowData> createdSink = sinkV2Provider.createSink();
    assertThat(createdSink).isInstanceOf(KafkaSink.class);
}
Example usage of org.apache.flink.table.connector.sink.SinkV2Provider in the Apache Flink project, taken from the testTableSink method of the UpsertKafkaDynamicTableFactoryTest class.
@Test
public void testTableSink() {
    // Construct table sink using options and table sink factory.
    final DynamicTableSink actualSink = createTableSink(SINK_SCHEMA, getFullSinkOptions());

    // Test sink format: factory output must equal a directly-constructed sink.
    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SINK_SCHEMA.toPhysicalRowDataType(),
                    keyEncodingFormat,
                    valueEncodingFormat,
                    SINK_KEY_FIELDS,
                    SINK_VALUE_FIELDS,
                    null,
                    SINK_TOPIC,
                    UPSERT_KAFKA_SINK_PROPERTIES,
                    DeliveryGuarantee.AT_LEAST_ONCE,
                    SinkBufferFlushMode.DISABLED,
                    null);
    final KafkaDynamicSink actualUpsertKafkaSink = (KafkaDynamicSink) actualSink;
    assertEquals(expectedSink, actualSink);

    // Test kafka producer: the runtime provider must create a KafkaSink.
    DynamicTableSink.SinkRuntimeProvider runtimeProvider =
            actualUpsertKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(runtimeProvider, instanceOf(SinkV2Provider.class));
    final SinkV2Provider sinkV2Provider = (SinkV2Provider) runtimeProvider;
    final Sink<RowData> createdSink = sinkV2Provider.createSink();
    assertThat(createdSink, instanceOf(KafkaSink.class));
}
Aggregations