Example usage of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in the Apache Flink project: class UpsertKafkaDynamicTableFactoryTest, method testBufferedTableSink.
/**
 * Verifies that configuring {@code sink.buffer-flush.max-rows} and
 * {@code sink.buffer-flush.interval} yields a buffered upsert-kafka sink:
 * the planned sink must equal the expected buffered sink, be provided via a
 * {@code DataStreamSinkProvider}, and its writer operator must wrap a
 * {@code ReducingUpsertSink}.
 */
@SuppressWarnings("rawtypes")
@Test
public void testBufferedTableSink() {
    // Construct table sink using options and table sink factory.
    final DynamicTableSink actualSink =
            createTableSink(
                    SINK_SCHEMA,
                    getModifiedOptions(
                            getFullSinkOptions(),
                            options -> {
                                options.put("sink.buffer-flush.max-rows", "100");
                                options.put("sink.buffer-flush.interval", "1s");
                            }));
    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SINK_SCHEMA.toPhysicalRowDataType(),
                    keyEncodingFormat,
                    valueEncodingFormat,
                    SINK_KEY_FIELDS,
                    SINK_VALUE_FIELDS,
                    null,
                    SINK_TOPIC,
                    UPSERT_KAFKA_SINK_PROPERTIES,
                    DeliveryGuarantee.AT_LEAST_ONCE,
                    // "1s" interval is expected to be parsed as 1000 ms.
                    new SinkBufferFlushMode(100, 1000L),
                    null);
    // Test sink format.
    final KafkaDynamicSink actualUpsertKafkaSink = (KafkaDynamicSink) actualSink;
    assertEquals(expectedSink, actualSink);
    // Test kafka producer: a buffered sink is planned through a DataStreamSinkProvider.
    DynamicTableSink.SinkRuntimeProvider provider =
            actualUpsertKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider, instanceOf(DataStreamSinkProvider.class));
    final DataStreamSinkProvider sinkProvider = (DataStreamSinkProvider) provider;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    // Materialize the pipeline so the stream graph contains the sink operators.
    sinkProvider.consumeDataStream(n -> Optional.empty(), env.fromElements(new BinaryRowData(1)));
    final StreamOperatorFactory<?> sinkOperatorFactory =
            env.getStreamGraph().getStreamNodes().stream()
                    .filter(n -> n.getOperatorName().contains("Writer"))
                    .findFirst()
                    .orElseThrow(
                            () ->
                                    new RuntimeException(
                                            // BUGFIX: the message previously said "Sink"
                                            // although the filter looks for "Writer".
                                            "Expected operator with name Writer in stream graph."))
                    .getOperatorFactory();
    assertThat(sinkOperatorFactory, instanceOf(SinkWriterOperatorFactory.class));
    org.apache.flink.api.connector.sink2.Sink sink =
            ((SinkWriterOperatorFactory) sinkOperatorFactory).getSink();
    assertThat(sink, instanceOf(ReducingUpsertSink.class));
}
Example usage of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in the Apache Flink project: class UpsertKafkaDynamicTableFactoryTest, method testTableSinkWithParallelism.
/**
 * Verifies that an explicit {@code sink.parallelism} option is reflected both in the
 * created upsert-kafka sink and in the parallelism exposed by its runtime provider.
 */
@Test
public void testTableSinkWithParallelism() {
    // Start from the full sink options and override only the parallelism.
    final Map<String, String> options =
            getModifiedOptions(
                    getFullSinkOptions(), opts -> opts.put("sink.parallelism", "100"));
    final DynamicTableSink actualSink = createTableSink(SINK_SCHEMA, options);

    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SINK_SCHEMA.toPhysicalRowDataType(),
                    keyEncodingFormat,
                    valueEncodingFormat,
                    SINK_KEY_FIELDS,
                    SINK_VALUE_FIELDS,
                    null,
                    SINK_TOPIC,
                    UPSERT_KAFKA_SINK_PROPERTIES,
                    DeliveryGuarantee.AT_LEAST_ONCE,
                    SinkBufferFlushMode.DISABLED,
                    100);
    assertEquals(expectedSink, actualSink);

    final DynamicTableSink.SinkRuntimeProvider runtimeProvider =
            actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(runtimeProvider, instanceOf(SinkV2Provider.class));
    final SinkV2Provider v2Provider = (SinkV2Provider) runtimeProvider;
    // The configured parallelism must be propagated to the runtime provider.
    assertTrue(v2Provider.getParallelism().isPresent());
    assertEquals(100, (long) v2Provider.getParallelism().get());
}
Example usage of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in the Apache Flink project: class KafkaDynamicTableFactoryTest, method testTableSinkWithParallelism.
/**
 * Verifies that an explicit {@code sink.parallelism} option is honored by the Kafka
 * table sink and surfaced through the {@code SinkV2Provider}.
 */
@Test
public void testTableSinkWithParallelism() {
    // Basic sink options with the parallelism overridden to 100.
    final Map<String, String> options =
            getModifiedOptions(
                    getBasicSinkOptions(), opts -> opts.put("sink.parallelism", "100"));
    KafkaDynamicSink actualSink = (KafkaDynamicSink) createTableSink(SCHEMA, options);

    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat =
            new EncodingFormatMock(",");
    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SCHEMA_DATA_TYPE,
                    null,
                    valueEncodingFormat,
                    new int[0],
                    new int[] {0, 1, 2},
                    null,
                    TOPIC,
                    KAFKA_SINK_PROPERTIES,
                    new FlinkFixedPartitioner<>(),
                    DeliveryGuarantee.EXACTLY_ONCE,
                    100,
                    "kafka-sink");
    assertThat(actualSink).isEqualTo(expectedSink);

    final DynamicTableSink.SinkRuntimeProvider runtimeProvider =
            actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(runtimeProvider).isInstanceOf(SinkV2Provider.class);
    final SinkV2Provider v2Provider = (SinkV2Provider) runtimeProvider;
    // The runtime provider must carry the configured parallelism.
    assertThat(v2Provider.getParallelism().isPresent()).isTrue();
    assertThat((long) v2Provider.getParallelism().get()).isEqualTo(100);
}
Example usage of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in the Apache Flink project: class KafkaDynamicTableFactoryTest, method testTableSinkWithKeyValue.
/**
 * Verifies that a Kafka table sink configured with key/value formats and exactly-once
 * delivery equals the expected sink built with matching mock encoding formats.
 */
@Test
public void testTableSinkWithKeyValue() {
    // Key/value options extended with exactly-once delivery settings.
    final Map<String, String> options =
            getModifiedOptions(
                    getKeyValueOptions(),
                    opts -> {
                        opts.put("sink.delivery-guarantee", "exactly-once");
                        opts.put("sink.transactional-id-prefix", "kafka-sink");
                    });
    final DynamicTableSink actualSink = createTableSink(SCHEMA, options);
    final KafkaDynamicSink actualKafkaSink = (KafkaDynamicSink) actualSink;
    // initialize stateful testing formats
    actualKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));

    // Build the expected mock formats with the data types the factory should have derived.
    final EncodingFormatMock keyEncodingFormat = new EncodingFormatMock("#");
    keyEncodingFormat.consumedDataType =
            DataTypes.ROW(DataTypes.FIELD(NAME, DataTypes.STRING().notNull())).notNull();
    final EncodingFormatMock valueEncodingFormat = new EncodingFormatMock("|");
    valueEncodingFormat.consumedDataType =
            DataTypes.ROW(
                            DataTypes.FIELD(COUNT, DataTypes.DECIMAL(38, 18)),
                            DataTypes.FIELD(TIME, DataTypes.TIMESTAMP(3)))
                    .notNull();

    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SCHEMA_DATA_TYPE,
                    keyEncodingFormat,
                    valueEncodingFormat,
                    new int[] {0},
                    new int[] {1, 2},
                    null,
                    TOPIC,
                    KAFKA_FINAL_SINK_PROPERTIES,
                    new FlinkFixedPartitioner<>(),
                    DeliveryGuarantee.EXACTLY_ONCE,
                    null,
                    "kafka-sink");
    assertThat(actualSink).isEqualTo(expectedSink);
}
Example usage of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in the Apache Flink project: class JsonFormatFactoryTest, method testSchemaSerializationSchema.
/**
 * Asserts that the JSON serialization schema created from the given table options
 * equals the expected {@code JsonRowDataSerializationSchema} configuration
 * (ISO-8601 timestamps, literal null map keys, number-encoded decimals).
 */
private void testSchemaSerializationSchema(Map<String, String> options) {
    final JsonRowDataSerializationSchema expectedSchema =
            new JsonRowDataSerializationSchema(
                    PHYSICAL_TYPE,
                    TimestampFormat.ISO_8601,
                    JsonFormatOptions.MapNullKeyMode.LITERAL,
                    "null",
                    true);
    final SerializationSchema<RowData> actualSchema =
            createTableSink(options)
                    .valueFormat
                    .createRuntimeEncoder(new SinkRuntimeProviderContext(false), PHYSICAL_DATA_TYPE);
    assertThat(actualSchema).isEqualTo(expectedSchema);
}
Aggregations