Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
From the class JdbcDynamicTableFactoryTest, the method testJDBCSinkWithParallelism verifies that the 'sink.parallelism' option is carried into the constructed JdbcDynamicTableSink.
@Test
public void testJDBCSinkWithParallelism() {
    Map<String, String> properties = getAllOptions();
    properties.put("sink.parallelism", "2");
    DynamicTableSink actual = createTableSink(SCHEMA, properties);
    JdbcConnectorOptions options = JdbcConnectorOptions.builder()
            .setDBUrl("jdbc:derby:memory:mydb").setTableName("mytable")
            .setParallelism(2).build();
    JdbcExecutionOptions executionOptions = JdbcExecutionOptions.builder()
            .withBatchSize(100).withBatchIntervalMs(1000).withMaxRetries(3).build();
    JdbcDmlOptions dmlOptions = JdbcDmlOptions.builder()
            .withTableName(options.getTableName()).withDialect(options.getDialect())
            .withFieldNames(SCHEMA.getColumnNames().toArray(new String[0]))
            .withKeyFields("bbb", "aaa").build();
    JdbcDynamicTableSink expected = new JdbcDynamicTableSink(
            options, executionOptions, dmlOptions, SCHEMA.toPhysicalRowDataType());
    assertEquals(expected, actual);
}
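For orientation, the options exercised above map directly onto a SQL DDL definition. Below is a minimal sketch of a table whose sink would be planned with parallelism 2, using org.apache.flink.table.api.TableEnvironment and EnvironmentSettings; the column names and types and the PRIMARY KEY clause are assumptions for illustration (only the key field order "bbb", "aaa" is taken from the test), not the test's actual SCHEMA.

TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());
tEnv.executeSql(
        "CREATE TABLE mytable ("
                + "  aaa INT,"
                + "  bbb STRING,"
                + "  PRIMARY KEY (bbb, aaa) NOT ENFORCED"
                + ") WITH ("
                + "  'connector' = 'jdbc',"
                + "  'url' = 'jdbc:derby:memory:mydb',"
                + "  'table-name' = 'mytable',"
                + "  'sink.parallelism' = '2'" // fixes the writer parallelism, as asserted above
                + ")");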
Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
From the class JdbcDynamicTableSinkITCase, the method testFlushBufferWhenCheckpoint verifies that with 'sink.buffer-flush.interval' set to '0', buffered rows are written only when a checkpoint snapshot is taken.
@Test
public void testFlushBufferWhenCheckpoint() throws Exception {
    Map<String, String> options = new HashMap<>();
    options.put("connector", "jdbc");
    options.put("url", DB_URL);
    options.put("table-name", OUTPUT_TABLE5);
    options.put("sink.buffer-flush.interval", "0");
    ResolvedSchema schema =
            ResolvedSchema.of(Column.physical("id", DataTypes.BIGINT().notNull()));
    DynamicTableSink tableSink = createTableSink(schema, options);
    SinkRuntimeProviderContext context = new SinkRuntimeProviderContext(false);
    SinkFunctionProvider sinkProvider =
            (SinkFunctionProvider) tableSink.getSinkRuntimeProvider(context);
    GenericJdbcSinkFunction<RowData> sinkFunction =
            (GenericJdbcSinkFunction<RowData>) sinkProvider.createSinkFunction();
    sinkFunction.setRuntimeContext(new MockStreamingRuntimeContext(true, 1, 0));
    sinkFunction.open(new Configuration());
    sinkFunction.invoke(GenericRowData.of(1L), SinkContextUtil.forTimestamp(1));
    sinkFunction.invoke(GenericRowData.of(2L), SinkContextUtil.forTimestamp(1));
    // Nothing has been written to the table before the checkpoint ...
    check(new Row[] {}, DB_URL, OUTPUT_TABLE5, new String[] {"id"});
    sinkFunction.snapshotState(new StateSnapshotContextSynchronousImpl(1, 1));
    // ... and both rows appear once snapshotState() has flushed the buffer.
    check(new Row[] {Row.of(1L), Row.of(2L)}, DB_URL, OUTPUT_TABLE5, new String[] {"id"});
    sinkFunction.close();
}
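What this test pins down is the contract between invoke() and snapshotState(): with a flush interval of 0, rows accumulate in a buffer and are written out only when the checkpoint barrier arrives. Below is a minimal sketch of that CheckpointedFunction pattern; it is an illustration only, not Flink's actual GenericJdbcSinkFunction, and flushToDatabase() is a hypothetical placeholder for the JDBC batch write.

import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;

import java.util.ArrayList;
import java.util.List;

public class BufferingSinkSketch<T> extends RichSinkFunction<T>
        implements CheckpointedFunction {

    private final List<T> buffer = new ArrayList<>();

    @Override
    public void invoke(T value, Context context) {
        // With a flush interval of 0, records only accumulate here ...
        buffer.add(value);
    }

    @Override
    public void snapshotState(FunctionSnapshotContext context) {
        // ... and are pushed to the external system on the checkpoint barrier,
        // which is exactly what the two check() calls above observe.
        flushToDatabase(buffer);
        buffer.clear();
    }

    @Override
    public void initializeState(FunctionInitializationContext context) {
        // State recovery is omitted in this sketch.
    }

    private void flushToDatabase(List<T> rows) {
        // Placeholder for the actual batch write.
    }
}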
Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
From the class UpsertKafkaDynamicTableFactoryTest, the method testBufferedTableSink verifies that the buffer-flush options produce a buffered upsert sink backed by ReducingUpsertSink.
@SuppressWarnings("rawtypes")
@Test
public void testBufferedTableSink() {
    // Construct table sink using options and table sink factory.
    final DynamicTableSink actualSink =
            createTableSink(SINK_SCHEMA, getModifiedOptions(getFullSinkOptions(), options -> {
                options.put("sink.buffer-flush.max-rows", "100");
                options.put("sink.buffer-flush.interval", "1s");
            }));
    final DynamicTableSink expectedSink =
            createExpectedSink(SINK_SCHEMA.toPhysicalRowDataType(), keyEncodingFormat,
                    valueEncodingFormat, SINK_KEY_FIELDS, SINK_VALUE_FIELDS, null, SINK_TOPIC,
                    UPSERT_KAFKA_SINK_PROPERTIES, DeliveryGuarantee.AT_LEAST_ONCE,
                    new SinkBufferFlushMode(100, 1000L), null);
    // Test sink format.
    final KafkaDynamicSink actualUpsertKafkaSink = (KafkaDynamicSink) actualSink;
    assertEquals(expectedSink, actualSink);
    // Test kafka producer.
    DynamicTableSink.SinkRuntimeProvider provider =
            actualUpsertKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider, instanceOf(DataStreamSinkProvider.class));
    final DataStreamSinkProvider sinkProvider = (DataStreamSinkProvider) provider;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    sinkProvider.consumeDataStream(n -> Optional.empty(), env.fromElements(new BinaryRowData(1)));
    final StreamOperatorFactory<?> sinkOperatorFactory =
            env.getStreamGraph().getStreamNodes().stream()
                    .filter(n -> n.getOperatorName().contains("Writer"))
                    .findFirst()
                    .orElseThrow(() -> new RuntimeException(
                            "Expected operator with name Writer in stream graph."))
                    .getOperatorFactory();
    assertThat(sinkOperatorFactory, instanceOf(SinkWriterOperatorFactory.class));
    org.apache.flink.api.connector.sink2.Sink sink =
            ((SinkWriterOperatorFactory) sinkOperatorFactory).getSink();
    assertThat(sink, instanceOf(ReducingUpsertSink.class));
}
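Note the expected SinkBufferFlushMode(100, 1000L): '100' is the max-rows option verbatim, and the '1s' interval has been parsed to 1000 ms. At the SQL level the two modified options correspond to the DDL sketched below; the schema, topic, bootstrap servers, and formats are illustrative assumptions, not values from the test, and tEnv is an assumed TableEnvironment.

tEnv.executeSql(
        "CREATE TABLE upsert_sink ("
                + "  user_id BIGINT,"
                + "  cnt BIGINT,"
                + "  PRIMARY KEY (user_id) NOT ENFORCED"
                + ") WITH ("
                + "  'connector' = 'upsert-kafka',"
                + "  'topic' = 'sink-topic',"
                + "  'properties.bootstrap.servers' = 'localhost:9092',"
                + "  'key.format' = 'json',"
                + "  'value.format' = 'json',"
                + "  'sink.buffer-flush.max-rows' = '100',"
                + "  'sink.buffer-flush.interval' = '1s'"
                + ")");

Buffering lets the sink collapse multiple updates for the same key within a flush window into the latest one before writing, which is why the test expects the runtime sink to be a ReducingUpsertSink.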
Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
From the class UpsertKafkaDynamicTableFactoryTest, the method testTableSinkWithParallelism verifies that 'sink.parallelism' is exposed through the SinkV2Provider.
@Test
public void testTableSinkWithParallelism() {
    final Map<String, String> modifiedOptions = getModifiedOptions(
            getFullSinkOptions(), options -> options.put("sink.parallelism", "100"));
    final DynamicTableSink actualSink = createTableSink(SINK_SCHEMA, modifiedOptions);
    final DynamicTableSink expectedSink =
            createExpectedSink(SINK_SCHEMA.toPhysicalRowDataType(), keyEncodingFormat,
                    valueEncodingFormat, SINK_KEY_FIELDS, SINK_VALUE_FIELDS, null, SINK_TOPIC,
                    UPSERT_KAFKA_SINK_PROPERTIES, DeliveryGuarantee.AT_LEAST_ONCE,
                    SinkBufferFlushMode.DISABLED, 100);
    assertEquals(expectedSink, actualSink);
    final DynamicTableSink.SinkRuntimeProvider provider =
            actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider, instanceOf(SinkV2Provider.class));
    final SinkV2Provider sinkProvider = (SinkV2Provider) provider;
    assertTrue(sinkProvider.getParallelism().isPresent());
    assertEquals(100, (long) sinkProvider.getParallelism().get());
}
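The getModifiedOptions helper used here and in the previous test is not shown in these snippets. A plausible reconstruction is sketched below; Flink's actual helper in the test class may differ, and the copy-then-mutate behavior is an assumption. It needs java.util.HashMap, java.util.Map, and java.util.function.Consumer.

private static Map<String, String> getModifiedOptions(
        Map<String, String> options, Consumer<Map<String, String>> optionsModifier) {
    // Copy the base options so each test works on its own map, then apply the mutation.
    final Map<String, String> modified = new HashMap<>(options);
    optionsModifier.accept(modified);
    return modified;
}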
Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.
From the class KafkaDynamicTableFactoryTest, the method testTableSinkWithParallelism verifies the same 'sink.parallelism' behavior for the plain Kafka sink, using AssertJ-style assertions.
@Test
public void testTableSinkWithParallelism() {
    final Map<String, String> modifiedOptions = getModifiedOptions(
            getBasicSinkOptions(), options -> options.put("sink.parallelism", "100"));
    KafkaDynamicSink actualSink = (KafkaDynamicSink) createTableSink(SCHEMA, modifiedOptions);
    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat =
            new EncodingFormatMock(",");
    final DynamicTableSink expectedSink =
            createExpectedSink(SCHEMA_DATA_TYPE, null, valueEncodingFormat, new int[0],
                    new int[] {0, 1, 2}, null, TOPIC, KAFKA_SINK_PROPERTIES,
                    new FlinkFixedPartitioner<>(), DeliveryGuarantee.EXACTLY_ONCE, 100,
                    "kafka-sink");
    assertThat(actualSink).isEqualTo(expectedSink);
    final DynamicTableSink.SinkRuntimeProvider provider =
            actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider).isInstanceOf(SinkV2Provider.class);
    final SinkV2Provider sinkProvider = (SinkV2Provider) provider;
    assertThat(sinkProvider.getParallelism().isPresent()).isTrue();
    assertThat((long) sinkProvider.getParallelism().get()).isEqualTo(100);
}
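Both parallelism tests stop at the SinkV2Provider. For context, the sketch below shows how a caller could unwrap such a provider and honor the declared parallelism; tableSink, dataStream, and env are assumed to be in scope, and the planner's real translation is more involved than this.

DynamicTableSink.SinkRuntimeProvider provider =
        tableSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
if (provider instanceof SinkV2Provider) {
    SinkV2Provider v2Provider = (SinkV2Provider) provider;
    // getParallelism() returns Optional<Integer>; fall back to the environment
    // default when 'sink.parallelism' is not set on the table.
    int parallelism = v2Provider.getParallelism().orElse(env.getParallelism());
    dataStream.sinkTo(v2Provider.createSink()).setParallelism(parallelism);
}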