Example 16 with DynamicTableSink

Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.

From the class JdbcDynamicTableFactoryTest, method testJDBCSinkWithParallelism.

@Test
public void testJDBCSinkWithParallelism() {
    Map<String, String> properties = getAllOptions();
    properties.put("sink.parallelism", "2");
    DynamicTableSink actual = createTableSink(SCHEMA, properties);
    JdbcConnectorOptions options =
            JdbcConnectorOptions.builder()
                    .setDBUrl("jdbc:derby:memory:mydb")
                    .setTableName("mytable")
                    .setParallelism(2)
                    .build();
    JdbcExecutionOptions executionOptions =
            JdbcExecutionOptions.builder()
                    .withBatchSize(100)
                    .withBatchIntervalMs(1000)
                    .withMaxRetries(3)
                    .build();
    JdbcDmlOptions dmlOptions =
            JdbcDmlOptions.builder()
                    .withTableName(options.getTableName())
                    .withDialect(options.getDialect())
                    .withFieldNames(SCHEMA.getColumnNames().toArray(new String[0]))
                    .withKeyFields("bbb", "aaa")
                    .build();
    JdbcDynamicTableSink expected = new JdbcDynamicTableSink(options, executionOptions, dmlOptions, SCHEMA.toPhysicalRowDataType());
    assertEquals(expected, actual);
}
Also used : JdbcExecutionOptions(org.apache.flink.connector.jdbc.JdbcExecutionOptions) JdbcConnectorOptions(org.apache.flink.connector.jdbc.internal.options.JdbcConnectorOptions) JdbcDmlOptions(org.apache.flink.connector.jdbc.internal.options.JdbcDmlOptions) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) Test(org.junit.Test)
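
The assertEquals above only compares the configured sink objects; the declared parallelism can also be checked on the runtime provider. A minimal sketch of that extra assertion (not part of the quoted test; it assumes the JDBC sink returns a SinkFunctionProvider, which Example 17 below confirms it does):

    DynamicTableSink.SinkRuntimeProvider provider =
            actual.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    // SinkFunctionProvider implements ParallelismProvider, so the value set via
    // "sink.parallelism" should surface here as Optional.of(2).
    assertEquals(Optional.of(2), ((SinkFunctionProvider) provider).getParallelism());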

Example 17 with DynamicTableSink

Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.

From the class JdbcDynamicTableSinkITCase, method testFlushBufferWhenCheckpoint.

@Test
public void testFlushBufferWhenCheckpoint() throws Exception {
    Map<String, String> options = new HashMap<>();
    options.put("connector", "jdbc");
    options.put("url", DB_URL);
    options.put("table-name", OUTPUT_TABLE5);
    options.put("sink.buffer-flush.interval", "0");
    ResolvedSchema schema = ResolvedSchema.of(Column.physical("id", DataTypes.BIGINT().notNull()));
    DynamicTableSink tableSink = createTableSink(schema, options);
    SinkRuntimeProviderContext context = new SinkRuntimeProviderContext(false);
    SinkFunctionProvider sinkProvider = (SinkFunctionProvider) tableSink.getSinkRuntimeProvider(context);
    GenericJdbcSinkFunction<RowData> sinkFunction = (GenericJdbcSinkFunction<RowData>) sinkProvider.createSinkFunction();
    sinkFunction.setRuntimeContext(new MockStreamingRuntimeContext(true, 1, 0));
    sinkFunction.open(new Configuration());
    sinkFunction.invoke(GenericRowData.of(1L), SinkContextUtil.forTimestamp(1));
    sinkFunction.invoke(GenericRowData.of(2L), SinkContextUtil.forTimestamp(1));
    check(new Row[] {}, DB_URL, OUTPUT_TABLE5, new String[] { "id" });
    sinkFunction.snapshotState(new StateSnapshotContextSynchronousImpl(1, 1));
    check(new Row[] { Row.of(1L), Row.of(2L) }, DB_URL, OUTPUT_TABLE5, new String[] { "id" });
    sinkFunction.close();
}
Also used : SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) MockStreamingRuntimeContext(org.apache.flink.streaming.util.MockStreamingRuntimeContext) Configuration(org.apache.flink.configuration.Configuration) HashMap(java.util.HashMap) StateSnapshotContextSynchronousImpl(org.apache.flink.runtime.state.StateSnapshotContextSynchronousImpl) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) SinkFunctionProvider(org.apache.flink.table.connector.sink.SinkFunctionProvider) GenericRowData(org.apache.flink.table.data.GenericRowData) RowData(org.apache.flink.table.data.RowData) GenericJdbcSinkFunction(org.apache.flink.connector.jdbc.internal.GenericJdbcSinkFunction) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) Test(org.junit.Test)
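
With sink.buffer-flush.interval set to 0, the timed flush never fires, so rows passed to invoke() stay buffered until snapshotState() runs as part of a checkpoint; that is why the first check expects an empty table and the second expects both rows. The contract the test exercises, distilled into a hedged standalone sketch (a simplified model, not the real GenericJdbcSinkFunction, which delegates to a JdbcOutputFormat):

    import java.util.ArrayList;
    import java.util.List;

    // Simplified model of a flush-on-checkpoint sink: invoke() only buffers,
    // and a checkpoint (snapshotState) is the only trigger that writes.
    class BufferingSinkSketch<T> {
        private final List<T> buffer = new ArrayList<>();

        void invoke(T row) {
            // with buffer-flush.interval = 0, nothing reaches the database yet
            buffer.add(row);
        }

        void snapshotState() {
            flush(); // buffered rows hit the database here
        }

        private void flush() {
            // write buffer contents to the database, then clear it
            buffer.clear();
        }
    }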

Example 18 with DynamicTableSink

Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.

From the class UpsertKafkaDynamicTableFactoryTest, method testBufferedTableSink.

@SuppressWarnings("rawtypes")
@Test
public void testBufferedTableSink() {
    // Construct table sink using options and table sink factory.
    final DynamicTableSink actualSink = createTableSink(SINK_SCHEMA, getModifiedOptions(getFullSinkOptions(), options -> {
        options.put("sink.buffer-flush.max-rows", "100");
        options.put("sink.buffer-flush.interval", "1s");
    }));
    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SINK_SCHEMA.toPhysicalRowDataType(),
                    keyEncodingFormat,
                    valueEncodingFormat,
                    SINK_KEY_FIELDS,
                    SINK_VALUE_FIELDS,
                    null,
                    SINK_TOPIC,
                    UPSERT_KAFKA_SINK_PROPERTIES,
                    DeliveryGuarantee.AT_LEAST_ONCE,
                    new SinkBufferFlushMode(100, 1000L),
                    null);
    // Test sink format.
    final KafkaDynamicSink actualUpsertKafkaSink = (KafkaDynamicSink) actualSink;
    assertEquals(expectedSink, actualSink);
    // Test kafka producer.
    DynamicTableSink.SinkRuntimeProvider provider = actualUpsertKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider, instanceOf(DataStreamSinkProvider.class));
    final DataStreamSinkProvider sinkProvider = (DataStreamSinkProvider) provider;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    sinkProvider.consumeDataStream(n -> Optional.empty(), env.fromElements(new BinaryRowData(1)));
    final StreamOperatorFactory<?> sinkOperatorFactory =
            env.getStreamGraph().getStreamNodes().stream()
                    .filter(n -> n.getOperatorName().contains("Writer"))
                    .findFirst()
                    .orElseThrow(() -> new RuntimeException(
                            "Expected a writer operator in the stream graph."))
                    .getOperatorFactory();
    assertThat(sinkOperatorFactory, instanceOf(SinkWriterOperatorFactory.class));
    org.apache.flink.api.connector.sink2.Sink sink = ((SinkWriterOperatorFactory) sinkOperatorFactory).getSink();
    assertThat(sink, instanceOf(ReducingUpsertSink.class));
}
Also used : SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) DeliveryGuarantee(org.apache.flink.connector.base.DeliveryGuarantee) Optional(java.util.Optional) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) StreamOperatorFactory(org.apache.flink.streaming.api.operators.StreamOperatorFactory) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) DataStreamSinkProvider(org.apache.flink.table.connector.sink.DataStreamSinkProvider) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) Sink(org.apache.flink.api.connector.sink2.Sink) SinkWriterOperatorFactory(org.apache.flink.streaming.runtime.operators.sink.SinkWriterOperatorFactory) Test(org.junit.Test)
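
The getModifiedOptions helper used in this and the following examples is not shown in the snippets. A plausible reconstruction, inferred purely from its call sites (a base option map plus a mutating lambda); treat the body as hypothetical:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Consumer;

    // Hypothetical reconstruction of the test helper: copy the base options,
    // let the caller mutate the copy, and return it.
    static Map<String, String> getModifiedOptions(
            Map<String, String> options, Consumer<Map<String, String>> optionModifier) {
        Map<String, String> modified = new HashMap<>(options);
        optionModifier.accept(modified);
        return modified;
    }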

Example 19 with DynamicTableSink

Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.

From the class UpsertKafkaDynamicTableFactoryTest, method testTableSinkWithParallelism.

@Test
public void testTableSinkWithParallelism() {
    final Map<String, String> modifiedOptions = getModifiedOptions(getFullSinkOptions(), options -> options.put("sink.parallelism", "100"));
    final DynamicTableSink actualSink = createTableSink(SINK_SCHEMA, modifiedOptions);
    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SINK_SCHEMA.toPhysicalRowDataType(),
                    keyEncodingFormat,
                    valueEncodingFormat,
                    SINK_KEY_FIELDS,
                    SINK_VALUE_FIELDS,
                    null,
                    SINK_TOPIC,
                    UPSERT_KAFKA_SINK_PROPERTIES,
                    DeliveryGuarantee.AT_LEAST_ONCE,
                    SinkBufferFlushMode.DISABLED,
                    100);
    assertEquals(expectedSink, actualSink);
    final DynamicTableSink.SinkRuntimeProvider provider = actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider, instanceOf(SinkV2Provider.class));
    final SinkV2Provider sinkProvider = (SinkV2Provider) provider;
    assertTrue(sinkProvider.getParallelism().isPresent());
    assertEquals(100, (long) sinkProvider.getParallelism().get());
}
Also used : SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) Test(org.junit.Test)
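
The test stops at asserting that the provider reports the parallelism; downstream, a planner-style caller would unwrap the Optional and apply it to the sink transformation. A hedged sketch of that step (hypothetical wiring; "env" and "dataStream" stand for a StreamExecutionEnvironment and a DataStream<RowData> and are not part of the test):

    // Fall back to the environment's parallelism when the provider does not set one.
    int parallelism = sinkProvider.getParallelism().orElse(env.getParallelism());
    dataStream.sinkTo(sinkProvider.createSink()).setParallelism(parallelism);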

Example 20 with DynamicTableSink

Use of org.apache.flink.table.connector.sink.DynamicTableSink in project flink by apache.

From the class KafkaDynamicTableFactoryTest, method testTableSinkWithParallelism.

@Test
public void testTableSinkWithParallelism() {
    final Map<String, String> modifiedOptions = getModifiedOptions(getBasicSinkOptions(), options -> options.put("sink.parallelism", "100"));
    KafkaDynamicSink actualSink = (KafkaDynamicSink) createTableSink(SCHEMA, modifiedOptions);
    final EncodingFormat<SerializationSchema<RowData>> valueEncodingFormat = new EncodingFormatMock(",");
    final DynamicTableSink expectedSink =
            createExpectedSink(
                    SCHEMA_DATA_TYPE,
                    null,
                    valueEncodingFormat,
                    new int[0],
                    new int[] { 0, 1, 2 },
                    null,
                    TOPIC,
                    KAFKA_SINK_PROPERTIES,
                    new FlinkFixedPartitioner<>(),
                    DeliveryGuarantee.EXACTLY_ONCE,
                    100,
                    "kafka-sink");
    assertThat(actualSink).isEqualTo(expectedSink);
    final DynamicTableSink.SinkRuntimeProvider provider = actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider).isInstanceOf(SinkV2Provider.class);
    final SinkV2Provider sinkProvider = (SinkV2Provider) provider;
    assertThat(sinkProvider.getParallelism().isPresent()).isTrue();
    assertThat((long) sinkProvider.getParallelism().get()).isEqualTo(100);
}
Also used : SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) EncodingFormatMock(org.apache.flink.table.factories.TestFormatFactory.EncodingFormatMock) ConfluentRegistryAvroSerializationSchema(org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroSerializationSchema) AvroRowDataSerializationSchema(org.apache.flink.formats.avro.AvroRowDataSerializationSchema) SerializationSchema(org.apache.flink.api.common.serialization.SerializationSchema) DebeziumAvroSerializationSchema(org.apache.flink.formats.avro.registry.confluent.debezium.DebeziumAvroSerializationSchema) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
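
Taken together, Examples 16 through 20 surface three different SinkRuntimeProvider flavors: SinkFunctionProvider (the JDBC sink), DataStreamSinkProvider (the buffered upsert-kafka sink), and SinkV2Provider (the kafka and upsert-kafka sinks). A hedged sketch of how a planner-style caller might branch over them (simplified; the real planner handles more cases, and SinkFunction here is org.apache.flink.streaming.api.functions.sink.SinkFunction):

    static void describeProvider(DynamicTableSink sink) {
        DynamicTableSink.SinkRuntimeProvider provider =
                sink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
        if (provider instanceof SinkFunctionProvider) {
            // Example 17: a legacy SinkFunction-based sink (JDBC)
            SinkFunction<RowData> fn = ((SinkFunctionProvider) provider).createSinkFunction();
        } else if (provider instanceof DataStreamSinkProvider) {
            // Example 18: the sink consumes the DataStream itself (buffered upsert-kafka)
        } else if (provider instanceof SinkV2Provider) {
            // Examples 19 and 20: the unified sink2 API (kafka, upsert-kafka)
            org.apache.flink.api.connector.sink2.Sink<RowData> s =
                    ((SinkV2Provider) provider).createSink();
        }
    }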

Aggregations

DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink): 54
Test (org.junit.Test): 34
SinkRuntimeProviderContext (org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext): 23
RowData (org.apache.flink.table.data.RowData): 21
ResolvedSchema (org.apache.flink.table.catalog.ResolvedSchema): 19
DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource): 14
SinkV2Provider (org.apache.flink.table.connector.sink.SinkV2Provider): 12
TestDynamicTableFactory (org.apache.flink.table.factories.TestDynamicTableFactory): 12
Test (org.junit.jupiter.api.Test): 10
EncodingFormatMock (org.apache.flink.table.factories.TestFormatFactory.EncodingFormatMock): 8
HashMap (java.util.HashMap): 7
HBaseWriteOptions (org.apache.flink.connector.hbase.options.HBaseWriteOptions): 6
AvroRowDataSerializationSchema (org.apache.flink.formats.avro.AvroRowDataSerializationSchema): 6
SinkFunctionProvider (org.apache.flink.table.connector.sink.SinkFunctionProvider): 5
Collections (java.util.Collections): 4
HBaseDynamicTableSink (org.apache.flink.connector.hbase2.sink.HBaseDynamicTableSink): 4
SupportsPartitioning (org.apache.flink.table.connector.sink.abilities.SupportsPartitioning): 4
DataType (org.apache.flink.table.types.DataType): 4
RowType (org.apache.flink.table.types.logical.RowType): 4
ArrayList (java.util.ArrayList): 3