Search in sources:

Example 1 with DataStreamSinkProvider

Use of org.apache.flink.table.connector.sink.DataStreamSinkProvider in the Apache Flink project.

From the class UpsertKafkaDynamicTableFactoryTest, method testBufferedTableSink:

@SuppressWarnings("rawtypes")
@Test
public void testBufferedTableSink() {
    // Build the sink through the factory with buffered (reducing) upsert mode enabled.
    final DynamicTableSink sink =
            createTableSink(
                    SINK_SCHEMA,
                    getModifiedOptions(
                            getFullSinkOptions(),
                            options -> {
                                options.put("sink.buffer-flush.max-rows", "100");
                                options.put("sink.buffer-flush.interval", "1s");
                            }));
    final DynamicTableSink expected =
            createExpectedSink(
                    SINK_SCHEMA.toPhysicalRowDataType(),
                    keyEncodingFormat,
                    valueEncodingFormat,
                    SINK_KEY_FIELDS,
                    SINK_VALUE_FIELDS,
                    null,
                    SINK_TOPIC,
                    UPSERT_KAFKA_SINK_PROPERTIES,
                    DeliveryGuarantee.AT_LEAST_ONCE,
                    new SinkBufferFlushMode(100, 1000L),
                    null);
    // Test sink format.
    final KafkaDynamicSink upsertKafkaSink = (KafkaDynamicSink) sink;
    assertEquals(expected, sink);
    // The buffered sink must surface as a DataStreamSinkProvider so it can wrap
    // the Kafka producer with the reducing upsert logic.
    DynamicTableSink.SinkRuntimeProvider provider =
            upsertKafkaSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    assertThat(provider, instanceOf(DataStreamSinkProvider.class));
    final DataStreamSinkProvider dataStreamSinkProvider = (DataStreamSinkProvider) provider;
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    dataStreamSinkProvider.consumeDataStream(
            name -> Optional.empty(), env.fromElements(new BinaryRowData(1)));
    // Locate the writer operator in the generated stream graph and verify it wraps
    // a ReducingUpsertSink.
    final StreamOperatorFactory<?> writerFactory =
            env.getStreamGraph().getStreamNodes().stream()
                    .filter(node -> node.getOperatorName().contains("Writer"))
                    .findFirst()
                    .orElseThrow(
                            () ->
                                    new RuntimeException(
                                            "Expected operator with name Sink in stream graph."))
                    .getOperatorFactory();
    assertThat(writerFactory, instanceOf(SinkWriterOperatorFactory.class));
    org.apache.flink.api.connector.sink2.Sink wrappedSink =
            ((SinkWriterOperatorFactory) writerFactory).getSink();
    assertThat(wrappedSink, instanceOf(ReducingUpsertSink.class));
}
Also used : DataType(org.apache.flink.table.types.DataType) AtomicDataType(org.apache.flink.table.types.AtomicDataType) Arrays(java.util.Arrays) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) SourceTransformation(org.apache.flink.streaming.api.transformations.SourceTransformation) DataStreamScanProvider(org.apache.flink.table.connector.source.DataStreamScanProvider) CoreMatchers.instanceOf(org.hamcrest.CoreMatchers.instanceOf) DecodingFormat(org.apache.flink.table.connector.format.DecodingFormat) Map(java.util.Map) TestLogger(org.apache.flink.util.TestLogger) FactoryMocks.createTableSink(org.apache.flink.table.factories.utils.FactoryMocks.createTableSink) ConfluentRegistryAvroSerializationSchema(org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroSerializationSchema) DynamicTableSource(org.apache.flink.table.connector.source.DynamicTableSource) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) FlinkMatchers.containsCause(org.apache.flink.core.testutils.FlinkMatchers.containsCause) AVRO_CONFLUENT(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptionsUtil.AVRO_CONFLUENT) AvroRowDataSerializationSchema(org.apache.flink.formats.avro.AvroRowDataSerializationSchema) FactoryUtil(org.apache.flink.table.factories.FactoryUtil) DataStreamSinkProvider(org.apache.flink.table.connector.sink.DataStreamSinkProvider) ValidationException(org.apache.flink.table.api.ValidationException) Optional(java.util.Optional) ScanRuntimeProviderContext(org.apache.flink.table.runtime.connector.source.ScanRuntimeProviderContext) SerializationSchema(org.apache.flink.api.common.serialization.SerializationSchema) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) TestFormatFactory(org.apache.flink.table.factories.TestFormatFactory) DeliveryGuarantee(org.apache.flink.connector.base.DeliveryGuarantee) EncodingFormat(org.apache.flink.table.connector.format.EncodingFormat) 
Sink(org.apache.flink.api.connector.sink2.Sink) ChangelogMode(org.apache.flink.table.connector.ChangelogMode) StreamOperatorFactory(org.apache.flink.streaming.api.operators.StreamOperatorFactory) Column(org.apache.flink.table.catalog.Column) HashMap(java.util.HashMap) RowType(org.apache.flink.table.types.logical.RowType) ScanTableSource(org.apache.flink.table.connector.source.ScanTableSource) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) KafkaSink(org.apache.flink.connector.kafka.sink.KafkaSink) RowDataToAvroConverters(org.apache.flink.formats.avro.RowDataToAvroConverters) FactoryMocks.createTableSource(org.apache.flink.table.factories.utils.FactoryMocks.createTableSource) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) SinkWriterOperatorFactory(org.apache.flink.streaming.runtime.operators.sink.SinkWriterOperatorFactory) ExpectedException(org.junit.rules.ExpectedException) RowData(org.apache.flink.table.data.RowData) Properties(java.util.Properties) Assert.assertTrue(org.junit.Assert.assertTrue) DataTypes(org.apache.flink.table.api.DataTypes) VarCharType(org.apache.flink.table.types.logical.VarCharType) Test(org.junit.Test) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) KafkaSourceEnumState(org.apache.flink.connector.kafka.source.enumerator.KafkaSourceEnumState) DeserializationSchema(org.apache.flink.api.common.serialization.DeserializationSchema) Consumer(java.util.function.Consumer) StartupMode(org.apache.flink.streaming.connectors.kafka.config.StartupMode) Rule(org.junit.Rule) KafkaSource(org.apache.flink.connector.kafka.source.KafkaSource) UniqueConstraint(org.apache.flink.table.catalog.UniqueConstraint) SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) FactoryMocks(org.apache.flink.table.factories.utils.FactoryMocks) KafkaPartitionSplit(org.apache.flink.connector.kafka.source.split.KafkaPartitionSplit) 
Transformation(org.apache.flink.api.dag.Transformation) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) AvroSchemaConverter(org.apache.flink.formats.avro.typeutils.AvroSchemaConverter) SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) Sink(org.apache.flink.api.connector.sink2.Sink) DataStreamSinkProvider(org.apache.flink.table.connector.sink.DataStreamSinkProvider) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) SinkWriterOperatorFactory(org.apache.flink.streaming.runtime.operators.sink.SinkWriterOperatorFactory) BinaryRowData(org.apache.flink.table.data.binary.BinaryRowData) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) Test(org.junit.Test)

Example 2 with DataStreamSinkProvider

Use of org.apache.flink.table.connector.sink.DataStreamSinkProvider in the Apache Flink project.

From the class CommonExecSink, method applySinkProvider:

// Bridges a connector's SinkRuntimeProvider to the DataStream runtime by producing the
// Transformation that executes the sink. Dispatches on the concrete provider flavor; the
// order of the instanceof checks is significant for providers implementing more than one
// interface. rowtimeFieldIndex presumably points at the rowtime column whose value is
// copied into the StreamRecord timestamp by applyRowtimeTransformation — TODO confirm.
private Transformation<?> applySinkProvider(Transformation<RowData> inputTransform, StreamExecutionEnvironment env, SinkRuntimeProvider runtimeProvider, int rowtimeFieldIndex, int sinkParallelism, ReadableConfig config) {
    TransformationMetadata sinkMeta = createTransformationMeta(SINK_TRANSFORMATION, config);
    if (runtimeProvider instanceof DataStreamSinkProvider) {
        // Connector consumes a DataStream directly; its resulting DataStreamSink's
        // transformation is returned as-is (parallelism/metadata are the provider's concern —
        // note sinkMeta is NOT applied on this path).
        Transformation<RowData> sinkTransformation = applyRowtimeTransformation(inputTransform, rowtimeFieldIndex, sinkParallelism, config);
        final DataStream<RowData> dataStream = new DataStream<>(env, sinkTransformation);
        final DataStreamSinkProvider provider = (DataStreamSinkProvider) runtimeProvider;
        return provider.consumeDataStream(createProviderContext(), dataStream).getTransformation();
    } else if (runtimeProvider instanceof TransformationSinkProvider) {
        // Planner-internal provider: hand it the raw input transformation plus the rowtime
        // index through an anonymous Context implementation.
        final TransformationSinkProvider provider = (TransformationSinkProvider) runtimeProvider;
        return provider.createTransformation(new TransformationSinkProvider.Context() {

            @Override
            public Transformation<RowData> getInputTransformation() {
                return inputTransform;
            }

            @Override
            public int getRowtimeIndex() {
                return rowtimeFieldIndex;
            }

            @Override
            public Optional<String> generateUid(String name) {
                // Delegate uid generation so transformation uids stay consistent
                // with the other provider paths.
                return createProviderContext().generateUid(name);
            }
        });
    } else if (runtimeProvider instanceof SinkFunctionProvider) {
        // Legacy SinkFunction-based connector.
        final SinkFunction<RowData> sinkFunction = ((SinkFunctionProvider) runtimeProvider).createSinkFunction();
        return createSinkFunctionTransformation(sinkFunction, env, inputTransform, rowtimeFieldIndex, sinkMeta, sinkParallelism);
    } else if (runtimeProvider instanceof OutputFormatProvider) {
        // OutputFormat connectors are adapted to a SinkFunction and share the same path.
        OutputFormat<RowData> outputFormat = ((OutputFormatProvider) runtimeProvider).createOutputFormat();
        final SinkFunction<RowData> sinkFunction = new OutputFormatSinkFunction<>(outputFormat);
        return createSinkFunctionTransformation(sinkFunction, env, inputTransform, rowtimeFieldIndex, sinkMeta, sinkParallelism);
    } else if (runtimeProvider instanceof SinkProvider) {
        // Unified Sink V1: wrap via DataStreamSink.forSinkV1 and apply parallelism/metadata here.
        Transformation<RowData> sinkTransformation = applyRowtimeTransformation(inputTransform, rowtimeFieldIndex, sinkParallelism, config);
        final DataStream<RowData> dataStream = new DataStream<>(env, sinkTransformation);
        final Transformation<?> transformation = DataStreamSink.forSinkV1(dataStream, ((SinkProvider) runtimeProvider).createSink()).getTransformation();
        transformation.setParallelism(sinkParallelism);
        sinkMeta.fill(transformation);
        return transformation;
    } else if (runtimeProvider instanceof SinkV2Provider) {
        // Unified Sink V2: same handling as V1 but through DataStreamSink.forSink.
        Transformation<RowData> sinkTransformation = applyRowtimeTransformation(inputTransform, rowtimeFieldIndex, sinkParallelism, config);
        final DataStream<RowData> dataStream = new DataStream<>(env, sinkTransformation);
        final Transformation<?> transformation = DataStreamSink.forSink(dataStream, ((SinkV2Provider) runtimeProvider).createSink()).getTransformation();
        transformation.setParallelism(sinkParallelism);
        sinkMeta.fill(transformation);
        return transformation;
    } else {
        // Unknown provider flavor — fail loudly rather than silently dropping the sink.
        throw new TableException("Unsupported sink runtime provider.");
    }
}
Also used : ExecNodeContext(org.apache.flink.table.planner.plan.nodes.exec.ExecNodeContext) ProviderContext(org.apache.flink.table.connector.ProviderContext) SinkRuntimeProviderContext(org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext) TransformationMetadata(org.apache.flink.table.planner.plan.nodes.exec.utils.TransformationMetadata) PartitionTransformation(org.apache.flink.streaming.api.transformations.PartitionTransformation) LegacySinkTransformation(org.apache.flink.streaming.api.transformations.LegacySinkTransformation) OneInputTransformation(org.apache.flink.streaming.api.transformations.OneInputTransformation) Transformation(org.apache.flink.api.dag.Transformation) TableException(org.apache.flink.table.api.TableException) DataStream(org.apache.flink.streaming.api.datastream.DataStream) DataStreamSinkProvider(org.apache.flink.table.connector.sink.DataStreamSinkProvider) OutputFormat(org.apache.flink.api.common.io.OutputFormat) DataStreamSinkProvider(org.apache.flink.table.connector.sink.DataStreamSinkProvider) TransformationSinkProvider(org.apache.flink.table.planner.connectors.TransformationSinkProvider) SinkProvider(org.apache.flink.table.connector.sink.SinkProvider) SinkFunctionProvider(org.apache.flink.table.connector.sink.SinkFunctionProvider) RowData(org.apache.flink.table.data.RowData) TransformationSinkProvider(org.apache.flink.table.planner.connectors.TransformationSinkProvider) SinkFunction(org.apache.flink.streaming.api.functions.sink.SinkFunction) OutputFormatSinkFunction(org.apache.flink.streaming.api.functions.sink.OutputFormatSinkFunction) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) OutputFormatProvider(org.apache.flink.table.connector.sink.OutputFormatProvider)

Example 3 with DataStreamSinkProvider

Use of org.apache.flink.table.connector.sink.DataStreamSinkProvider in the Apache Flink project.

From the class CommonExecSinkITCase, method testFromValuesWatermarkPropagation:

@Test
public void testFromValuesWatermarkPropagation() throws Exception {
    final StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
    final SharedReference<List<Long>> collectedWatermarks = sharedObjects.add(new ArrayList<>());
    // Records every watermark the sink observes so propagation can be asserted afterwards.
    final SinkFunction<RowData> watermarkRecordingSink =
            new SinkFunction<RowData>() {

                @Override
                public void writeWatermark(
                        org.apache.flink.api.common.eventtime.Watermark watermark) {
                    addElement(collectedWatermarks, watermark.getTimestamp());
                }
            };
    final TableDescriptor sinkDescriptor =
            TableFactoryHarness.newBuilder()
                    .sink(
                            new TableFactoryHarness.SinkBase() {

                                @Override
                                public DataStreamSinkProvider getSinkRuntimeProvider(
                                        DynamicTableSink.Context context) {
                                    return (providerContext, dataStream) ->
                                            dataStream.addSink(watermarkRecordingSink);
                                }
                            })
                    .build();
    final Table source =
            tEnv.fromValues(
                    DataTypes.ROW(DataTypes.FIELD("a", DataTypes.INT())),
                    Row.of(1),
                    Row.of(2),
                    Row.of(3));
    source.executeInsert(sinkDescriptor).await();
    // Each parallel sink subtask should see exactly one final watermark.
    assertThat(collectedWatermarks.get().size()).isEqualTo(env.getParallelism());
    for (Long watermark : collectedWatermarks.get()) {
        assertThat(watermark).isEqualTo(Watermark.MAX_WATERMARK.getTimestamp());
    }
}
Also used : StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Arrays(java.util.Arrays) Schema(org.apache.flink.table.api.Schema) TableDescriptor(org.apache.flink.table.api.TableDescriptor) SinkV1Adapter(org.apache.flink.streaming.api.transformations.SinkV1Adapter) SharedObjects(org.apache.flink.testutils.junit.SharedObjects) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) ExplainDetail(org.apache.flink.table.api.ExplainDetail) ExceptionUtils(org.apache.flink.util.ExceptionUtils) TestSink(org.apache.flink.streaming.runtime.operators.sink.TestSink) Parameterized(org.junit.runners.Parameterized) AbstractTestBase(org.apache.flink.test.util.AbstractTestBase) TableFactoryHarness(org.apache.flink.table.planner.factories.TableFactoryHarness) DynamicTableSource(org.apache.flink.table.connector.source.DynamicTableSource) SinkFunction(org.apache.flink.streaming.api.functions.sink.SinkFunction) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) Collection(java.util.Collection) Table(org.apache.flink.table.api.Table) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) List(java.util.List) DataStreamSinkProvider(org.apache.flink.table.connector.sink.DataStreamSinkProvider) ValidationException(org.apache.flink.table.api.ValidationException) TableResult(org.apache.flink.table.api.TableResult) Row(org.apache.flink.types.Row) NotNull(org.jetbrains.annotations.NotNull) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) Assert.assertThrows(org.junit.Assert.assertThrows) RunWith(org.junit.runner.RunWith) Watermark(org.apache.flink.streaming.api.watermark.Watermark) ScanTableSource(org.apache.flink.table.connector.source.ScanTableSource) ArrayList(java.util.ArrayList) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) 
TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER(org.apache.flink.table.api.config.ExecutionConfigOptions.TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER) SourceFunction(org.apache.flink.streaming.api.functions.source.SourceFunction) TABLE_EXEC_SINK_NOT_NULL_ENFORCER(org.apache.flink.table.api.config.ExecutionConfigOptions.TABLE_EXEC_SINK_NOT_NULL_ENFORCER) SourceFunctionProvider(org.apache.flink.table.connector.source.SourceFunctionProvider) SharedReference(org.apache.flink.testutils.junit.SharedReference) INT(org.apache.flink.table.api.DataTypes.INT) Before(org.junit.Before) RowData(org.apache.flink.table.data.RowData) DataTypes(org.apache.flink.table.api.DataTypes) SinkProvider(org.apache.flink.table.connector.sink.SinkProvider) Test(org.junit.Test) ExecutionException(java.util.concurrent.ExecutionException) Rule(org.junit.Rule) ExecutionConfigOptions(org.apache.flink.table.api.config.ExecutionConfigOptions) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) Table(org.apache.flink.table.api.Table) TableDescriptor(org.apache.flink.table.api.TableDescriptor) RowData(org.apache.flink.table.data.RowData) SinkFunction(org.apache.flink.streaming.api.functions.sink.SinkFunction) List(java.util.List) ArrayList(java.util.ArrayList) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Watermark(org.apache.flink.streaming.api.watermark.Watermark) Test(org.junit.Test)

Example 4 with DataStreamSinkProvider

Use of org.apache.flink.table.connector.sink.DataStreamSinkProvider in the Apache Flink project.

From the class CommonExecSinkITCase, method testStreamRecordTimestampInserterDataStreamSinkProvider:

@Test
public void testStreamRecordTimestampInserterDataStreamSinkProvider() throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
    final SharedReference<List<Long>> seenTimestamps = sharedObjects.add(new ArrayList<>());
    final List<Row> inputRows =
            Arrays.asList(
                    Row.of(1, "foo", Instant.parse("2020-11-10T11:34:56.123Z")),
                    Row.of(2, "foo", Instant.parse("2020-11-10T12:34:56.789Z")),
                    Row.of(3, "foo", Instant.parse("2020-11-11T10:11:22.777Z")),
                    Row.of(4, "foo", Instant.parse("2020-11-11T10:11:23.888Z")));
    // Captures the StreamRecord timestamp attached to every row reaching the sink.
    final SinkFunction<RowData> timestampCapturingSink =
            new SinkFunction<RowData>() {

                @Override
                public void invoke(RowData value, Context context) {
                    addElement(seenTimestamps, context.timestamp());
                }
            };
    final TableDescriptor sourceDescriptor =
            TableFactoryHarness.newBuilder()
                    .schema(schemaStreamRecordTimestampInserter(true))
                    .source(new TestSource(inputRows))
                    .sink(
                            new TableFactoryHarness.SinkBase() {

                                @Override
                                public DataStreamSinkProvider getSinkRuntimeProvider(
                                        DynamicTableSink.Context context) {
                                    return (providerContext, dataStream) ->
                                            dataStream.addSink(timestampCapturingSink);
                                }
                            })
                    .build();
    tEnv.createTable("T1", sourceDescriptor);
    final String sqlStmt = "INSERT INTO T1 SELECT * FROM T1";
    // Verify the plan contains the timestamp inserter, then run and check the timestamps.
    assertPlan(tEnv, sqlStmt, true);
    tEnv.executeSql(sqlStmt).await();
    Collections.sort(seenTimestamps.get());
    assertTimestampResults(seenTimestamps, inputRows);
}
Also used : StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Arrays(java.util.Arrays) Schema(org.apache.flink.table.api.Schema) TableDescriptor(org.apache.flink.table.api.TableDescriptor) SinkV1Adapter(org.apache.flink.streaming.api.transformations.SinkV1Adapter) SharedObjects(org.apache.flink.testutils.junit.SharedObjects) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) ExplainDetail(org.apache.flink.table.api.ExplainDetail) ExceptionUtils(org.apache.flink.util.ExceptionUtils) TestSink(org.apache.flink.streaming.runtime.operators.sink.TestSink) Parameterized(org.junit.runners.Parameterized) AbstractTestBase(org.apache.flink.test.util.AbstractTestBase) TableFactoryHarness(org.apache.flink.table.planner.factories.TableFactoryHarness) DynamicTableSource(org.apache.flink.table.connector.source.DynamicTableSource) SinkFunction(org.apache.flink.streaming.api.functions.sink.SinkFunction) DynamicTableSink(org.apache.flink.table.connector.sink.DynamicTableSink) Collection(java.util.Collection) Table(org.apache.flink.table.api.Table) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) List(java.util.List) DataStreamSinkProvider(org.apache.flink.table.connector.sink.DataStreamSinkProvider) ValidationException(org.apache.flink.table.api.ValidationException) TableResult(org.apache.flink.table.api.TableResult) Row(org.apache.flink.types.Row) NotNull(org.jetbrains.annotations.NotNull) StreamExecutionEnvironment(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) Assert.assertThrows(org.junit.Assert.assertThrows) RunWith(org.junit.runner.RunWith) Watermark(org.apache.flink.streaming.api.watermark.Watermark) ScanTableSource(org.apache.flink.table.connector.source.ScanTableSource) ArrayList(java.util.ArrayList) SinkV2Provider(org.apache.flink.table.connector.sink.SinkV2Provider) 
TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER(org.apache.flink.table.api.config.ExecutionConfigOptions.TABLE_EXEC_SINK_TYPE_LENGTH_ENFORCER) SourceFunction(org.apache.flink.streaming.api.functions.source.SourceFunction) TABLE_EXEC_SINK_NOT_NULL_ENFORCER(org.apache.flink.table.api.config.ExecutionConfigOptions.TABLE_EXEC_SINK_NOT_NULL_ENFORCER) SourceFunctionProvider(org.apache.flink.table.connector.source.SourceFunctionProvider) SharedReference(org.apache.flink.testutils.junit.SharedReference) INT(org.apache.flink.table.api.DataTypes.INT) Before(org.junit.Before) RowData(org.apache.flink.table.data.RowData) DataTypes(org.apache.flink.table.api.DataTypes) SinkProvider(org.apache.flink.table.connector.sink.SinkProvider) Test(org.junit.Test) ExecutionException(java.util.concurrent.ExecutionException) Rule(org.junit.Rule) ExecutionConfigOptions(org.apache.flink.table.api.config.ExecutionConfigOptions) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) TableDescriptor(org.apache.flink.table.api.TableDescriptor) RowData(org.apache.flink.table.data.RowData) SinkFunction(org.apache.flink.streaming.api.functions.sink.SinkFunction) List(java.util.List) ArrayList(java.util.ArrayList) StreamTableEnvironment(org.apache.flink.table.api.bridge.java.StreamTableEnvironment) Row(org.apache.flink.types.Row) Test(org.junit.Test)

Example 5 with DataStreamSinkProvider

Use of org.apache.flink.table.connector.sink.DataStreamSinkProvider in the Apache Flink project.

From the class CollectDynamicSink, method getSinkRuntimeProvider:

@Override
public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
    // Wires up the collect sink lazily, once the planner hands over the data stream.
    return (DataStreamSinkProvider)
            (ctx, stream) -> {
                final ExecutionConfig execConfig = stream.getExecutionConfig();
                final CheckpointConfig ckptConfig =
                        stream.getExecutionEnvironment().getCheckpointConfig();
                // Serializer used for rows crossing the collect boundary.
                final TypeSerializer<RowData> rowSerializer =
                        InternalTypeInfo.<RowData>of(consumedDataType.getLogicalType())
                                .createSerializer(execConfig);
                final String accName = tableIdentifier.getObjectName();
                final CollectSinkOperatorFactory<RowData> operatorFactory =
                        new CollectSinkOperatorFactory<>(
                                rowSerializer, accName, maxBatchSize, socketTimeout);
                final CollectSinkOperator<RowData> collectOperator =
                        (CollectSinkOperator<RowData>) operatorFactory.getOperator();
                // Side effects: publish the result iterator and data-structure converter
                // on this sink instance for later consumption.
                iterator =
                        new CollectResultIterator<>(
                                collectOperator.getOperatorIdFuture(),
                                rowSerializer,
                                accName,
                                ckptConfig);
                converter = context.createDataStructureConverter(consumedDataType);
                converter.open(RuntimeConverter.Context.create(classLoader));
                final CollectStreamSink<RowData> collectSink =
                        new CollectStreamSink<>(stream, operatorFactory);
                ctx.generateUid(COLLECT_TRANSFORMATION).ifPresent(collectSink::uid);
                return collectSink.name("Collect table sink");
            };
}
Also used : CheckpointConfig(org.apache.flink.streaming.api.environment.CheckpointConfig) DataStreamSinkProvider(org.apache.flink.table.connector.sink.DataStreamSinkProvider) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) CollectStreamSink(org.apache.flink.streaming.api.operators.collect.CollectStreamSink) RowData(org.apache.flink.table.data.RowData) CollectSinkOperator(org.apache.flink.streaming.api.operators.collect.CollectSinkOperator) CollectSinkOperatorFactory(org.apache.flink.streaming.api.operators.collect.CollectSinkOperatorFactory)

Aggregations

DataStreamSinkProvider (org.apache.flink.table.connector.sink.DataStreamSinkProvider)5 SinkV2Provider (org.apache.flink.table.connector.sink.SinkV2Provider)4 RowData (org.apache.flink.table.data.RowData)4 Arrays (java.util.Arrays)3 Collections (java.util.Collections)3 StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)3 SinkFunction (org.apache.flink.streaming.api.functions.sink.SinkFunction)3 DataTypes (org.apache.flink.table.api.DataTypes)3 ValidationException (org.apache.flink.table.api.ValidationException)3 DynamicTableSink (org.apache.flink.table.connector.sink.DynamicTableSink)3 SinkProvider (org.apache.flink.table.connector.sink.SinkProvider)3 DynamicTableSource (org.apache.flink.table.connector.source.DynamicTableSource)3 Instant (java.time.Instant)2 ArrayList (java.util.ArrayList)2 Collection (java.util.Collection)2 List (java.util.List)2 ExecutionException (java.util.concurrent.ExecutionException)2 Collectors (java.util.stream.Collectors)2 SourceFunction (org.apache.flink.streaming.api.functions.source.SourceFunction)2 SinkV1Adapter (org.apache.flink.streaming.api.transformations.SinkV1Adapter)2