Use of org.apache.flink.streaming.api.functions.sink.SinkFunction in project flink by apache.
From the class CommonExecSink, method applySinkProvider:
private Transformation<?> applySinkProvider(
        Transformation<RowData> inputTransform,
        StreamExecutionEnvironment env,
        SinkRuntimeProvider runtimeProvider,
        int rowtimeFieldIndex,
        int sinkParallelism,
        ReadableConfig config) {
    TransformationMetadata sinkMeta = createTransformationMeta(SINK_TRANSFORMATION, config);
    if (runtimeProvider instanceof DataStreamSinkProvider) {
        Transformation<RowData> sinkTransformation =
                applyRowtimeTransformation(
                        inputTransform, rowtimeFieldIndex, sinkParallelism, config);
        final DataStream<RowData> dataStream = new DataStream<>(env, sinkTransformation);
        final DataStreamSinkProvider provider = (DataStreamSinkProvider) runtimeProvider;
        return provider.consumeDataStream(createProviderContext(), dataStream)
                .getTransformation();
    } else if (runtimeProvider instanceof TransformationSinkProvider) {
        final TransformationSinkProvider provider = (TransformationSinkProvider) runtimeProvider;
        return provider.createTransformation(
                new TransformationSinkProvider.Context() {
                    @Override
                    public Transformation<RowData> getInputTransformation() {
                        return inputTransform;
                    }

                    @Override
                    public int getRowtimeIndex() {
                        return rowtimeFieldIndex;
                    }

                    @Override
                    public Optional<String> generateUid(String name) {
                        return createProviderContext().generateUid(name);
                    }
                });
    } else if (runtimeProvider instanceof SinkFunctionProvider) {
        final SinkFunction<RowData> sinkFunction =
                ((SinkFunctionProvider) runtimeProvider).createSinkFunction();
        return createSinkFunctionTransformation(
                sinkFunction, env, inputTransform, rowtimeFieldIndex, sinkMeta, sinkParallelism);
    } else if (runtimeProvider instanceof OutputFormatProvider) {
        OutputFormat<RowData> outputFormat =
                ((OutputFormatProvider) runtimeProvider).createOutputFormat();
        final SinkFunction<RowData> sinkFunction = new OutputFormatSinkFunction<>(outputFormat);
        return createSinkFunctionTransformation(
                sinkFunction, env, inputTransform, rowtimeFieldIndex, sinkMeta, sinkParallelism);
    } else if (runtimeProvider instanceof SinkProvider) {
        Transformation<RowData> sinkTransformation =
                applyRowtimeTransformation(
                        inputTransform, rowtimeFieldIndex, sinkParallelism, config);
        final DataStream<RowData> dataStream = new DataStream<>(env, sinkTransformation);
        final Transformation<?> transformation =
                DataStreamSink.forSinkV1(dataStream, ((SinkProvider) runtimeProvider).createSink())
                        .getTransformation();
        transformation.setParallelism(sinkParallelism);
        sinkMeta.fill(transformation);
        return transformation;
    } else if (runtimeProvider instanceof SinkV2Provider) {
        Transformation<RowData> sinkTransformation =
                applyRowtimeTransformation(
                        inputTransform, rowtimeFieldIndex, sinkParallelism, config);
        final DataStream<RowData> dataStream = new DataStream<>(env, sinkTransformation);
        final Transformation<?> transformation =
                DataStreamSink.forSink(dataStream, ((SinkV2Provider) runtimeProvider).createSink())
                        .getTransformation();
        transformation.setParallelism(sinkParallelism);
        sinkMeta.fill(transformation);
        return transformation;
    } else {
        throw new TableException("Unsupported sink runtime provider.");
    }
}
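The dispatch above covers every SinkRuntimeProvider subtype the planner understands. For context, here is a minimal sketch of a DynamicTableSink that would take the SinkFunctionProvider branch; the class name PrintingTableSink is hypothetical, while SinkFunctionProvider.of and PrintSinkFunction are existing Flink APIs:

import org.apache.flink.streaming.api.functions.sink.PrintSinkFunction;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.data.RowData;

/** Hypothetical table sink that prints rows; exercises the SinkFunctionProvider branch. */
public class PrintingTableSink implements DynamicTableSink {

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        return ChangelogMode.insertOnly();
    }

    @Override
    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
        // applySinkProvider unwraps this via createSinkFunction() and builds a sink
        // transformation around the wrapped SinkFunction<RowData>
        return SinkFunctionProvider.of(new PrintSinkFunction<RowData>());
    }

    @Override
    public DynamicTableSink copy() {
        return new PrintingTableSink();
    }

    @Override
    public String asSummaryString() {
        return "printing-sink";
    }
}

SinkFunctionProvider is the simplest of the provider types: the planner only needs createSinkFunction() to wrap the function into a sink transformation, as createSinkFunctionTransformation does above.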
Use of org.apache.flink.streaming.api.functions.sink.SinkFunction in project flink by apache.
From the class CommonExecSinkITCase, method testFromValuesWatermarkPropagation:
@Test
public void testFromValuesWatermarkPropagation() throws Exception {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final SharedReference<List<Long>> watermarks = sharedObjects.add(new ArrayList<>());
    final SinkFunction<RowData> sinkFunction =
            new SinkFunction<RowData>() {
                @Override
                public void writeWatermark(
                        org.apache.flink.api.common.eventtime.Watermark watermark) {
                    addElement(watermarks, watermark.getTimestamp());
                }
            };
    final TableDescriptor sinkDescriptor =
            TableFactoryHarness.newBuilder()
                    .sink(
                            new TableFactoryHarness.SinkBase() {
                                @Override
                                public DataStreamSinkProvider getSinkRuntimeProvider(
                                        DynamicTableSink.Context context) {
                                    return (providerContext, dataStream) ->
                                            dataStream.addSink(sinkFunction);
                                }
                            })
                    .build();
    final Table source =
            tableEnv.fromValues(
                    DataTypes.ROW(DataTypes.FIELD("a", DataTypes.INT())),
                    Row.of(1), Row.of(2), Row.of(3));
    source.executeInsert(sinkDescriptor).await();
    assertThat(watermarks.get().size()).isEqualTo(env.getParallelism());
    for (Long watermark : watermarks.get()) {
        assertThat(watermark).isEqualTo(Watermark.MAX_WATERMARK.getTimestamp());
    }
}
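The test works because SinkFunction exposes a default writeWatermark hook, so a sink can observe watermarks without implementing the full Sink interface. The same hook fires in a plain DataStream job; a minimal sketch, where the monotonous-timestamp watermark strategy and the printing sink body are illustrative assumptions:

import org.apache.flink.api.common.eventtime.Watermark;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;

public class WatermarkSinkExample {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.fromElements(1L, 2L, 3L)
                // derive ascending event timestamps from the element values themselves
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<Long>forMonotonousTimestamps()
                                .withTimestampAssigner((value, ts) -> value))
                .addSink(
                        new SinkFunction<Long>() {
                            @Override
                            public void invoke(Long value, Context context) {
                                System.out.println("element: " + value);
                            }

                            @Override
                            public void writeWatermark(Watermark watermark) {
                                // called for every watermark that reaches the sink; a bounded
                                // job ends with Long.MAX_VALUE (MAX_WATERMARK), which is what
                                // the test above asserts once per parallel sink instance
                                System.out.println("watermark: " + watermark.getTimestamp());
                            }
                        });
        env.execute("watermark-sink-example");
    }
}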
Use of org.apache.flink.streaming.api.functions.sink.SinkFunction in project flink by apache.
From the class CommonExecSinkITCase, method testStreamRecordTimestampInserterDataStreamSinkProvider:
@Test
public void testStreamRecordTimestampInserterDataStreamSinkProvider()
        throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final SharedReference<List<Long>> timestamps = sharedObjects.add(new ArrayList<>());
    final List<Row> rows =
            Arrays.asList(
                    Row.of(1, "foo", Instant.parse("2020-11-10T11:34:56.123Z")),
                    Row.of(2, "foo", Instant.parse("2020-11-10T12:34:56.789Z")),
                    Row.of(3, "foo", Instant.parse("2020-11-11T10:11:22.777Z")),
                    Row.of(4, "foo", Instant.parse("2020-11-11T10:11:23.888Z")));
    final SinkFunction<RowData> sinkFunction =
            new SinkFunction<RowData>() {
                @Override
                public void invoke(RowData value, Context context) {
                    addElement(timestamps, context.timestamp());
                }
            };
    final TableDescriptor sourceDescriptor =
            TableFactoryHarness.newBuilder()
                    .schema(schemaStreamRecordTimestampInserter(true))
                    .source(new TestSource(rows))
                    .sink(
                            new TableFactoryHarness.SinkBase() {
                                @Override
                                public DataStreamSinkProvider getSinkRuntimeProvider(
                                        DynamicTableSink.Context context) {
                                    return (providerContext, dataStream) ->
                                            dataStream.addSink(sinkFunction);
                                }
                            })
                    .build();
    tableEnv.createTable("T1", sourceDescriptor);
    final String sqlStmt = "INSERT INTO T1 SELECT * FROM T1";
    assertPlan(tableEnv, sqlStmt, true);
    tableEnv.executeSql(sqlStmt).await();
    Collections.sort(timestamps.get());
    assertTimestampResults(timestamps, rows);
}
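The helper schemaStreamRecordTimestampInserter(true) is not shown in this listing; what matters is that the sink table declares a rowtime attribute, which makes the planner insert a StreamRecordTimestampInserter so that context.timestamp() in the sink sees each row's event time. A hypothetical equivalent schema, assuming a watermark declared on the ts column:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;

final class SchemaSketch {
    // Hypothetical stand-in for schemaStreamRecordTimestampInserter(true): the watermark
    // declaration makes ts a rowtime attribute, so the planner adds a
    // StreamRecordTimestampInserter that copies ts into each StreamRecord's timestamp.
    static Schema rowtimeSchema() {
        return Schema.newBuilder()
                .column("a", DataTypes.INT())
                .column("b", DataTypes.STRING())
                .column("ts", DataTypes.TIMESTAMP_LTZ(3))
                .watermark("ts", "ts - INTERVAL '5' SECOND")
                .build();
    }
}

Without the watermark declaration there is no time attribute, the planner adds no timestamp inserter, and context.timestamp() in the sink would have nothing to report.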