Use of org.apache.flink.table.connector.sink.SinkV2Provider in project flink by apache.
From the class KinesisDynamicTableSinkFactoryTest, the method testGoodTableSinkForNonPartitionedTableWithProducerOptions:
@Test
public void testGoodTableSinkForNonPartitionedTableWithProducerOptions() {
    ResolvedSchema sinkSchema = defaultSinkSchema();
    Map<String, String> sinkOptions = defaultTableOptionsWithDeprecatedOptions().build();

    // Construct actual DynamicTableSink using FactoryUtil
    KinesisDynamicSink actualSink =
            (KinesisDynamicSink) createTableSink(sinkSchema, sinkOptions);

    // Construct expected DynamicTableSink using factory under test
    KinesisDynamicSink expectedSink =
            (KinesisDynamicSink)
                    new KinesisDynamicSink.KinesisDynamicTableSinkBuilder()
                            .setFailOnError(true)
                            .setMaxBatchSize(100)
                            .setMaxInFlightRequests(100)
                            .setMaxTimeInBufferMS(1000)
                            .setConsumedDataType(sinkSchema.toPhysicalRowDataType())
                            .setStream(STREAM_NAME)
                            .setKinesisClientProperties(defaultProducerProperties())
                            .setEncodingFormat(new TestFormatFactory.EncodingFormatMock(","))
                            .setPartitioner(new RandomKinesisPartitionKeyGenerator<>())
                            .build();

    // Verify that the constructed DynamicTableSink is as expected
    Assertions.assertThat(actualSink).isEqualTo(expectedSink);

    // Verify the produced sink
    DynamicTableSink.SinkRuntimeProvider sinkFunctionProvider =
            actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    Sink<RowData> sinkFunction = ((SinkV2Provider) sinkFunctionProvider).createSink();
    Assertions.assertThat(sinkFunction).isInstanceOf(KinesisDataStreamsSink.class);
}
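For context, the SinkV2Provider that the test unwraps is what a connector's DynamicTableSink hands back from getSinkRuntimeProvider. The following is a minimal, hypothetical sketch (MyDynamicSink and wrappedSink are illustrative names, not part of the Flink sources) of a table sink that exposes an existing Sink V2 implementation through SinkV2Provider.of(...):

import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkV2Provider;
import org.apache.flink.table.data.RowData;

/** Hypothetical table sink that wraps a pre-built Sink V2 instance (sketch only). */
public class MyDynamicSink implements DynamicTableSink {

    private final Sink<RowData> wrappedSink; // assumed to be constructed elsewhere

    public MyDynamicSink(Sink<RowData> wrappedSink) {
        this.wrappedSink = wrappedSink;
    }

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        // Keep the sketch simple: accept only insert-only input.
        return ChangelogMode.insertOnly();
    }

    @Override
    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
        // SinkV2Provider.of(...) wraps the unified Sink V2 instance; the planner
        // (or a test like the one above) later unwraps it via createSink().
        return SinkV2Provider.of(wrappedSink);
    }

    @Override
    public DynamicTableSink copy() {
        return new MyDynamicSink(wrappedSink);
    }

    @Override
    public String asSummaryString() {
        return "MyDynamicSink";
    }
}

The cast ((SinkV2Provider) sinkFunctionProvider).createSink() in the test above is the consumer side of exactly this contract.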
Use of org.apache.flink.table.connector.sink.SinkV2Provider in project flink by apache.
From the class ElasticsearchDynamicSinkBaseITCase, the method testWritingDocuments:
@Test
public void testWritingDocuments() throws Exception {
    ResolvedSchema schema =
            new ResolvedSchema(
                    Arrays.asList(
                            Column.physical("a", DataTypes.BIGINT().notNull()),
                            Column.physical("b", DataTypes.TIME()),
                            Column.physical("c", DataTypes.STRING().notNull()),
                            Column.physical("d", DataTypes.FLOAT()),
                            Column.physical("e", DataTypes.TINYINT().notNull()),
                            Column.physical("f", DataTypes.DATE()),
                            Column.physical("g", DataTypes.TIMESTAMP().notNull())),
                    Collections.emptyList(),
                    UniqueConstraint.primaryKey("name", Arrays.asList("a", "g")));
    GenericRowData rowData =
            GenericRowData.of(
                    1L,
                    12345,
                    StringData.fromString("ABCDE"),
                    12.12f,
                    (byte) 2,
                    12345,
                    TimestampData.fromLocalDateTime(LocalDateTime.parse("2012-12-12T12:12:12")));

    String index = "writing-documents";
    ElasticsearchDynamicSinkFactoryBase sinkFactory = getDynamicSinkFactory();

    DynamicTableSink.SinkRuntimeProvider runtimeProvider =
            sinkFactory
                    .createDynamicTableSink(getPrefilledTestContext(index).withSchema(schema).build())
                    .getSinkRuntimeProvider(new ElasticsearchUtil.MockContext());
    final SinkV2Provider sinkProvider = (SinkV2Provider) runtimeProvider;
    final Sink<RowData> sink = sinkProvider.createSink();

    StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
    environment.setParallelism(4);
    rowData.setRowKind(RowKind.UPDATE_AFTER);
    environment.<RowData>fromElements(rowData).sinkTo(sink);
    environment.execute();

    RestHighLevelClient client = getClient();
    Map<String, Object> response = makeGetRequest(client, index, "1_2012-12-12T12:12:12");
    Map<Object, Object> expectedMap = new HashMap<>();
    expectedMap.put("a", 1);
    expectedMap.put("b", "00:00:12");
    expectedMap.put("c", "ABCDE");
    expectedMap.put("d", 12.12d);
    expectedMap.put("e", 2);
    expectedMap.put("f", "2003-10-20");
    expectedMap.put("g", "2012-12-12 12:12:12");
    Assertions.assertEquals(response, expectedMap);
}
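The unwrap-and-run pattern used in this test can be isolated into a small helper. Below is a hedged sketch (SinkV2ProviderSmokeTest and writeOneRow are illustrative names, not from the Flink sources) that casts the runtime provider to SinkV2Provider, obtains the Sink, and drives a single row through it with the DataStream API, mirroring what testWritingDocuments does before querying Elasticsearch:

import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkV2Provider;
import org.apache.flink.table.data.RowData;

/** Hypothetical helper: write one row through a table sink that exposes a Sink V2 (sketch only). */
public final class SinkV2ProviderSmokeTest {

    static void writeOneRow(DynamicTableSink tableSink,
                            DynamicTableSink.Context context,
                            RowData row) throws Exception {
        // Ask the table sink for its runtime provider and unwrap the Sink V2 instance,
        // using the same cast as the tests above.
        DynamicTableSink.SinkRuntimeProvider provider = tableSink.getSinkRuntimeProvider(context);
        Sink<RowData> sink = ((SinkV2Provider) provider).createSink();

        // Feed a single element through the DataStream API and run the job.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.<RowData>fromElements(row).sinkTo(sink);
        env.execute("sink-v2-provider-smoke-test");
    }
}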
Use of org.apache.flink.table.connector.sink.SinkV2Provider in project flink by apache.
From the class CommonExecSink, the method applySinkProvider:
private Transformation<?> applySinkProvider(
        Transformation<RowData> inputTransform,
        StreamExecutionEnvironment env,
        SinkRuntimeProvider runtimeProvider,
        int rowtimeFieldIndex,
        int sinkParallelism,
        ReadableConfig config) {
    TransformationMetadata sinkMeta = createTransformationMeta(SINK_TRANSFORMATION, config);
    if (runtimeProvider instanceof DataStreamSinkProvider) {
        Transformation<RowData> sinkTransformation =
                applyRowtimeTransformation(inputTransform, rowtimeFieldIndex, sinkParallelism, config);
        final DataStream<RowData> dataStream = new DataStream<>(env, sinkTransformation);
        final DataStreamSinkProvider provider = (DataStreamSinkProvider) runtimeProvider;
        return provider.consumeDataStream(createProviderContext(), dataStream).getTransformation();
    } else if (runtimeProvider instanceof TransformationSinkProvider) {
        final TransformationSinkProvider provider = (TransformationSinkProvider) runtimeProvider;
        return provider.createTransformation(
                new TransformationSinkProvider.Context() {
                    @Override
                    public Transformation<RowData> getInputTransformation() {
                        return inputTransform;
                    }

                    @Override
                    public int getRowtimeIndex() {
                        return rowtimeFieldIndex;
                    }

                    @Override
                    public Optional<String> generateUid(String name) {
                        return createProviderContext().generateUid(name);
                    }
                });
    } else if (runtimeProvider instanceof SinkFunctionProvider) {
        final SinkFunction<RowData> sinkFunction =
                ((SinkFunctionProvider) runtimeProvider).createSinkFunction();
        return createSinkFunctionTransformation(
                sinkFunction, env, inputTransform, rowtimeFieldIndex, sinkMeta, sinkParallelism);
    } else if (runtimeProvider instanceof OutputFormatProvider) {
        OutputFormat<RowData> outputFormat =
                ((OutputFormatProvider) runtimeProvider).createOutputFormat();
        final SinkFunction<RowData> sinkFunction = new OutputFormatSinkFunction<>(outputFormat);
        return createSinkFunctionTransformation(
                sinkFunction, env, inputTransform, rowtimeFieldIndex, sinkMeta, sinkParallelism);
    } else if (runtimeProvider instanceof SinkProvider) {
        Transformation<RowData> sinkTransformation =
                applyRowtimeTransformation(inputTransform, rowtimeFieldIndex, sinkParallelism, config);
        final DataStream<RowData> dataStream = new DataStream<>(env, sinkTransformation);
        final Transformation<?> transformation =
                DataStreamSink.forSinkV1(dataStream, ((SinkProvider) runtimeProvider).createSink())
                        .getTransformation();
        transformation.setParallelism(sinkParallelism);
        sinkMeta.fill(transformation);
        return transformation;
    } else if (runtimeProvider instanceof SinkV2Provider) {
        Transformation<RowData> sinkTransformation =
                applyRowtimeTransformation(inputTransform, rowtimeFieldIndex, sinkParallelism, config);
        final DataStream<RowData> dataStream = new DataStream<>(env, sinkTransformation);
        final Transformation<?> transformation =
                DataStreamSink.forSink(dataStream, ((SinkV2Provider) runtimeProvider).createSink())
                        .getTransformation();
        transformation.setParallelism(sinkParallelism);
        sinkMeta.fill(transformation);
        return transformation;
    } else {
        throw new TableException("Unsupported sink runtime provider.");
    }
}
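The SinkV2Provider branch above boils down to three steps: unwrap the Sink V2 instance, attach it downstream of the (possibly rowtime-adjusted) input transformation, and pin the sink parallelism. A condensed sketch of that step using only public DataStream API calls is shown below; the class and method names are illustrative, and the planner itself uses the internal DataStreamSink.forSink factory as in the code above rather than DataStream#sinkTo:

import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.api.dag.Transformation;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.table.connector.sink.SinkV2Provider;
import org.apache.flink.table.data.RowData;

/** Hypothetical condensed translation of the SinkV2Provider branch (sketch only). */
final class SinkV2Translation {

    static Transformation<?> translate(DataStream<RowData> upstream,
                                       SinkV2Provider provider,
                                       int sinkParallelism) {
        // Unwrap the Sink V2 instance supplied by the connector ...
        Sink<RowData> sink = provider.createSink();

        // ... attach it to the stream and pin the parallelism, as the planner does
        // via transformation.setParallelism(sinkParallelism) in the method above.
        return upstream.sinkTo(sink)
                .setParallelism(sinkParallelism)
                .getTransformation();
    }
}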