Use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.
From the class KinesisDynamicTableSinkFactoryTest, method testGoodTableSinkForNonPartitionedTableWithProducerOptions:
@Test
public void testGoodTableSinkForNonPartitionedTableWithProducerOptions() {
    ResolvedSchema sinkSchema = defaultSinkSchema();
    Map<String, String> sinkOptions = defaultTableOptionsWithDeprecatedOptions().build();
    // Construct actual DynamicTableSink using FactoryUtil
    KinesisDynamicSink actualSink = (KinesisDynamicSink) createTableSink(sinkSchema, sinkOptions);
    // Construct expected DynamicTableSink using factory under test
    KinesisDynamicSink expectedSink =
            (KinesisDynamicSink)
                    new KinesisDynamicSink.KinesisDynamicTableSinkBuilder()
                            .setFailOnError(true)
                            .setMaxBatchSize(100)
                            .setMaxInFlightRequests(100)
                            .setMaxTimeInBufferMS(1000)
                            .setConsumedDataType(sinkSchema.toPhysicalRowDataType())
                            .setStream(STREAM_NAME)
                            .setKinesisClientProperties(defaultProducerProperties())
                            .setEncodingFormat(new TestFormatFactory.EncodingFormatMock(","))
                            .setPartitioner(new RandomKinesisPartitionKeyGenerator<>())
                            .build();
    // verify that the constructed DynamicTableSink is as expected
    Assertions.assertThat(actualSink).isEqualTo(expectedSink);
    // verify the produced sink
    DynamicTableSink.SinkRuntimeProvider sinkFunctionProvider =
            actualSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
    Sink<RowData> sinkFunction = ((SinkV2Provider) sinkFunctionProvider).createSink();
    Assertions.assertThat(sinkFunction).isInstanceOf(KinesisDataStreamsSink.class);
}
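The SinkRuntimeProviderContext usage exercised by the assertions above reduces to a few lines. The following is a minimal sketch, not Flink test code: the class and method names (SinkRuntimeProviderSketch, createRuntimeSink) are illustrative, and it assumes the table sink exposes its runtime implementation through SinkV2Provider, as KinesisDynamicSink does in the test above; the imports correspond to the classes used in that test.

import org.apache.flink.api.connector.sink2.Sink;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkV2Provider;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext;

/** Minimal sketch: obtain the runtime Sink produced by a DynamicTableSink in a test. */
final class SinkRuntimeProviderSketch {
    static Sink<RowData> createRuntimeSink(DynamicTableSink tableSink) {
        // The boolean declares whether the input is bounded; the test above passes false,
        // i.e. an unbounded (streaming) input.
        DynamicTableSink.SinkRuntimeProvider provider =
                tableSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(false));
        // Assumes the provider is a SinkV2Provider; other sinks may return a different provider type.
        return ((SinkV2Provider) provider).createSink();
    }
}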
Use of org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext in project flink by apache.
From the class CommonExecSink, method createSinkTransformation:
@SuppressWarnings("unchecked")
protected Transformation<Object> createSinkTransformation(
        StreamExecutionEnvironment streamExecEnv,
        ReadableConfig config,
        Transformation<RowData> inputTransform,
        DynamicTableSink tableSink,
        int rowtimeFieldIndex,
        boolean upsertMaterialize) {
    final ResolvedSchema schema = tableSinkSpec.getContextResolvedTable().getResolvedSchema();
    final SinkRuntimeProvider runtimeProvider =
            tableSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(isBounded));
    final RowType physicalRowType = getPhysicalRowType(schema);
    final int[] primaryKeys = getPrimaryKeyIndices(physicalRowType, schema);
    final int sinkParallelism = deriveSinkParallelism(inputTransform, runtimeProvider);
    final int inputParallelism = inputTransform.getParallelism();
    final boolean inputInsertOnly = inputChangelogMode.containsOnly(RowKind.INSERT);
    final boolean hasPk = primaryKeys.length > 0;
    if (!inputInsertOnly && sinkParallelism != inputParallelism && !hasPk) {
        throw new TableException(
                String.format(
                        "The sink for table '%s' has a configured parallelism of %s, while the input parallelism is %s. "
                                + "Since the configured parallelism is different from the input's parallelism and "
                                + "the changelog mode is not insert-only, a primary key is required but could not "
                                + "be found.",
                        tableSinkSpec.getContextResolvedTable().getIdentifier().asSummaryString(),
                        sinkParallelism,
                        inputParallelism));
    }
    // Only add materialization if the input contains changes.
    final boolean needMaterialization = !inputInsertOnly && upsertMaterialize;
    Transformation<RowData> sinkTransform =
            applyConstraintValidations(inputTransform, config, physicalRowType);
    if (hasPk) {
        sinkTransform =
                applyKeyBy(
                        config, sinkTransform, primaryKeys, sinkParallelism,
                        inputParallelism, inputInsertOnly, needMaterialization);
    }
    if (needMaterialization) {
        sinkTransform =
                applyUpsertMaterialize(
                        sinkTransform, primaryKeys, sinkParallelism, config, physicalRowType);
    }
    return (Transformation<Object>)
            applySinkProvider(
                    sinkTransform, streamExecEnv, runtimeProvider,
                    rowtimeFieldIndex, sinkParallelism, config);
}
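In this planner method, the only interaction with SinkRuntimeProviderContext is the isBounded flag handed to the sink. A hedged sketch of that hand-off follows; the class and method names (ProviderRequestSketch, requestProvider) are illustrative, while tableSink and isBounded correspond to the parameter and field used in the snippet above.

import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.DynamicTableSink.SinkRuntimeProvider;
import org.apache.flink.table.runtime.connector.sink.SinkRuntimeProviderContext;

/** Sketch: the planner tells the sink whether its input is bounded (batch) or unbounded (streaming). */
final class ProviderRequestSketch {
    static SinkRuntimeProvider requestProvider(DynamicTableSink tableSink, boolean isBounded) {
        // isBounded = true for bounded (batch) pipelines, false for streaming pipelines,
        // mirroring new SinkRuntimeProviderContext(isBounded) in createSinkTransformation above.
        return tableSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(isBounded));
    }
}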