Use of org.apache.flink.table.connector.sink.DynamicTableSink.SinkRuntimeProvider in project flink by apache.
The class CommonExecSink, method deriveSinkParallelism.
/**
 * Returns the parallelism of the sink operator, assuming the sink runtime provider
 * implements {@link ParallelismProvider}. It returns the parallelism defined in
 * {@link ParallelismProvider} if one is provided; otherwise it falls back to the
 * parallelism of the input transformation.
 */
private int deriveSinkParallelism(
        Transformation<RowData> inputTransform, SinkRuntimeProvider runtimeProvider) {
    final int inputParallelism = inputTransform.getParallelism();
    if (!(runtimeProvider instanceof ParallelismProvider)) {
        return inputParallelism;
    }
    final ParallelismProvider parallelismProvider = (ParallelismProvider) runtimeProvider;
    return parallelismProvider
            .getParallelism()
            .map(
                    sinkParallelism -> {
                        if (sinkParallelism <= 0) {
                            throw new TableException(
                                    String.format(
                                            "Invalid configured parallelism %s for table '%s'.",
                                            sinkParallelism,
                                            tableSinkSpec
                                                    .getContextResolvedTable()
                                                    .getIdentifier()
                                                    .asSummaryString()));
                        }
                        return sinkParallelism;
                    })
            .orElse(inputParallelism);
}
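For context on how this hook is fed: a connector only needs to return a runtime provider that also implements ParallelismProvider for deriveSinkParallelism to pick up its value. Below is a minimal, hypothetical sketch (not taken from the Flink sources shown above), assuming SinkFunctionProvider.of(sinkFunction, parallelism) is available as in recent Flink releases; the class name, fields, and the way the parallelism value is obtained are illustrative.

import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.table.connector.ChangelogMode;
import org.apache.flink.table.connector.sink.DynamicTableSink;
import org.apache.flink.table.connector.sink.SinkFunctionProvider;
import org.apache.flink.table.data.RowData;

/** Hypothetical sink that reports a fixed parallelism through its runtime provider. */
public class FixedParallelismTableSink implements DynamicTableSink {

    private final SinkFunction<RowData> sinkFunction;  // e.g. a print or Kafka sink function
    private final Integer configuredParallelism;       // e.g. parsed from a 'sink.parallelism' option

    public FixedParallelismTableSink(
            SinkFunction<RowData> sinkFunction, Integer configuredParallelism) {
        this.sinkFunction = sinkFunction;
        this.configuredParallelism = configuredParallelism;
    }

    @Override
    public ChangelogMode getChangelogMode(ChangelogMode requestedMode) {
        return ChangelogMode.insertOnly();
    }

    @Override
    public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
        // SinkFunctionProvider implements ParallelismProvider, so the value passed here is
        // what deriveSinkParallelism(...) reads back via getParallelism(); passing null
        // falls back to the parallelism of the input transformation.
        return SinkFunctionProvider.of(sinkFunction, configuredParallelism);
    }

    @Override
    public DynamicTableSink copy() {
        return new FixedParallelismTableSink(sinkFunction, configuredParallelism);
    }

    @Override
    public String asSummaryString() {
        return "FixedParallelismTableSink";
    }
}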
Use of org.apache.flink.table.connector.sink.DynamicTableSink.SinkRuntimeProvider in project flink by apache.
The class CommonExecSink, method createSinkTransformation.
@SuppressWarnings("unchecked")
protected Transformation<Object> createSinkTransformation(
        StreamExecutionEnvironment streamExecEnv,
        ReadableConfig config,
        Transformation<RowData> inputTransform,
        DynamicTableSink tableSink,
        int rowtimeFieldIndex,
        boolean upsertMaterialize) {
    final ResolvedSchema schema = tableSinkSpec.getContextResolvedTable().getResolvedSchema();
    final SinkRuntimeProvider runtimeProvider =
            tableSink.getSinkRuntimeProvider(new SinkRuntimeProviderContext(isBounded));
    final RowType physicalRowType = getPhysicalRowType(schema);
    final int[] primaryKeys = getPrimaryKeyIndices(physicalRowType, schema);
    final int sinkParallelism = deriveSinkParallelism(inputTransform, runtimeProvider);
    final int inputParallelism = inputTransform.getParallelism();
    final boolean inputInsertOnly = inputChangelogMode.containsOnly(RowKind.INSERT);
    final boolean hasPk = primaryKeys.length > 0;

    if (!inputInsertOnly && sinkParallelism != inputParallelism && !hasPk) {
        throw new TableException(
                String.format(
                        "The sink for table '%s' has a configured parallelism of %s, while the input parallelism is %s. "
                                + "Since the configured parallelism is different from the input's parallelism and "
                                + "the changelog mode is not insert-only, a primary key is required but could not "
                                + "be found.",
                        tableSinkSpec.getContextResolvedTable().getIdentifier().asSummaryString(),
                        sinkParallelism,
                        inputParallelism));
    }

    // only add materialization if the input contains changes
    final boolean needMaterialization = !inputInsertOnly && upsertMaterialize;

    Transformation<RowData> sinkTransform =
            applyConstraintValidations(inputTransform, config, physicalRowType);
    if (hasPk) {
        sinkTransform = applyKeyBy(
                config, sinkTransform, primaryKeys, sinkParallelism,
                inputParallelism, inputInsertOnly, needMaterialization);
    }
    if (needMaterialization) {
        sinkTransform = applyUpsertMaterialize(
                sinkTransform, primaryKeys, sinkParallelism, config, physicalRowType);
    }
    return (Transformation<Object>) applySinkProvider(
            sinkTransform, streamExecEnv, runtimeProvider,
            rowtimeFieldIndex, sinkParallelism, config);
}
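From the SQL side, the parallelism that reaches this method typically comes from a 'sink.parallelism' connector option, and the primary-key check above corresponds to the rule that an updating result written with a sink parallelism different from the input's must declare a primary key. The following is a rough sketch, assuming the datagen and upsert-kafka connectors are on the classpath; table names, the topic, and all option values are illustrative.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class SinkParallelismExample {
    public static void main(String[] args) {
        TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inStreamingMode());

        // Unbounded source; the GROUP BY below turns its insert-only stream into an updating one.
        tEnv.executeSql(
                "CREATE TABLE src (id BIGINT, v BIGINT) WITH ('connector' = 'datagen')");

        // The PRIMARY KEY lets createSinkTransformation key the stream by the primary key
        // before changing the parallelism; without it, the TableException shown above would
        // be thrown because the changelog is not insert-only.
        tEnv.executeSql(
                "CREATE TABLE sink_table ("
                        + "  id BIGINT,"
                        + "  cnt BIGINT,"
                        + "  PRIMARY KEY (id) NOT ENFORCED"
                        + ") WITH ("
                        + "  'connector' = 'upsert-kafka',"
                        + "  'topic' = 'counts',"
                        + "  'properties.bootstrap.servers' = 'localhost:9092',"
                        + "  'key.format' = 'json',"
                        + "  'value.format' = 'json',"
                        + "  'sink.parallelism' = '4'"
                        + ")");

        tEnv.executeSql("INSERT INTO sink_table SELECT id, COUNT(v) FROM src GROUP BY id");
    }
}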