Use of org.apache.flink.table.planner.plan.nodes.exec.utils.TransformationMetadata in project flink by apache.
From the class StreamExecMatch, method translateOrder:
private Transformation<RowData> translateOrder(
        Transformation<RowData> inputTransform, RowType inputRowType) {
    SortSpec.SortFieldSpec timeOrderField = matchSpec.getOrderKeys().getFieldSpec(0);
    int timeOrderFieldIdx = timeOrderField.getFieldIndex();
    LogicalType timeOrderFieldType = inputRowType.getTypeAt(timeOrderFieldIdx);
    if (TypeCheckUtils.isRowTime(timeOrderFieldType)) {
        // copy the rowtime field into the StreamRecord timestamp field
        int precision = getPrecision(timeOrderFieldType);
        Transformation<RowData> transform =
                ExecNodeUtil.createOneInputTransformation(
                        inputTransform,
                        new TransformationMetadata(
                                createTransformationUid(TIMESTAMP_INSERTER_TRANSFORMATION),
                                "StreamRecordTimestampInserter",
                                String.format(
                                        "StreamRecordTimestampInserter(rowtime field: %s)",
                                        timeOrderFieldIdx)),
                        new StreamRecordTimestampInserter(timeOrderFieldIdx, precision),
                        inputTransform.getOutputType(),
                        inputTransform.getParallelism());
        if (inputsContainSingleton()) {
            transform.setParallelism(1);
            transform.setMaxParallelism(1);
        }
        return transform;
    } else {
        return inputTransform;
    }
}
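Here the metadata (a generated uid, an operator name, and a formatted description) is handed to ExecNodeUtil.createOneInputTransformation, which stamps it onto the new transformation as it is built. The later examples instead apply the metadata after construction via fill. A minimal sketch of that second pattern, using only the constructors and the fill method visible in these examples (the helper name and placeholder parameters are illustrative, not from the Flink sources):

import org.apache.flink.api.dag.Transformation;
import org.apache.flink.table.planner.plan.nodes.exec.utils.TransformationMetadata;

class MetadataSketch {
    // Stamps uid, name and description onto an already-built transformation.
    // fill(...) returns the transformation it was given, so it can be chained.
    static <T> Transformation<T> named(
            Transformation<T> transformation, String uid, String name, String description) {
        return new TransformationMetadata(uid, name, description).fill(transformation);
    }
}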
Use of org.apache.flink.table.planner.plan.nodes.exec.utils.TransformationMetadata in project flink by apache.
From the class ExternalDynamicSink, method getSinkRuntimeProvider:
@Override
public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
    final DynamicTableSink.DataStructureConverter physicalConverter =
            context.createDataStructureConverter(physicalDataType);
    return (TransformationSinkProvider)
            transformationContext -> {
                final Transformation<RowData> input =
                        transformationContext.getInputTransformation();

                final LogicalType physicalType = physicalDataType.getLogicalType();
                final RowData.FieldGetter atomicFieldGetter;
                if (LogicalTypeChecks.isCompositeType(physicalType)) {
                    atomicFieldGetter = null;
                } else {
                    atomicFieldGetter = RowData.createFieldGetter(physicalType, 0);
                }

                TransformationMetadata transformationMeta =
                        transformationContext
                                .generateUid(EXTERNAL_DATASTREAM_TRANSFORMATION)
                                .map(uid -> new TransformationMetadata(
                                        uid, generateOperatorName(), generateOperatorDesc()))
                                .orElseGet(() -> new TransformationMetadata(
                                        generateOperatorName(), generateOperatorDesc()));

                return ExecNodeUtil.createOneInputTransformation(
                        input,
                        transformationMeta,
                        new OutputConversionOperator(
                                atomicFieldGetter,
                                physicalConverter,
                                transformationContext.getRowtimeIndex(),
                                consumeRowtimeMetadata),
                        ExternalTypeInfo.of(physicalDataType),
                        input.getParallelism());
            };
}
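ExternalDynamicSink above and ExternalDynamicSource further down share the same uid handling: generateUid returns an Optional, and the code picks the three-argument constructor when a uid is present and the two-argument one otherwise. Distilled into a standalone helper (hypothetical name, not part of Flink):

import java.util.Optional;
import org.apache.flink.table.planner.plan.nodes.exec.utils.TransformationMetadata;

class MetadataFromOptionalUid {
    // Prefer the uid-carrying constructor when the framework generated a uid.
    static TransformationMetadata of(Optional<String> uid, String name, String description) {
        return uid.map(u -> new TransformationMetadata(u, name, description))
                .orElseGet(() -> new TransformationMetadata(name, description));
    }
}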
Use of org.apache.flink.table.planner.plan.nodes.exec.utils.TransformationMetadata in project flink by apache.
From the class CommonExecTableSourceScan, method translateToPlanInternal:
@Override
protected Transformation<RowData> translateToPlanInternal(
        PlannerBase planner, ExecNodeConfig config) {
    final StreamExecutionEnvironment env = planner.getExecEnv();
    final TransformationMetadata meta =
            createTransformationMeta(SOURCE_TRANSFORMATION, config);
    final InternalTypeInfo<RowData> outputTypeInfo =
            InternalTypeInfo.of((RowType) getOutputType());
    final ScanTableSource tableSource =
            tableSourceSpec.getScanTableSource(planner.getFlinkContext());
    ScanTableSource.ScanRuntimeProvider provider =
            tableSource.getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
    if (provider instanceof SourceFunctionProvider) {
        final SourceFunctionProvider sourceFunctionProvider = (SourceFunctionProvider) provider;
        final SourceFunction<RowData> function = sourceFunctionProvider.createSourceFunction();
        final Transformation<RowData> transformation =
                createSourceFunctionTransformation(
                        env,
                        function,
                        sourceFunctionProvider.isBounded(),
                        meta.getName(),
                        outputTypeInfo);
        return meta.fill(transformation);
    } else if (provider instanceof InputFormatProvider) {
        final InputFormat<RowData, ?> inputFormat =
                ((InputFormatProvider) provider).createInputFormat();
        final Transformation<RowData> transformation =
                createInputFormatTransformation(env, inputFormat, outputTypeInfo, meta.getName());
        return meta.fill(transformation);
    } else if (provider instanceof SourceProvider) {
        final Source<RowData, ?, ?> source = ((SourceProvider) provider).createSource();
        // TODO: Push down watermark strategy to source scan
        final Transformation<RowData> transformation =
                env.fromSource(
                                source,
                                WatermarkStrategy.noWatermarks(),
                                meta.getName(),
                                outputTypeInfo)
                        .getTransformation();
        return meta.fill(transformation);
    } else if (provider instanceof DataStreamScanProvider) {
        Transformation<RowData> transformation =
                ((DataStreamScanProvider) provider)
                        .produceDataStream(createProviderContext(), env)
                        .getTransformation();
        meta.fill(transformation);
        transformation.setOutputType(outputTypeInfo);
        return transformation;
    } else if (provider instanceof TransformationScanProvider) {
        final Transformation<RowData> transformation =
                ((TransformationScanProvider) provider)
                        .createTransformation(createProviderContext());
        meta.fill(transformation);
        transformation.setOutputType(outputTypeInfo);
        return transformation;
    } else {
        throw new UnsupportedOperationException(
                provider.getClass().getSimpleName() + " is unsupported now.");
    }
}
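Note the two application paths in this method: for transformations the planner builds itself (SourceFunctionProvider, InputFormatProvider, SourceProvider), the name is passed in via meta.getName() and the metadata is then filled onto the result, while for provider-built transformations (DataStreamScanProvider, TransformationScanProvider) the planner can only fill the metadata after the fact and must also force the row type with setOutputType.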
Use of org.apache.flink.table.planner.plan.nodes.exec.utils.TransformationMetadata in project flink by apache.
From the class CommonExecSink, method applySinkProvider:
private Transformation<?> applySinkProvider(
        Transformation<RowData> inputTransform,
        StreamExecutionEnvironment env,
        SinkRuntimeProvider runtimeProvider,
        int rowtimeFieldIndex,
        int sinkParallelism,
        ReadableConfig config) {
    TransformationMetadata sinkMeta = createTransformationMeta(SINK_TRANSFORMATION, config);
    if (runtimeProvider instanceof DataStreamSinkProvider) {
        Transformation<RowData> sinkTransformation =
                applyRowtimeTransformation(
                        inputTransform, rowtimeFieldIndex, sinkParallelism, config);
        final DataStream<RowData> dataStream = new DataStream<>(env, sinkTransformation);
        final DataStreamSinkProvider provider = (DataStreamSinkProvider) runtimeProvider;
        return provider.consumeDataStream(createProviderContext(), dataStream).getTransformation();
    } else if (runtimeProvider instanceof TransformationSinkProvider) {
        final TransformationSinkProvider provider =
                (TransformationSinkProvider) runtimeProvider;
        return provider.createTransformation(
                new TransformationSinkProvider.Context() {
                    @Override
                    public Transformation<RowData> getInputTransformation() {
                        return inputTransform;
                    }

                    @Override
                    public int getRowtimeIndex() {
                        return rowtimeFieldIndex;
                    }

                    @Override
                    public Optional<String> generateUid(String name) {
                        return createProviderContext().generateUid(name);
                    }
                });
    } else if (runtimeProvider instanceof SinkFunctionProvider) {
        final SinkFunction<RowData> sinkFunction =
                ((SinkFunctionProvider) runtimeProvider).createSinkFunction();
        return createSinkFunctionTransformation(
                sinkFunction, env, inputTransform, rowtimeFieldIndex, sinkMeta, sinkParallelism);
    } else if (runtimeProvider instanceof OutputFormatProvider) {
        OutputFormat<RowData> outputFormat =
                ((OutputFormatProvider) runtimeProvider).createOutputFormat();
        final SinkFunction<RowData> sinkFunction = new OutputFormatSinkFunction<>(outputFormat);
        return createSinkFunctionTransformation(
                sinkFunction, env, inputTransform, rowtimeFieldIndex, sinkMeta, sinkParallelism);
    } else if (runtimeProvider instanceof SinkProvider) {
        Transformation<RowData> sinkTransformation =
                applyRowtimeTransformation(
                        inputTransform, rowtimeFieldIndex, sinkParallelism, config);
        final DataStream<RowData> dataStream = new DataStream<>(env, sinkTransformation);
        final Transformation<?> transformation =
                DataStreamSink.forSinkV1(dataStream, ((SinkProvider) runtimeProvider).createSink())
                        .getTransformation();
        transformation.setParallelism(sinkParallelism);
        sinkMeta.fill(transformation);
        return transformation;
    } else if (runtimeProvider instanceof SinkV2Provider) {
        Transformation<RowData> sinkTransformation =
                applyRowtimeTransformation(
                        inputTransform, rowtimeFieldIndex, sinkParallelism, config);
        final DataStream<RowData> dataStream = new DataStream<>(env, sinkTransformation);
        final Transformation<?> transformation =
                DataStreamSink.forSink(dataStream, ((SinkV2Provider) runtimeProvider).createSink())
                        .getTransformation();
        transformation.setParallelism(sinkParallelism);
        sinkMeta.fill(transformation);
        return transformation;
    } else {
        throw new TableException("Unsupported sink runtime provider.");
    }
}
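The sinkMeta is consumed in two ways here: the function- and format-based branches hand it to createSinkFunctionTransformation, while the SinkProvider and SinkV2Provider branches fill it onto the DataStreamSink transformation after setting its parallelism. The DataStreamSinkProvider and TransformationSinkProvider branches leave naming to the provider itself, the latter exposing generateUid through its Context.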
Use of org.apache.flink.table.planner.plan.nodes.exec.utils.TransformationMetadata in project flink by apache.
From the class ExternalDynamicSource, method getScanRuntimeProvider:
@Override
public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) {
    final DataStructureConverter physicalConverter =
            runtimeProviderContext.createDataStructureConverter(physicalDataType);
    final Transformation<E> externalTransformation = dataStream.getTransformation();
    final boolean isBounded =
            !isUnboundedSource(externalTransformation)
                    && externalTransformation.getTransitivePredecessors().stream()
                            .noneMatch(this::isUnboundedSource);
    return new TransformationScanProvider() {
        @Override
        public Transformation<RowData> createTransformation(ProviderContext providerContext) {
            return ExecNodeUtil.createOneInputTransformation(
                    externalTransformation,
                    providerContext
                            .generateUid(EXTERNAL_DATASTREAM_TRANSFORMATION)
                            .map(uid -> new TransformationMetadata(
                                    uid, generateOperatorName(), generateOperatorDesc()))
                            .orElseGet(() -> new TransformationMetadata(
                                    generateOperatorName(), generateOperatorDesc())),
                    new InputConversionOperator<>(
                            physicalConverter,
                            !isTopLevelRecord,
                            produceRowtimeMetadata,
                            propagateWatermark,
                            changelogMode.containsOnly(RowKind.INSERT)),
                    // will be filled by the framework
                    null,
                    externalTransformation.getParallelism());
        }

        @Override
        public boolean isBounded() {
            return isBounded;
        }
    };
}