Use of org.apache.flink.table.planner.plan.nodes.exec.ExecEdge in the Apache Flink project: class MultipleInputNodeCreationProcessor, method wrapExecNodes.
// --------------------------------------------------------------------------------
// Wrapping and Sorting
// --------------------------------------------------------------------------------
/**
 * Wraps every {@link ExecNode} reachable from the given root nodes into an
 * {@link ExecNodeWrapper}, links the wrappers along the existing input edges
 * (both directions: inputs and outputs), and returns the wrappers of the roots.
 *
 * @param rootNodes the root exec nodes of the plan
 * @return the wrappers corresponding to {@code rootNodes}, in the same order
 */
private List<ExecNodeWrapper> wrapExecNodes(List<ExecNode<?>> rootNodes) {
    final Map<ExecNode<?>, ExecNodeWrapper> wrapperMap = new HashMap<>();
    final AbstractExecNodeExactlyOnceVisitor visitor =
            new AbstractExecNodeExactlyOnceVisitor() {
                @Override
                protected void visitNode(ExecNode<?> node) {
                    final ExecNodeWrapper nodeWrapper =
                            wrapperMap.computeIfAbsent(node, ignored -> new ExecNodeWrapper(node));
                    // wire this wrapper to the wrappers of all its edge sources
                    node.getInputEdges().forEach(inputEdge -> {
                        final ExecNode<?> source = inputEdge.getSource();
                        final ExecNodeWrapper sourceWrapper =
                                wrapperMap.computeIfAbsent(
                                        source, ignored -> new ExecNodeWrapper(source));
                        nodeWrapper.inputs.add(sourceWrapper);
                        sourceWrapper.outputs.add(nodeWrapper);
                    });
                    visitInputs(node);
                }
            };
    for (ExecNode<?> rootNode : rootNodes) {
        rootNode.accept(visitor);
    }
    final List<ExecNodeWrapper> rootWrappers = new ArrayList<>(rootNodes.size());
    for (ExecNode<?> rootNode : rootNodes) {
        rootWrappers.add(
                Preconditions.checkNotNull(
                        wrapperMap.get(rootNode), "Root node is not wrapped. This is a bug."));
    }
    return rootWrappers;
}
Use of org.apache.flink.table.planner.plan.nodes.exec.ExecEdge in the Apache Flink project: class MultipleInputNodeCreationProcessor, method createMultipleInputNode.
/**
 * Creates the multiple input node for the given group. Gathers, for every group member,
 * the inputs that come from OUTSIDE the group (as (node, input property, edge) triples)
 * and delegates to the stream or batch factory depending on {@code isStreaming}.
 *
 * @param group the group of exec nodes to be merged into one multiple input node
 * @param visitedMap mapping from wrapper to the already-created replacement node
 * @return the newly created multiple input node
 */
private ExecNode<?> createMultipleInputNode(
        MultipleInputGroup group, Map<ExecNodeWrapper, ExecNode<?>> visitedMap) {
    // calculate the inputs of the multiple input node
    final List<Tuple3<ExecNode<?>, InputProperty, ExecEdge>> inputs = new ArrayList<>();
    for (ExecNodeWrapper member : group.members) {
        for (int i = 0; i < member.inputs.size(); i++) {
            final ExecNodeWrapper memberInput = member.inputs.get(i);
            // edges between two members stay internal to the multiple input node
            if (!group.members.contains(memberInput)) {
                Preconditions.checkState(
                        visitedMap.containsKey(memberInput),
                        "Input of a multiple input member is not visited. This is a bug.");
                inputs.add(
                        Tuple3.of(
                                visitedMap.get(memberInput),
                                member.execNode.getInputProperties().get(i),
                                member.execNode.getInputEdges().get(i)));
            }
        }
    }
    return isStreaming
            ? createStreamMultipleInputNode(group, inputs)
            : createBatchMultipleInputNode(group, inputs);
}
Use of org.apache.flink.table.planner.plan.nodes.exec.ExecEdge in the Apache Flink project: class MultipleInputNodeCreationProcessor, method getMultipleInputNode.
/**
 * Returns the (possibly multiple-input) replacement node for the given wrapper,
 * creating it on first visit. Recurses bottom-up: all inputs are resolved and the
 * wrapper's exec node is rewired to them before the wrapper itself is resolved.
 * Results are memoized in {@code visitedMap}.
 *
 * @param wrapper the wrapper to resolve
 * @param visitedMap memoization map from wrapper to its replacement node
 * @return the replacement node for {@code wrapper}
 */
private ExecNode<?> getMultipleInputNode(
        ExecNodeWrapper wrapper, Map<ExecNodeWrapper, ExecNode<?>> visitedMap) {
    if (visitedMap.containsKey(wrapper)) {
        return visitedMap.get(wrapper);
    }
    // resolve all inputs first and rewire this node's edges to the resolved nodes
    final int inputCount = wrapper.inputs.size();
    for (int inputId = 0; inputId < inputCount; inputId++) {
        final ExecNode<?> resolvedInput =
                getMultipleInputNode(wrapper.inputs.get(inputId), visitedMap);
        wrapper.execNode.replaceInputEdge(
                inputId,
                ExecEdge.builder().source(resolvedInput).target(wrapper.execNode).build());
    }
    // only the root of a group is replaced by a multiple input node;
    // other members (and ungrouped nodes) keep their own exec node
    final boolean isGroupRoot = wrapper.group != null && wrapper == wrapper.group.root;
    final ExecNode<?> result =
            isGroupRoot ? createMultipleInputNode(wrapper.group, visitedMap) : wrapper.execNode;
    visitedMap.put(wrapper, result);
    return result;
}
Use of org.apache.flink.table.planner.plan.nodes.exec.ExecEdge in the Apache Flink project: class InputPriorityConflictResolver, method resolveInputPriorityConflict.
@Override
protected void resolveInputPriorityConflict(ExecNode<?> node, int higherInput, int lowerInput) {
ExecNode<?> higherNode = node.getInputEdges().get(higherInput).getSource();
ExecNode<?> lowerNode = node.getInputEdges().get(lowerInput).getSource();
final ExecNode<?> newNode;
if (lowerNode instanceof BatchExecExchange) {
BatchExecExchange exchange = (BatchExecExchange) lowerNode;
InputProperty inputEdge = exchange.getInputProperties().get(0);
InputProperty inputProperty = InputProperty.builder().requiredDistribution(inputEdge.getRequiredDistribution()).priority(inputEdge.getPriority()).damBehavior(getDamBehavior()).build();
if (isConflictCausedByExchange(higherNode, exchange)) {
// special case: if exchange is exactly the reuse node,
// we should split it into two nodes
BatchExecExchange newExchange = new BatchExecExchange(inputProperty, (RowType) exchange.getOutputType(), "Exchange");
newExchange.setRequiredExchangeMode(exchangeMode);
newExchange.setInputEdges(exchange.getInputEdges());
newNode = newExchange;
} else {
// create new BatchExecExchange with new inputProperty
BatchExecExchange newExchange = new BatchExecExchange(inputProperty, (RowType) exchange.getOutputType(), exchange.getDescription());
newExchange.setRequiredExchangeMode(exchangeMode);
newExchange.setInputEdges(exchange.getInputEdges());
newNode = newExchange;
}
} else {
newNode = createExchange(node, lowerInput);
}
ExecEdge newEdge = ExecEdge.builder().source(newNode).target(node).build();
node.replaceInputEdge(lowerInput, newEdge);
}
Use of org.apache.flink.table.planner.plan.nodes.exec.ExecEdge in the Apache Flink project: class CommonExecLookupJoin, method translateToPlanInternal.
@Override
@SuppressWarnings("unchecked")
/**
 * Translates this lookup join node into a one-input {@link Transformation}.
 * Resolves and validates the temporal table, derives the input/source/result row
 * types, builds either an async or a sync lookup-join operator factory (depending
 * on whether the resolved lookup function is an {@link AsyncTableFunction}), and
 * wires it onto the translated input transformation.
 *
 * @param planner the planner driving the translation
 * @param config the per-node configuration
 * @return the transformation producing the joined rows
 */
@Override
@SuppressWarnings("unchecked")
public Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    final RelOptTable temporalTable =
            temporalTableSourceSpec.getTemporalTable(planner.getFlinkContext());
    // validate whether the node is valid and supported.
    validate(temporalTable);

    final ExecEdge inputEdge = getInputEdges().get(0);
    final RowType inputRowType = (RowType) inputEdge.getOutputType();
    final RowType tableSourceRowType =
            FlinkTypeFactory.toLogicalRowType(temporalTable.getRowType());
    final RowType resultRowType = (RowType) getOutputType();
    validateLookupKeyType(lookupKeys, inputRowType, tableSourceRowType);

    final UserDefinedFunction lookupFunction =
            LookupJoinUtil.getLookupFunction(temporalTable, lookupKeys.keySet());
    UserDefinedFunctionHelper.prepareInstance(config, lookupFunction);

    // async lookup is used exactly when the resolved function is an AsyncTableFunction
    final boolean isAsyncEnabled = lookupFunction instanceof AsyncTableFunction;
    final boolean isLeftOuterJoin = joinType == FlinkJoinType.LEFT;

    final StreamOperatorFactory<RowData> operatorFactory;
    if (isAsyncEnabled) {
        operatorFactory =
                createAsyncLookupJoin(
                        temporalTable,
                        config,
                        lookupKeys,
                        (AsyncTableFunction<Object>) lookupFunction,
                        planner.getRelBuilder(),
                        inputRowType,
                        tableSourceRowType,
                        resultRowType,
                        isLeftOuterJoin);
    } else {
        operatorFactory =
                createSyncLookupJoin(
                        temporalTable,
                        config,
                        lookupKeys,
                        (TableFunction<Object>) lookupFunction,
                        planner.getRelBuilder(),
                        inputRowType,
                        tableSourceRowType,
                        resultRowType,
                        isLeftOuterJoin,
                        planner.getExecEnv().getConfig().isObjectReuseEnabled());
    }

    final Transformation<RowData> inputTransformation =
            (Transformation<RowData>) inputEdge.translateToPlan(planner);
    return ExecNodeUtil.createOneInputTransformation(
            inputTransformation,
            createTransformationMeta(LOOKUP_JOIN_TRANSFORMATION, config),
            operatorFactory,
            InternalTypeInfo.of(resultRowType),
            inputTransformation.getParallelism());
}
Aggregations