
Example 1 with ExecNodeGraph

Use of org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph in project flink by apache.

The class DeadlockBreakupProcessor, method process.

@Override
public ExecNodeGraph process(ExecNodeGraph execGraph, ProcessorContext context) {
    if (!execGraph.getRootNodes().stream().allMatch(r -> r instanceof BatchExecNode)) {
        throw new TableException("Only BatchExecNode DAG are supported now.");
    }
    InputPriorityConflictResolver resolver = new InputPriorityConflictResolver(execGraph.getRootNodes(), InputProperty.DamBehavior.END_INPUT, StreamExchangeMode.BATCH, context.getPlanner().getConfiguration());
    resolver.detectAndResolve();
    return execGraph;
}
Also used : BatchExecNode(org.apache.flink.table.planner.plan.nodes.exec.batch.BatchExecNode) StreamExchangeMode(org.apache.flink.streaming.api.transformations.StreamExchangeMode) InputPriorityConflictResolver(org.apache.flink.table.planner.plan.nodes.exec.processor.utils.InputPriorityConflictResolver) InputProperty(org.apache.flink.table.planner.plan.nodes.exec.InputProperty) TableException(org.apache.flink.table.api.TableException) ExecNodeGraph(org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph)
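
Processors of this kind are normally applied one after another to the same graph. Below is a minimal sketch of such chaining; the ProcessorChain class and its GraphProcessor interface are hypothetical and only mirror the process(ExecNodeGraph, ProcessorContext) signature shown above (the ProcessorContext package is assumed from the processor.utils path used here).

import java.util.Arrays;
import java.util.List;

import org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph;
// Package assumed from the processor.utils path used in the example above.
import org.apache.flink.table.planner.plan.nodes.exec.processor.ProcessorContext;

// Hypothetical helper: runs several graph processors in sequence, each one
// receiving the graph produced by the previous processor.
public class ProcessorChain {

    // Minimal functional shape matching the process(...) signature shown above.
    public interface GraphProcessor {
        ExecNodeGraph process(ExecNodeGraph graph, ProcessorContext context);
    }

    private final List<GraphProcessor> processors;

    public ProcessorChain(GraphProcessor... processors) {
        this.processors = Arrays.asList(processors);
    }

    public ExecNodeGraph apply(ExecNodeGraph graph, ProcessorContext context) {
        ExecNodeGraph current = graph;
        for (GraphProcessor processor : processors) {
            current = processor.process(current, context);
        }
        return current;
    }
}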

Example 2 with ExecNodeGraph

Use of org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph in project flink by apache.

The class MultipleInputNodeCreationProcessorTest, method assertChainableSource.

private void assertChainableSource(String name, TableTestUtil util, boolean expected) {
    String sql = "SELECT * FROM " + name;
    Table table = util.tableEnv().sqlQuery(sql);
    RelNode relNode = TableTestUtil.toRelNode(table);
    FlinkPhysicalRel optimizedRel = (FlinkPhysicalRel) util.getPlanner().optimize(relNode);
    ExecNodeGraphGenerator generator = new ExecNodeGraphGenerator();
    ExecNodeGraph execGraph = generator.generate(Collections.singletonList(optimizedRel));
    ExecNode<?> execNode = execGraph.getRootNodes().get(0);
    while (!execNode.getInputEdges().isEmpty()) {
        execNode = execNode.getInputEdges().get(0).getSource();
    }
    ProcessorContext context = new ProcessorContext(util.getPlanner());
    Assert.assertEquals(expected, MultipleInputNodeCreationProcessor.isChainableSource(execNode, context));
}
Also used : ExecNodeGraph(org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph) Table(org.apache.flink.table.api.Table) RelNode(org.apache.calcite.rel.RelNode) ExecNodeGraphGenerator(org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraphGenerator) FlinkPhysicalRel(org.apache.flink.table.planner.plan.nodes.physical.FlinkPhysicalRel)
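
The loop above follows a single input chain from a root node down to its source. A hedged generalization that collects every node reachable from the roots, using only the getRootNodes(), getInputEdges() and getSource() calls visible in these snippets (the ExecNodeGraphs class and collectAllNodes name are made up for illustration):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.flink.table.planner.plan.nodes.exec.ExecEdge;
import org.apache.flink.table.planner.plan.nodes.exec.ExecNode;
import org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph;

public final class ExecNodeGraphs {

    // Hypothetical helper: depth-first walk over input edges, starting from the
    // root nodes (the sinks) and visiting each node exactly once.
    public static List<ExecNode<?>> collectAllNodes(ExecNodeGraph graph) {
        List<ExecNode<?>> result = new ArrayList<>();
        Set<ExecNode<?>> visited = new HashSet<>();
        Deque<ExecNode<?>> stack = new ArrayDeque<>(graph.getRootNodes());
        while (!stack.isEmpty()) {
            ExecNode<?> node = stack.pop();
            if (!visited.add(node)) {
                // already seen via another consumer
                continue;
            }
            result.add(node);
            for (ExecEdge edge : node.getInputEdges()) {
                stack.push(edge.getSource());
            }
        }
        return result;
    }

    private ExecNodeGraphs() {
    }
}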

Example 3 with ExecNodeGraph

Use of org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph in project flink by apache.

The class JsonPlanGraph, method convertToExecNodeGraph.

ExecNodeGraph convertToExecNodeGraph() {
    Map<Integer, ExecNode<?>> idToExecNodes = new HashMap<>();
    for (ExecNode<?> execNode : nodes) {
        int id = execNode.getId();
        if (idToExecNodes.containsKey(id)) {
            throw new TableException(String.format("The id: %s is not unique for ExecNode: %s.\nplease check it.", id, execNode.getDescription()));
        }
        idToExecNodes.put(id, execNode);
    }
    Map<Integer, List<ExecEdge>> idToInputEdges = new HashMap<>();
    Map<Integer, List<ExecEdge>> idToOutputEdges = new HashMap<>();
    for (JsonPlanEdge edge : edges) {
        ExecNode<?> source = idToExecNodes.get(edge.getSourceId());
        if (source == null) {
            throw new TableException(String.format("Source node id: %s is not found in nodes.", edge.getSourceId()));
        }
        ExecNode<?> target = idToExecNodes.get(edge.getTargetId());
        if (target == null) {
            throw new TableException(String.format("Target node id: %s is not found in nodes.", edge.getTargetId()));
        }
        ExecEdge execEdge = ExecEdge.builder().source(source).target(target).shuffle(edge.getShuffle()).exchangeMode(edge.getExchangeMode()).build();
        idToInputEdges.computeIfAbsent(target.getId(), n -> new ArrayList<>()).add(execEdge);
        idToOutputEdges.computeIfAbsent(source.getId(), n -> new ArrayList<>()).add(execEdge);
    }
    List<ExecNode<?>> rootNodes = new ArrayList<>();
    for (Map.Entry<Integer, ExecNode<?>> entry : idToExecNodes.entrySet()) {
        int id = entry.getKey();
        ExecNode<?> node = entry.getValue();
        // connect input edges
        List<ExecEdge> inputEdges = idToInputEdges.getOrDefault(id, new ArrayList<>());
        node.setInputEdges(inputEdges);
        if (!idToOutputEdges.containsKey(id)) {
            // if the node has no output nodes, it's a root node
            rootNodes.add(node);
        }
    }
    return new ExecNodeGraph(flinkVersion, rootNodes);
}
Also used : JsonCreator(org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator) Sets(org.apache.flink.shaded.guava30.com.google.common.collect.Sets) ExecNodeVisitorImpl(org.apache.flink.table.planner.plan.nodes.exec.visitor.ExecNodeVisitorImpl) TableException(org.apache.flink.table.api.TableException) Set(java.util.Set) HashMap(java.util.HashMap) JsonProperty(org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty) ExecNode(org.apache.flink.table.planner.plan.nodes.exec.ExecNode) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) List(java.util.List) ExecEdge(org.apache.flink.table.planner.plan.nodes.exec.ExecEdge) Preconditions.checkArgument(org.apache.flink.util.Preconditions.checkArgument) ExecNodeVisitor(org.apache.flink.table.planner.plan.nodes.exec.visitor.ExecNodeVisitor) Map(java.util.Map) Internal(org.apache.flink.annotation.Internal) FlinkVersion(org.apache.flink.FlinkVersion) ExecNodeGraph(org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph)
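
Detached from the Flink types, the reconstruction boils down to: index the nodes by id, reject duplicate or unknown ids, record which nodes have outgoing edges, and treat every node without one as a root. A small sketch of that pattern on plain integer ids (all names here are illustrative, not Flink API):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Illustration only: the same shape as convertToExecNodeGraph, on plain ids.
public class GraphReconstructionSketch {

    // Each edge is a two-element array {sourceId, targetId}.
    public static List<Integer> findRootIds(List<Integer> nodeIds, List<int[]> edges) {
        Set<Integer> known = new HashSet<>();
        for (int id : nodeIds) {
            if (!known.add(id)) {
                throw new IllegalArgumentException("The id " + id + " is not unique.");
            }
        }
        Set<Integer> hasOutgoingEdge = new HashSet<>();
        for (int[] edge : edges) {
            if (!known.contains(edge[0]) || !known.contains(edge[1])) {
                throw new IllegalArgumentException(
                        "Edge references an unknown node: " + edge[0] + " -> " + edge[1]);
            }
            hasOutgoingEdge.add(edge[0]);
        }
        // A node that is never an edge source has no output, so it is a root.
        List<Integer> roots = new ArrayList<>();
        for (int id : nodeIds) {
            if (!hasOutgoingEdge.contains(id)) {
                roots.add(id);
            }
        }
        return roots;
    }
}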

Example 4 with ExecNodeGraph

Use of org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph in project flink by apache.

The class MultipleInputNodeCreationProcessor, method process.

@Override
public ExecNodeGraph process(ExecNodeGraph execGraph, ProcessorContext context) {
    if (!isStreaming) {
        // As multiple input nodes use function call to deliver records between sub-operators,
        // we cannot rely on network buffers to buffer records not yet ready to be read,
        // so only BLOCKING dam behavior is safe here.
        // If conflict is detected under this stricter constraint,
        // we add a PIPELINED exchange to mark that its input and output node cannot be merged
        // into the same multiple input node
        InputPriorityConflictResolver resolver = new InputPriorityConflictResolver(execGraph.getRootNodes(), InputProperty.DamBehavior.BLOCKING, StreamExchangeMode.PIPELINED, context.getPlanner().getConfiguration());
        resolver.detectAndResolve();
    }
    List<ExecNodeWrapper> rootWrappers = wrapExecNodes(execGraph.getRootNodes());
    // sort all nodes in topological order, sinks come first and sources come last
    List<ExecNodeWrapper> orderedWrappers = topologicalSort(rootWrappers);
    // group nodes into multiple input groups
    createMultipleInputGroups(orderedWrappers);
    // apply optimizations to remove unnecessary nodes out of multiple input groups
    optimizeMultipleInputGroups(orderedWrappers, context);
    // create the real multiple input nodes
    List<ExecNode<?>> newRootNodes = createMultipleInputNodes(rootWrappers);
    return new ExecNodeGraph(newRootNodes);
}
Also used : ExecNodeGraph(org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph) ExecNode(org.apache.flink.table.planner.plan.nodes.exec.ExecNode) InputPriorityConflictResolver(org.apache.flink.table.planner.plan.nodes.exec.processor.utils.InputPriorityConflictResolver)
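
The topologicalSort helper itself is not part of this snippet. A hedged sketch of one way to produce the "sinks come first, sources come last" order described in the comment: count each node's downstream consumers, then emit a node only after all of its consumers have been emitted (class and method names are illustrative; only the accessors visible in these snippets are used):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.flink.table.planner.plan.nodes.exec.ExecEdge;
import org.apache.flink.table.planner.plan.nodes.exec.ExecNode;
import org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph;

public final class TopologicalOrderSketch {

    public static List<ExecNode<?>> sinksFirstOrder(ExecNodeGraph graph) {
        // Pass 1: count, for every node, how many downstream consumers it has.
        Map<ExecNode<?>, Integer> pendingConsumers = new HashMap<>();
        Deque<ExecNode<?>> toVisit = new ArrayDeque<>(graph.getRootNodes());
        while (!toVisit.isEmpty()) {
            ExecNode<?> node = toVisit.pop();
            pendingConsumers.putIfAbsent(node, 0);
            for (ExecEdge edge : node.getInputEdges()) {
                ExecNode<?> input = edge.getSource();
                Integer previous = pendingConsumers.get(input);
                pendingConsumers.put(input, previous == null ? 1 : previous + 1);
                if (previous == null) {
                    // first time this input is seen, schedule it for counting
                    toVisit.push(input);
                }
            }
        }
        // Pass 2: Kahn's algorithm starting from the sinks (the root nodes).
        Deque<ExecNode<?>> ready = new ArrayDeque<>(graph.getRootNodes());
        List<ExecNode<?>> ordered = new ArrayList<>();
        while (!ready.isEmpty()) {
            ExecNode<?> node = ready.poll();
            ordered.add(node);
            for (ExecEdge edge : node.getInputEdges()) {
                ExecNode<?> input = edge.getSource();
                int remaining = pendingConsumers.get(input) - 1;
                pendingConsumers.put(input, remaining);
                if (remaining == 0) {
                    // all consumers of this input have been emitted
                    ready.add(input);
                }
            }
        }
        return ordered;
    }

    private TopologicalOrderSketch() {
    }
}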

Example 5 with ExecNodeGraph

Use of org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph in project flink by apache.

The class InternalConfigOptionsTest, method testTranslateExecNodeGraphWithInternalTemporalConf.

@Test
public void testTranslateExecNodeGraphWithInternalTemporalConf() {
    Table table = tEnv.sqlQuery("SELECT LOCALTIME, LOCALTIMESTAMP, CURRENT_TIME, CURRENT_TIMESTAMP");
    RelNode relNode = planner.optimize(TableTestUtil.toRelNode(table));
    ExecNodeGraph execNodeGraph = planner.translateToExecNodeGraph(toScala(Collections.singletonList(relNode)));
    // PlannerBase#translateToExecNodeGraph will set internal temporal configurations and
    // clean them up after the translation finishes
    List<Transformation<?>> transformation = planner.translateToPlan(execNodeGraph);
    // check that the translation succeeded
    Assert.assertEquals(1, transformation.size());
}
Also used : ExecNodeGraph(org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph) Transformation(org.apache.flink.api.dag.Transformation) Table(org.apache.flink.table.api.Table) RelNode(org.apache.calcite.rel.RelNode) Test(org.junit.Test)
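
The set-then-cleanup pattern referred to in the comment can be sketched as a sibling test method. The option key below is hypothetical and only illustrates the pattern; the tEnv/planner fields and toScala helper are assumed to be the same ones used by the test above, and only Configuration calls known from the Flink API (set, removeConfig) are used.

@Test
public void testTranslateWithTemporaryConfSketch() {
    // Hypothetical option key, for illustration only.
    ConfigOption<String> tempOption =
            ConfigOptions.key("table.exec.hypothetical-temporal-conf")
                    .stringType()
                    .noDefaultValue();
    Configuration conf = tEnv.getConfig().getConfiguration();
    conf.set(tempOption, "2024-01-01T00:00:00Z");
    try {
        Table table = tEnv.sqlQuery("SELECT CURRENT_TIMESTAMP");
        RelNode relNode = planner.optimize(TableTestUtil.toRelNode(table));
        ExecNodeGraph execNodeGraph =
                planner.translateToExecNodeGraph(toScala(Collections.singletonList(relNode)));
        List<Transformation<?>> transformations = planner.translateToPlan(execNodeGraph);
        Assert.assertEquals(1, transformations.size());
    } finally {
        // Remove the temporary value so it does not leak into later tests.
        conf.removeConfig(tempOption);
    }
}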

Aggregations

ExecNodeGraph (org.apache.flink.table.planner.plan.nodes.exec.ExecNodeGraph) 5
RelNode (org.apache.calcite.rel.RelNode) 2
Table (org.apache.flink.table.api.Table) 2
TableException (org.apache.flink.table.api.TableException) 2
ExecNode (org.apache.flink.table.planner.plan.nodes.exec.ExecNode) 2
InputPriorityConflictResolver (org.apache.flink.table.planner.plan.nodes.exec.processor.utils.InputPriorityConflictResolver) 2
ArrayList (java.util.ArrayList) 1
HashMap (java.util.HashMap) 1
HashSet (java.util.HashSet) 1
List (java.util.List) 1
Map (java.util.Map) 1
Set (java.util.Set) 1
FlinkVersion (org.apache.flink.FlinkVersion) 1
Internal (org.apache.flink.annotation.Internal) 1
Transformation (org.apache.flink.api.dag.Transformation) 1
Sets (org.apache.flink.shaded.guava30.com.google.common.collect.Sets) 1
JsonCreator (org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonCreator) 1
JsonProperty (org.apache.flink.shaded.jackson2.com.fasterxml.jackson.annotation.JsonProperty) 1
StreamExchangeMode (org.apache.flink.streaming.api.transformations.StreamExchangeMode) 1
ExecEdge (org.apache.flink.table.planner.plan.nodes.exec.ExecEdge) 1