
Example 1 with ChainingStrategy

Use of org.apache.flink.streaming.api.operators.ChainingStrategy in project flink by apache.

From the class BatchExecutionUtils, method adjustChainingStrategy:

private static void adjustChainingStrategy(StreamNode node) {
    StreamOperatorFactory<?> operatorFactory = node.getOperatorFactory();
    ChainingStrategy currentChainingStrategy = operatorFactory.getChainingStrategy();
    // Downgrade ALWAYS and HEAD_WITH_SOURCES to HEAD; NEVER and HEAD stay unchanged.
    switch (currentChainingStrategy) {
        case ALWAYS:
        case HEAD_WITH_SOURCES:
            LOG.debug("Setting chaining strategy to HEAD for operator {}, because of the BATCH execution mode.", node);
            operatorFactory.setChainingStrategy(ChainingStrategy.HEAD);
            break;
        case NEVER:
        case HEAD:
            break;
    }
}
Also used: ChainingStrategy (org.apache.flink.streaming.api.operators.ChainingStrategy)
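
For context, a minimal sketch of the setting that makes this adjustment relevant: the rewrite of ALWAYS and HEAD_WITH_SOURCES to HEAD only happens when the job executes in BATCH mode. The class name and the bounded sequence source below are placeholders for illustration, not part of BatchExecutionUtils.

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class BatchModeSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Selecting BATCH execution mode (valid only with bounded sources) is what
        // leads to the chaining-strategy adjustment shown above.
        env.setRuntimeMode(RuntimeExecutionMode.BATCH);
        // Bounded source, so the job can legally run in BATCH mode.
        env.fromSequence(1, 100).print();
        env.execute("batch-mode-sketch");
    }
}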

Example 2 with ChainingStrategy

Use of org.apache.flink.streaming.api.operators.ChainingStrategy in project flink by apache.

From the class SavepointITCase, method testStopSavepointWithBoundedInput:

@Test
public void testStopSavepointWithBoundedInput() throws Exception {
    final int numTaskManagers = 2;
    final int numSlotsPerTaskManager = 2;
    for (ChainingStrategy chainingStrategy : ChainingStrategy.values()) {
        final MiniClusterResourceFactory clusterFactory = new MiniClusterResourceFactory(numTaskManagers, numSlotsPerTaskManager, getFileBasedCheckpointsConfig());
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        BoundedPassThroughOperator<Integer> operator = new BoundedPassThroughOperator<>(chainingStrategy);
        DataStream<Integer> stream = env.addSource(new InfiniteTestSource()).transform("pass-through", BasicTypeInfo.INT_TYPE_INFO, operator);
        stream.addSink(new DiscardingSink<>());
        final JobGraph jobGraph = env.getStreamGraph().getJobGraph();
        final JobID jobId = jobGraph.getJobID();
        MiniClusterWithClientResource cluster = clusterFactory.get();
        cluster.before();
        ClusterClient<?> client = cluster.getClusterClient();
        try {
            BoundedPassThroughOperator.resetForTest(1, true);
            client.submitJob(jobGraph).get();
            BoundedPassThroughOperator.getProgressLatch().await();
            waitForAllTaskRunning(cluster.getMiniCluster(), jobId, false);
            client.stopWithSavepoint(jobId, false, null, SavepointFormatType.CANONICAL).get();
            Assert.assertFalse("input ended with chainingStrategy " + chainingStrategy, BoundedPassThroughOperator.inputEnded);
        } finally {
            cluster.after();
        }
    }
}
Also used: MiniClusterWithClientResource (org.apache.flink.test.util.MiniClusterWithClientResource), ChainingStrategy (org.apache.flink.streaming.api.operators.ChainingStrategy), JobGraph (org.apache.flink.runtime.jobgraph.JobGraph), StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment), JobID (org.apache.flink.api.common.JobID), Test (org.junit.Test)
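
The test above constructs its operator with an explicit ChainingStrategy. At the public DataStream API level the same behaviour is normally controlled with startNewChain() and disableChaining(), which roughly correspond to HEAD and NEVER for the operator they are called on. A minimal sketch, assuming a bounded sequence source and a discarding sink (class name hypothetical):

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;

public class ChainingControlSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.fromSequence(1, 1000)
            .map(new MapFunction<Long, Long>() {
                @Override
                public Long map(Long value) {
                    return value + 1;
                }
            })
            // This map operator heads a new chain (roughly ChainingStrategy.HEAD).
            .startNewChain()
            .map(new MapFunction<Long, Long>() {
                @Override
                public Long map(Long value) {
                    return value * 2;
                }
            })
            // This map operator is never chained (roughly ChainingStrategy.NEVER).
            .disableChaining()
            .addSink(new DiscardingSink<>());
        env.execute("chaining-control-sketch");
    }
}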

Example 3 with ChainingStrategy

Use of org.apache.flink.streaming.api.operators.ChainingStrategy in project flink by apache.

From the class StreamingJobGraphGenerator, method buildChainedInputsAndGetHeadInputs:

private Map<Integer, OperatorChainInfo> buildChainedInputsAndGetHeadInputs(final Map<Integer, byte[]> hashes, final List<Map<Integer, byte[]>> legacyHashes) {
    final Map<Integer, ChainedSourceInfo> chainedSources = new HashMap<>();
    final Map<Integer, OperatorChainInfo> chainEntryPoints = new HashMap<>();
    for (Integer sourceNodeId : streamGraph.getSourceIDs()) {
        final StreamNode sourceNode = streamGraph.getStreamNode(sourceNodeId);
        if (sourceNode.getOperatorFactory() instanceof SourceOperatorFactory && sourceNode.getOutEdges().size() == 1) {
            // as long as only NAry ops support this chaining, we need to skip the other parts
            final StreamEdge sourceOutEdge = sourceNode.getOutEdges().get(0);
            final StreamNode target = streamGraph.getStreamNode(sourceOutEdge.getTargetId());
            final ChainingStrategy targetChainingStrategy = target.getOperatorFactory().getChainingStrategy();
            if (targetChainingStrategy == ChainingStrategy.HEAD_WITH_SOURCES && isChainableInput(sourceOutEdge, streamGraph)) {
                final OperatorID opId = new OperatorID(hashes.get(sourceNodeId));
                final StreamConfig.SourceInputConfig inputConfig = new StreamConfig.SourceInputConfig(sourceOutEdge);
                final StreamConfig operatorConfig = new StreamConfig(new Configuration());
                setVertexConfig(sourceNodeId, operatorConfig, Collections.emptyList(), Collections.emptyList(), Collections.emptyMap());
                // sources are always first
                operatorConfig.setChainIndex(0);
                operatorConfig.setOperatorID(opId);
                operatorConfig.setOperatorName(sourceNode.getOperatorName());
                chainedSources.put(sourceNodeId, new ChainedSourceInfo(operatorConfig, inputConfig));
                final SourceOperatorFactory<?> sourceOpFact = (SourceOperatorFactory<?>) sourceNode.getOperatorFactory();
                final OperatorCoordinator.Provider coord = sourceOpFact.getCoordinatorProvider(sourceNode.getOperatorName(), opId);
                final OperatorChainInfo chainInfo = chainEntryPoints.computeIfAbsent(sourceOutEdge.getTargetId(), (k) -> new OperatorChainInfo(sourceOutEdge.getTargetId(), hashes, legacyHashes, chainedSources, streamGraph));
                chainInfo.addCoordinatorProvider(coord);
                continue;
            }
        }
        chainEntryPoints.put(sourceNodeId, new OperatorChainInfo(sourceNodeId, hashes, legacyHashes, chainedSources, streamGraph));
    }
    return chainEntryPoints;
}
Also used: CheckpointCoordinatorConfiguration (org.apache.flink.runtime.jobgraph.tasks.CheckpointCoordinatorConfiguration), Configuration (org.apache.flink.configuration.Configuration), IdentityHashMap (java.util.IdentityHashMap), HashMap (java.util.HashMap), OperatorCoordinator (org.apache.flink.runtime.operators.coordination.OperatorCoordinator), OperatorID (org.apache.flink.runtime.jobgraph.OperatorID), SourceOperatorFactory (org.apache.flink.streaming.api.operators.SourceOperatorFactory), ChainingStrategy (org.apache.flink.streaming.api.operators.ChainingStrategy), AtomicInteger (java.util.concurrent.atomic.AtomicInteger)
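
A minimal sketch of a job whose source goes through SourceOperatorFactory and is therefore a candidate for the chained-source handling above; whether it is actually chained still depends on the downstream operator declaring HEAD_WITH_SOURCES and on isChainableInput(). The class name and the source/job names are illustrative only.

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.connector.source.lib.NumberSequenceSource;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class ChainedSourceSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // fromSource() wraps a FLIP-27 Source in a SourceOperatorFactory, which is what the
        // instanceof check in buildChainedInputsAndGetHeadInputs looks for.
        DataStream<Long> numbers = env.fromSource(
                new NumberSequenceSource(1, 100),
                WatermarkStrategy.noWatermarks(),
                "numbers");
        numbers.print();
        env.execute("chained-source-sketch");
    }
}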

Aggregations

ChainingStrategy (org.apache.flink.streaming.api.operators.ChainingStrategy) 3
HashMap (java.util.HashMap) 1
IdentityHashMap (java.util.IdentityHashMap) 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 1
JobID (org.apache.flink.api.common.JobID) 1
Configuration (org.apache.flink.configuration.Configuration) 1
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph) 1
OperatorID (org.apache.flink.runtime.jobgraph.OperatorID) 1
CheckpointCoordinatorConfiguration (org.apache.flink.runtime.jobgraph.tasks.CheckpointCoordinatorConfiguration) 1
OperatorCoordinator (org.apache.flink.runtime.operators.coordination.OperatorCoordinator) 1
StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment) 1
SourceOperatorFactory (org.apache.flink.streaming.api.operators.SourceOperatorFactory) 1
MiniClusterWithClientResource (org.apache.flink.test.util.MiniClusterWithClientResource) 1
Test (org.junit.Test) 1