
Example 16 with SlotSharingGroup

Use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.

From the class LocalInputPreferredSlotSharingStrategyTest, method testGetExecutionSlotSharingGroupOfLateAttachedVertices:

@Test
public void testGetExecutionSlotSharingGroupOfLateAttachedVertices() {
    JobVertexID jobVertexID1 = new JobVertexID();
    JobVertexID jobVertexID2 = new JobVertexID();
    JobVertexID jobVertexID3 = new JobVertexID();
    // jobVertexID1 and jobVertexID2 belong to one slot sharing group, jobVertexID3 to another
    final SlotSharingGroup slotSharingGroup1 = new SlotSharingGroup();
    slotSharingGroup1.addVertexToGroup(jobVertexID1);
    slotSharingGroup1.addVertexToGroup(jobVertexID2);
    final SlotSharingGroup slotSharingGroup2 = new SlotSharingGroup();
    slotSharingGroup2.addVertexToGroup(jobVertexID3);
    TestingSchedulingTopology topology = new TestingSchedulingTopology();
    TestingSchedulingExecutionVertex ev1 = topology.newExecutionVertex(jobVertexID1, 0);
    TestingSchedulingExecutionVertex ev2 = topology.newExecutionVertex(jobVertexID2, 0);
    topology.connect(ev1, ev2);
    final LocalInputPreferredSlotSharingStrategy strategy = new LocalInputPreferredSlotSharingStrategy(topology, new HashSet<>(Arrays.asList(slotSharingGroup1, slotSharingGroup2)), Collections.emptySet());
    // before the topology update only ev1 and ev2 exist, so there is a single execution slot sharing group
    assertThat(strategy.getExecutionSlotSharingGroups().size(), is(1));
    assertThat(strategy.getExecutionSlotSharingGroup(ev1.getId()).getExecutionVertexIds(), containsInAnyOrder(ev1.getId(), ev2.getId()));
    assertThat(strategy.getExecutionSlotSharingGroup(ev2.getId()).getExecutionVertexIds(), containsInAnyOrder(ev1.getId(), ev2.getId()));
    // add new job vertices and notify scheduling topology updated
    TestingSchedulingExecutionVertex ev3 = topology.newExecutionVertex(jobVertexID3, 0);
    topology.connect(ev2, ev3, ResultPartitionType.BLOCKING);
    strategy.notifySchedulingTopologyUpdated(topology, Collections.singletonList(ev3.getId()));
    // ev3 is in a different slot sharing group, so it ends up in its own execution slot sharing group
    assertThat(strategy.getExecutionSlotSharingGroups().size(), is(2));
    assertThat(strategy.getExecutionSlotSharingGroup(ev1.getId()).getExecutionVertexIds(), containsInAnyOrder(ev1.getId(), ev2.getId()));
    assertThat(strategy.getExecutionSlotSharingGroup(ev2.getId()).getExecutionVertexIds(), containsInAnyOrder(ev1.getId(), ev2.getId()));
    assertThat(strategy.getExecutionSlotSharingGroup(ev3.getId()).getExecutionVertexIds(), containsInAnyOrder(ev3.getId()));
}
Also used : TestingSchedulingExecutionVertex(org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) TestingSchedulingTopology(org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology) SlotSharingGroup(org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup) Test(org.junit.Test)
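For readers who want to reproduce just the group setup that this test feeds into the strategy, here is a minimal, self-contained sketch. It uses only the SlotSharingGroup and JobVertexID calls that already appear above; the class name, vertex names, and main method are made up for illustration, and flink-runtime is assumed to be on the classpath.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;

public class SlotSharingGroupSketch {

    public static void main(String[] args) {
        // Two logical vertices that should share slots, and one that should not.
        JobVertexID source = new JobVertexID();
        JobVertexID map = new JobVertexID();
        JobVertexID sink = new JobVertexID();

        SlotSharingGroup pipelinedGroup = new SlotSharingGroup();
        pipelinedGroup.addVertexToGroup(source);
        pipelinedGroup.addVertexToGroup(map);

        SlotSharingGroup isolatedGroup = new SlotSharingGroup();
        isolatedGroup.addVertexToGroup(sink);

        // The strategy in the test above receives exactly such a set of groups.
        Set<SlotSharingGroup> allGroups = new HashSet<>(Arrays.asList(pipelinedGroup, isolatedGroup));
        System.out.println("number of slot sharing groups: " + allGroups.size());
    }
}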

Example 17 with SlotSharingGroup

Use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.

From the class StreamingJobGraphGenerator, method setManagedMemoryFraction:

private static void setManagedMemoryFraction(
        final Map<Integer, JobVertex> jobVertices,
        final Map<Integer, StreamConfig> operatorConfigs,
        final Map<Integer, Map<Integer, StreamConfig>> vertexChainedConfigs,
        final java.util.function.Function<Integer, Map<ManagedMemoryUseCase, Integer>> operatorScopeManagedMemoryUseCaseWeightsRetriever,
        final java.util.function.Function<Integer, Set<ManagedMemoryUseCase>> slotScopeManagedMemoryUseCasesRetriever) {
    // all slot sharing groups in this job
    final Set<SlotSharingGroup> slotSharingGroups = Collections.newSetFromMap(new IdentityHashMap<>());
    // maps a job vertex ID to its head operator ID
    final Map<JobVertexID, Integer> vertexHeadOperators = new HashMap<>();
    // maps a job vertex ID to IDs of all operators in the vertex
    final Map<JobVertexID, Set<Integer>> vertexOperators = new HashMap<>();
    for (Map.Entry<Integer, JobVertex> entry : jobVertices.entrySet()) {
        final int headOperatorId = entry.getKey();
        final JobVertex jobVertex = entry.getValue();
        final SlotSharingGroup jobVertexSlotSharingGroup = jobVertex.getSlotSharingGroup();
        checkState(jobVertexSlotSharingGroup != null, "JobVertex slot sharing group must not be null");
        slotSharingGroups.add(jobVertexSlotSharingGroup);
        vertexHeadOperators.put(jobVertex.getID(), headOperatorId);
        final Set<Integer> operatorIds = new HashSet<>();
        operatorIds.add(headOperatorId);
        operatorIds.addAll(vertexChainedConfigs.getOrDefault(headOperatorId, Collections.emptyMap()).keySet());
        vertexOperators.put(jobVertex.getID(), operatorIds);
    }
    for (SlotSharingGroup slotSharingGroup : slotSharingGroups) {
        setManagedMemoryFractionForSlotSharingGroup(slotSharingGroup, vertexHeadOperators, vertexOperators, operatorConfigs, vertexChainedConfigs, operatorScopeManagedMemoryUseCaseWeightsRetriever, slotScopeManagedMemoryUseCasesRetriever);
    }
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) IdentityHashMap(java.util.IdentityHashMap) HashMap(java.util.HashMap) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) SlotSharingGroup(org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup) Map(java.util.Map)
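The first loop above relies on an identity-based set so that each SlotSharingGroup instance is processed exactly once in the second loop, no matter how many job vertices reference it. Below is a small sketch of that pattern; the class name and main method are invented for illustration, and only standard JDK calls plus the SlotSharingGroup constructor shown above are used.

import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Set;

import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;

public class IdentitySetSketch {

    public static void main(String[] args) {
        // An identity-based set deduplicates by object reference rather than equals()/hashCode(),
        // so a group referenced by many job vertices is still processed exactly once.
        Set<SlotSharingGroup> groups = Collections.newSetFromMap(new IdentityHashMap<>());

        SlotSharingGroup shared = new SlotSharingGroup();
        groups.add(shared);
        groups.add(shared);                 // same instance: ignored
        groups.add(new SlotSharingGroup()); // different instance: kept

        System.out.println(groups.size()); // prints 2
    }
}

The identity-based set makes the deduplication by reference explicit and keeps it independent of whatever equals()/hashCode() semantics the group class has.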

Example 18 with SlotSharingGroup

Use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.

From the class StreamingJobGraphGenerator, method setSlotSharing:

private void setSlotSharing() {
    final Map<String, SlotSharingGroup> specifiedSlotSharingGroups = new HashMap<>();
    final Map<JobVertexID, SlotSharingGroup> vertexRegionSlotSharingGroups = buildVertexRegionSlotSharingGroups();
    for (Map.Entry<Integer, JobVertex> entry : jobVertices.entrySet()) {
        final JobVertex vertex = entry.getValue();
        final String slotSharingGroupKey = streamGraph.getStreamNode(entry.getKey()).getSlotSharingGroup();
        checkNotNull(slotSharingGroupKey, "StreamNode slot sharing group must not be null");
        final SlotSharingGroup effectiveSlotSharingGroup;
        if (slotSharingGroupKey.equals(StreamGraphGenerator.DEFAULT_SLOT_SHARING_GROUP)) {
            // fallback to the region slot sharing group by default
            effectiveSlotSharingGroup = checkNotNull(vertexRegionSlotSharingGroups.get(vertex.getID()));
        } else {
            effectiveSlotSharingGroup = specifiedSlotSharingGroups.computeIfAbsent(slotSharingGroupKey, k -> {
                SlotSharingGroup ssg = new SlotSharingGroup();
                streamGraph.getSlotSharingGroupResource(k).ifPresent(ssg::setResourceProfile);
                return ssg;
            });
        }
        vertex.setSlotSharingGroup(effectiveSlotSharingGroup);
    }
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Arrays(java.util.Arrays) DefaultLogicalPipelinedRegion(org.apache.flink.runtime.jobgraph.topology.DefaultLogicalPipelinedRegion) InputSelectable(org.apache.flink.streaming.api.operators.InputSelectable) Tuple2(org.apache.flink.api.java.tuple.Tuple2) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) CheckpointingMode(org.apache.flink.streaming.api.CheckpointingMode) YieldingOperatorFactory(org.apache.flink.streaming.api.operators.YieldingOperatorFactory) LoggerFactory(org.slf4j.LoggerFactory) CheckpointCoordinatorConfiguration(org.apache.flink.runtime.jobgraph.tasks.CheckpointCoordinatorConfiguration) CheckpointStorage(org.apache.flink.runtime.state.CheckpointStorage) CoLocationGroupImpl(org.apache.flink.runtime.jobmanager.scheduler.CoLocationGroupImpl) StringUtils(org.apache.commons.lang3.StringUtils) StateBackend(org.apache.flink.runtime.state.StateBackend) ChainingStrategy(org.apache.flink.streaming.api.operators.ChainingStrategy) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ResourceSpec(org.apache.flink.api.common.operators.ResourceSpec) ManagedMemoryUseCase(org.apache.flink.core.memory.ManagedMemoryUseCase) CustomPartitionerWrapper(org.apache.flink.streaming.runtime.partitioner.CustomPartitionerWrapper) Map(java.util.Map) Function(org.apache.flink.api.common.functions.Function) WithMasterCheckpointHook(org.apache.flink.streaming.api.checkpoint.WithMasterCheckpointHook) Preconditions.checkNotNull(org.apache.flink.util.Preconditions.checkNotNull) ExecutionOptions(org.apache.flink.configuration.ExecutionOptions) JobCheckpointingSettings(org.apache.flink.runtime.jobgraph.tasks.JobCheckpointingSettings) MINIMAL_CHECKPOINT_TIME(org.apache.flink.runtime.jobgraph.tasks.CheckpointCoordinatorConfiguration.MINIMAL_CHECKPOINT_TIME) TypeSerializer(org.apache.flink.api.common.typeutils.TypeSerializer) ForwardPartitioner(org.apache.flink.streaming.runtime.partitioner.ForwardPartitioner) IdentityHashMap(java.util.IdentityHashMap) TaskConfig(org.apache.flink.runtime.operators.util.TaskConfig) Collection(java.util.Collection) Set(java.util.Set) DistributedCache(org.apache.flink.api.common.cache.DistributedCache) Collectors(java.util.stream.Collectors) List(java.util.List) SerializedValue(org.apache.flink.util.SerializedValue) Preconditions.checkArgument(org.apache.flink.util.Preconditions.checkArgument) UdfStreamOperatorFactory(org.apache.flink.streaming.api.operators.UdfStreamOperatorFactory) OperatorID(org.apache.flink.runtime.jobgraph.OperatorID) Optional(java.util.Optional) CheckpointConfig(org.apache.flink.streaming.api.environment.CheckpointConfig) StreamIterationTail(org.apache.flink.streaming.runtime.tasks.StreamIterationTail) IllegalConfigurationException(org.apache.flink.configuration.IllegalConfigurationException) JobEdge(org.apache.flink.runtime.jobgraph.JobEdge) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) TaskInvokable(org.apache.flink.runtime.jobgraph.tasks.TaskInvokable) SlotSharingGroup(org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup) LogicalVertex(org.apache.flink.runtime.jobgraph.topology.LogicalVertex) ForwardForConsecutiveHashPartitioner(org.apache.flink.streaming.runtime.partitioner.ForwardForConsecutiveHashPartitioner) ManagedMemoryUtils(org.apache.flink.runtime.util.config.memory.ManagedMemoryUtils) StreamOperatorFactory(org.apache.flink.streaming.api.operators.StreamOperatorFactory) InputOutputFormatVertex(org.apache.flink.runtime.jobgraph.InputOutputFormatVertex) 
ResultPartitionType(org.apache.flink.runtime.io.network.partition.ResultPartitionType) HashMap(java.util.HashMap) SourceOperatorFactory(org.apache.flink.streaming.api.operators.SourceOperatorFactory) ArrayList(java.util.ArrayList) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) HashSet(java.util.HashSet) StreamPartitioner(org.apache.flink.streaming.runtime.partitioner.StreamPartitioner) JobGraphUtils(org.apache.flink.runtime.jobgraph.JobGraphUtils) ExecutionCheckpointingOptions(org.apache.flink.streaming.api.environment.ExecutionCheckpointingOptions) StreamIterationHead(org.apache.flink.streaming.runtime.tasks.StreamIterationHead) LinkedList(java.util.LinkedList) DistributionPattern(org.apache.flink.runtime.jobgraph.DistributionPattern) Nullable(javax.annotation.Nullable) Preconditions.checkState(org.apache.flink.util.Preconditions.checkState) Logger(org.slf4j.Logger) FlinkRuntimeException(org.apache.flink.util.FlinkRuntimeException) Configuration(org.apache.flink.configuration.Configuration) IOException(java.io.IOException) OperatorIDPair(org.apache.flink.runtime.OperatorIDPair) ForwardForUnspecifiedPartitioner(org.apache.flink.streaming.runtime.partitioner.ForwardForUnspecifiedPartitioner) VisibleForTesting(org.apache.flink.annotation.VisibleForTesting) MasterTriggerRestoreHook(org.apache.flink.runtime.checkpoint.MasterTriggerRestoreHook) RescalePartitioner(org.apache.flink.streaming.runtime.partitioner.RescalePartitioner) StreamExchangeMode(org.apache.flink.streaming.api.transformations.StreamExchangeMode) JobID(org.apache.flink.api.common.JobID) DefaultLogicalTopology(org.apache.flink.runtime.jobgraph.topology.DefaultLogicalTopology) OperatorCoordinator(org.apache.flink.runtime.operators.coordination.OperatorCoordinator) InputOutputFormatContainer(org.apache.flink.runtime.jobgraph.InputOutputFormatContainer) Internal(org.apache.flink.annotation.Internal) Comparator(java.util.Comparator) Collections(java.util.Collections) CheckpointRetentionPolicy(org.apache.flink.runtime.checkpoint.CheckpointRetentionPolicy) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) IdentityHashMap(java.util.IdentityHashMap) HashMap(java.util.HashMap) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) SlotSharingGroup(org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup) Map(java.util.Map) IdentityHashMap(java.util.IdentityHashMap) HashMap(java.util.HashMap)
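The else branch above reuses one SlotSharingGroup instance per user-specified group name via computeIfAbsent, so every vertex that declares the same name lands in the same group. A minimal sketch of that lookup pattern follows; the class name, main method, and the group names "analytics" and "io" are made up purely for illustration.

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;

public class NamedSlotSharingGroupsSketch {

    public static void main(String[] args) {
        Map<String, SlotSharingGroup> groupsByName = new HashMap<>();

        // A new SlotSharingGroup is created only the first time a name is seen;
        // later lookups for the same name return the same instance.
        SlotSharingGroup a1 = groupsByName.computeIfAbsent("analytics", k -> new SlotSharingGroup());
        SlotSharingGroup a2 = groupsByName.computeIfAbsent("analytics", k -> new SlotSharingGroup());
        SlotSharingGroup io = groupsByName.computeIfAbsent("io", k -> new SlotSharingGroup());

        System.out.println(a1 == a2); // true: same instance reused
        System.out.println(a1 == io); // false: distinct group
    }
}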

Example 19 with SlotSharingGroup

Use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.

From the class StreamingJobGraphGenerator, method setCoLocation:

private void setCoLocation() {
    final Map<String, Tuple2<SlotSharingGroup, CoLocationGroupImpl>> coLocationGroups = new HashMap<>();
    for (Map.Entry<Integer, JobVertex> entry : jobVertices.entrySet()) {
        final StreamNode node = streamGraph.getStreamNode(entry.getKey());
        final JobVertex vertex = entry.getValue();
        final SlotSharingGroup sharingGroup = vertex.getSlotSharingGroup();
        // configure co-location constraint
        final String coLocationGroupKey = node.getCoLocationGroup();
        if (coLocationGroupKey != null) {
            if (sharingGroup == null) {
                throw new IllegalStateException("Cannot use a co-location constraint without a slot sharing group");
            }
            Tuple2<SlotSharingGroup, CoLocationGroupImpl> constraint = coLocationGroups.computeIfAbsent(coLocationGroupKey, k -> new Tuple2<>(sharingGroup, new CoLocationGroupImpl()));
            if (constraint.f0 != sharingGroup) {
                throw new IllegalStateException("Cannot co-locate operators from different slot sharing groups");
            }
            vertex.updateCoLocationGroup(constraint.f1);
            constraint.f1.addVertex(vertex);
        }
    }
}
Also used : IdentityHashMap(java.util.IdentityHashMap) HashMap(java.util.HashMap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) Tuple2(org.apache.flink.api.java.tuple.Tuple2) CoLocationGroupImpl(org.apache.flink.runtime.jobmanager.scheduler.CoLocationGroupImpl) SlotSharingGroup(org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup) Map(java.util.Map)
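The key invariant enforced above is that operators can only be co-located if they already belong to the same SlotSharingGroup instance; note that the check compares references, not equals(). A small standalone sketch of just that check follows; the helper name, class name, and main method are hypothetical and serve only to isolate the rule from the rest of the generator.

import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;

public class CoLocationCheckSketch {

    // Mirrors the validation above: co-located vertices must share one slot sharing group instance.
    static void checkSameGroup(SlotSharingGroup expected, SlotSharingGroup actual) {
        if (expected != actual) {
            throw new IllegalStateException(
                    "Cannot co-locate operators from different slot sharing groups");
        }
    }

    public static void main(String[] args) {
        SlotSharingGroup groupA = new SlotSharingGroup();
        SlotSharingGroup groupB = new SlotSharingGroup();

        checkSameGroup(groupA, groupA); // fine: same instance
        try {
            checkSameGroup(groupA, groupB); // different groups: rejected
        } catch (IllegalStateException expected) {
            System.out.println(expected.getMessage());
        }
    }
}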

Example 20 with SlotSharingGroup

Use of org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup in project flink by apache.

From the class TaskCancelAsyncProducerConsumerITCase, method testCancelAsyncProducerAndConsumer:

/**
 * Tests that a task waiting on an async producer/consumer that is stuck in a blocking buffer
 * request can be properly cancelled.
 *
 * <p>This is currently required for the Flink Kafka sources, which spawn a separate Thread
 * consuming from Kafka and producing the intermediate streams in the spawned Thread instead of
 * the main task Thread.
 */
@Test
public void testCancelAsyncProducerAndConsumer(@InjectMiniCluster MiniCluster flink) throws Exception {
    Deadline deadline = Deadline.now().plus(Duration.ofMinutes(2));
    // Job with async producer and consumer
    JobVertex producer = new JobVertex("AsyncProducer");
    producer.setParallelism(1);
    producer.setInvokableClass(AsyncProducer.class);
    JobVertex consumer = new JobVertex("AsyncConsumer");
    consumer.setParallelism(1);
    consumer.setInvokableClass(AsyncConsumer.class);
    consumer.connectNewDataSetAsInput(producer, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    SlotSharingGroup slot = new SlotSharingGroup();
    producer.setSlotSharingGroup(slot);
    consumer.setSlotSharingGroup(slot);
    JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(producer, consumer);
    // Submit job and wait until running
    flink.runDetached(jobGraph);
    FutureUtils.retrySuccessfulWithDelay(() -> flink.getJobStatus(jobGraph.getJobID()), Time.milliseconds(10), deadline, status -> status == JobStatus.RUNNING, TestingUtils.defaultScheduledExecutor()).get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    boolean producerBlocked = false;
    for (int i = 0; i < 50; i++) {
        Thread thread = ASYNC_PRODUCER_THREAD;
        if (thread != null && thread.isAlive()) {
            StackTraceElement[] stackTrace = thread.getStackTrace();
            producerBlocked = isInBlockingBufferRequest(stackTrace);
        }
        if (producerBlocked) {
            break;
        } else {
            // Retry
            Thread.sleep(500L);
        }
    }
    // Verify that async producer is in blocking request
    assertTrue("Producer thread is not blocked: " + Arrays.toString(ASYNC_PRODUCER_THREAD.getStackTrace()), producerBlocked);
    boolean consumerWaiting = false;
    for (int i = 0; i < 50; i++) {
        Thread thread = ASYNC_CONSUMER_THREAD;
        if (thread != null && thread.isAlive()) {
            consumerWaiting = thread.getState() == Thread.State.WAITING;
        }
        if (consumerWaiting) {
            break;
        } else {
            // Retry
            Thread.sleep(500L);
        }
    }
    // Verify that the async consumer is waiting (blocked on input)
    assertTrue("Consumer thread is not blocked.", consumerWaiting);
    flink.cancelJob(jobGraph.getJobID()).get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    // wait until the job is canceled
    FutureUtils.retrySuccessfulWithDelay(() -> flink.getJobStatus(jobGraph.getJobID()), Time.milliseconds(10), deadline, status -> status == JobStatus.CANCELED, TestingUtils.defaultScheduledExecutor()).get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
    // Verify the expected Exceptions
    assertNotNull(ASYNC_PRODUCER_EXCEPTION);
    assertEquals(CancelTaskException.class, ASYNC_PRODUCER_EXCEPTION.getClass());
    assertNotNull(ASYNC_CONSUMER_EXCEPTION);
    assertEquals(IllegalStateException.class, ASYNC_CONSUMER_EXCEPTION.getClass());
}
Also used : RecordWriterBuilder(org.apache.flink.runtime.io.network.api.writer.RecordWriterBuilder) Deadline(org.apache.flink.api.common.time.Deadline) Arrays(java.util.Arrays) InternalMiniClusterExtension(org.apache.flink.runtime.testutils.InternalMiniClusterExtension) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) SlotSharingGroup(org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) ResultPartitionWriter(org.apache.flink.runtime.io.network.api.writer.ResultPartitionWriter) ResultPartitionType(org.apache.flink.runtime.io.network.partition.ResultPartitionType) JobStatus(org.apache.flink.api.common.JobStatus) MemorySize(org.apache.flink.configuration.MemorySize) MiniClusterResourceConfiguration(org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration) NettyShuffleEnvironmentOptions(org.apache.flink.configuration.NettyShuffleEnvironmentOptions) TestLoggerExtension(org.apache.flink.util.TestLoggerExtension) TaskManagerOptions(org.apache.flink.configuration.TaskManagerOptions) FutureUtils(org.apache.flink.util.concurrent.FutureUtils) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) RegisterExtension(org.junit.jupiter.api.extension.RegisterExtension) Duration(java.time.Duration) JobGraphTestUtils(org.apache.flink.runtime.jobgraph.JobGraphTestUtils) MiniCluster(org.apache.flink.runtime.minicluster.MiniCluster) DistributionPattern(org.apache.flink.runtime.jobgraph.DistributionPattern) InputGate(org.apache.flink.runtime.io.network.partition.consumer.InputGate) CancelTaskException(org.apache.flink.runtime.execution.CancelTaskException) LongValue(org.apache.flink.types.LongValue) Assert.assertNotNull(org.junit.Assert.assertNotNull) Configuration(org.apache.flink.configuration.Configuration) AbstractInvokable(org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.jupiter.api.Test) TimeUnit(java.util.concurrent.TimeUnit) TestingUtils(org.apache.flink.testutils.TestingUtils) RecordWriter(org.apache.flink.runtime.io.network.api.writer.RecordWriter) InjectMiniCluster(org.apache.flink.test.junit5.InjectMiniCluster) Time(org.apache.flink.api.common.time.Time) Environment(org.apache.flink.runtime.execution.Environment) LocalBufferPoolDestroyTest.isInBlockingBufferRequest(org.apache.flink.runtime.io.network.buffer.LocalBufferPoolDestroyTest.isInBlockingBufferRequest) Assert.assertEquals(org.junit.Assert.assertEquals) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) Deadline(org.apache.flink.api.common.time.Deadline) SlotSharingGroup(org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup) Test(org.junit.jupiter.api.Test)
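The part of this test that concerns SlotSharingGroup is the wiring of producer and consumer into one shared group, so that both subtasks can be deployed into the same slot. Here is a minimal sketch of that wiring using only the JobVertex and SlotSharingGroup calls shown above; the class name, vertex names, and main method are illustrative, and flink-runtime is assumed to be on the classpath.

import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup;

public class SharedSlotWiringSketch {

    public static void main(String[] args) {
        JobVertex producer = new JobVertex("Producer");
        producer.setParallelism(1);

        JobVertex consumer = new JobVertex("Consumer");
        consumer.setParallelism(1);

        // Putting both vertices into one group allows their subtasks to be deployed
        // into the same slot, which is what the cancellation test depends on.
        SlotSharingGroup sharedSlot = new SlotSharingGroup();
        producer.setSlotSharingGroup(sharedSlot);
        consumer.setSlotSharingGroup(sharedSlot);

        System.out.println(producer.getSlotSharingGroup() == consumer.getSlotSharingGroup()); // true
    }
}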

Aggregations

SlotSharingGroup (org.apache.flink.runtime.jobmanager.scheduler.SlotSharingGroup): 53 usages
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex): 35 usages
Test (org.junit.Test): 30 usages
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 18 usages
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 14 usages
JobID (org.apache.flink.api.common.JobID): 11 usages
HashMap (java.util.HashMap): 8 usages
Configuration (org.apache.flink.configuration.Configuration): 8 usages
ArrayList (java.util.ArrayList): 7 usages
HashSet (java.util.HashSet): 6 usages
Map (java.util.Map): 6 usages
Set (java.util.Set): 6 usages
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 6 usages
ResultPartitionType (org.apache.flink.runtime.io.network.partition.ResultPartitionType): 6 usages
CoLocationGroup (org.apache.flink.runtime.jobmanager.scheduler.CoLocationGroup): 6 usages
IOException (java.io.IOException): 5 usages
Arrays (java.util.Arrays): 5 usages
IdentityHashMap (java.util.IdentityHashMap): 5 usages
Collections (java.util.Collections): 4 usages
Comparator (java.util.Comparator): 4 usages