
Example 21 with ConsumedPartitionGroup

Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in the Apache Flink project.

From class PointwisePatternTest, method testHighToLow.

/**
 * Verifies a pointwise high-to-low connection: each downstream subtask consumes
 * at least {@code factor} and at most {@code factor + delta} upstream partitions,
 * and every upstream partition is consumed exactly once.
 */
private void testHighToLow(int highDop, int lowDop) throws Exception {
    if (highDop < lowDop) {
        throw new IllegalArgumentException();
    }
    final int factor = highDop / lowDop;
    final int delta = highDop % lowDop == 0 ? 0 : 1;
    ExecutionJobVertex target = setUpExecutionGraphAndGetDownstreamVertex(highDop, lowDop);
    // Tracks how many times each upstream partition is consumed across all downstream subtasks.
    int[] timesUsed = new int[highDop];
    for (ExecutionVertex ev : target.getTaskVertices()) {
        assertEquals(1, ev.getNumberOfInputs());
        // Collect every partition this execution vertex consumes, across all of its consumed partition groups.
        List<IntermediateResultPartitionID> consumedPartitions = new ArrayList<>();
        for (ConsumedPartitionGroup partitionGroup : ev.getAllConsumedPartitionGroups()) {
            for (IntermediateResultPartitionID partitionId : partitionGroup) {
                consumedPartitions.add(partitionId);
            }
        }
        // Each downstream subtask consumes between factor and factor + delta partitions.
        assertTrue(consumedPartitions.size() >= factor && consumedPartitions.size() <= factor + delta);
        for (IntermediateResultPartitionID consumedPartition : consumedPartitions) {
            timesUsed[consumedPartition.getPartitionNumber()]++;
        }
    }
    // Every upstream partition must be consumed exactly once.
    for (int used : timesUsed) {
        assertEquals(1, used);
    }
}
Also used : ConsumedPartitionGroup(org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup) ArrayList(java.util.ArrayList) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID)
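For intuition, here is a small worked example of the factor/delta bound checked above. The values (highDop = 7, lowDop = 3) are illustrative only and are not taken from the Flink test suite.

public class HighToLowFactorExample {
    public static void main(String[] args) {
        // Illustrative values only: 7 upstream partitions feeding 3 downstream subtasks.
        int highDop = 7;
        int lowDop = 3;
        int factor = highDop / lowDop;             // 2
        int delta = highDop % lowDop == 0 ? 0 : 1; // 1, since 7 is not evenly divisible by 3
        // Each downstream subtask should consume between factor (2) and factor + delta (3)
        // upstream partitions, and the 3 subtasks together cover all 7 partitions exactly once.
        System.out.println("factor=" + factor + ", delta=" + delta);
    }
}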

Example 22 with ConsumedPartitionGroup

Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in the Apache Flink project.

From class PointwisePatternTest, method testNTo2N.

@Test
public void testNTo2N() throws Exception {
    final int N = 41;
    ExecutionJobVertex target = setUpExecutionGraphAndGetDownstreamVertex(N, 2 * N);
    for (ExecutionVertex ev : target.getTaskVertices()) {
        assertEquals(1, ev.getNumberOfInputs());
        ConsumedPartitionGroup consumedPartitionGroup = ev.getConsumedPartitionGroup(0);
        // Each downstream subtask consumes exactly one upstream partition: partition subtaskIndex / 2.
        assertEquals(1, consumedPartitionGroup.size());
        assertEquals(ev.getParallelSubtaskIndex() / 2, consumedPartitionGroup.getFirst().getPartitionNumber());
    }
}
Also used : ConsumedPartitionGroup(org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup) Test(org.junit.Test)
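The subtaskIndex / 2 expectation can be illustrated with a small standalone sketch; the parallelism used here (N = 3) is a made-up value, not the one from the test.

public class NTo2NMappingExample {
    public static void main(String[] args) {
        // Illustrative only: N = 3 upstream partitions, 2N = 6 downstream subtasks.
        int upstreamParallelism = 3;
        int downstreamParallelism = 2 * upstreamParallelism;
        for (int subtask = 0; subtask < downstreamParallelism; subtask++) {
            // Under the pointwise pattern, downstream subtask i consumes upstream partition i / 2.
            System.out.println("downstream subtask " + subtask + " -> upstream partition " + (subtask / 2));
        }
        // Output: subtasks 0 and 1 share partition 0, 2 and 3 share partition 1, 4 and 5 share partition 2.
    }
}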

Example 23 with ConsumedPartitionGroup

Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in the Apache Flink project.

From class PointwisePatternTest, method testNToN.

@Test
public void testNToN() throws Exception {
    final int N = 23;
    ExecutionJobVertex target = setUpExecutionGraphAndGetDownstreamVertex(N, N);
    for (ExecutionVertex ev : target.getTaskVertices()) {
        assertEquals(1, ev.getNumberOfInputs());
        ConsumedPartitionGroup consumedPartitionGroup = ev.getConsumedPartitionGroup(0);
        // One-to-one mapping: subtask i consumes exactly the upstream partition with number i.
        assertEquals(1, consumedPartitionGroup.size());
        assertEquals(ev.getParallelSubtaskIndex(), consumedPartitionGroup.getFirst().getPartitionNumber());
    }
}
Also used : ConsumedPartitionGroup(org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup) Test(org.junit.Test)

Example 24 with ConsumedPartitionGroup

Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in the Apache Flink project.

From class PointwisePatternTest, method testConnections.

/**
 * Verify the connections between upstream result partitions and downstream vertices.
 */
private void testConnections(int sourceParallelism, int targetParallelism, int[][] expectedConsumedPartitionNumber) throws Exception {
    ExecutionJobVertex target = setUpExecutionGraphAndGetDownstreamVertex(sourceParallelism, targetParallelism);
    for (int vertexIndex = 0; vertexIndex < target.getTaskVertices().length; vertexIndex++) {
        ExecutionVertex ev = target.getTaskVertices()[vertexIndex];
        ConsumedPartitionGroup consumedPartitionGroup = ev.getConsumedPartitionGroup(0);
        assertEquals(expectedConsumedPartitionNumber[vertexIndex].length, consumedPartitionGroup.size());
        int partitionIndex = 0;
        for (IntermediateResultPartitionID partitionId : consumedPartitionGroup) {
            assertEquals(expectedConsumedPartitionNumber[vertexIndex][partitionIndex++], partitionId.getPartitionNumber());
        }
    }
}
Also used : ConsumedPartitionGroup(org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID)
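For context, hypothetical invocations of this helper might look like the following. The parallelism values and expected groupings are illustrative assumptions consistent with the pointwise mappings exercised above; they are not calls copied from the Flink test suite.

// Hypothetical calls (illustrative assumptions, not from the Flink test suite):
// 4 upstream partitions, 2 downstream subtasks: each subtask consumes a contiguous pair.
testConnections(4, 2, new int[][] {{0, 1}, {2, 3}});
// 2 upstream partitions, 4 downstream subtasks: pairs of subtasks share one partition.
testConnections(2, 4, new int[][] {{0}, {0}, {1}, {1}});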

Example 25 with ConsumedPartitionGroup

Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in the Apache Flink project.

From class IntermediateResultPartitionTest, method testBlockingPartitionResetting.

@Test
public void testBlockingPartitionResetting() throws Exception {
    IntermediateResult result = createResult(ResultPartitionType.BLOCKING, 2);
    IntermediateResultPartition partition1 = result.getPartitions()[0];
    IntermediateResultPartition partition2 = result.getPartitions()[1];
    ConsumedPartitionGroup consumedPartitionGroup = partition1.getConsumedPartitionGroups().get(0);
    // Not consumable on init
    assertFalse(partition1.isConsumable());
    assertFalse(partition2.isConsumable());
    // partition1 becomes consumable once it is FINISHED, but the group still has an unfinished partition
    partition1.markFinished();
    assertEquals(1, consumedPartitionGroup.getNumberOfUnfinishedPartitions());
    assertTrue(partition1.isConsumable());
    assertFalse(partition2.isConsumable());
    assertFalse(consumedPartitionGroup.areAllPartitionsFinished());
    // Reset the result and mark partition2 FINISHED; the group is still not fully finished
    result.resetForNewExecution();
    assertEquals(2, consumedPartitionGroup.getNumberOfUnfinishedPartitions());
    partition2.markFinished();
    assertEquals(1, consumedPartitionGroup.getNumberOfUnfinishedPartitions());
    assertFalse(partition1.isConsumable());
    assertTrue(partition2.isConsumable());
    assertFalse(consumedPartitionGroup.areAllPartitionsFinished());
    // Consumable after all partitions are FINISHED
    partition1.markFinished();
    assertEquals(0, consumedPartitionGroup.getNumberOfUnfinishedPartitions());
    assertTrue(partition1.isConsumable());
    assertTrue(partition2.isConsumable());
    assertTrue(consumedPartitionGroup.areAllPartitionsFinished());
    // Not consumable again after a failover reset
    result.resetForNewExecution();
    assertEquals(2, consumedPartitionGroup.getNumberOfUnfinishedPartitions());
    assertFalse(partition1.isConsumable());
    assertFalse(partition2.isConsumable());
    assertFalse(consumedPartitionGroup.areAllPartitionsFinished());
}
Also used : ConsumedPartitionGroup(org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup) Test(org.junit.Test)
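As the assertions above suggest, a BLOCKING ConsumedPartitionGroup is only fully consumable once its unfinished-partition counter reaches zero. Below is a minimal sketch of a consumer-side readiness check built on the two accessors used in the test; the class and helper names are hypothetical, not Flink API.

import org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup;

public class BlockingGroupReadiness {
    // Hypothetical helper: a downstream consumer of a BLOCKING result is ready to be
    // scheduled only after every producing partition in its consumed group has finished.
    static boolean allProducersFinished(ConsumedPartitionGroup group) {
        // Equivalent to group.areAllPartitionsFinished(), as exercised in the test above.
        return group.getNumberOfUnfinishedPartitions() == 0;
    }
}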

Aggregations

ConsumedPartitionGroup (org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup): 30 uses
IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID): 15 uses
Test (org.junit.Test): 12 uses
ArrayList (java.util.ArrayList): 9 uses
ExecutionVertexID (org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID): 6 uses
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex): 4 uses
SchedulingResultPartition (org.apache.flink.runtime.scheduler.strategy.SchedulingResultPartition): 4 uses
HashSet (java.util.HashSet): 3 uses
List (java.util.List): 3 uses
IntermediateDataSetID (org.apache.flink.runtime.jobgraph.IntermediateDataSetID): 3 uses
ConsumerVertexGroup (org.apache.flink.runtime.scheduler.strategy.ConsumerVertexGroup): 3 uses
Collection (java.util.Collection): 2 uses
Collections (java.util.Collections): 2 uses
HashMap (java.util.HashMap): 2 uses
Map (java.util.Map): 2 uses
ExecutionState (org.apache.flink.runtime.execution.ExecutionState): 2 uses
ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex): 2 uses
IntermediateResultPartition (org.apache.flink.runtime.executiongraph.IntermediateResultPartition): 2 uses
ResultPartitionType (org.apache.flink.runtime.io.network.partition.ResultPartitionType): 2 uses
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 2 uses