Usage of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in the Apache Flink project:
class PointwisePatternTest, method testHighToLow.
/**
 * Checks the pointwise pattern for a high-parallelism upstream connected to a
 * low-parallelism downstream: every downstream subtask must consume between
 * {@code factor} and {@code factor + delta} upstream partitions, and every
 * upstream partition must be consumed exactly once overall.
 */
private void testHighToLow(int highDop, int lowDop) throws Exception {
    if (highDop < lowDop) {
        throw new IllegalArgumentException();
    }
    // Each downstream subtask consumes 'factor' partitions, plus one more
    // when the parallelisms do not divide evenly.
    final int factor = highDop / lowDop;
    final int delta = highDop % lowDop == 0 ? 0 : 1;

    ExecutionJobVertex target = setUpExecutionGraphAndGetDownstreamVertex(highDop, lowDop);

    // consumptionCounts[p] = number of downstream subtasks consuming upstream partition p.
    int[] consumptionCounts = new int[highDop];

    for (ExecutionVertex vertex : target.getTaskVertices()) {
        assertEquals(1, vertex.getNumberOfInputs());

        // Flatten all consumed partition groups of this subtask into one list.
        List<IntermediateResultPartitionID> partitions = new ArrayList<>();
        for (ConsumedPartitionGroup group : vertex.getAllConsumedPartitionGroups()) {
            for (IntermediateResultPartitionID partitionId : group) {
                partitions.add(partitionId);
            }
        }

        final int consumed = partitions.size();
        assertTrue(consumed >= factor && consumed <= factor + delta);

        for (IntermediateResultPartitionID partitionId : partitions) {
            consumptionCounts[partitionId.getPartitionNumber()]++;
        }
    }

    // Every upstream partition is consumed by exactly one downstream subtask.
    for (int count : consumptionCounts) {
        assertEquals(1, count);
    }
}
Usage of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in the Apache Flink project:
class PointwisePatternTest, method testNTo2N.
/**
 * Pointwise N-to-2N pattern: each pair of consecutive downstream subtasks
 * shares a single upstream partition.
 */
@Test
public void testNTo2N() throws Exception {
    final int upstreamParallelism = 41;

    ExecutionJobVertex target =
            setUpExecutionGraphAndGetDownstreamVertex(upstreamParallelism, 2 * upstreamParallelism);

    for (ExecutionVertex vertex : target.getTaskVertices()) {
        assertEquals(1, vertex.getNumberOfInputs());

        ConsumedPartitionGroup group = vertex.getConsumedPartitionGroup(0);
        assertEquals(1, group.size());

        // Downstream subtask i consumes upstream partition i / 2.
        final int expectedPartitionNumber = vertex.getParallelSubtaskIndex() / 2;
        assertEquals(expectedPartitionNumber, group.getFirst().getPartitionNumber());
    }
}
Usage of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in the Apache Flink project:
class PointwisePatternTest, method testNToN.
/**
 * Pointwise N-to-N pattern: each downstream subtask consumes exactly the
 * upstream partition with its own subtask index.
 */
@Test
public void testNToN() throws Exception {
    final int parallelism = 23;

    ExecutionJobVertex target =
            setUpExecutionGraphAndGetDownstreamVertex(parallelism, parallelism);

    for (ExecutionVertex vertex : target.getTaskVertices()) {
        assertEquals(1, vertex.getNumberOfInputs());

        ConsumedPartitionGroup group = vertex.getConsumedPartitionGroup(0);
        assertEquals(1, group.size());

        // One-to-one wiring: subtask i reads partition i.
        assertEquals(
                vertex.getParallelSubtaskIndex(),
                group.getFirst().getPartitionNumber());
    }
}
Usage of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in the Apache Flink project:
class PointwisePatternTest, method testConnections.
/**
 * Verify the connections between upstream result partitions and downstream vertices.
 *
 * @param sourceParallelism parallelism of the upstream vertex
 * @param targetParallelism parallelism of the downstream vertex
 * @param expectedConsumedPartitionNumber expected upstream partition numbers,
 *     indexed by downstream subtask, in iteration order of its consumed group
 */
private void testConnections(
        int sourceParallelism,
        int targetParallelism,
        int[][] expectedConsumedPartitionNumber) throws Exception {

    ExecutionJobVertex target =
            setUpExecutionGraphAndGetDownstreamVertex(sourceParallelism, targetParallelism);
    ExecutionVertex[] vertices = target.getTaskVertices();

    for (int subtask = 0; subtask < vertices.length; subtask++) {
        ConsumedPartitionGroup group = vertices[subtask].getConsumedPartitionGroup(0);
        int[] expected = expectedConsumedPartitionNumber[subtask];

        assertEquals(expected.length, group.size());

        // The group must yield exactly the expected partition numbers, in order.
        int position = 0;
        for (IntermediateResultPartitionID partitionId : group) {
            assertEquals(expected[position++], partitionId.getPartitionNumber());
        }
    }
}
Usage of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in the Apache Flink project:
class IntermediateResultPartitionTest, method testBlockingPartitionResetting.
/**
 * Checks the consumable/finished bookkeeping of a blocking result with two
 * partitions across markFinished calls and resets: a partition becomes
 * consumable when it is finished, the group is done only when all partitions
 * are finished, and a reset returns everything to the initial state.
 */
@Test
public void testBlockingPartitionResetting() throws Exception {
    IntermediateResult result = createResult(ResultPartitionType.BLOCKING, 2);
    IntermediateResultPartition first = result.getPartitions()[0];
    IntermediateResultPartition second = result.getPartitions()[1];
    ConsumedPartitionGroup group = first.getConsumedPartitionGroups().get(0);

    // Freshly created blocking partitions are not consumable.
    assertFalse(first.isConsumable());
    assertFalse(second.isConsumable());

    // Finishing the first partition makes only that partition consumable;
    // the group still waits on the second one.
    first.markFinished();
    assertEquals(1, group.getNumberOfUnfinishedPartitions());
    assertTrue(first.isConsumable());
    assertFalse(second.isConsumable());
    assertFalse(group.areAllPartitionsFinished());

    // A reset clears the finished state; finishing only the second partition
    // afterwards still leaves the group unfinished.
    result.resetForNewExecution();
    assertEquals(2, group.getNumberOfUnfinishedPartitions());
    second.markFinished();
    assertEquals(1, group.getNumberOfUnfinishedPartitions());
    assertFalse(first.isConsumable());
    assertTrue(second.isConsumable());
    assertFalse(group.areAllPartitionsFinished());

    // Once both partitions are finished, the whole group is finished.
    first.markFinished();
    assertEquals(0, group.getNumberOfUnfinishedPartitions());
    assertTrue(first.isConsumable());
    assertTrue(second.isConsumable());
    assertTrue(group.areAllPartitionsFinished());

    // A failover reset makes everything non-consumable again.
    result.resetForNewExecution();
    assertEquals(2, group.getNumberOfUnfinishedPartitions());
    assertFalse(first.isConsumable());
    assertFalse(second.isConsumable());
    assertFalse(group.areAllPartitionsFinished());
}
Aggregations