Use of org.apache.flink.runtime.executiongraph.DefaultExecutionGraph in project flink by apache.
From class AdaptiveBatchSchedulerTest, method testAdaptiveBatchScheduler:
@Test
public void testAdaptiveBatchScheduler() throws Exception {
    JobGraph jobGraph = createJobGraph(false);
    Iterator<JobVertex> jobVertexIterator = jobGraph.getVertices().iterator();
    JobVertex source1 = jobVertexIterator.next();
    JobVertex source2 = jobVertexIterator.next();
    JobVertex sink = jobVertexIterator.next();

    SchedulerBase scheduler = createScheduler(jobGraph);
    final DefaultExecutionGraph graph = (DefaultExecutionGraph) scheduler.getExecutionGraph();
    final ExecutionJobVertex sinkExecutionJobVertex = graph.getJobVertex(sink.getID());

    scheduler.startScheduling();
    // The sink's parallelism is still undecided (-1) while its inputs are running.
    assertThat(sinkExecutionJobVertex.getParallelism(), is(-1));

    // Transition all of source1's executions to FINISHED; one finished input
    // is not enough to decide the sink's parallelism.
    transitionExecutionsState(scheduler, ExecutionState.FINISHED, source1);
    assertThat(sinkExecutionJobVertex.getParallelism(), is(-1));

    // Transition all of source2's executions to FINISHED; with both inputs
    // finished, the scheduler decides the sink's parallelism.
    transitionExecutionsState(scheduler, ExecutionState.FINISHED, source2);
    assertThat(sinkExecutionJobVertex.getParallelism(), is(10));

    // Check that the decided parallelism is also written back to the JobGraph.
    assertThat(sink.getParallelism(), is(10));
}
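The listing relies on test helpers (createJobGraph, createScheduler, transitionExecutionsState) that are not shown. For context, here is a minimal, hypothetical sketch of a job graph such a test could use: two sources feeding one sink over BLOCKING edges, with the sink's parallelism left undecided. The vertex names, parallelisms, the ALL_TO_ALL pattern, and the use of JobGraphTestUtils.batchJobGraph are assumptions, not the actual Flink test code.

// Hypothetical sketch only; not the actual createJobGraph from AdaptiveBatchSchedulerTest.
private JobGraph createJobGraph(boolean withForwardEdge) {
    // (The withForwardEdge flag is ignored in this sketch.)
    final JobVertex source1 = new JobVertex("source1");
    source1.setInvokableClass(NoOpInvokable.class);
    source1.setParallelism(8);

    final JobVertex source2 = new JobVertex("source2");
    source2.setInvokableClass(NoOpInvokable.class);
    source2.setParallelism(8);

    // Leave the sink's parallelism unset (-1) so the adaptive batch scheduler
    // decides it once all consumed BLOCKING results are finished.
    final JobVertex sink = new JobVertex("sink");
    sink.setInvokableClass(NoOpInvokable.class);

    sink.connectNewDataSetAsInput(source1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
    sink.connectNewDataSetAsInput(source2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
    return JobGraphTestUtils.batchJobGraph(source1, source2, sink);
}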
Use of org.apache.flink.runtime.executiongraph.DefaultExecutionGraph in project flink by apache.
From class DefaultSchedulingPipelinedRegionTest, method returnsIncidentBlockingPartitions:
/**
 * Tests if the consumed inputs of the pipelined regions are computed correctly using the job
 * graph below. Edges marked with -+- and X are BLOCKING; all other edges are PIPELINED.
 *
 * <pre>
 *          c
 *        /   X
 * a -+- b     e
 *        \   /
 *          d
 * </pre>
 *
 * <p>Pipelined regions: {a}, {b, c, d, e}
 */
@Test
public void returnsIncidentBlockingPartitions() throws Exception {
    final JobVertex a = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex b = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex c = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex d = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex e = ExecutionGraphTestUtils.createNoOpVertex(1);

    b.connectNewDataSetAsInput(a, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    c.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    d.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    e.connectNewDataSetAsInput(c, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    e.connectNewDataSetAsInput(d, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    final DefaultExecutionGraph simpleTestGraph =
            ExecutionGraphTestUtils.createSimpleTestGraph(a, b, c, d, e);
    final DefaultExecutionTopology topology =
            DefaultExecutionTopology.fromExecutionGraph(simpleTestGraph);

    final DefaultSchedulingPipelinedRegion firstPipelinedRegion =
            topology.getPipelinedRegionOfVertex(new ExecutionVertexID(a.getID(), 0));
    final DefaultSchedulingPipelinedRegion secondPipelinedRegion =
            topology.getPipelinedRegionOfVertex(new ExecutionVertexID(e.getID(), 0));

    final DefaultExecutionVertex vertexB0 = topology.getVertex(new ExecutionVertexID(b.getID(), 0));
    final IntermediateResultPartitionID b0ConsumedResultPartition =
            Iterables.getOnlyElement(vertexB0.getConsumedResults()).getId();

    // Collect the blocking partitions consumed by the second region that are
    // produced outside of it.
    final Set<IntermediateResultPartitionID> secondPipelinedRegionConsumedResults = new HashSet<>();
    for (ConsumedPartitionGroup consumedPartitionGroup :
            secondPipelinedRegion.getAllBlockingConsumedPartitionGroups()) {
        for (IntermediateResultPartitionID partitionId : consumedPartitionGroup) {
            if (!secondPipelinedRegion.contains(
                    topology.getResultPartition(partitionId).getProducer().getId())) {
                secondPipelinedRegionConsumedResults.add(partitionId);
            }
        }
    }

    // Region {a} consumes no blocking partitions; region {b, c, d, e} consumes
    // exactly the blocking partition produced by a.
    assertThat(firstPipelinedRegion.getAllBlockingConsumedPartitionGroups().iterator().hasNext(), is(false));
    assertThat(secondPipelinedRegionConsumedResults, contains(b0ConsumedResultPartition));
}
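To make the asserted region split ({a} versus {b, c, d, e}) visible directly, one could iterate the regions of the topology. A minimal sketch, assuming the same topology object as above; getAllPipelinedRegions and getVertices are part of Flink's scheduling topology interfaces:

// Sketch: enumerate the pipelined regions and their sizes for the graph above.
for (DefaultSchedulingPipelinedRegion region : topology.getAllPipelinedRegions()) {
    int vertexCount = Iterables.size(region.getVertices());
    // Expected: one region of size 1 ({a}) and one of size 4 ({b, c, d, e}).
    System.out.println("pipelined region with " + vertexCount + " vertices");
}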
Use of org.apache.flink.runtime.executiongraph.DefaultExecutionGraph in project flink by apache.
From class DefaultExecutionTopologyTest, method testErrorIfCoLocatedTasksAreNotInSameRegion:
@Test(expected = IllegalStateException.class)
public void testErrorIfCoLocatedTasksAreNotInSameRegion() throws Exception {
    int parallelism = 3;
    final JobVertex v1 = createNoOpVertex(parallelism);
    final JobVertex v2 = createNoOpVertex(parallelism);

    SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
    v1.setSlotSharingGroup(slotSharingGroup);
    v2.setSlotSharingGroup(slotSharingGroup);
    v1.setStrictlyCoLocatedWith(v2);

    final DefaultExecutionGraph executionGraph = createSimpleTestGraph(v1, v2);

    // The two vertices are strictly co-located but not connected, so they end up
    // in different pipelined regions; building the topology must fail.
    DefaultExecutionTopology.fromExecutionGraph(executionGraph);
}
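For contrast, a sketch of the non-failing setup: if the two co-located vertices were connected by a PIPELINED edge, they would be merged into a single pipelined region and the topology could be built. This variant is an illustrative assumption, not a test from the Flink repository:

// Sketch: the same co-location constraint succeeds once the vertices
// share a pipelined region via a PIPELINED edge.
final JobVertex v1 = createNoOpVertex(3);
final JobVertex v2 = createNoOpVertex(3);
v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

SlotSharingGroup group = new SlotSharingGroup();
v1.setSlotSharingGroup(group);
v2.setSlotSharingGroup(group);
v1.setStrictlyCoLocatedWith(v2);

// Both vertices are now in one pipelined region, so this no longer throws.
DefaultExecutionTopology.fromExecutionGraph(createSimpleTestGraph(v1, v2));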
Use of org.apache.flink.runtime.executiongraph.DefaultExecutionGraph in project flink by apache.
From class SsgNetworkMemoryCalculationUtilsTest, method testGenerateEnrichedResourceProfileForDynamicGraph:
@Test
public void testGenerateEnrichedResourceProfileForDynamicGraph() throws Exception {
    List<SlotSharingGroup> slotSharingGroups =
            Arrays.asList(new SlotSharingGroup(), new SlotSharingGroup(), new SlotSharingGroup());
    for (SlotSharingGroup group : slotSharingGroups) {
        group.setResourceProfile(DEFAULT_RESOURCE);
    }

    DefaultExecutionGraph executionGraph = createDynamicExecutionGraph(slotSharingGroups, 20);
    Iterator<ExecutionJobVertex> jobVertices = executionGraph.getVerticesTopologically().iterator();
    ExecutionJobVertex source = jobVertices.next();
    ExecutionJobVertex map = jobVertices.next();
    ExecutionJobVertex sink = jobVertices.next();

    executionGraph.initializeJobVertex(source, 0L);
    triggerComputeNumOfSubpartitions(source.getProducedDataSets()[0]);

    map.setParallelism(5);
    executionGraph.initializeJobVertex(map, 0L);
    triggerComputeNumOfSubpartitions(map.getProducedDataSets()[0]);

    sink.setParallelism(7);
    executionGraph.initializeJobVertex(sink, 0L);

    assertNetworkMemory(
            slotSharingGroups,
            Arrays.asList(
                    new MemorySize(TestShuffleMaster.computeRequiredShuffleMemoryBytes(0, 5)),
                    new MemorySize(TestShuffleMaster.computeRequiredShuffleMemoryBytes(5, 20)),
                    new MemorySize(TestShuffleMaster.computeRequiredShuffleMemoryBytes(15, 0))));
}
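The triggerComputeNumOfSubpartitions helper is not part of this listing. A plausible sketch, assuming its only job is to force the lazily computed subpartition counts on each partition of a result before the consumer's parallelism is decided (an assumption, not the verified Flink source):

// Sketch: force each partition of the result to compute its subpartition count.
private void triggerComputeNumOfSubpartitions(IntermediateResult result) {
    for (IntermediateResultPartition partition : result.getPartitions()) {
        // getNumberOfSubpartitions() computes and caches the count on first access.
        partition.getNumberOfSubpartitions();
    }
}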
Use of org.apache.flink.runtime.executiongraph.DefaultExecutionGraph in project flink by apache.
From class SsgNetworkMemoryCalculationUtilsTest, method testGetMaxInputChannelNumForResult:
private void testGetMaxInputChannelNumForResult(
        DistributionPattern distributionPattern,
        int producerParallelism,
        int consumerMaxParallelism,
        int decidedConsumerParallelism,
        int expectedNumChannels)
        throws Exception {
    final DefaultExecutionGraph eg =
            (DefaultExecutionGraph)
                    IntermediateResultPartitionTest.createExecutionGraph(
                            producerParallelism, -1, consumerMaxParallelism, distributionPattern, true);

    final Iterator<ExecutionJobVertex> vertexIterator = eg.getVerticesTopologically().iterator();
    final ExecutionJobVertex producer = vertexIterator.next();
    final ExecutionJobVertex consumer = vertexIterator.next();

    eg.initializeJobVertex(producer, 0L);
    final IntermediateResult result = producer.getProducedDataSets()[0];
    triggerComputeNumOfSubpartitions(result);

    consumer.setParallelism(decidedConsumerParallelism);
    eg.initializeJobVertex(consumer, 0L);

    Map<IntermediateDataSetID, Integer> maxInputChannelNums =
            SsgNetworkMemoryCalculationUtils.getMaxInputChannelNumsForDynamicGraph(consumer);

    assertThat(maxInputChannelNums.size(), is(1));
    assertThat(maxInputChannelNums.get(result.getId()), is(expectedNumChannels));
}
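To make the parameters concrete, a hypothetical invocation (the numbers are illustrative, derived from the subpartition-range math rather than copied from the Flink test suite): with ALL_TO_ALL distribution, 5 producers, a consumer max parallelism of 20, and a decided parallelism of 7, each producer partition is created with 20 subpartitions; a consumer subtask then reads at most ceil(20 / 7) = 3 subpartitions from each of the 5 partitions, so at most 15 input channels are expected.

// Hypothetical invocation; 15 = 5 producers * ceil(20 subpartitions / 7 consumer subtasks).
testGetMaxInputChannelNumForResult(DistributionPattern.ALL_TO_ALL, 5, 20, 7, 15);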