Use of org.apache.flink.runtime.jobgraph.ScheduleMode in project flink by apache.
The following example comes from the class ExecutionVertexDeploymentTest, method testTddProducedPartitionsLazyScheduling.
/**
* Tests that the lazy scheduling flag is correctly forwarded to the produced partition descriptors.
*/
@Test
public void testTddProducedPartitionsLazyScheduling() throws Exception {
    TestingUtils.QueuedActionExecutionContext context = TestingUtils.queuedActionExecutionContext();
    ExecutionJobVertex jobVertex = getExecutionVertex(new JobVertexID(), context);

    // One pipelined intermediate result with a single partition produced by the vertex under test.
    IntermediateResult result =
        new IntermediateResult(new IntermediateDataSetID(), jobVertex, 1, ResultPartitionType.PIPELINED);
    ExecutionVertex vertex =
        new ExecutionVertex(jobVertex, 0, new IntermediateResult[] { result }, Time.minutes(1));

    // Register a mocked consumer edge so the partition has a consumer group and a consumer.
    ExecutionEdge mockEdge = createMockExecutionEdge(1);
    result.getPartitions()[0].addConsumerGroup();
    result.getPartitions()[0].addConsumer(mockEdge, 0);

    // Mock the slot hierarchy against which the deployment descriptor is created.
    AllocatedSlot allocatedSlot = mock(AllocatedSlot.class);
    when(allocatedSlot.getSlotAllocationId()).thenReturn(new AllocationID());

    Slot root = mock(Slot.class);
    when(root.getSlotNumber()).thenReturn(1);
    SimpleSlot slot = mock(SimpleSlot.class);
    when(slot.getRoot()).thenReturn(root);
    when(slot.getAllocatedSlot()).thenReturn(allocatedSlot);
    when(root.getAllocatedSlot()).thenReturn(allocatedSlot);

    // For every schedule mode, the produced partition descriptor must carry the matching lazy-deployment flag.
    for (ScheduleMode mode : ScheduleMode.values()) {
        vertex.getExecutionGraph().setScheduleMode(mode);

        TaskDeploymentDescriptor tdd = vertex.createDeploymentDescriptor(new ExecutionAttemptID(), slot, null, 1);

        Collection<ResultPartitionDeploymentDescriptor> producedPartitions = tdd.getProducedPartitions();
        assertEquals(1, producedPartitions.size());

        ResultPartitionDeploymentDescriptor desc = producedPartitions.iterator().next();
        assertEquals(mode.allowLazyDeployment(), desc.sendScheduleOrUpdateConsumersMessage());
    }
}
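For context, the final assertion compares ScheduleMode.allowLazyDeployment() with the flag recorded in the produced partition descriptor. As a rough sketch of what the enum looked like in Flink versions of this era (the exact body may differ between releases and should be treated as an assumption, not a verbatim copy of the source):

public enum ScheduleMode {
    // tasks are deployed lazily, once their input data becomes available
    LAZY_FROM_SOURCES,
    // all tasks are deployed immediately when the job starts
    EAGER;

    // assumed semantics: only lazy scheduling permits lazy deployment of downstream consumers
    public boolean allowLazyDeployment() {
        return this == LAZY_FROM_SOURCES;
    }
}

Under that assumption, the loop verifies that a descriptor created under LAZY_FROM_SOURCES reports sendScheduleOrUpdateConsumersMessage() == true, while one created under EAGER reports false.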