use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
the class CheckpointStateRestoreTest method testSetState.
/**
 * Tests that on restore the task state is reset for each stateful task.
 */
@Test
public void testSetState() {
    try {
        KeyGroupRange keyGroupRange = KeyGroupRange.of(0, 0);
        List<SerializableObject> testStates = Collections.singletonList(new SerializableObject());
        final KeyedStateHandle serializedKeyGroupStates =
                CheckpointCoordinatorTestingUtils.generateKeyGroupState(keyGroupRange, testStates);

        final JobVertexID statefulId = new JobVertexID();
        final JobVertexID statelessId = new JobVertexID();

        ExecutionGraph graph =
                new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
                        .addJobVertex(statefulId, 3, 256)
                        .addJobVertex(statelessId, 2, 256)
                        .build();

        ExecutionJobVertex stateful = graph.getJobVertex(statefulId);
        ExecutionJobVertex stateless = graph.getJobVertex(statelessId);
        ExecutionVertex stateful1 = stateful.getTaskVertices()[0];
        ExecutionVertex stateful2 = stateful.getTaskVertices()[1];
        ExecutionVertex stateful3 = stateful.getTaskVertices()[2];
        ExecutionVertex stateless1 = stateless.getTaskVertices()[0];
        ExecutionVertex stateless2 = stateless.getTaskVertices()[1];

        Execution statefulExec1 = stateful1.getCurrentExecutionAttempt();
        Execution statefulExec2 = stateful2.getCurrentExecutionAttempt();
        Execution statefulExec3 = stateful3.getCurrentExecutionAttempt();
        Execution statelessExec1 = stateless1.getCurrentExecutionAttempt();
        Execution statelessExec2 = stateless2.getCurrentExecutionAttempt();

        ManuallyTriggeredScheduledExecutor manuallyTriggeredScheduledExecutor =
                new ManuallyTriggeredScheduledExecutor();
        CheckpointCoordinator coord =
                new CheckpointCoordinatorBuilder()
                        .setExecutionGraph(graph)
                        .setTimer(manuallyTriggeredScheduledExecutor)
                        .build();

        // create ourselves a checkpoint with state
        coord.triggerCheckpoint(false);
        manuallyTriggeredScheduledExecutor.triggerAll();

        PendingCheckpoint pending = coord.getPendingCheckpoints().values().iterator().next();
        final long checkpointId = pending.getCheckpointId();

        final TaskStateSnapshot subtaskStates = new TaskStateSnapshot();
        subtaskStates.putSubtaskStateByOperatorID(
                OperatorID.fromJobVertexID(statefulId),
                OperatorSubtaskState.builder().setManagedKeyedState(serializedKeyGroupStates).build());

        coord.receiveAcknowledgeMessage(
                new AcknowledgeCheckpoint(graph.getJobID(), statefulExec1.getAttemptId(), checkpointId, new CheckpointMetrics(), subtaskStates),
                TASK_MANAGER_LOCATION_INFO);
        coord.receiveAcknowledgeMessage(
                new AcknowledgeCheckpoint(graph.getJobID(), statefulExec2.getAttemptId(), checkpointId, new CheckpointMetrics(), subtaskStates),
                TASK_MANAGER_LOCATION_INFO);
        coord.receiveAcknowledgeMessage(
                new AcknowledgeCheckpoint(graph.getJobID(), statefulExec3.getAttemptId(), checkpointId, new CheckpointMetrics(), subtaskStates),
                TASK_MANAGER_LOCATION_INFO);
        coord.receiveAcknowledgeMessage(
                new AcknowledgeCheckpoint(graph.getJobID(), statelessExec1.getAttemptId(), checkpointId),
                TASK_MANAGER_LOCATION_INFO);
        coord.receiveAcknowledgeMessage(
                new AcknowledgeCheckpoint(graph.getJobID(), statelessExec2.getAttemptId(), checkpointId),
                TASK_MANAGER_LOCATION_INFO);

        assertEquals(1, coord.getNumberOfRetainedSuccessfulCheckpoints());
        assertEquals(0, coord.getNumberOfPendingCheckpoints());

        // let the coordinator inject the state
        assertTrue(
                coord.restoreLatestCheckpointedStateToAll(
                        new HashSet<>(Arrays.asList(stateful, stateless)), false));

        // verify that each stateful vertex got the state
        assertEquals(subtaskStates, statefulExec1.getTaskRestore().getTaskStateSnapshot());
        assertEquals(subtaskStates, statefulExec2.getTaskRestore().getTaskStateSnapshot());
        assertEquals(subtaskStates, statefulExec3.getTaskRestore().getTaskStateSnapshot());
        assertNull(statelessExec1.getTaskRestore());
        assertNull(statelessExec2.getTaskRestore());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
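A minimal companion sketch of the opposite case, not taken from the Flink sources: with no completed checkpoint on record, the same restore call is assumed to report that nothing was restored rather than fail. It reuses the test utilities seen above (CheckpointExecutionGraphBuilder, CheckpointCoordinatorBuilder); the method name testRestoreWithoutCompletedCheckpoint is hypothetical.

@Test
public void testRestoreWithoutCompletedCheckpoint() throws Exception {
    final JobVertexID statefulId = new JobVertexID();
    ExecutionGraph graph =
            new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
                    .addJobVertex(statefulId, 3, 256)
                    .build();
    ExecutionJobVertex stateful = graph.getJobVertex(statefulId);
    CheckpointCoordinator coord =
            new CheckpointCoordinatorBuilder()
                    .setExecutionGraph(graph)
                    .setTimer(new ManuallyTriggeredScheduledExecutor())
                    .build();
    // assumption: with no completed checkpoint to draw from, the coordinator
    // reports false here instead of throwing
    assertFalse(coord.restoreLatestCheckpointedStateToAll(Collections.singleton(stateful), false));
}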
use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
the class DefaultCheckpointPlanTest method testAbortionIfPartiallyOperatorsFinishedVertexUsedUnionListState.
@Test
public void testAbortionIfPartiallyOperatorsFinishedVertexUsedUnionListState() throws Exception {
    JobVertexID jobVertexId = new JobVertexID();
    OperatorID operatorId = new OperatorID();
    ExecutionGraph executionGraph =
            new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
                    .addJobVertex(
                            jobVertexId, 2, 2,
                            Collections.singletonList(OperatorIDPair.generatedIDOnly(operatorId)),
                            true)
                    .build();
    ExecutionVertex[] tasks = executionGraph.getJobVertex(jobVertexId).getTaskVertices();
    CheckpointPlan checkpointPlan = createCheckpointPlan(executionGraph);

    // both subtasks of the operator snapshot union list state
    Map<OperatorID, OperatorState> operatorStates = new HashMap<>();
    OperatorState operatorState = new OperatorState(operatorId, 2, 2);
    operatorState.putState(0, createSubtaskStateWithUnionListState(TEMPORARY_FOLDER.newFile()));
    operatorState.putState(1, createSubtaskStateWithUnionListState(TEMPORARY_FOLDER.newFile()));

    // only one of the two tasks reports its operators as finished
    checkpointPlan.reportTaskHasFinishedOperators(tasks[1]);
    operatorStates.put(operatorId, operatorState);

    expectedException.expect(FlinkRuntimeException.class);
    expectedException.expectMessage(
            String.format(
                    "The vertex %s (id = %s) has used UnionListState, "
                            + "but part of its tasks has called operators' finish method.",
                    executionGraph.getJobVertex(jobVertexId).getName(),
                    jobVertexId));
    checkpointPlan.fulfillFinishedTaskStatus(operatorStates);
}
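For contrast, a hedged sketch of the accepting path, not part of the original test: if every task of the vertex reports finished operators, the union list state no longer has to be redistributed among running subtasks, so fulfillment is assumed to succeed. It reuses executionGraph, tasks, and operatorStates from the test above.

// assumption: with *all* tasks reporting finished operators, the partial-finish
// guard does not fire and fulfillFinishedTaskStatus completes normally
CheckpointPlan fullyFinishedPlan = createCheckpointPlan(executionGraph);
fullyFinishedPlan.reportTaskHasFinishedOperators(tasks[0]);
fullyFinishedPlan.reportTaskHasFinishedOperators(tasks[1]);
fullyFinishedPlan.fulfillFinishedTaskStatus(operatorStates);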
use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
the class DefaultCheckpointPlanTest method testFulfillFullyFinishedStatesWithCoordinator.
@Test
public void testFulfillFullyFinishedStatesWithCoordinator() throws Exception {
    JobVertexID finishedJobVertexID = new JobVertexID();
    OperatorID finishedOperatorID = new OperatorID();
    ExecutionGraph executionGraph =
            new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
                    .addJobVertex(
                            finishedJobVertexID, 1, 256,
                            Collections.singletonList(OperatorIDPair.generatedIDOnly(finishedOperatorID)),
                            true)
                    .build();
    // mark the vertex's only task as finished
    executionGraph.getJobVertex(finishedJobVertexID).getTaskVertices()[0].getCurrentExecutionAttempt().markFinished();
    CheckpointPlan checkpointPlan = createCheckpointPlan(executionGraph);

    // the operator carries only coordinator state, no subtask state
    Map<OperatorID, OperatorState> operatorStates = new HashMap<>();
    OperatorState operatorState = new OperatorState(finishedOperatorID, 1, 256);
    operatorState.setCoordinatorState(new TestingStreamStateHandle());
    operatorStates.put(finishedOperatorID, operatorState);

    checkpointPlan.fulfillFinishedTaskStatus(operatorStates);

    assertEquals(1, operatorStates.size());
    assertTrue(operatorStates.get(finishedOperatorID).isFullyFinished());
}
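A one-line hedged baseline check to go with it: the fully-finished flag is assumed to be set only during fulfillment, so a freshly constructed OperatorState starts out non-fully-finished.

// assumption: a new OperatorState is not fully finished until
// fulfillFinishedTaskStatus marks it as such
assertFalse(new OperatorState(new OperatorID(), 1, 256).isFullyFinished());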
use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
the class PipelinedRegionSchedulingStrategyTest method testScheduleBlockingDownstreamTaskIndividually.
@Test
public void testScheduleBlockingDownstreamTaskIndividually() throws Exception {
    final JobVertex v1 = createJobVertex("v1", 2);
    final JobVertex v2 = createJobVertex("v2", 2);
    v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);

    final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2));
    final JobGraph jobGraph = JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build();
    final ExecutionGraph executionGraph =
            TestingDefaultExecutionGraphBuilder.newBuilder().setJobGraph(jobGraph).build();
    final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology();

    final PipelinedRegionSchedulingStrategy schedulingStrategy = startScheduling(schedulingTopology);

    // only the two upstream regions are scheduled at first
    assertEquals(2, testingSchedulerOperation.getScheduledVertices().size());

    // finishing one upstream task makes exactly its blocking consumer schedulable
    final ExecutionVertex v11 = executionGraph.getJobVertex(v1.getID()).getTaskVertices()[0];
    v11.finishAllBlockingPartitions();
    schedulingStrategy.onExecutionStateChange(v11.getID(), ExecutionState.FINISHED);
    assertEquals(3, testingSchedulerOperation.getScheduledVertices().size());
}
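For comparison, a hedged sketch of the all-PIPELINED case, reusing this test class's createJobVertex, startScheduling, and testingSchedulerOperation helpers (their reuse here is an assumption): a pipelined edge puts producer and consumer into the same region, so both POINTWISE pairs should be scheduled up front with no waiting on finished partitions.

// assumption: each POINTWISE producer/consumer pair forms one pipelined region,
// so both regions are scheduled immediately
final JobVertex p1 = createJobVertex("p1", 2);
final JobVertex p2 = createJobVertex("p2", 2);
p2.connectNewDataSetAsInput(p1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
final JobGraph pipelinedJobGraph =
        JobGraphBuilder.newStreamingJobGraphBuilder()
                .addJobVertices(Arrays.asList(p1, p2))
                .build();
final ExecutionGraph pipelinedGraph =
        TestingDefaultExecutionGraphBuilder.newBuilder().setJobGraph(pipelinedJobGraph).build();
startScheduling(pipelinedGraph.getSchedulingTopology());
assertEquals(2, testingSchedulerOperation.getScheduledVertices().size());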
use of org.apache.flink.runtime.executiongraph.ExecutionGraph in project flink by apache.
the class PipelinedRegionSchedulingStrategyTest method testComputingCrossRegionConsumedPartitionGroupsCorrectly.
@Test
public void testComputingCrossRegionConsumedPartitionGroupsCorrectly() throws Exception {
    final JobVertex v1 = createJobVertex("v1", 4);
    final JobVertex v2 = createJobVertex("v2", 3);
    final JobVertex v3 = createJobVertex("v3", 2);
    v2.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    v3.connectNewDataSetAsInput(v2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    v3.connectNewDataSetAsInput(v1, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    final List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2, v3));
    final JobGraph jobGraph = JobGraphBuilder.newBatchJobGraphBuilder().addJobVertices(ordered).build();
    final ExecutionGraph executionGraph =
            TestingDefaultExecutionGraphBuilder.newBuilder().setJobGraph(jobGraph).build();
    final SchedulingTopology schedulingTopology = executionGraph.getSchedulingTopology();

    final PipelinedRegionSchedulingStrategy schedulingStrategy =
            new PipelinedRegionSchedulingStrategy(testingSchedulerOperation, schedulingTopology);

    // the mixed pipelined/blocking wiring yields exactly one consumed partition
    // group whose partitions span more than one pipelined region
    final Set<ConsumedPartitionGroup> crossRegionConsumedPartitionGroups =
            schedulingStrategy.getCrossRegionConsumedPartitionGroups();
    assertEquals(1, crossRegionConsumedPartitionGroups.size());

    final ConsumedPartitionGroup expected =
            executionGraph.getJobVertex(v3.getID()).getTaskVertices()[1]
                    .getAllConsumedPartitionGroups().get(0);
    assertTrue(crossRegionConsumedPartitionGroups.contains(expected));
}
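A short hedged usage note to close: ConsumedPartitionGroup is assumed to be iterable over the IntermediateResultPartitionIDs it bundles, so a test can walk the cross-region group and resolve each partition against the topology.

// assumption: ConsumedPartitionGroup implements Iterable<IntermediateResultPartitionID>
for (IntermediateResultPartitionID partitionId : expected) {
    assertNotNull(schedulingTopology.getResultPartition(partitionId));
}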