use of org.apache.flink.runtime.executiongraph.ExecutionVertex in project flink by apache.
the class CheckpointCoordinatorFailureTest method testFailingCompletedCheckpointStoreAdd.
/**
 * Tests that a failure while storing a completed checkpoint in the completed checkpoint store
 * will properly fail the originating pending checkpoint and clean up the completed checkpoint.
 */
@Test
public void testFailingCompletedCheckpointStoreAdd() throws Exception {
    JobID jid = new JobID();

    final ExecutionAttemptID executionAttemptId = new ExecutionAttemptID();
    final ExecutionVertex vertex = CheckpointCoordinatorTest.mockExecutionVertex(executionAttemptId);

    final long triggerTimestamp = 1L;

    // set up the coordinator and validate the initial state
    CheckpointCoordinator coord = new CheckpointCoordinator(
        jid,
        600000,
        600000,
        0,
        Integer.MAX_VALUE,
        ExternalizedCheckpointSettings.none(),
        new ExecutionVertex[] { vertex },
        new ExecutionVertex[] { vertex },
        new ExecutionVertex[] { vertex },
        new StandaloneCheckpointIDCounter(),
        new FailingCompletedCheckpointStore(),
        null,
        Executors.directExecutor());

    coord.triggerCheckpoint(triggerTimestamp, false);
    assertEquals(1, coord.getNumberOfPendingCheckpoints());

    PendingCheckpoint pendingCheckpoint = coord.getPendingCheckpoints().values().iterator().next();
    assertFalse(pendingCheckpoint.isDiscarded());

    final long checkpointId = coord.getPendingCheckpoints().keySet().iterator().next();
    AcknowledgeCheckpoint acknowledgeMessage = new AcknowledgeCheckpoint(jid, executionAttemptId, checkpointId);

    CompletedCheckpoint completedCheckpoint = mock(CompletedCheckpoint.class);
    PowerMockito.whenNew(CompletedCheckpoint.class).withAnyArguments().thenReturn(completedCheckpoint);

    try {
        coord.receiveAcknowledgeMessage(acknowledgeMessage);
        fail("Expected a checkpoint exception because the completed checkpoint store could not store the completed checkpoint.");
    } catch (CheckpointException e) {
        // ignore because we expected this exception
    }

    // make sure that the pending checkpoint has been discarded after we could not complete it
    assertTrue(pendingCheckpoint.isDiscarded());
    verify(completedCheckpoint).discard();
}
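The FailingCompletedCheckpointStore handed to the coordinator above is defined elsewhere in the test class and is not part of this excerpt. A minimal sketch of such a store (hedged: the exact CompletedCheckpointStore methods vary across Flink versions) only needs addCheckpoint to always throw, which is what drives the expected CheckpointException:
private static final class FailingCompletedCheckpointStore implements CompletedCheckpointStore {

    @Override
    public void recover() throws Exception {
        throw new UnsupportedOperationException("Not implemented.");
    }

    @Override
    public void addCheckpoint(CompletedCheckpoint checkpoint) throws Exception {
        // always fail, so that completing a pending checkpoint throws
        throw new Exception("The failing completed checkpoint store failed again.");
    }

    @Override
    public CompletedCheckpoint getLatestCheckpoint() throws Exception {
        throw new UnsupportedOperationException("Not implemented.");
    }

    @Override
    public void shutdown(JobStatus jobStatus) throws Exception {
        throw new UnsupportedOperationException("Not implemented.");
    }

    @Override
    public List<CompletedCheckpoint> getAllCheckpoints() throws Exception {
        throw new UnsupportedOperationException("Not implemented.");
    }

    @Override
    public int getNumberOfRetainedCheckpoints() {
        return -1;
    }
}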
use of org.apache.flink.runtime.executiongraph.ExecutionVertex in project flink by apache.
the class CheckpointCoordinatorTest method testCheckpointAbortsIfTriggerTasksAreNotExecuted.
@Test
public void testCheckpointAbortsIfTriggerTasksAreNotExecuted() {
    try {
        final JobID jid = new JobID();
        final long timestamp = System.currentTimeMillis();

        // create some mock execution vertices that receive the checkpoint trigger messages;
        // as plain mocks they report no current execution attempt, so the trigger cannot be sent
        ExecutionVertex triggerVertex1 = mock(ExecutionVertex.class);
        ExecutionVertex triggerVertex2 = mock(ExecutionVertex.class);

        // create some mock execution vertices that need to ack the checkpoint
        final ExecutionAttemptID ackAttemptID1 = new ExecutionAttemptID();
        final ExecutionAttemptID ackAttemptID2 = new ExecutionAttemptID();
        ExecutionVertex ackVertex1 = mockExecutionVertex(ackAttemptID1);
        ExecutionVertex ackVertex2 = mockExecutionVertex(ackAttemptID2);

        // set up the coordinator and validate the initial state
        CheckpointCoordinator coord = new CheckpointCoordinator(
            jid,
            600000,
            600000,
            0,
            Integer.MAX_VALUE,
            ExternalizedCheckpointSettings.none(),
            new ExecutionVertex[] { triggerVertex1, triggerVertex2 },
            new ExecutionVertex[] { ackVertex1, ackVertex2 },
            new ExecutionVertex[] {},
            new StandaloneCheckpointIDCounter(),
            new StandaloneCompletedCheckpointStore(1),
            null,
            Executors.directExecutor());

        // nothing should be happening
        assertEquals(0, coord.getNumberOfPendingCheckpoints());
        assertEquals(0, coord.getNumberOfRetainedSuccessfulCheckpoints());

        // trigger the first checkpoint; this should not succeed
        assertFalse(coord.triggerCheckpoint(timestamp, false));

        // still, nothing should be happening
        assertEquals(0, coord.getNumberOfPendingCheckpoints());
        assertEquals(0, coord.getNumberOfRetainedSuccessfulCheckpoints());

        coord.shutdown(JobStatus.FINISHED);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
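Both checkpoint tests above rely on CheckpointCoordinatorTest.mockExecutionVertex(...), which is not shown in this excerpt. A minimal sketch of what such a helper plausibly looks like (an assumption, not the verbatim Flink helper), assuming Mockito and that the coordinator requires a current execution attempt in RUNNING state:
private static ExecutionVertex mockExecutionVertex(ExecutionAttemptID attemptID) {
    // the mocked execution reports the given attempt id and a RUNNING state,
    // so the coordinator accepts it as a valid trigger/ack target
    final Execution execution = mock(Execution.class);
    when(execution.getAttemptId()).thenReturn(attemptID);
    when(execution.getState()).thenReturn(ExecutionState.RUNNING);

    ExecutionVertex vertex = mock(ExecutionVertex.class);
    when(vertex.getJobvertexId()).thenReturn(new JobVertexID());
    when(vertex.getCurrentExecutionAttempt()).thenReturn(execution);
    return vertex;
}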
use of org.apache.flink.runtime.executiongraph.ExecutionVertex in project flink by apache.
the class SchedulerTestUtils method getTestVertex.
public static Execution getTestVertex(Iterable<TaskManagerLocation> preferredLocations) {
    ExecutionVertex vertex = mock(ExecutionVertex.class);

    when(vertex.getPreferredLocationsBasedOnInputs()).thenReturn(preferredLocations);
    when(vertex.getJobId()).thenReturn(new JobID());
    when(vertex.toString()).thenReturn("TEST-VERTEX");

    Execution execution = mock(Execution.class);
    when(execution.getVertex()).thenReturn(vertex);

    return execution;
}
use of org.apache.flink.runtime.executiongraph.ExecutionVertex in project flink by apache.
the class SchedulerTestUtils method getTestVertex.
public static Execution getTestVertex(JobVertexID jid, int taskIndex, int numTasks) {
    ExecutionVertex vertex = mock(ExecutionVertex.class);

    when(vertex.getPreferredLocationsBasedOnInputs()).thenReturn(null);
    when(vertex.getJobId()).thenReturn(new JobID());
    when(vertex.getJobvertexId()).thenReturn(jid);
    when(vertex.getParallelSubtaskIndex()).thenReturn(taskIndex);
    when(vertex.getTotalNumberOfParallelSubtasks()).thenReturn(numTasks);
    when(vertex.getMaxParallelism()).thenReturn(numTasks);
    when(vertex.toString()).thenReturn("TEST-VERTEX");
    when(vertex.getSimpleName()).thenReturn("TEST-VERTEX");

    Execution execution = mock(Execution.class);
    when(execution.getVertex()).thenReturn(vertex);

    return execution;
}
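These two overloads let scheduler tests fabricate schedulable executions without building a full ExecutionGraph. A hedged usage sketch (the wrapping method and the 'location' parameter are illustrative assumptions, not taken from this excerpt):
static void exampleUsage(TaskManagerLocation location) {
    // an execution that prefers to be scheduled on 'location'
    Execution withLocality = SchedulerTestUtils.getTestVertex(Collections.singleton(location));

    // subtask 2 of a vertex with parallelism 4, no locality preference
    Execution subtask = SchedulerTestUtils.getTestVertex(new JobVertexID(), 2, 4);
}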
use of org.apache.flink.runtime.executiongraph.ExecutionVertex in project flink by apache.
the class RescalePartitionerTest method testExecutionGraphGeneration.
@Test
public void testExecutionGraphGeneration() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(4);

    // get input data
    DataStream<String> text = env.addSource(new ParallelSourceFunction<String>() {
        private static final long serialVersionUID = 7772338606389180774L;

        @Override
        public void run(SourceContext<String> ctx) throws Exception {
        }

        @Override
        public void cancel() {
        }
    }).setParallelism(2);

    DataStream<Tuple2<String, Integer>> counts = text
        .rescale()
        .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            private static final long serialVersionUID = -5255930322161596829L;

            @Override
            public void flatMap(String value, Collector<Tuple2<String, Integer>> out) throws Exception {
            }
        });

    counts.rescale().print().setParallelism(2);

    JobGraph jobGraph = env.getStreamGraph().getJobGraph();

    final JobID jobId = new JobID();
    final String jobName = "Semi-Rebalance Test Job";
    final Configuration cfg = new Configuration();

    List<JobVertex> jobVertices = jobGraph.getVerticesSortedTopologicallyFromSources();

    JobVertex sourceVertex = jobVertices.get(0);
    JobVertex mapVertex = jobVertices.get(1);
    JobVertex sinkVertex = jobVertices.get(2);

    assertEquals(2, sourceVertex.getParallelism());
    assertEquals(4, mapVertex.getParallelism());
    assertEquals(2, sinkVertex.getParallelism());

    ExecutionGraph eg = new ExecutionGraph(
        TestingUtils.defaultExecutor(),
        TestingUtils.defaultExecutor(),
        jobId,
        jobName,
        cfg,
        new SerializedValue<>(new ExecutionConfig()),
        AkkaUtils.getDefaultTimeout(),
        new NoRestartStrategy(),
        new ArrayList<BlobKey>(),
        new ArrayList<URL>(),
        new Scheduler(TestingUtils.defaultExecutionContext()),
        ExecutionGraph.class.getClassLoader(),
        new UnregisteredMetricsGroup());

    try {
        eg.attachJobGraph(jobVertices);
    } catch (JobException e) {
        e.printStackTrace();
        fail("Building ExecutionGraph failed: " + e.getMessage());
    }

    ExecutionJobVertex execSourceVertex = eg.getJobVertex(sourceVertex.getID());
    ExecutionJobVertex execMapVertex = eg.getJobVertex(mapVertex.getID());
    ExecutionJobVertex execSinkVertex = eg.getJobVertex(sinkVertex.getID());

    assertEquals(0, execSourceVertex.getInputs().size());

    assertEquals(1, execMapVertex.getInputs().size());
    assertEquals(4, execMapVertex.getParallelism());

    ExecutionVertex[] mapTaskVertices = execMapVertex.getTaskVertices();

    // verify that we have each parallel input partition exactly twice, i.e. that one source
    // sends to two unique mappers
    Map<Integer, Integer> mapInputPartitionCounts = new HashMap<>();
    for (ExecutionVertex mapTaskVertex : mapTaskVertices) {
        assertEquals(1, mapTaskVertex.getNumberOfInputs());
        assertEquals(1, mapTaskVertex.getInputEdges(0).length);

        ExecutionEdge inputEdge = mapTaskVertex.getInputEdges(0)[0];
        assertEquals(sourceVertex.getID(), inputEdge.getSource().getProducer().getJobvertexId());

        int inputPartition = inputEdge.getSource().getPartitionNumber();
        if (!mapInputPartitionCounts.containsKey(inputPartition)) {
            mapInputPartitionCounts.put(inputPartition, 1);
        } else {
            mapInputPartitionCounts.put(inputPartition, mapInputPartitionCounts.get(inputPartition) + 1);
        }
    }

    assertEquals(2, mapInputPartitionCounts.size());
    for (int count : mapInputPartitionCounts.values()) {
        assertEquals(2, count);
    }

    assertEquals(1, execSinkVertex.getInputs().size());
    assertEquals(2, execSinkVertex.getParallelism());

    ExecutionVertex[] sinkTaskVertices = execSinkVertex.getTaskVertices();

    // verify each sink instance has two inputs from the map and that each map subpartition
    // only occurs in one unique input edge
    Set<Integer> mapSubpartitions = new HashSet<>();
    for (ExecutionVertex sinkTaskVertex : sinkTaskVertices) {
        assertEquals(1, sinkTaskVertex.getNumberOfInputs());
        assertEquals(2, sinkTaskVertex.getInputEdges(0).length);

        ExecutionEdge inputEdge1 = sinkTaskVertex.getInputEdges(0)[0];
        ExecutionEdge inputEdge2 = sinkTaskVertex.getInputEdges(0)[1];
        assertEquals(mapVertex.getID(), inputEdge1.getSource().getProducer().getJobvertexId());
        assertEquals(mapVertex.getID(), inputEdge2.getSource().getProducer().getJobvertexId());

        int inputPartition1 = inputEdge1.getSource().getPartitionNumber();
        assertFalse(mapSubpartitions.contains(inputPartition1));
        mapSubpartitions.add(inputPartition1);

        int inputPartition2 = inputEdge2.getSource().getPartitionNumber();
        assertFalse(mapSubpartitions.contains(inputPartition2));
        mapSubpartitions.add(inputPartition2);
    }

    assertEquals(4, mapSubpartitions.size());
}
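The assertions above encode the pointwise wiring that rescale() produces: with 2 sources feeding 4 mappers, each source partition is read by exactly 2 mappers; with 4 mappers feeding 2 sinks, each sink reads a contiguous pair of map partitions. A minimal sketch of that assignment rule, valid when the parallelisms divide evenly as they do here (an illustration, not Flink's actual ExecutionVertex connection code):
// which upstream partitions does downstream subtask 'consumerIndex' read
// under rescale-style pointwise wiring?
static int[] rescaleInputs(int consumerIndex, int numConsumers, int numProducers) {
    if (numProducers <= numConsumers) {
        // fan-out (e.g. 2 sources -> 4 mappers): consumers share producers,
        // so each of the 2 source partitions is read by exactly 2 mappers
        int factor = numConsumers / numProducers;
        return new int[] { consumerIndex / factor };
    } else {
        // fan-in (e.g. 4 mappers -> 2 sinks): each consumer reads a
        // contiguous range, so each sink reads exactly 2 map partitions
        int factor = numProducers / numConsumers;
        int[] inputs = new int[factor];
        for (int i = 0; i < factor; i++) {
            inputs[i] = consumerIndex * factor + i;
        }
        return inputs;
    }
}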