Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.
From the class CheckpointMessagesTest, method testTriggerAndConfirmCheckpoint:
@Test
public void testTriggerAndConfirmCheckpoint() {
    try {
        NotifyCheckpointComplete cc = new NotifyCheckpointComplete(
            new JobID(), new ExecutionAttemptID(), 45287698767345L, 467L);
        testSerializabilityEqualsHashCode(cc);

        TriggerCheckpoint tc = new TriggerCheckpoint(
            new JobID(), new ExecutionAttemptID(), 347652734L, 7576752L, CheckpointOptions.forFullCheckpoint());
        testSerializabilityEqualsHashCode(tc);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
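testSerializabilityEqualsHashCode is a helper defined elsewhere in CheckpointMessagesTest. As a rough sketch of the kind of check it performs (the round trip via plain Java serialization and the name roundTripAndCheck are assumptions, not the actual Flink helper), one could write:

// Hypothetical sketch, not Flink's helper: serialize the message, deserialize it,
// and assert that equals() and hashCode() survive the round trip.
private static <T extends java.io.Serializable> void roundTripAndCheck(T original) throws Exception {
    java.io.ByteArrayOutputStream bos = new java.io.ByteArrayOutputStream();
    try (java.io.ObjectOutputStream oos = new java.io.ObjectOutputStream(bos)) {
        oos.writeObject(original);
    }
    Object copy;
    try (java.io.ObjectInputStream ois =
            new java.io.ObjectInputStream(new java.io.ByteArrayInputStream(bos.toByteArray()))) {
        copy = ois.readObject();
    }
    org.junit.Assert.assertEquals(original, copy);
    org.junit.Assert.assertEquals(original.hashCode(), copy.hashCode());
}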
Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.
From the class StreamTaskTest, method createTask:
public static Task createTask(
        Class<? extends AbstractInvokable> invokable, StreamConfig taskConfig,
        Configuration taskManagerConfig) throws Exception {
    LibraryCacheManager libCache = mock(LibraryCacheManager.class);
    when(libCache.getClassLoader(any(JobID.class))).thenReturn(StreamTaskTest.class.getClassLoader());
    ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
    ResultPartitionConsumableNotifier consumableNotifier = mock(ResultPartitionConsumableNotifier.class);
    PartitionProducerStateChecker partitionProducerStateChecker = mock(PartitionProducerStateChecker.class);
    Executor executor = mock(Executor.class);
    NetworkEnvironment network = mock(NetworkEnvironment.class);
    when(network.getResultPartitionManager()).thenReturn(partitionManager);
    when(network.getDefaultIOMode()).thenReturn(IOManager.IOMode.SYNC);
    when(network.createKvStateTaskRegistry(any(JobID.class), any(JobVertexID.class))).thenReturn(mock(TaskKvStateRegistry.class));
    JobInformation jobInformation = new JobInformation(
        new JobID(), "Job Name", new SerializedValue<>(new ExecutionConfig()),
        new Configuration(), Collections.<BlobKey>emptyList(), Collections.<URL>emptyList());
    TaskInformation taskInformation = new TaskInformation(
        new JobVertexID(), "Test Task", 1, 1, invokable.getName(), taskConfig.getConfiguration());
    return new Task(
        jobInformation, taskInformation, new ExecutionAttemptID(), new AllocationID(), 0, 0,
        Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
        Collections.<InputGateDeploymentDescriptor>emptyList(), 0, new TaskStateHandles(),
        mock(MemoryManager.class), mock(IOManager.class), network, mock(BroadcastVariableManager.class),
        mock(TaskManagerActions.class), mock(InputSplitProvider.class), mock(CheckpointResponder.class),
        libCache, mock(FileCache.class),
        new TestingTaskManagerRuntimeInfo(taskManagerConfig, new String[] { System.getProperty("java.io.tmpdir") }),
        new UnregisteredTaskMetricsGroup(), consumableNotifier, partitionProducerStateChecker, executor);
}
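A caller typically hands createTask a StreamConfig describing the operator chain and then drives the returned Task on its own thread. A minimal usage sketch (the operator wiring is omitted, and SourceStreamTask is used only for illustration; this is not taken from the page above):

// Hypothetical usage sketch: build a Task backed by mocks and run it to completion.
StreamConfig taskConfig = new StreamConfig(new Configuration());
// ... configure the stream operator / chain on taskConfig here ...

Task task = createTask(SourceStreamTask.class, taskConfig, new Configuration());
task.startTaskThread();            // executes the invokable on a dedicated thread
task.getExecutingThread().join();  // wait for termination before asserting on the task's state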
Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.
From the class CheckpointCoordinatorExternalizedCheckpointsTest, method testTriggerAndConfirmSimpleExternalizedCheckpoint:
/**
 * Triggers multiple externalized checkpoints and verifies that the metadata
 * files have been created.
 */
@Test
public void testTriggerAndConfirmSimpleExternalizedCheckpoint() throws Exception {
    final JobID jid = new JobID();
    final ExternalizedCheckpointSettings externalizedCheckpointSettings =
        ExternalizedCheckpointSettings.externalizeCheckpoints(false);
    final File checkpointDir = tmp.newFolder();

    // create some mock Execution vertices that receive the checkpoint trigger messages
    final ExecutionAttemptID attemptID1 = new ExecutionAttemptID();
    final ExecutionAttemptID attemptID2 = new ExecutionAttemptID();
    ExecutionVertex vertex1 = CheckpointCoordinatorTest.mockExecutionVertex(attemptID1);
    ExecutionVertex vertex2 = CheckpointCoordinatorTest.mockExecutionVertex(attemptID2);

    Map<JobVertexID, ExecutionJobVertex> jobVertices = new HashMap<>();
    jobVertices.put(vertex1.getJobvertexId(), vertex1.getJobVertex());
    jobVertices.put(vertex2.getJobvertexId(), vertex2.getJobVertex());

    // set up the coordinator and validate the initial state
    CheckpointCoordinator coord = new CheckpointCoordinator(
        jid, 600000, 600000, 0, Integer.MAX_VALUE, externalizedCheckpointSettings,
        new ExecutionVertex[] { vertex1, vertex2 },
        new ExecutionVertex[] { vertex1, vertex2 },
        new ExecutionVertex[] { vertex1, vertex2 },
        new StandaloneCheckpointIDCounter(),
        new StandaloneCompletedCheckpointStore(1),
        checkpointDir.getAbsolutePath(),
        Executors.directExecutor());

    assertEquals(0, coord.getNumberOfPendingCheckpoints());
    assertEquals(0, coord.getNumberOfRetainedSuccessfulCheckpoints());

    // ---------------
    // trigger checkpoint 1
    // ---------------
    {
        final long timestamp1 = System.currentTimeMillis();
        coord.triggerCheckpoint(timestamp1, false);

        long checkpointId1 = coord.getPendingCheckpoints().entrySet().iterator().next().getKey();
        coord.receiveAcknowledgeMessage(new AcknowledgeCheckpoint(jid, attemptID1, checkpointId1));
        coord.receiveAcknowledgeMessage(new AcknowledgeCheckpoint(jid, attemptID2, checkpointId1));

        CompletedCheckpoint latest = coord.getCheckpointStore().getLatestCheckpoint();
        verifyExternalizedCheckpoint(latest, jid, checkpointId1, timestamp1);
        verifyExternalizedCheckpointRestore(latest, jobVertices, vertex1, vertex2);
    }

    // ---------------
    // trigger checkpoint 2
    // ---------------
    {
        final long timestamp2 = System.currentTimeMillis() + 7;
        coord.triggerCheckpoint(timestamp2, false);

        long checkpointId2 = coord.getPendingCheckpoints().entrySet().iterator().next().getKey();
        coord.receiveAcknowledgeMessage(new AcknowledgeCheckpoint(jid, attemptID1, checkpointId2));
        coord.receiveAcknowledgeMessage(new AcknowledgeCheckpoint(jid, attemptID2, checkpointId2));

        CompletedCheckpoint latest = coord.getCheckpointStore().getLatestCheckpoint();
        verifyExternalizedCheckpoint(latest, jid, checkpointId2, timestamp2);
        verifyExternalizedCheckpointRestore(latest, jobVertices, vertex1, vertex2);
    }

    // ---------------
    // trigger checkpoint 3
    // ---------------
    {
        final long timestamp3 = System.currentTimeMillis() + 146;
        coord.triggerCheckpoint(timestamp3, false);

        long checkpointId3 = coord.getPendingCheckpoints().entrySet().iterator().next().getKey();
        coord.receiveAcknowledgeMessage(new AcknowledgeCheckpoint(jid, attemptID1, checkpointId3));
        coord.receiveAcknowledgeMessage(new AcknowledgeCheckpoint(jid, attemptID2, checkpointId3));

        CompletedCheckpoint latest = coord.getCheckpointStore().getLatestCheckpoint();
        verifyExternalizedCheckpoint(latest, jid, checkpointId3, timestamp3);
        verifyExternalizedCheckpointRestore(latest, jobVertices, vertex1, vertex2);
    }

    coord.shutdown(JobStatus.FINISHED);
}
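verifyExternalizedCheckpoint and verifyExternalizedCheckpointRestore are helpers defined elsewhere in the test class. For the on-disk part of such a verification, a hypothetical sketch (the helper name and the directory layout are assumptions, not the actual Flink code) could simply assert that the configured checkpoint directory now contains non-empty data:

// Hypothetical sketch: check that an externalized checkpoint left files behind
// in the target directory. The exact metadata layout is an assumption.
private static void assertCheckpointMetadataWritten(File checkpointDir) {
    File[] files = checkpointDir.listFiles();
    assertNotNull("checkpoint directory was not created", files);
    boolean foundData = false;
    for (File f : files) {
        if (f.isFile() && f.length() > 0) {
            foundData = true;                                    // metadata file written directly into the directory
        } else if (f.isDirectory()) {
            File[] nested = f.listFiles();
            foundData |= nested != null && nested.length > 0;    // or a nested per-checkpoint directory
        }
    }
    assertTrue("no checkpoint data found under " + checkpointDir, foundData);
}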
Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.
From the class CheckpointCoordinatorTest, method mockExecutionJobVertex:
static ExecutionJobVertex mockExecutionJobVertex(JobVertexID jobVertexID, int parallelism, int maxParallelism) {
    final ExecutionJobVertex executionJobVertex = mock(ExecutionJobVertex.class);

    ExecutionVertex[] executionVertices = new ExecutionVertex[parallelism];
    for (int i = 0; i < parallelism; i++) {
        executionVertices[i] = mockExecutionVertex(
            new ExecutionAttemptID(), jobVertexID, parallelism, maxParallelism, ExecutionState.RUNNING);
        when(executionVertices[i].getParallelSubtaskIndex()).thenReturn(i);
    }

    when(executionJobVertex.getJobVertexId()).thenReturn(jobVertexID);
    when(executionJobVertex.getTaskVertices()).thenReturn(executionVertices);
    when(executionJobVertex.getParallelism()).thenReturn(parallelism);
    when(executionJobVertex.getMaxParallelism()).thenReturn(maxParallelism);
    when(executionJobVertex.isMaxParallelismConfigured()).thenReturn(true);

    return executionJobVertex;
}
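Each mocked subtask carries its own ExecutionAttemptID, which is what the coordinator tests later use to address AcknowledgeCheckpoint messages to individual subtasks. A short usage sketch (assuming, as elsewhere in CheckpointCoordinatorTest, that mockExecutionVertex stubs getCurrentExecutionAttempt() on the returned mocks):

// Sketch: read back the per-subtask ExecutionAttemptIDs from the mocked job vertex.
JobVertexID jobVertexID = new JobVertexID();
ExecutionJobVertex jobVertex = mockExecutionJobVertex(jobVertexID, 3, 16);

List<ExecutionAttemptID> attemptIds = new ArrayList<>();
for (ExecutionVertex vertex : jobVertex.getTaskVertices()) {
    attemptIds.add(vertex.getCurrentExecutionAttempt().getAttemptId());
}
assertEquals(3, attemptIds.size());   // one attempt per parallel subtask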
Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.
From the class CheckpointCoordinatorTest, method testMinTimeBetweenCheckpointsInterval:
/**
 * This test verifies that a minimum amount of time passes after a completed
 * checkpoint before the next one is triggered.
 */
@Test
public void testMinTimeBetweenCheckpointsInterval() throws Exception {
    final JobID jid = new JobID();

    // create some mock execution vertices and trigger some checkpoint
    final ExecutionAttemptID attemptID = new ExecutionAttemptID();
    final ExecutionVertex vertex = mockExecutionVertex(attemptID);
    final Execution executionAttempt = vertex.getCurrentExecutionAttempt();

    final BlockingQueue<Long> triggerCalls = new LinkedBlockingQueue<>();

    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            triggerCalls.add((Long) invocation.getArguments()[0]);
            return null;
        }
    }).when(executionAttempt).triggerCheckpoint(anyLong(), anyLong(), any(CheckpointOptions.class));

    final long delay = 50;

    final CheckpointCoordinator coord = new CheckpointCoordinator(
        jid,
        2,          // periodic interval is 2 ms
        200_000,    // timeout is very long (200 s)
        delay,      // 50 ms delay between checkpoints
        1,
        ExternalizedCheckpointSettings.none(),
        new ExecutionVertex[] { vertex },
        new ExecutionVertex[] { vertex },
        new ExecutionVertex[] { vertex },
        new StandaloneCheckpointIDCounter(),
        new StandaloneCompletedCheckpointStore(2),
        "dummy-path",
        Executors.directExecutor());

    try {
        coord.startCheckpointScheduler();

        // wait until the first checkpoint was triggered
        Long firstCallId = triggerCalls.take();
        assertEquals(1L, firstCallId.longValue());

        AcknowledgeCheckpoint ackMsg = new AcknowledgeCheckpoint(jid, attemptID, 1L);

        // tell the coordinator that the checkpoint is done
        final long ackTime = System.nanoTime();
        coord.receiveAcknowledgeMessage(ackMsg);

        // wait until the next checkpoint is triggered
        Long nextCallId = triggerCalls.take();
        final long nextCheckpointTime = System.nanoTime();
        assertEquals(2L, nextCallId.longValue());

        final long delayMillis = (nextCheckpointTime - ackTime) / 1_000_000;

        // we need to add one ms here to account for rounding errors
        if (delayMillis + 1 < delay) {
            fail("checkpoint came too early: delay was " + delayMillis + " but should have been at least " + delay);
        }
    } finally {
        coord.stopCheckpointScheduler();
        coord.shutdown(JobStatus.FINISHED);
    }
}
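The anonymous Answer above is the Java 7 style used throughout these tests. With Java 8 and a Mockito version that accepts lambdas for the single-method Answer interface, the same stub can be written more compactly (a sketch, not code from this page):

// Equivalent stub as a lambda: record the trigger timestamp passed as the first argument.
doAnswer(invocation -> {
    triggerCalls.add((Long) invocation.getArguments()[0]);
    return null;
}).when(executionAttempt).triggerCheckpoint(anyLong(), anyLong(), any(CheckpointOptions.class));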