Use of org.apache.flink.runtime.jobmaster.TestingLogicalSlotBuilder in project flink by apache.
The class RegionToRestartInBatchJobBenchmark, method setup.
@Override
public void setup(JobConfiguration jobConfiguration) throws Exception {
    super.setup(jobConfiguration);
    final JobVertex source = jobVertices.get(0);
    final JobVertex sink = jobVertices.get(1);
    final TestingLogicalSlotBuilder slotBuilder = new TestingLogicalSlotBuilder();
    // Deploy the source tasks, mark them FINISHED, then deploy the sink tasks.
    deployTasks(executionGraph, source.getID(), slotBuilder, true);
    transitionTaskStatus(executionGraph, source.getID(), ExecutionState.FINISHED);
    deployTasks(executionGraph, sink.getID(), slotBuilder, true);
}
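All of the examples on this page share the same basic pattern: the test builds a LogicalSlot from TestingLogicalSlotBuilder, optionally plugging in a SimpleAckingTaskManagerGateway (or a subclass of it) so that task-manager RPCs can be observed or made to fail. The following is a minimal sketch of that pattern only; the import paths and the wrapping class are assumptions inferred from the snippets on this page, not copied from any single test.
// Sketch only: the recurring TestingLogicalSlotBuilder pattern used by the tests on this page.
// Import paths are assumptions inferred from these snippets.
import org.apache.flink.runtime.executiongraph.utils.SimpleAckingTaskManagerGateway;
import org.apache.flink.runtime.jobmaster.LogicalSlot;
import org.apache.flink.runtime.jobmaster.TestingLogicalSlotBuilder;

class TestingSlotSketch {
    static LogicalSlot defaultSlot() {
        // Default testing slot: the backing gateway simply acknowledges RPCs.
        return new TestingLogicalSlotBuilder().createTestingLogicalSlot();
    }

    static LogicalSlot slotWithGateway(SimpleAckingTaskManagerGateway gateway) {
        // Slot with a custom gateway, so a test can observe or fail task-manager RPCs
        // (compare the anonymous SimpleAckingTaskManagerGateway subclasses below).
        return new TestingLogicalSlotBuilder()
                .setTaskManagerGateway(gateway)
                .createTestingLogicalSlot();
    }
}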
Use of org.apache.flink.runtime.jobmaster.TestingLogicalSlotBuilder in project flink-mirror by flink-ci.
The class CheckpointCoordinatorTest, method testTasksFinishDuringTriggering.
@Test
public void testTasksFinishDuringTriggering() throws Exception {
    JobVertexID jobVertexID1 = new JobVertexID();
    JobVertexID jobVertexID2 = new JobVertexID();
    ExecutionGraph graph =
            new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
                    .setTransitToRunning(false)
                    .addJobVertex(jobVertexID1, 1, 256)
                    .addJobVertex(jobVertexID2, 1, 256)
                    .build();
    ExecutionJobVertex jobVertex1 = graph.getJobVertex(jobVertexID1);
    ExecutionVertex taskVertex = jobVertex1.getTaskVertices()[0];
    ExecutionJobVertex jobVertex2 = graph.getJobVertex(jobVertexID2);
    ExecutionVertex taskVertex2 = jobVertex2.getTaskVertices()[0];
    AtomicBoolean checkpointAborted = new AtomicBoolean(false);
    // The first task finishes while the trigger RPC is in flight and makes the RPC fail.
    LogicalSlot slot1 =
            new TestingLogicalSlotBuilder()
                    .setTaskManagerGateway(new SimpleAckingTaskManagerGateway() {
                        @Override
                        public CompletableFuture<Acknowledge> triggerCheckpoint(ExecutionAttemptID executionAttemptID, JobID jobId, long checkpointId, long timestamp, CheckpointOptions checkpointOptions) {
                            taskVertex.getCurrentExecutionAttempt().markFinished();
                            return FutureUtils.completedExceptionally(new RpcException(""));
                        }
                    })
                    .createTestingLogicalSlot();
    // The second task records whether it is notified that the checkpoint was aborted.
    LogicalSlot slot2 =
            new TestingLogicalSlotBuilder()
                    .setTaskManagerGateway(new SimpleAckingTaskManagerGateway() {
                        @Override
                        public void notifyCheckpointAborted(ExecutionAttemptID executionAttemptID, JobID jobId, long checkpointId, long latestCompletedCheckpointId, long timestamp) {
                            checkpointAborted.set(true);
                        }
                    })
                    .createTestingLogicalSlot();
    ExecutionGraphTestUtils.setVertexResource(taskVertex, slot1);
    taskVertex.getCurrentExecutionAttempt().transitionState(ExecutionState.RUNNING);
    ExecutionGraphTestUtils.setVertexResource(taskVertex2, slot2);
    taskVertex2.getCurrentExecutionAttempt().transitionState(ExecutionState.RUNNING);
    CheckpointCoordinator checkpointCoordinator =
            new CheckpointCoordinatorBuilder()
                    .setExecutionGraph(graph)
                    .setTimer(manuallyTriggeredScheduledExecutor)
                    .setAllowCheckpointsAfterTasksFinished(true)
                    .build();
    // Nothing should be happening yet.
    assertEquals(0, checkpointCoordinator.getNumberOfPendingCheckpoints());
    assertEquals(0, checkpointCoordinator.getNumberOfRetainedSuccessfulCheckpoints());
    // Trigger the first checkpoint. Triggering is not declined even though a task finishes,
    // because checkpointing after tasks finish is allowed; the failed trigger RPC then
    // aborts the checkpoint, so the future completes exceptionally and the abort is notified.
    final CompletableFuture<CompletedCheckpoint> checkpointFuture =
            checkpointCoordinator.triggerCheckpoint(false);
    manuallyTriggeredScheduledExecutor.triggerAll();
    assertTrue(checkpointFuture.isCompletedExceptionally());
    assertTrue(checkpointAborted.get());
}
Use of org.apache.flink.runtime.jobmaster.TestingLogicalSlotBuilder in project flink-mirror by flink-ci.
The class DefaultExecutionGraphDeploymentTest, method testBuildDeploymentDescriptor.
@Test
public void testBuildDeploymentDescriptor() throws Exception {
    final JobVertexID jid1 = new JobVertexID();
    final JobVertexID jid2 = new JobVertexID();
    final JobVertexID jid3 = new JobVertexID();
    final JobVertexID jid4 = new JobVertexID();
    JobVertex v1 = new JobVertex("v1", jid1);
    JobVertex v2 = new JobVertex("v2", jid2);
    JobVertex v3 = new JobVertex("v3", jid3);
    JobVertex v4 = new JobVertex("v4", jid4);
    v1.setParallelism(10);
    v2.setParallelism(10);
    v3.setParallelism(10);
    v4.setParallelism(10);
    v1.setInvokableClass(BatchTask.class);
    v2.setInvokableClass(BatchTask.class);
    v3.setInvokableClass(BatchTask.class);
    v4.setInvokableClass(BatchTask.class);
    // Topology: v1 -> v2 -> {v3, v4}, all edges all-to-all and pipelined.
    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v3.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    final JobGraph jobGraph = JobGraphTestUtils.batchJobGraph(v1, v2, v3, v4);
    final JobID jobId = jobGraph.getJobID();
    DirectScheduledExecutorService executor = new DirectScheduledExecutorService();
    DefaultExecutionGraph eg =
            TestingDefaultExecutionGraphBuilder.newBuilder()
                    .setJobGraph(jobGraph)
                    .setFutureExecutor(executor)
                    .setIoExecutor(executor)
                    .setBlobWriter(blobWriter)
                    .build();
    eg.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());
    checkJobOffloaded(eg);
    ExecutionJobVertex ejv = eg.getAllVertices().get(jid2);
    ExecutionVertex vertex = ejv.getTaskVertices()[3];
    final SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();
    final CompletableFuture<TaskDeploymentDescriptor> tdd = new CompletableFuture<>();
    // Capture the deployment descriptor that gets submitted to the testing task manager gateway.
    taskManagerGateway.setSubmitConsumer(
            FunctionUtils.uncheckedConsumer(
                    taskDeploymentDescriptor -> {
                        taskDeploymentDescriptor.loadBigData(blobCache);
                        tdd.complete(taskDeploymentDescriptor);
                    }));
    final LogicalSlot slot =
            new TestingLogicalSlotBuilder()
                    .setTaskManagerGateway(taskManagerGateway)
                    .createTestingLogicalSlot();
    assertEquals(ExecutionState.CREATED, vertex.getExecutionState());
    vertex.getCurrentExecutionAttempt().transitionState(ExecutionState.SCHEDULED);
    vertex.getCurrentExecutionAttempt()
            .registerProducedPartitions(slot.getTaskManagerLocation(), true)
            .get();
    vertex.deployToSlot(slot);
    assertEquals(ExecutionState.DEPLOYING, vertex.getExecutionState());
    checkTaskOffloaded(eg, vertex.getJobvertexId());
    TaskDeploymentDescriptor descr = tdd.get();
    assertNotNull(descr);
    JobInformation jobInformation =
            descr.getSerializedJobInformation().deserializeValue(getClass().getClassLoader());
    TaskInformation taskInformation =
            descr.getSerializedTaskInformation().deserializeValue(getClass().getClassLoader());
    assertEquals(jobId, descr.getJobId());
    assertEquals(jobId, jobInformation.getJobId());
    assertEquals(jid2, taskInformation.getJobVertexId());
    assertEquals(3, descr.getSubtaskIndex());
    assertEquals(10, taskInformation.getNumberOfSubtasks());
    assertEquals(BatchTask.class.getName(), taskInformation.getInvokableClassName());
    assertEquals("v2", taskInformation.getTaskName());
    Collection<ResultPartitionDeploymentDescriptor> producedPartitions = descr.getProducedPartitions();
    Collection<InputGateDeploymentDescriptor> consumedPartitions = descr.getInputGates();
    // v2 produces one result for v3 and one for v4, and consumes one result from v1.
    assertEquals(2, producedPartitions.size());
    assertEquals(1, consumedPartitions.size());
    Iterator<ResultPartitionDeploymentDescriptor> iteratorProducedPartitions = producedPartitions.iterator();
    Iterator<InputGateDeploymentDescriptor> iteratorConsumedPartitions = consumedPartitions.iterator();
    assertEquals(10, iteratorProducedPartitions.next().getNumberOfSubpartitions());
    assertEquals(10, iteratorProducedPartitions.next().getNumberOfSubpartitions());
    ShuffleDescriptor[] shuffleDescriptors = iteratorConsumedPartitions.next().getShuffleDescriptors();
    assertEquals(10, shuffleDescriptors.length);
    // The shuffle descriptors must appear in the same order as the consumed partition group.
    Iterator<ConsumedPartitionGroup> iteratorConsumedPartitionGroup =
            vertex.getAllConsumedPartitionGroups().iterator();
    int idx = 0;
    for (IntermediateResultPartitionID partitionId : iteratorConsumedPartitionGroup.next()) {
        assertEquals(partitionId, shuffleDescriptors[idx++].getResultPartitionID().getPartitionId());
    }
}
Use of org.apache.flink.runtime.jobmaster.TestingLogicalSlotBuilder in project flink-mirror by flink-ci.
The class ExecutionVertexDeploymentTest, method testDeployFailedAsynchronously.
@Test
public void testDeployFailedAsynchronously() {
    try {
        final ExecutionVertex vertex = getExecutionVertex();
        final LogicalSlot slot =
                new TestingLogicalSlotBuilder()
                        .setTaskManagerGateway(new SubmitFailingSimpleAckingTaskManagerGateway())
                        .createTestingLogicalSlot();
        assertEquals(ExecutionState.CREATED, vertex.getExecutionState());
        vertex.getCurrentExecutionAttempt().transitionState(ExecutionState.SCHEDULED);
        vertex.deployToSlot(slot);
        // wait until the state transition must be done
        for (int i = 0; i < 100; i++) {
            if (vertex.getExecutionState() == ExecutionState.FAILED
                    && vertex.getFailureInfo().isPresent()) {
                break;
            } else {
                Thread.sleep(10);
            }
        }
        assertEquals(ExecutionState.FAILED, vertex.getExecutionState());
        assertTrue(vertex.getFailureInfo().isPresent());
        assertThat(
                vertex.getFailureInfo().map(ErrorInfo::getExceptionAsString).get(),
                containsString(ERROR_MESSAGE));
        assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.DEPLOYING) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.FAILED) > 0);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
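The SubmitFailingSimpleAckingTaskManagerGateway used above is a small helper defined inside ExecutionVertexDeploymentTest and is not shown on this page. The following is only a hedged sketch of what such a gateway might look like; the submitTask(TaskDeploymentDescriptor, Time) signature is an assumption about this Flink version (it has changed across releases), and ERROR_MESSAGE is the test class constant asserted on above.
// Sketch only: a gateway whose task submission always fails.
// Assumes TaskManagerGateway#submitTask(TaskDeploymentDescriptor, Time) in this Flink version.
class SubmitFailingGatewaySketch extends SimpleAckingTaskManagerGateway {
    @Override
    public CompletableFuture<Acknowledge> submitTask(TaskDeploymentDescriptor tdd, Time timeout) {
        // Completing the future exceptionally drives the vertex into ExecutionState.FAILED,
        // which is what testDeployFailedAsynchronously waits for above.
        return FutureUtils.completedExceptionally(new Exception(ERROR_MESSAGE));
    }
}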
Use of org.apache.flink.runtime.jobmaster.TestingLogicalSlotBuilder in project flink-mirror by flink-ci.
The class ExecutionVertexDeploymentTest, method testDeployWithAsynchronousAnswer.
@Test
public void testDeployWithAsynchronousAnswer() {
    try {
        final ExecutionVertex vertex = getExecutionVertex();
        final LogicalSlot slot = new TestingLogicalSlotBuilder().createTestingLogicalSlot();
        assertEquals(ExecutionState.CREATED, vertex.getExecutionState());
        vertex.getCurrentExecutionAttempt().transitionState(ExecutionState.SCHEDULED);
        vertex.deployToSlot(slot);
        // no repeated scheduling
        try {
            vertex.deployToSlot(slot);
            fail("Scheduled from wrong state");
        } catch (IllegalStateException e) {
            // as expected
        }
        assertEquals(ExecutionState.DEPLOYING, vertex.getExecutionState());
        // no repeated scheduling
        try {
            vertex.deployToSlot(slot);
            fail("Scheduled from wrong state");
        } catch (IllegalStateException e) {
            // as expected
        }
        assertTrue(vertex.getStateTimestamp(ExecutionState.CREATED) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.DEPLOYING) > 0);
        assertTrue(vertex.getStateTimestamp(ExecutionState.RUNNING) == 0);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}