Use of org.apache.flink.runtime.blob.BlobWriter in project flink by apache.
From the class ExecutionJobVertex, the method getTaskInformationOrBlobKey:
public Either<SerializedValue<TaskInformation>, PermanentBlobKey> getTaskInformationOrBlobKey()
        throws IOException {
    // serialize the task information lazily, at most once
    synchronized (stateMonitor) {
        if (taskInformationOrBlobKey == null) {
            final BlobWriter blobWriter = graph.getBlobWriter();

            final TaskInformation taskInformation =
                    new TaskInformation(
                            jobVertex.getID(),
                            jobVertex.getName(),
                            parallelismInfo.getParallelism(),
                            parallelismInfo.getMaxParallelism(),
                            jobVertex.getInvokableClassName(),
                            jobVertex.getConfiguration());

            // small payloads stay inline (Either.Left); large ones are offloaded to
            // the BlobServer and replaced by a PermanentBlobKey (Either.Right)
            taskInformationOrBlobKey =
                    BlobWriter.serializeAndTryOffload(taskInformation, getJobId(), blobWriter);
        }

        return taskInformationOrBlobKey;
    }
}
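For context, BlobWriter.serializeAndTryOffload is what produces the Either above: it compares the serialized size against the writer's minimum offloading size and either keeps the value inline or uploads it to the blob store. A simplified sketch of that decision follows; Flink's real static helper additionally falls back to the inline value when the blob upload fails, which is omitted here.

// Simplified sketch of BlobWriter.serializeAndTryOffload (error handling
// around the blob upload omitted).
static <T> Either<SerializedValue<T>, PermanentBlobKey> serializeAndTryOffload(
        T value, JobID jobId, BlobWriter blobWriter) throws IOException {
    final SerializedValue<T> serializedValue = new SerializedValue<>(value);

    if (serializedValue.getByteArray().length < blobWriter.getMinOffloadingSize()) {
        // below the threshold: ship the bytes inline with the deployment descriptor
        return Either.Left(serializedValue);
    }

    // at or above the threshold: upload once to the BlobServer, reference by key
    final PermanentBlobKey blobKey =
            blobWriter.putPermanent(jobId, serializedValue.getByteArray());
    return Either.Right(blobKey);
}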
Use of org.apache.flink.runtime.blob.BlobWriter in project flink by apache.
From the class DefaultExecutionGraphDeploymentTest, the method testBuildDeploymentDescriptor:
@Test
public void testBuildDeploymentDescriptor() throws Exception {
    final JobVertexID jid1 = new JobVertexID();
    final JobVertexID jid2 = new JobVertexID();
    final JobVertexID jid3 = new JobVertexID();
    final JobVertexID jid4 = new JobVertexID();

    JobVertex v1 = new JobVertex("v1", jid1);
    JobVertex v2 = new JobVertex("v2", jid2);
    JobVertex v3 = new JobVertex("v3", jid3);
    JobVertex v4 = new JobVertex("v4", jid4);

    v1.setParallelism(10);
    v2.setParallelism(10);
    v3.setParallelism(10);
    v4.setParallelism(10);

    v1.setInvokableClass(BatchTask.class);
    v2.setInvokableClass(BatchTask.class);
    v3.setInvokableClass(BatchTask.class);
    v4.setInvokableClass(BatchTask.class);

    // topology: v1 -> v2 -> {v3, v4}, all pipelined all-to-all edges
    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v3.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

    final JobGraph jobGraph = JobGraphTestUtils.batchJobGraph(v1, v2, v3, v4);
    final JobID jobId = jobGraph.getJobID();

    DirectScheduledExecutorService executor = new DirectScheduledExecutorService();
    DefaultExecutionGraph eg =
            TestingDefaultExecutionGraphBuilder.newBuilder()
                    .setJobGraph(jobGraph)
                    .setFutureExecutor(executor)
                    .setIoExecutor(executor)
                    .setBlobWriter(blobWriter)
                    .build();
    eg.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());

    checkJobOffloaded(eg);

    ExecutionJobVertex ejv = eg.getAllVertices().get(jid2);
    ExecutionVertex vertex = ejv.getTaskVertices()[3];

    final SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();
    final CompletableFuture<TaskDeploymentDescriptor> tdd = new CompletableFuture<>();
    taskManagerGateway.setSubmitConsumer(
            FunctionUtils.uncheckedConsumer(
                    taskDeploymentDescriptor -> {
                        // resolve offloaded job/task information via the blob cache
                        taskDeploymentDescriptor.loadBigData(blobCache);
                        tdd.complete(taskDeploymentDescriptor);
                    }));

    final LogicalSlot slot =
            new TestingLogicalSlotBuilder()
                    .setTaskManagerGateway(taskManagerGateway)
                    .createTestingLogicalSlot();

    assertEquals(ExecutionState.CREATED, vertex.getExecutionState());

    vertex.getCurrentExecutionAttempt().transitionState(ExecutionState.SCHEDULED);
    vertex.getCurrentExecutionAttempt()
            .registerProducedPartitions(slot.getTaskManagerLocation(), true)
            .get();
    vertex.deployToSlot(slot);

    assertEquals(ExecutionState.DEPLOYING, vertex.getExecutionState());
    checkTaskOffloaded(eg, vertex.getJobvertexId());

    TaskDeploymentDescriptor descr = tdd.get();
    assertNotNull(descr);

    JobInformation jobInformation =
            descr.getSerializedJobInformation().deserializeValue(getClass().getClassLoader());
    TaskInformation taskInformation =
            descr.getSerializedTaskInformation().deserializeValue(getClass().getClassLoader());

    assertEquals(jobId, descr.getJobId());
    assertEquals(jobId, jobInformation.getJobId());
    assertEquals(jid2, taskInformation.getJobVertexId());
    assertEquals(3, descr.getSubtaskIndex());
    assertEquals(10, taskInformation.getNumberOfSubtasks());
    assertEquals(BatchTask.class.getName(), taskInformation.getInvokableClassName());
    assertEquals("v2", taskInformation.getTaskName());

    Collection<ResultPartitionDeploymentDescriptor> producedPartitions = descr.getProducedPartitions();
    Collection<InputGateDeploymentDescriptor> consumedPartitions = descr.getInputGates();

    assertEquals(2, producedPartitions.size());
    assertEquals(1, consumedPartitions.size());

    Iterator<ResultPartitionDeploymentDescriptor> iteratorProducedPartitions =
            producedPartitions.iterator();
    Iterator<InputGateDeploymentDescriptor> iteratorConsumedPartitions =
            consumedPartitions.iterator();

    assertEquals(10, iteratorProducedPartitions.next().getNumberOfSubpartitions());
    assertEquals(10, iteratorProducedPartitions.next().getNumberOfSubpartitions());

    ShuffleDescriptor[] shuffleDescriptors =
            iteratorConsumedPartitions.next().getShuffleDescriptors();
    assertEquals(10, shuffleDescriptors.length);

    Iterator<ConsumedPartitionGroup> iteratorConsumedPartitionGroup =
            vertex.getAllConsumedPartitionGroups().iterator();

    int idx = 0;
    for (IntermediateResultPartitionID partitionId : iteratorConsumedPartitionGroup.next()) {
        assertEquals(partitionId, shuffleDescriptors[idx++].getResultPartitionID().getPartitionId());
    }
}
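The checkJobOffloaded and checkTaskOffloaded calls above are hooks defined by the test class that assert which Either branch was taken for the job and task information. A hedged usage sketch of inspecting that result directly, with ejv as in the test (deserializeValue throws checked exceptions, so the caller declares Exception):

// Hedged usage sketch: branching on the Either returned by
// ExecutionJobVertex#getTaskInformationOrBlobKey.
Either<SerializedValue<TaskInformation>, PermanentBlobKey> result =
        ejv.getTaskInformationOrBlobKey();
if (result.isLeft()) {
    // inline case: the serialized task information travels with the TDD
    TaskInformation info = result.left().deserializeValue(getClass().getClassLoader());
} else {
    // offloaded case: only the blob key travels; TaskExecutors fetch the
    // bytes from the permanent blob service (cf. loadBigData above)
    PermanentBlobKey key = result.right();
}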
Use of org.apache.flink.runtime.blob.BlobWriter in project flink by apache.
From the class DefaultExecutionGraphFactory, the method createAndRestoreExecutionGraph:
@Override
public ExecutionGraph createAndRestoreExecutionGraph(
        JobGraph jobGraph,
        CompletedCheckpointStore completedCheckpointStore,
        CheckpointsCleaner checkpointsCleaner,
        CheckpointIDCounter checkpointIdCounter,
        TaskDeploymentDescriptorFactory.PartitionLocationConstraint partitionLocationConstraint,
        long initializationTimestamp,
        VertexAttemptNumberStore vertexAttemptNumberStore,
        VertexParallelismStore vertexParallelismStore,
        ExecutionStateUpdateListener executionStateUpdateListener,
        Logger log)
        throws Exception {
    ExecutionDeploymentListener executionDeploymentListener =
            new ExecutionDeploymentTrackerDeploymentListenerAdapter(executionDeploymentTracker);
    ExecutionStateUpdateListener combinedExecutionStateUpdateListener =
            (execution, previousState, newState) -> {
                executionStateUpdateListener.onStateUpdate(execution, previousState, newState);
                if (newState.isTerminal()) {
                    executionDeploymentTracker.stopTrackingDeploymentOf(execution);
                }
            };

    // the factory's BlobWriter is threaded into the graph builder so that
    // ExecutionJobVertex can offload large job/task information blobs
    final ExecutionGraph newExecutionGraph =
            DefaultExecutionGraphBuilder.buildGraph(
                    jobGraph,
                    configuration,
                    futureExecutor,
                    ioExecutor,
                    userCodeClassLoader,
                    completedCheckpointStore,
                    checkpointsCleaner,
                    checkpointIdCounter,
                    rpcTimeout,
                    blobWriter,
                    log,
                    shuffleMaster,
                    jobMasterPartitionTracker,
                    partitionLocationConstraint,
                    executionDeploymentListener,
                    combinedExecutionStateUpdateListener,
                    initializationTimestamp,
                    vertexAttemptNumberStore,
                    vertexParallelismStore,
                    checkpointStatsTrackerFactory,
                    isDynamicGraph);

    final CheckpointCoordinator checkpointCoordinator =
            newExecutionGraph.getCheckpointCoordinator();

    if (checkpointCoordinator != null) {
        // check whether we find a valid checkpoint
        if (!checkpointCoordinator.restoreInitialCheckpointIfPresent(
                new HashSet<>(newExecutionGraph.getAllVertices().values()))) {
            // check whether we can restore from a savepoint
            tryRestoreExecutionGraphFromSavepoint(
                    newExecutionGraph, jobGraph.getSavepointRestoreSettings());
        }
    }

    return newExecutionGraph;
}
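The blobWriter field used above is injected through the factory's constructor; in a real cluster it is the BlobServer, which implements BlobWriter. A minimal wiring sketch, assuming a two-argument BlobServer constructor (the exact signature varies across Flink versions) and an illustrative 1 MiB threshold:

// Minimal sketch; constructor signature may differ between Flink versions.
Configuration config = new Configuration();
// payloads of at least this many bytes are offloaded instead of shipped inline
config.setInteger(BlobServerOptions.OFFLOAD_MINSIZE, 1024 * 1024);

BlobServer blobServer = new BlobServer(config, new VoidBlobStore());
blobServer.start();

BlobWriter blobWriter = blobServer; // BlobServer implements BlobWriter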