Use of org.apache.flink.runtime.shuffle.ShuffleDescriptor in project flink by apache.
The class TaskDeploymentDescriptorFactory, method computeConsumedPartitionShuffleDescriptors.
private MaybeOffloaded<ShuffleDescriptor[]> computeConsumedPartitionShuffleDescriptors(
        ConsumedPartitionGroup consumedPartitionGroup) throws IOException {
    ShuffleDescriptor[] shuffleDescriptors =
            new ShuffleDescriptor[consumedPartitionGroup.size()];
    // Each edge is connected to a different result partition
    int i = 0;
    for (IntermediateResultPartitionID partitionId : consumedPartitionGroup) {
        shuffleDescriptors[i++] =
                getConsumedPartitionShuffleDescriptor(
                        resultPartitionRetriever.apply(partitionId),
                        partitionDeploymentConstraint);
    }
    return serializeAndTryOffloadShuffleDescriptors(shuffleDescriptors);
}
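For orientation, a ShuffleDescriptor is the per-partition handle that the shuffle service returns when a result partition is registered with it; the factory above collects one descriptor per partition in the consumed group and then serializes the array, offloading it to the blob server when it is large. Below is a minimal sketch of a custom implementation, assuming the interface shape of recent Flink versions (getResultPartitionID and storesLocalResourcesOn, with isUnknown provided as a default method); the class name and the premise of an external shuffle service are illustrative only, not part of Flink.

import java.util.Optional;

import org.apache.flink.runtime.clusterframework.types.ResourceID;
import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.shuffle.ShuffleDescriptor;

// Hypothetical descriptor for a partition held by an external shuffle service,
// so no local resources are pinned on any TaskExecutor.
public class ExternalShuffleDescriptor implements ShuffleDescriptor {

    private static final long serialVersionUID = 1L;

    private final ResultPartitionID resultPartitionID;

    public ExternalShuffleDescriptor(ResultPartitionID resultPartitionID) {
        this.resultPartitionID = resultPartitionID;
    }

    @Override
    public ResultPartitionID getResultPartitionID() {
        return resultPartitionID;
    }

    @Override
    public Optional<ResourceID> storesLocalResourcesOn() {
        // Data lives in the external service, not on a TaskExecutor, so no
        // TaskExecutor failure can invalidate this partition.
        return Optional.empty();
    }
}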
Use of org.apache.flink.runtime.shuffle.ShuffleDescriptor in project flink by apache.
The class DefaultExecutionGraphDeploymentTest, method testBuildDeploymentDescriptor.
@Test
public void testBuildDeploymentDescriptor() throws Exception {
    final JobVertexID jid1 = new JobVertexID();
    final JobVertexID jid2 = new JobVertexID();
    final JobVertexID jid3 = new JobVertexID();
    final JobVertexID jid4 = new JobVertexID();

    JobVertex v1 = new JobVertex("v1", jid1);
    JobVertex v2 = new JobVertex("v2", jid2);
    JobVertex v3 = new JobVertex("v3", jid3);
    JobVertex v4 = new JobVertex("v4", jid4);

    v1.setParallelism(10);
    v2.setParallelism(10);
    v3.setParallelism(10);
    v4.setParallelism(10);

    v1.setInvokableClass(BatchTask.class);
    v2.setInvokableClass(BatchTask.class);
    v3.setInvokableClass(BatchTask.class);
    v4.setInvokableClass(BatchTask.class);

    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v3.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    v4.connectNewDataSetAsInput(v2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);

    final JobGraph jobGraph = JobGraphTestUtils.batchJobGraph(v1, v2, v3, v4);
    final JobID jobId = jobGraph.getJobID();

    DirectScheduledExecutorService executor = new DirectScheduledExecutorService();
    DefaultExecutionGraph eg =
            TestingDefaultExecutionGraphBuilder.newBuilder()
                    .setJobGraph(jobGraph)
                    .setFutureExecutor(executor)
                    .setIoExecutor(executor)
                    .setBlobWriter(blobWriter)
                    .build();
    eg.start(ComponentMainThreadExecutorServiceAdapter.forMainThread());

    checkJobOffloaded(eg);

    ExecutionJobVertex ejv = eg.getAllVertices().get(jid2);
    ExecutionVertex vertex = ejv.getTaskVertices()[3];

    final SimpleAckingTaskManagerGateway taskManagerGateway = new SimpleAckingTaskManagerGateway();
    final CompletableFuture<TaskDeploymentDescriptor> tdd = new CompletableFuture<>();
    taskManagerGateway.setSubmitConsumer(
            FunctionUtils.uncheckedConsumer(
                    taskDeploymentDescriptor -> {
                        taskDeploymentDescriptor.loadBigData(blobCache);
                        tdd.complete(taskDeploymentDescriptor);
                    }));

    final LogicalSlot slot =
            new TestingLogicalSlotBuilder()
                    .setTaskManagerGateway(taskManagerGateway)
                    .createTestingLogicalSlot();

    assertEquals(ExecutionState.CREATED, vertex.getExecutionState());

    vertex.getCurrentExecutionAttempt().transitionState(ExecutionState.SCHEDULED);
    vertex.getCurrentExecutionAttempt()
            .registerProducedPartitions(slot.getTaskManagerLocation(), true)
            .get();
    vertex.deployToSlot(slot);

    assertEquals(ExecutionState.DEPLOYING, vertex.getExecutionState());
    checkTaskOffloaded(eg, vertex.getJobvertexId());

    TaskDeploymentDescriptor descr = tdd.get();
    assertNotNull(descr);

    JobInformation jobInformation =
            descr.getSerializedJobInformation().deserializeValue(getClass().getClassLoader());
    TaskInformation taskInformation =
            descr.getSerializedTaskInformation().deserializeValue(getClass().getClassLoader());

    assertEquals(jobId, descr.getJobId());
    assertEquals(jobId, jobInformation.getJobId());
    assertEquals(jid2, taskInformation.getJobVertexId());
    assertEquals(3, descr.getSubtaskIndex());
    assertEquals(10, taskInformation.getNumberOfSubtasks());
    assertEquals(BatchTask.class.getName(), taskInformation.getInvokableClassName());
    assertEquals("v2", taskInformation.getTaskName());

    Collection<ResultPartitionDeploymentDescriptor> producedPartitions = descr.getProducedPartitions();
    Collection<InputGateDeploymentDescriptor> consumedPartitions = descr.getInputGates();

    assertEquals(2, producedPartitions.size());
    assertEquals(1, consumedPartitions.size());

    Iterator<ResultPartitionDeploymentDescriptor> iteratorProducedPartitions =
            producedPartitions.iterator();
    Iterator<InputGateDeploymentDescriptor> iteratorConsumedPartitions =
            consumedPartitions.iterator();

    assertEquals(10, iteratorProducedPartitions.next().getNumberOfSubpartitions());
    assertEquals(10, iteratorProducedPartitions.next().getNumberOfSubpartitions());

    ShuffleDescriptor[] shuffleDescriptors =
            iteratorConsumedPartitions.next().getShuffleDescriptors();
    assertEquals(10, shuffleDescriptors.length);

    Iterator<ConsumedPartitionGroup> iteratorConsumedPartitionGroup =
            vertex.getAllConsumedPartitionGroups().iterator();

    int idx = 0;
    for (IntermediateResultPartitionID partitionId : iteratorConsumedPartitionGroup.next()) {
        assertEquals(partitionId, shuffleDescriptors[idx++].getResultPartitionID().getPartitionId());
    }
}
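Note that the submit consumer calls loadBigData(blobCache) before completing the future: this re-inflates any job, task, or shuffle information that was offloaded to the blob server, so the descriptor is fully materialized before the assertions run. Once loaded, the consumed side of a TaskDeploymentDescriptor can be walked as in the following sketch, a hypothetical helper mirroring the test's final loop; variable names are illustrative.

// Sketch: inspecting the consumed side of a fully loaded TaskDeploymentDescriptor.
static void inspectConsumedSide(TaskDeploymentDescriptor descr) {
    for (InputGateDeploymentDescriptor gate : descr.getInputGates()) {
        for (ShuffleDescriptor sd : gate.getShuffleDescriptors()) {
            if (sd.isUnknown()) {
                // Producer location was unknown at deployment time; the channel
                // is patched later via updatePartitionInfo (see the next example).
            } else {
                // A known descriptor identifies the producing partition directly.
                ResultPartitionID producer = sd.getResultPartitionID();
            }
        }
    }
}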
Use of org.apache.flink.runtime.shuffle.ShuffleDescriptor in project flink by apache.
The class NettyShuffleEnvironment, method updatePartitionInfo.
@Override
public boolean updatePartitionInfo(ExecutionAttemptID consumerID, PartitionInfo partitionInfo)
        throws IOException, InterruptedException {
    IntermediateDataSetID intermediateResultPartitionID = partitionInfo.getIntermediateDataSetID();
    InputGateID id = new InputGateID(intermediateResultPartitionID, consumerID);
    SingleInputGate inputGate = inputGatesById.get(id);
    if (inputGate == null) {
        return false;
    }
    ShuffleDescriptor shuffleDescriptor = partitionInfo.getShuffleDescriptor();
    checkArgument(
            shuffleDescriptor instanceof NettyShuffleDescriptor,
            "Tried to update unknown channel with unknown ShuffleDescriptor %s.",
            shuffleDescriptor.getClass().getName());
    inputGate.updateInputChannel(taskExecutorResourceId, (NettyShuffleDescriptor) shuffleDescriptor);
    return true;
}
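This method is the consumer-side completion of the unknown-descriptor handshake: when a producer's location becomes known only after the consumer was deployed, the JobMaster sends a PartitionInfo to the TaskExecutor, which forwards it to the shuffle environment as sketched below (a hypothetical wrapper; parameter names are illustrative).

// Sketch: forwarding a late partition update to the shuffle environment.
static void forwardPartitionInfo(
        ShuffleEnvironment<?, ?> shuffleEnvironment,
        ExecutionAttemptID consumerAttemptId,
        IntermediateDataSetID dataSetId,
        NettyShuffleDescriptor nettyDescriptor) throws IOException, InterruptedException {
    // PartitionInfo pairs the consumed data set id with the new descriptor.
    PartitionInfo partitionInfo = new PartitionInfo(dataSetId, nettyDescriptor);
    if (!shuffleEnvironment.updatePartitionInfo(consumerAttemptId, partitionInfo)) {
        // No input gate is registered under this (dataSetId, consumer) pair:
        // the consumer already finished or was canceled, so the update is dropped.
    }
}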
Use of org.apache.flink.runtime.shuffle.ShuffleDescriptor in project flink by apache.
The class RemoveCachedShuffleDescriptorTest, method testRemoveCacheForAllToAllEdgeAfterFinished.
private void testRemoveCacheForAllToAllEdgeAfterFinished(
        TestingBlobWriter blobWriter, int expectedBefore, int expectedAfter) throws Exception {
    final JobID jobId = new JobID();
    final JobVertex v1 = ExecutionGraphTestUtils.createNoOpVertex("v1", PARALLELISM);
    final JobVertex v2 = ExecutionGraphTestUtils.createNoOpVertex("v2", PARALLELISM);

    final DefaultScheduler scheduler =
            createSchedulerAndDeploy(jobId, v1, v2, DistributionPattern.ALL_TO_ALL, blobWriter);
    final ExecutionGraph executionGraph = scheduler.getExecutionGraph();

    // ShuffleDescriptors should be cached during the deployment
    final ShuffleDescriptor[] shuffleDescriptors =
            deserializeShuffleDescriptors(
                    getConsumedCachedShuffleDescriptor(executionGraph, v2), jobId, blobWriter);
    assertEquals(PARALLELISM, shuffleDescriptors.length);
    assertEquals(expectedBefore, blobWriter.numberOfBlobs());

    // For the all-to-all edge, we transition all downstream tasks to finished
    CompletableFuture.runAsync(
                    () -> transitionTasksToFinished(executionGraph, v2.getID()), mainThreadExecutor)
            .join();
    ioExecutor.triggerAll();

    // Cache should be removed since partitions are released
    assertNull(getConsumedCachedShuffleDescriptor(executionGraph, v2));
    assertEquals(expectedAfter, blobWriter.numberOfBlobs());
}
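The blob counts in this test follow from the offloading decision made when the descriptors are first serialized: small arrays travel inline in the TaskDeploymentDescriptor, large ones are written to the blob server and replaced by a blob key, and those blobs are what gets cleaned up together with the cache. The helper below is a hypothetical illustration of that decision, not Flink's actual implementation; blobOffloadMinSize stands in for the configured offload threshold.

import java.io.IOException;

import org.apache.flink.api.common.JobID;
import org.apache.flink.runtime.blob.BlobWriter;
import org.apache.flink.runtime.blob.PermanentBlobKey;
import org.apache.flink.runtime.deployment.TaskDeploymentDescriptor;
import org.apache.flink.runtime.deployment.TaskDeploymentDescriptor.MaybeOffloaded;
import org.apache.flink.runtime.shuffle.ShuffleDescriptor;
import org.apache.flink.util.SerializedValue;

// Hypothetical sketch of the offload decision: ship small payloads inline,
// put large ones on the blob server and ship only the key.
final class ShuffleDescriptorOffloadSketch {

    static MaybeOffloaded<ShuffleDescriptor[]> serializeAndTryOffload(
            ShuffleDescriptor[] shuffleDescriptors,
            JobID jobId,
            BlobWriter blobWriter,
            int blobOffloadMinSize) throws IOException {

        SerializedValue<ShuffleDescriptor[]> serialized = new SerializedValue<>(shuffleDescriptors);
        if (serialized.getByteArray().length < blobOffloadMinSize) {
            return new TaskDeploymentDescriptor.NonOffloaded<>(serialized);
        }
        PermanentBlobKey key = blobWriter.putPermanent(jobId, serialized.getByteArray());
        return new TaskDeploymentDescriptor.Offloaded<>(key);
    }
}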
Use of org.apache.flink.runtime.shuffle.ShuffleDescriptor in project flink by apache.
The class TaskTest, method testExecutionFailsInNetworkRegistrationForPartitions.
@Test
public void testExecutionFailsInNetworkRegistrationForPartitions() throws Exception {
    final PartitionDescriptor partitionDescriptor = PartitionDescriptorBuilder.newBuilder().build();
    final ShuffleDescriptor shuffleDescriptor =
            NettyShuffleDescriptorBuilder.newBuilder().buildLocal();
    final ResultPartitionDeploymentDescriptor dummyPartition =
            new ResultPartitionDeploymentDescriptor(partitionDescriptor, shuffleDescriptor, 1, false);
    testExecutionFailsInNetworkRegistration(
            Collections.singletonList(dummyPartition), Collections.emptyList());
}
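For completeness, the descriptor wrapped into the ResultPartitionDeploymentDescriptor above originates from the producer-side registration with the ShuffleMaster, which asynchronously yields a ShuffleDescriptor for the partition. A minimal sketch of that call, assuming the two-argument registerPartitionWithProducer signature (newer Flink versions additionally pass the JobID):

import java.util.concurrent.CompletableFuture;

import org.apache.flink.runtime.shuffle.PartitionDescriptor;
import org.apache.flink.runtime.shuffle.ProducerDescriptor;
import org.apache.flink.runtime.shuffle.ShuffleDescriptor;
import org.apache.flink.runtime.shuffle.ShuffleMaster;

// Sketch: producer-side registration that yields the ShuffleDescriptor.
final class ProducerRegistrationSketch {

    static CompletableFuture<? extends ShuffleDescriptor> register(
            ShuffleMaster<?> shuffleMaster,
            PartitionDescriptor partitionDescriptor,
            ProducerDescriptor producerDescriptor) {
        return shuffleMaster.registerPartitionWithProducer(partitionDescriptor, producerDescriptor);
    }
}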