Use of org.apache.flink.runtime.shuffle.ShuffleDescriptor in project flink by apache.
The class TaskTest, method testExecutionFailsInNetworkRegistrationForGates.
@Test
public void testExecutionFailsInNetworkRegistrationForGates() throws Exception {
    final ShuffleDescriptor dummyChannel =
            NettyShuffleDescriptorBuilder.newBuilder().buildRemote();
    final InputGateDeploymentDescriptor dummyGate =
            new InputGateDeploymentDescriptor(
                    new IntermediateDataSetID(),
                    ResultPartitionType.PIPELINED,
                    0,
                    new ShuffleDescriptor[] {dummyChannel});
    testExecutionFailsInNetworkRegistration(
            Collections.emptyList(), Collections.singletonList(dummyGate));
}
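The helper invoked on the last line registers the input gates with a shuffle environment that fails, then asserts that the task ends up FAILED rather than RUNNING. A minimal sketch of that failure-translation pattern, using hypothetical stand-in types (GateRegistry, runTask) rather than Flink's real Task and ShuffleEnvironment:

import java.util.List;

// Hypothetical stand-ins, not Flink classes: they model only the pattern of
// turning a failure during network registration into a FAILED task state.
class NetworkRegistrationSketch {
    enum TaskState { RUNNING, FAILED }

    interface GateRegistry {
        void registerGates(List<String> gateNames) throws Exception;
    }

    static TaskState runTask(GateRegistry registry, List<String> gates) {
        try {
            // Registration happens before the task switches to RUNNING, so a
            // bad descriptor fails the task instead of the running invokable.
            registry.registerGates(gates);
            return TaskState.RUNNING;
        } catch (Exception e) {
            return TaskState.FAILED;
        }
    }

    public static void main(String[] args) {
        GateRegistry failing =
                gates -> { throw new Exception("network registration failed"); };
        System.out.println(runTask(failing, List.of("dummyGate"))); // FAILED
    }
}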
Use of org.apache.flink.runtime.shuffle.ShuffleDescriptor in project flink by apache.
The class RemoveCachedShuffleDescriptorTest, method testRemoveCacheForPointwiseEdgeAfterFailover.
private void testRemoveCacheForPointwiseEdgeAfterFailover(
        TestingBlobWriter blobWriter, int expectedBefore, int expectedAfter) throws Exception {
    final JobID jobId = new JobID();

    final JobVertex v1 = ExecutionGraphTestUtils.createNoOpVertex("v1", PARALLELISM);
    final JobVertex v2 = ExecutionGraphTestUtils.createNoOpVertex("v2", PARALLELISM);

    final DefaultScheduler scheduler =
            createSchedulerAndDeploy(jobId, v1, v2, DistributionPattern.POINTWISE, blobWriter);
    final ExecutionGraph executionGraph = scheduler.getExecutionGraph();

    // ShuffleDescriptors should be cached during the deployment
    final ShuffleDescriptor[] shuffleDescriptors =
            deserializeShuffleDescriptors(
                    getConsumedCachedShuffleDescriptor(executionGraph, v2), jobId, blobWriter);
    assertEquals(1, shuffleDescriptors.length);
    assertEquals(expectedBefore, blobWriter.numberOfBlobs());

    triggerExceptionAndComplete(executionGraph, v1, v2);
    ioExecutor.triggerAll();

    // The cache of the first upstream task should be removed during
    // ExecutionVertex#resetForNewExecution
    assertNull(getConsumedCachedShuffleDescriptor(executionGraph, v2, 0));

    // The cache of the other upstream tasks should stay
    final ShuffleDescriptor[] shuffleDescriptorsForOtherVertex =
            deserializeShuffleDescriptors(
                    getConsumedCachedShuffleDescriptor(executionGraph, v2, 1), jobId, blobWriter);
    assertEquals(1, shuffleDescriptorsForOtherVertex.length);

    assertEquals(expectedAfter, blobWriter.numberOfBlobs());
}
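With a POINTWISE edge, each downstream task consumes its own ConsumedPartitionGroup containing a single partition, which is why the test expects arrays of length 1 and why only the failed producer's entry disappears. A toy sketch of that per-group eviction, with a plain Map standing in for the shuffleDescriptorCache and integers standing in for ConsumedPartitionGroup (names are ad hoc, not Flink types):

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of per-group cache eviction after a producer failover.
class PointwiseCacheEvictionSketch {
    public static void main(String[] args) {
        Map<Integer, String[]> cache = new HashMap<>();
        // POINTWISE with parallelism 4: one single-descriptor group per consumer.
        for (int group = 0; group < 4; group++) {
            cache.put(group, new String[] {"descriptor-" + group});
        }
        // Producer 0 fails over: only its group's cached entry becomes stale.
        cache.remove(0);
        System.out.println(cache.containsKey(0)); // false, evicted
        System.out.println(cache.get(1).length);  // 1, neighbor untouched
    }
}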
Use of org.apache.flink.runtime.shuffle.ShuffleDescriptor in project flink by apache.
The class RemoveCachedShuffleDescriptorTest, method testRemoveCacheForPointwiseEdgeAfterFinished.
private void testRemoveCacheForPointwiseEdgeAfterFinished(
        TestingBlobWriter blobWriter, int expectedBefore, int expectedAfter) throws Exception {
    final JobID jobId = new JobID();

    final JobVertex v1 = ExecutionGraphTestUtils.createNoOpVertex("v1", PARALLELISM);
    final JobVertex v2 = ExecutionGraphTestUtils.createNoOpVertex("v2", PARALLELISM);

    final DefaultScheduler scheduler =
            createSchedulerAndDeploy(jobId, v1, v2, DistributionPattern.POINTWISE, blobWriter);
    final ExecutionGraph executionGraph = scheduler.getExecutionGraph();

    // ShuffleDescriptors should be cached during the deployment
    final ShuffleDescriptor[] shuffleDescriptors =
            deserializeShuffleDescriptors(
                    getConsumedCachedShuffleDescriptor(executionGraph, v2), jobId, blobWriter);
    assertEquals(1, shuffleDescriptors.length);
    assertEquals(expectedBefore, blobWriter.numberOfBlobs());

    // For the pointwise edge, we just transition the first downstream task to FINISHED
    ExecutionVertex ev21 =
            Objects.requireNonNull(executionGraph.getJobVertex(v2.getID())).getTaskVertices()[0];
    CompletableFuture.runAsync(
                    () -> transitionTaskToFinished(executionGraph, ev21), mainThreadExecutor)
            .join();
    ioExecutor.triggerAll();

    // The cache of the first upstream task should be removed since its partition is released
    assertNull(getConsumedCachedShuffleDescriptor(executionGraph, v2, 0));

    // The cache of the other upstream tasks should stay
    final ShuffleDescriptor[] shuffleDescriptorsForOtherVertex =
            deserializeShuffleDescriptors(
                    getConsumedCachedShuffleDescriptor(executionGraph, v2, 1), jobId, blobWriter);
    assertEquals(1, shuffleDescriptorsForOtherVertex.length);

    assertEquals(expectedAfter, blobWriter.numberOfBlobs());
}
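Here the eviction is driven by partition release rather than failover: once the sole consumer of a pointwise partition finishes, the partition can be released and the cached descriptors for its group become garbage. A toy sketch of that release path, with hypothetical maps standing in for the cache and the consumer bookkeeping:

import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch: a consumed partition group is released when its last
// consumer finishes, and release evicts the cached descriptors for the group.
class ReleaseOnFinishSketch {
    static final Map<Integer, String[]> CACHE = new HashMap<>();
    static final Map<Integer, Integer> UNFINISHED_CONSUMERS = new HashMap<>();

    static void consumerFinished(int group) {
        int remaining = UNFINISHED_CONSUMERS.merge(group, -1, Integer::sum);
        if (remaining == 0) {
            CACHE.remove(group); // partition released, cache no longer needed
        }
    }

    public static void main(String[] args) {
        CACHE.put(0, new String[] {"descriptor-0"});
        UNFINISHED_CONSUMERS.put(0, 1); // pointwise: a single consumer
        consumerFinished(0);
        System.out.println(CACHE.containsKey(0)); // false, evicted on release
    }
}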
Use of org.apache.flink.runtime.shuffle.ShuffleDescriptor in project flink by apache.
The class RemoveCachedShuffleDescriptorTest, method testRemoveCacheForAllToAllEdgeAfterFailover.
private void testRemoveCacheForAllToAllEdgeAfterFailover(
        TestingBlobWriter blobWriter, int expectedBefore, int expectedAfter) throws Exception {
    final JobID jobId = new JobID();

    final JobVertex v1 = ExecutionGraphTestUtils.createNoOpVertex("v1", PARALLELISM);
    final JobVertex v2 = ExecutionGraphTestUtils.createNoOpVertex("v2", PARALLELISM);

    final DefaultScheduler scheduler =
            createSchedulerAndDeploy(jobId, v1, v2, DistributionPattern.ALL_TO_ALL, blobWriter);
    final ExecutionGraph executionGraph = scheduler.getExecutionGraph();

    // ShuffleDescriptors should be cached during the deployment
    final ShuffleDescriptor[] shuffleDescriptors =
            deserializeShuffleDescriptors(
                    getConsumedCachedShuffleDescriptor(executionGraph, v2), jobId, blobWriter);
    assertEquals(PARALLELISM, shuffleDescriptors.length);
    assertEquals(expectedBefore, blobWriter.numberOfBlobs());

    triggerGlobalFailoverAndComplete(scheduler, v1);
    ioExecutor.triggerAll();

    // Cache should be removed during ExecutionVertex#resetForNewExecution
    assertNull(getConsumedCachedShuffleDescriptor(executionGraph, v2));
    assertEquals(expectedAfter, blobWriter.numberOfBlobs());
}
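The ALL_TO_ALL variant differs from the pointwise tests in cache shape: all downstream tasks share one ConsumedPartitionGroup holding all PARALLELISM producer partitions, so a single cached array of length PARALLELISM serves every consumer, and a global failover drops it in one step. A toy illustration of the two shapes (names are ad hoc, not Flink types):

// Ad hoc illustration, not Flink code: contrasts the cache shape of the two
// distribution patterns for a producer parallelism of 4.
class DistributionPatternSketch {
    public static void main(String[] args) {
        int parallelism = 4;

        // ALL_TO_ALL: one group shared by all consumers, PARALLELISM entries.
        String[] allToAllGroup = new String[parallelism];

        // POINTWISE: PARALLELISM groups, each with a single entry.
        String[][] pointwiseGroups = new String[parallelism][1];

        System.out.println("all-to-all: 1 group of " + allToAllGroup.length);
        System.out.println("pointwise: " + pointwiseGroups.length + " groups of 1");
    }
}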
Use of org.apache.flink.runtime.shuffle.ShuffleDescriptor in project flink by apache.
The class IntermediateResult, method clearCachedInformationForPartitionGroup.
public void clearCachedInformationForPartitionGroup(
        ConsumedPartitionGroup consumedPartitionGroup) {
    // When a ConsumedPartitionGroup changes, the cache of ShuffleDescriptors for this
    // partition group is no longer valid and needs to be removed.
    //
    // Currently, there are two scenarios:
    // 1. The ConsumedPartitionGroup is released
    // 2. Its producer encounters a failover

    // Remove the cache for the ConsumedPartitionGroup and notify the BLOB writer to delete the
    // cache if it is offloaded
    final MaybeOffloaded<ShuffleDescriptor[]> cache =
            this.shuffleDescriptorCache.remove(consumedPartitionGroup);
    if (cache instanceof Offloaded) {
        PermanentBlobKey blobKey = ((Offloaded<ShuffleDescriptor[]>) cache).serializedValueKey;
        this.producer.getGraph().deleteBlobs(Collections.singletonList(blobKey));
    }
}
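Whether eviction also has to touch the BLOB store depends on how the descriptors were serialized: small arrays travel inline with the deployment descriptor (NonOffloaded), while large ones are written to the BLOB server and the cache holds only a PermanentBlobKey (Offloaded). A self-contained sketch of that two-variant cache value, with local stand-in types mimicking MaybeOffloaded rather than the real Flink classes:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical sketch of the inline-vs-offloaded cache value pattern; the
// nested types are local stand-ins, not Flink's MaybeOffloaded hierarchy.
class OffloadedCacheSketch {
    interface MaybeOffloaded {}
    record Inline(String[] descriptors) implements MaybeOffloaded {}
    record Offloaded(String blobKey) implements MaybeOffloaded {}

    static final Map<Integer, MaybeOffloaded> CACHE = new HashMap<>();
    static final Set<String> BLOB_STORE = new HashSet<>();

    static void clearCachedInformation(int group) {
        MaybeOffloaded removed = CACHE.remove(group);
        if (removed instanceof Offloaded o) {
            BLOB_STORE.remove(o.blobKey()); // also delete the offloaded blob
        }
    }

    public static void main(String[] args) {
        BLOB_STORE.add("blob-1");
        CACHE.put(0, new Offloaded("blob-1"));
        CACHE.put(1, new Inline(new String[] {"descriptor"}));
        clearCachedInformation(0);
        System.out.println(BLOB_STORE.isEmpty()); // true, blob deleted too
    }
}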