Use of org.apache.flink.runtime.scheduler.DefaultScheduler in project flink by apache.
The class OperatorCoordinatorSchedulerTest, method testFailureToStartClosesCoordinator.
@Test
public void testFailureToStartClosesCoordinator() throws Exception {
    final OperatorCoordinator.Provider failingCoordinatorProvider =
            new TestingOperatorCoordinator.Provider(
                    testOperatorId, CoordinatorThatFailsInStart::new);
    final DefaultScheduler scheduler = createScheduler(failingCoordinatorProvider);
    final TestingOperatorCoordinator coordinator = getCoordinator(scheduler);

    try {
        scheduler.startScheduling();
    } catch (Exception ignored) {
        // startScheduling() is expected to fail because the coordinator
        // throws in start(); the test only asserts the resulting cleanup
    }

    assertTrue(coordinator.isClosed());
}
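The helper behind this test, CoordinatorThatFailsInStart, is not shown above. A minimal sketch of such a stub, assuming TestingOperatorCoordinator exposes a constructor taking an OperatorCoordinator.Context (the actual class in the Flink test suite may differ in detail):

private static final class CoordinatorThatFailsInStart extends TestingOperatorCoordinator {

    CoordinatorThatFailsInStart(OperatorCoordinator.Context context) {
        super(context);
    }

    @Override
    public void start() throws Exception {
        // throwing here makes scheduler.startScheduling() fail, which the
        // scheduler must answer by closing the coordinator
        throw new Exception("test failure in coordinator start()");
    }
}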
Use of org.apache.flink.runtime.scheduler.DefaultScheduler in project flink by apache.
The class OperatorCoordinatorSchedulerTest, method testLocalFailoverDoesNotResetToCheckpoint.
@Test
public void testLocalFailoverDoesNotResetToCheckpoint() throws Exception {
    final DefaultScheduler scheduler = createSchedulerAndDeployTasks();
    final TestingOperatorCoordinator coordinator = getCoordinator(scheduler);

    takeCompleteCheckpoint(scheduler, coordinator, new byte[] {37, 11, 83, 4});
    // failing and restarting a single task is a local (regional) failover;
    // it must not roll the operator coordinator back to the checkpoint
    failAndRestartTask(scheduler, 0);

    assertNull(
            "coordinator should not have a restored checkpoint",
            coordinator.getLastRestoredCheckpointState());
}
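For contrast with the local case, a job-wide (global) failover is expected to restore the coordinator from the last completed checkpoint. A hedged sketch of the mirror-image test, reusing only helpers that appear above; the failGlobalAndRestart helper name is an assumption, not a confirmed Flink test utility:

@Test
public void testGlobalFailoverResetsToCheckpoint() throws Exception {
    final DefaultScheduler scheduler = createSchedulerAndDeployTasks();
    final TestingOperatorCoordinator coordinator = getCoordinator(scheduler);
    final byte[] checkpointState = new byte[] {37, 11, 83, 4};

    takeCompleteCheckpoint(scheduler, coordinator, checkpointState);
    // a global failover restarts every task, so the coordinator must be
    // rolled back to the last completed checkpoint (hypothetical helper)
    failGlobalAndRestart(scheduler, new Exception("test failure"));

    assertArrayEquals(checkpointState, coordinator.getLastRestoredCheckpointState());
}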
Use of org.apache.flink.runtime.scheduler.DefaultScheduler in project flink by apache.
The class RemoveCachedShuffleDescriptorTest, method testRemoveCacheForPointwiseEdgeAfterFailover.
private void testRemoveCacheForPointwiseEdgeAfterFailover(
        TestingBlobWriter blobWriter, int expectedBefore, int expectedAfter) throws Exception {
    final JobID jobId = new JobID();
    final JobVertex v1 = ExecutionGraphTestUtils.createNoOpVertex("v1", PARALLELISM);
    final JobVertex v2 = ExecutionGraphTestUtils.createNoOpVertex("v2", PARALLELISM);
    final DefaultScheduler scheduler =
            createSchedulerAndDeploy(jobId, v1, v2, DistributionPattern.POINTWISE, blobWriter);
    final ExecutionGraph executionGraph = scheduler.getExecutionGraph();

    // ShuffleDescriptors should be cached during the deployment
    final ShuffleDescriptor[] shuffleDescriptors =
            deserializeShuffleDescriptors(
                    getConsumedCachedShuffleDescriptor(executionGraph, v2), jobId, blobWriter);
    assertEquals(1, shuffleDescriptors.length);
    assertEquals(expectedBefore, blobWriter.numberOfBlobs());

    triggerExceptionAndComplete(executionGraph, v1, v2);
    ioExecutor.triggerAll();

    // The cache of the first upstream task should be removed during
    // ExecutionVertex#resetForNewExecution
    assertNull(getConsumedCachedShuffleDescriptor(executionGraph, v2, 0));

    // The cache of the other upstream tasks should stay
    final ShuffleDescriptor[] shuffleDescriptorsForOtherVertex =
            deserializeShuffleDescriptors(
                    getConsumedCachedShuffleDescriptor(executionGraph, v2, 1), jobId, blobWriter);
    assertEquals(1, shuffleDescriptorsForOtherVertex.length);
    assertEquals(expectedAfter, blobWriter.numberOfBlobs());
}
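This method is private and parameterized so the same scenario can be exercised with and without offloading the serialized ShuffleDescriptors to the blob store. A hedged sketch of how public @Test entry points might drive it; the TestingBlobWriter minimum-offloading-size constructor and the concrete blob counts are illustrative assumptions, not the values from the actual Flink test:

@Test
public void testRemoveNonOffloadedCacheForPointwiseEdgeAfterFailover() throws Exception {
    // a huge offloading threshold keeps all descriptors in memory,
    // so no blobs are expected before or after the failover (assumed counts)
    testRemoveCacheForPointwiseEdgeAfterFailover(new TestingBlobWriter(Integer.MAX_VALUE), 0, 0);
}

@Test
public void testRemoveOffloadedCacheForPointwiseEdgeAfterFailover() throws Exception {
    // a zero threshold offloads every cached descriptor group; the failed
    // upstream task's blob should be gone after the failover (assumed counts)
    testRemoveCacheForPointwiseEdgeAfterFailover(new TestingBlobWriter(0), PARALLELISM, PARALLELISM - 1);
}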
Use of org.apache.flink.runtime.scheduler.DefaultScheduler in project flink by apache.
The class RemoveCachedShuffleDescriptorTest, method testRemoveCacheForPointwiseEdgeAfterFinished.
private void testRemoveCacheForPointwiseEdgeAfterFinished(
        TestingBlobWriter blobWriter, int expectedBefore, int expectedAfter) throws Exception {
    final JobID jobId = new JobID();
    final JobVertex v1 = ExecutionGraphTestUtils.createNoOpVertex("v1", PARALLELISM);
    final JobVertex v2 = ExecutionGraphTestUtils.createNoOpVertex("v2", PARALLELISM);
    final DefaultScheduler scheduler =
            createSchedulerAndDeploy(jobId, v1, v2, DistributionPattern.POINTWISE, blobWriter);
    final ExecutionGraph executionGraph = scheduler.getExecutionGraph();

    // ShuffleDescriptors should be cached during the deployment
    final ShuffleDescriptor[] shuffleDescriptors =
            deserializeShuffleDescriptors(
                    getConsumedCachedShuffleDescriptor(executionGraph, v2), jobId, blobWriter);
    assertEquals(1, shuffleDescriptors.length);
    assertEquals(expectedBefore, blobWriter.numberOfBlobs());

    // For the pointwise edge, we just transition the first downstream task to FINISHED
    final ExecutionVertex ev21 =
            Objects.requireNonNull(executionGraph.getJobVertex(v2.getID())).getTaskVertices()[0];
    CompletableFuture.runAsync(
                    () -> transitionTaskToFinished(executionGraph, ev21), mainThreadExecutor)
            .join();
    ioExecutor.triggerAll();

    // The cache of the first upstream task should be removed since its partition is released
    assertNull(getConsumedCachedShuffleDescriptor(executionGraph, v2, 0));

    // The cache of the other upstream tasks should stay
    final ShuffleDescriptor[] shuffleDescriptorsForOtherVertex =
            deserializeShuffleDescriptors(
                    getConsumedCachedShuffleDescriptor(executionGraph, v2, 1), jobId, blobWriter);
    assertEquals(1, shuffleDescriptorsForOtherVertex.length);
    assertEquals(expectedAfter, blobWriter.numberOfBlobs());
}
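The transitionTaskToFinished helper is referenced but not shown. A plausible sketch, assuming it simply reports the attempt as FINISHED through the ExecutionGraph; the real utility in the Flink test may differ:

private static void transitionTaskToFinished(ExecutionGraph executionGraph, ExecutionVertex vertex) {
    // marking the consumer FINISHED lets the scheduler release the consumed
    // partition, which in turn evicts the producer's cached descriptor
    executionGraph.updateState(
            new TaskExecutionStateTransition(
                    new TaskExecutionState(
                            vertex.getCurrentExecutionAttempt().getAttemptId(),
                            ExecutionState.FINISHED)));
}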
Use of org.apache.flink.runtime.scheduler.DefaultScheduler in project flink by apache.
The class RemoveCachedShuffleDescriptorTest, method testRemoveCacheForAllToAllEdgeAfterFailover.
private void testRemoveCacheForAllToAllEdgeAfterFailover(
        TestingBlobWriter blobWriter, int expectedBefore, int expectedAfter) throws Exception {
    final JobID jobId = new JobID();
    final JobVertex v1 = ExecutionGraphTestUtils.createNoOpVertex("v1", PARALLELISM);
    final JobVertex v2 = ExecutionGraphTestUtils.createNoOpVertex("v2", PARALLELISM);
    final DefaultScheduler scheduler =
            createSchedulerAndDeploy(jobId, v1, v2, DistributionPattern.ALL_TO_ALL, blobWriter);
    final ExecutionGraph executionGraph = scheduler.getExecutionGraph();

    // ShuffleDescriptors should be cached during the deployment
    final ShuffleDescriptor[] shuffleDescriptors =
            deserializeShuffleDescriptors(
                    getConsumedCachedShuffleDescriptor(executionGraph, v2), jobId, blobWriter);
    assertEquals(PARALLELISM, shuffleDescriptors.length);
    assertEquals(expectedBefore, blobWriter.numberOfBlobs());

    triggerGlobalFailoverAndComplete(scheduler, v1);
    ioExecutor.triggerAll();

    // Cache should be removed during ExecutionVertex#resetForNewExecution
    assertNull(getConsumedCachedShuffleDescriptor(executionGraph, v2));
    assertEquals(expectedAfter, blobWriter.numberOfBlobs());
}
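The triggerGlobalFailoverAndComplete helper is likewise only referenced. A hedged sketch of its core, assuming the test drives the scheduler through the same mainThreadExecutor used above; the acknowledgment of cancellation for the individual upstream attempts is deliberately elided:

private void triggerGlobalFailoverAndComplete(DefaultScheduler scheduler, JobVertex upstream) {
    CompletableFuture.runAsync(
                    () -> {
                        // a global failure resets every ExecutionVertex via
                        // resetForNewExecution, dropping all cached descriptors
                        scheduler.handleGlobalFailure(new Exception("test global failure"));
                        // ...complete cancellation of the upstream attempts here
                        // so the restart can proceed (details elided)
                    },
                    mainThreadExecutor)
            .join();
}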