Example 1 with TestingJobMasterPartitionTracker

use of org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker in project flink by apache.

The class ExecutionGraphPartitionReleaseTest, method testStrategyNotifiedOfUnFinishedVertices:

@Test
public void testStrategyNotifiedOfUnFinishedVertices() throws Exception {
    // setup a pipeline of 2 failover regions (f1 -> f2), where
    // f1 is just a source
    // f2 consists of 3 operators (o1,o2,o3), where o1 consumes f1, and o2/o3 consume o1
    final JobVertex sourceVertex = ExecutionGraphTestUtils.createNoOpVertex("source", 1);
    final JobVertex operator1Vertex = ExecutionGraphTestUtils.createNoOpVertex("operator1", 1);
    final JobVertex operator2Vertex = ExecutionGraphTestUtils.createNoOpVertex("operator2", 1);
    final JobVertex operator3Vertex = ExecutionGraphTestUtils.createNoOpVertex("operator3", 1);
    operator1Vertex.connectNewDataSetAsInput(sourceVertex, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    operator2Vertex.connectNewDataSetAsInput(operator1Vertex, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    operator3Vertex.connectNewDataSetAsInput(operator1Vertex, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    // setup partition tracker to intercept partition release calls
    final TestingJobMasterPartitionTracker partitionTracker = new TestingJobMasterPartitionTracker();
    final Queue<ResultPartitionID> releasedPartitions = new ArrayDeque<>();
    partitionTracker.setStopTrackingAndReleasePartitionsConsumer(partitionIds -> releasedPartitions.add(partitionIds.iterator().next()));
    final SchedulerBase scheduler = createScheduler(partitionTracker, sourceVertex, operator1Vertex, operator2Vertex, operator3Vertex);
    final ExecutionGraph executionGraph = scheduler.getExecutionGraph();
    mainThreadExecutor.execute(() -> {
        final Execution sourceExecution = getCurrentExecution(sourceVertex, executionGraph);
        // finish the source; this should not result in any release calls since the
        // consumer o1 was not finished
        scheduler.updateTaskExecutionState(new TaskExecutionState(sourceExecution.getAttemptId(), ExecutionState.FINISHED));
        assertThat(releasedPartitions, empty());
    });
    mainThreadExecutor.execute(() -> {
        final Execution operator1Execution = getCurrentExecution(operator1Vertex, executionGraph);
        // mark o1's partitions as available and finish o1; this should not result in any
        // release calls since not all operators of the pipelined region are finished
        for (final IntermediateResultPartitionID partitionId : operator1Execution.getVertex().getProducedPartitions().keySet()) {
            scheduler.notifyPartitionDataAvailable(new ResultPartitionID(partitionId, operator1Execution.getAttemptId()));
        }
        scheduler.updateTaskExecutionState(new TaskExecutionState(operator1Execution.getAttemptId(), ExecutionState.FINISHED));
        assertThat(releasedPartitions, empty());
    });
    mainThreadExecutor.execute(() -> {
        final Execution operator2Execution = getCurrentExecution(operator2Vertex, executionGraph);
        // finish o2; this should not result in any release calls since o3 was not
        // finished
        scheduler.updateTaskExecutionState(new TaskExecutionState(operator2Execution.getAttemptId(), ExecutionState.FINISHED));
        assertThat(releasedPartitions, empty());
    });
    mainThreadExecutor.execute(() -> {
        final Execution operator2Execution = getCurrentExecution(operator2Vertex, executionGraph);
        // reset o2
        operator2Execution.getVertex().resetForNewExecution();
        assertThat(releasedPartitions, empty());
    });
    mainThreadExecutor.execute(() -> {
        final Execution operator3Execution = getCurrentExecution(operator3Vertex, executionGraph);
        // finish o3; this should not result in any release calls since o2 was reset
        scheduler.updateTaskExecutionState(new TaskExecutionState(operator3Execution.getAttemptId(), ExecutionState.FINISHED));
        assertThat(releasedPartitions, empty());
    });
}
Also used : JobVertex(org.apache.flink.runtime.jobgraph.JobVertex) TestingJobMasterPartitionTracker(org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker) SchedulerBase(org.apache.flink.runtime.scheduler.SchedulerBase) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) ResultPartitionID(org.apache.flink.runtime.io.network.partition.ResultPartitionID) ArrayDeque(java.util.ArrayDeque) TaskExecutionState(org.apache.flink.runtime.taskmanager.TaskExecutionState) Test(org.junit.Test)
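
Distilled from the test above, a minimal self-contained sketch of the interception pattern on its own. The scheduler wiring is omitted, and the class and test names are illustrative rather than taken from the Flink sources; only the tracker setter shown in the example is assumed.

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.empty;

import java.util.ArrayDeque;
import java.util.Queue;

import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker;
import org.junit.Test;

public class PartitionReleaseInterceptionSketch {

    @Test
    public void trackerRecordsReleasedPartitions() {
        // Stub the release hook so every partition whose tracking is stopped-and-released
        // lands in a queue that assertions can inspect.
        final TestingJobMasterPartitionTracker partitionTracker =
                new TestingJobMasterPartitionTracker();
        final Queue<ResultPartitionID> releasedPartitions = new ArrayDeque<>();
        partitionTracker.setStopTrackingAndReleasePartitionsConsumer(releasedPartitions::addAll);

        // In the real test the tracker is handed to the scheduler and the job is driven
        // through state transitions; nothing has been released at this point.
        assertThat(releasedPartitions, empty());
    }
}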

Example 2 with TestingJobMasterPartitionTracker

use of org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker in project flink by apache.

The class ExecutionGraphResultPartitionAvailabilityCheckerTest, method testPartitionAvailabilityCheck:

@Test
public void testPartitionAvailabilityCheck() {
    final IntermediateResultPartitionID irp1ID = new IntermediateResultPartitionID();
    final IntermediateResultPartitionID irp2ID = new IntermediateResultPartitionID();
    final IntermediateResultPartitionID irp3ID = new IntermediateResultPartitionID();
    final IntermediateResultPartitionID irp4ID = new IntermediateResultPartitionID();
    final Map<IntermediateResultPartitionID, Boolean> expectedAvailability = new HashMap<IntermediateResultPartitionID, Boolean>() {

        {
            put(irp1ID, true);
            put(irp2ID, false);
            put(irp3ID, false);
            put(irp4ID, true);
        }
    };
    // let the partition tracker respect the expected availability result
    final TestingJobMasterPartitionTracker partitionTracker = new TestingJobMasterPartitionTracker();
    partitionTracker.setIsPartitionTrackedFunction(rpID -> expectedAvailability.get(rpID.getPartitionId()));
    // the execution attempt ID should make no difference in this case
    final Function<IntermediateResultPartitionID, ResultPartitionID> partitionIDMapper = intermediateResultPartitionID -> new ResultPartitionID(intermediateResultPartitionID, new ExecutionAttemptID());
    final ResultPartitionAvailabilityChecker resultPartitionAvailabilityChecker = new ExecutionGraphResultPartitionAvailabilityChecker(partitionIDMapper, partitionTracker);
    for (IntermediateResultPartitionID irpID : expectedAvailability.keySet()) {
        assertEquals(expectedAvailability.get(irpID), resultPartitionAvailabilityChecker.isAvailable(irpID));
    }
}
Also used : ResultPartitionAvailabilityChecker(org.apache.flink.runtime.executiongraph.failover.flip1.ResultPartitionAvailabilityChecker) TestingJobMasterPartitionTracker(org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) ResultPartitionID(org.apache.flink.runtime.io.network.partition.ResultPartitionID) Map(java.util.Map) TestLogger(org.apache.flink.util.TestLogger) Test(org.junit.Test) HashMap(java.util.HashMap) Function(java.util.function.Function) Assert.assertEquals(org.junit.Assert.assertEquals)
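
The same stubbing idea reduced to the tracker alone, as a hedged sketch. It assumes that the stub's isPartitionTracked(...) delegates to the function registered via setIsPartitionTrackedFunction, which is what the availability checker in the test above relies on indirectly; the class and test names are illustrative.

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker;
import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;
import org.junit.Test;

public class PartitionTrackedFunctionSketch {

    @Test
    public void trackedStateIsAnsweredFromMap() {
        final IntermediateResultPartitionID trackedId = new IntermediateResultPartitionID();
        final IntermediateResultPartitionID untrackedId = new IntermediateResultPartitionID();

        final Map<IntermediateResultPartitionID, Boolean> expectedAvailability = new HashMap<>();
        expectedAvailability.put(trackedId, true);
        expectedAvailability.put(untrackedId, false);

        // The configured function receives a ResultPartitionID and only inspects its
        // intermediate partition ID, mirroring the test above.
        final TestingJobMasterPartitionTracker partitionTracker =
                new TestingJobMasterPartitionTracker();
        partitionTracker.setIsPartitionTrackedFunction(
                rpID -> expectedAvailability.get(rpID.getPartitionId()));

        // Assumption: the stub's isPartitionTracked(...) delegates to the function above.
        assertTrue(partitionTracker.isPartitionTracked(
                new ResultPartitionID(trackedId, new ExecutionAttemptID())));
        assertFalse(partitionTracker.isPartitionTracked(
                new ResultPartitionID(untrackedId, new ExecutionAttemptID())));
    }
}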

Example 3 with TestingJobMasterPartitionTracker

use of org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker in project flink by apache.

The class ExecutionPartitionLifecycleTest, method testPartitionTrackingForStateTransition:

private void testPartitionTrackingForStateTransition(final Consumer<Execution> stateTransition, final PartitionReleaseResult partitionReleaseResult) throws Exception {
    CompletableFuture<Tuple2<ResourceID, ResultPartitionDeploymentDescriptor>> partitionStartTrackingFuture = new CompletableFuture<>();
    CompletableFuture<Collection<ResultPartitionID>> partitionStopTrackingFuture = new CompletableFuture<>();
    CompletableFuture<Collection<ResultPartitionID>> partitionStopTrackingAndReleaseFuture = new CompletableFuture<>();
    final TestingJobMasterPartitionTracker partitionTracker = new TestingJobMasterPartitionTracker();
    partitionTracker.setStartTrackingPartitionsConsumer((resourceID, resultPartitionDeploymentDescriptor) -> partitionStartTrackingFuture.complete(Tuple2.of(resourceID, resultPartitionDeploymentDescriptor)));
    partitionTracker.setStopTrackingPartitionsConsumer(partitionStopTrackingFuture::complete);
    partitionTracker.setStopTrackingAndReleasePartitionsConsumer(partitionStopTrackingAndReleaseFuture::complete);
    setupExecutionGraphAndStartRunningJob(ResultPartitionType.BLOCKING, partitionTracker, new SimpleAckingTaskManagerGateway(), ShuffleTestUtils.DEFAULT_SHUFFLE_MASTER);
    Tuple2<ResourceID, ResultPartitionDeploymentDescriptor> startTrackingCall = partitionStartTrackingFuture.get();
    assertThat(startTrackingCall.f0, equalTo(taskExecutorResourceId));
    assertThat(startTrackingCall.f1, equalTo(descriptor));
    stateTransition.accept(execution);
    switch(partitionReleaseResult) {
        case NONE:
            assertFalse(partitionStopTrackingFuture.isDone());
            assertFalse(partitionStopTrackingAndReleaseFuture.isDone());
            break;
        case STOP_TRACKING:
            assertTrue(partitionStopTrackingFuture.isDone());
            assertFalse(partitionStopTrackingAndReleaseFuture.isDone());
            final Collection<ResultPartitionID> stopTrackingCall = partitionStopTrackingFuture.get();
            assertEquals(Collections.singletonList(descriptor.getShuffleDescriptor().getResultPartitionID()), stopTrackingCall);
            break;
        case STOP_TRACKING_AND_RELEASE:
            assertFalse(partitionStopTrackingFuture.isDone());
            assertTrue(partitionStopTrackingAndReleaseFuture.isDone());
            final Collection<ResultPartitionID> stopTrackingAndReleaseCall = partitionStopTrackingAndReleaseFuture.get();
            assertEquals(Collections.singletonList(descriptor.getShuffleDescriptor().getResultPartitionID()), stopTrackingAndReleaseCall);
            break;
    }
}
Also used : ResultPartitionDeploymentDescriptor(org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor) TestingJobMasterPartitionTracker(org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker) SimpleAckingTaskManagerGateway(org.apache.flink.runtime.executiongraph.utils.SimpleAckingTaskManagerGateway) CompletableFuture(java.util.concurrent.CompletableFuture) ResourceID(org.apache.flink.runtime.clusterframework.types.ResourceID) Tuple2(org.apache.flink.api.java.tuple.Tuple2) Collection(java.util.Collection) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) ResultPartitionID(org.apache.flink.runtime.io.network.partition.ResultPartitionID)
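
The future-based interception used above, reduced to a single hook as an illustrative sketch; the execution-graph and deployment machinery of the real test is omitted, and the class and test names are invented for the example.

import static org.junit.Assert.assertFalse;

import java.util.Collection;
import java.util.concurrent.CompletableFuture;

import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker;
import org.junit.Test;

public class PartitionStopTrackingFutureSketch {

    @Test
    public void stopTrackingHookCompletesFuture() {
        final CompletableFuture<Collection<ResultPartitionID>> stopTrackingFuture =
                new CompletableFuture<>();

        // Route "stop tracking (without release)" calls into a future the test can query.
        final TestingJobMasterPartitionTracker partitionTracker =
                new TestingJobMasterPartitionTracker();
        partitionTracker.setStopTrackingPartitionsConsumer(stopTrackingFuture::complete);

        // Nothing has gone through the tracker yet, so the future is still pending; the
        // real test drives an Execution state transition and then checks isDone()/get().
        assertFalse(stopTrackingFuture.isDone());
    }
}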

Example 4 with TestingJobMasterPartitionTracker

use of org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker in project flink by apache.

The class DefaultSchedulerTest, method setUp:

@Before
public void setUp() throws Exception {
    executor = Executors.newSingleThreadExecutor();
    scheduledExecutorService = new DirectScheduledExecutorService();
    configuration = new Configuration();
    testRestartBackoffTimeStrategy = new TestRestartBackoffTimeStrategy(true, 0);
    testExecutionVertexOperations = new TestExecutionVertexOperationsDecorator(new DefaultExecutionVertexOperations());
    executionVertexVersioner = new ExecutionVertexVersioner();
    executionSlotAllocatorFactory = new TestExecutionSlotAllocatorFactory();
    testExecutionSlotAllocator = executionSlotAllocatorFactory.getTestExecutionSlotAllocator();
    shuffleMaster = new TestingShuffleMaster();
    partitionTracker = new TestingJobMasterPartitionTracker();
    timeout = Time.seconds(60);
}
Also used : Configuration(org.apache.flink.configuration.Configuration) TestRestartBackoffTimeStrategy(org.apache.flink.runtime.executiongraph.failover.flip1.TestRestartBackoffTimeStrategy) DirectScheduledExecutorService(org.apache.flink.runtime.testutils.DirectScheduledExecutorService) TestingJobMasterPartitionTracker(org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker) TestingShuffleMaster(org.apache.flink.runtime.shuffle.TestingShuffleMaster) Before(org.junit.Before)
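
A small sketch of how such a fixture is typically combined with per-test stubbing, under the assumption that each test registers only the hooks it needs; the class, field, and test names here are illustrative.

import java.util.ArrayList;
import java.util.List;

import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker;
import org.junit.Before;
import org.junit.Test;

public class TrackerFixtureSketch {

    private TestingJobMasterPartitionTracker partitionTracker;

    @Before
    public void setUp() {
        // Plain tracker; tests that do not care about partition release can use it as-is.
        partitionTracker = new TestingJobMasterPartitionTracker();
    }

    @Test
    public void testThatInspectsReleases() {
        final List<ResultPartitionID> released = new ArrayList<>();
        partitionTracker.setStopTrackingAndReleasePartitionsConsumer(released::addAll);

        // ... build the scheduler under test with partitionTracker, run the scenario,
        // then assert on the contents of `released`.
    }
}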

Example 5 with TestingJobMasterPartitionTracker

use of org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker in project flink by apache.

The class JobMasterTest, method testTaskExecutorNotReleasedOnFailedAllocationIfPartitionIsAllocated:

@Test
public void testTaskExecutorNotReleasedOnFailedAllocationIfPartitionIsAllocated() throws Exception {
    final JobManagerSharedServices jobManagerSharedServices = new TestingJobManagerSharedServicesBuilder().build();
    final JobGraph jobGraph = JobGraphTestUtils.singleNoOpJobGraph();
    final LocalUnresolvedTaskManagerLocation taskManagerUnresolvedLocation = new LocalUnresolvedTaskManagerLocation();
    final AtomicBoolean isTrackingPartitions = new AtomicBoolean(true);
    final TestingJobMasterPartitionTracker partitionTracker = new TestingJobMasterPartitionTracker();
    partitionTracker.setIsTrackingPartitionsForFunction(ignored -> isTrackingPartitions.get());
    final JobMaster jobMaster = new JobMasterBuilder(jobGraph, rpcService).withConfiguration(configuration).withHighAvailabilityServices(haServices).withJobManagerSharedServices(jobManagerSharedServices).withHeartbeatServices(heartbeatServices).withPartitionTrackerFactory(ignored -> partitionTracker).createJobMaster();
    final CompletableFuture<JobID> disconnectTaskExecutorFuture = new CompletableFuture<>();
    final CompletableFuture<AllocationID> freedSlotFuture = new CompletableFuture<>();
    final TestingTaskExecutorGateway testingTaskExecutorGateway = new TestingTaskExecutorGatewayBuilder().setFreeSlotFunction((allocationID, throwable) -> {
        freedSlotFuture.complete(allocationID);
        return CompletableFuture.completedFuture(Acknowledge.get());
    }).setDisconnectJobManagerConsumer((jobID, throwable) -> disconnectTaskExecutorFuture.complete(jobID)).createTestingTaskExecutorGateway();
    try {
        jobMaster.start();
        final JobMasterGateway jobMasterGateway = jobMaster.getSelfGateway(JobMasterGateway.class);
        final Collection<SlotOffer> slotOffers = registerSlotsAtJobMaster(1, jobMasterGateway, jobGraph.getJobID(), testingTaskExecutorGateway, taskManagerUnresolvedLocation);
        // check that we accepted the offered slot
        assertThat(slotOffers, hasSize(1));
        final AllocationID allocationId = slotOffers.iterator().next().getAllocationId();
        jobMasterGateway.failSlot(taskManagerUnresolvedLocation.getResourceID(), allocationId, new FlinkException("Fail allocation test exception"));
        // we should free the slot, but not disconnect from the TaskExecutor as we still have an
        // allocated partition
        assertThat(freedSlotFuture.get(), equalTo(allocationId));
        // trigger some request to ensure that the slotAllocationFailure processing is
        // complete
        jobMasterGateway.requestJobStatus(Time.seconds(5)).get();
        assertThat(disconnectTaskExecutorFuture.isDone(), is(false));
    } finally {
        RpcUtils.terminateRpcEndpoint(jobMaster, testingTimeout);
    }
}
Also used : SlotOffer(org.apache.flink.runtime.taskexecutor.slot.SlotOffer) TestingJobMasterPartitionTracker(org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker) AllocationID(org.apache.flink.runtime.clusterframework.types.AllocationID) TestingTaskExecutorGatewayBuilder(org.apache.flink.runtime.taskexecutor.TestingTaskExecutorGatewayBuilder) JobMasterBuilder(org.apache.flink.runtime.jobmaster.utils.JobMasterBuilder) FlinkException(org.apache.flink.util.FlinkException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) CompletableFuture(java.util.concurrent.CompletableFuture) LocalUnresolvedTaskManagerLocation(org.apache.flink.runtime.taskmanager.LocalUnresolvedTaskManagerLocation) TestingTaskExecutorGateway(org.apache.flink.runtime.taskexecutor.TestingTaskExecutorGateway) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
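
Reduced to the tracker stub alone, an illustrative sketch of the toggle used above: an AtomicBoolean controls, at any point during the test, whether the tracker reports that it is still tracking partitions for the task executor. The class and factory method are invented for the sketch; only the setter comes from the example.

import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker;

public class TrackingTogglesDisconnectSketch {

    // Flip this to simulate "the task executor still has / no longer has tracked partitions".
    private final AtomicBoolean isTrackingPartitions = new AtomicBoolean(true);

    TestingJobMasterPartitionTracker createTracker() {
        final TestingJobMasterPartitionTracker partitionTracker =
                new TestingJobMasterPartitionTracker();
        // While this returns true, the JobMaster under test should keep its connection to
        // the task executor alive even after a failed slot allocation, as asserted above.
        partitionTracker.setIsTrackingPartitionsForFunction(
                ignored -> isTrackingPartitions.get());
        return partitionTracker;
    }
}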

Aggregations

TestingJobMasterPartitionTracker (org.apache.flink.runtime.io.network.partition.TestingJobMasterPartitionTracker) 7
ResultPartitionID (org.apache.flink.runtime.io.network.partition.ResultPartitionID) 6
IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) 5
Test (org.junit.Test) 5
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex) 4
ArrayDeque (java.util.ArrayDeque) 3
Collection (java.util.Collection) 3
CompletableFuture (java.util.concurrent.CompletableFuture) 3
TaskExecutionState (org.apache.flink.runtime.taskmanager.TaskExecutionState) 3
HashMap (java.util.HashMap) 2
Map (java.util.Map) 2
Function (java.util.function.Function) 2
Configuration (org.apache.flink.configuration.Configuration) 2
ResourceID (org.apache.flink.runtime.clusterframework.types.ResourceID) 2
ResultPartitionDeploymentDescriptor (org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor) 2
SchedulerBase (org.apache.flink.runtime.scheduler.SchedulerBase) 2
TestLogger (org.apache.flink.util.TestLogger) 2
Before (org.junit.Before) 2
File (java.io.File) 1
IOException (java.io.IOException) 1