Example 1 with SerializedValue

use of org.apache.flink.util.SerializedValue in project flink by apache.

the class TaskExecutorTest method testSubmitTaskBeforeAcceptSlot.

/**
	 * Tests that the task executor handles a SubmitTask call that arrives before the
	 * OfferSlot response has been acknowledged.
	 */
@Test
public void testSubmitTaskBeforeAcceptSlot() throws Exception {
    final JobID jobId = new JobID();
    final TestingSerialRpcService rpc = new TestingSerialRpcService();
    final Configuration configuration = new Configuration();
    final TaskManagerConfiguration taskManagerConfiguration = TaskManagerConfiguration.fromConfiguration(configuration);
    final ResourceID resourceId = new ResourceID("foobar");
    final TaskManagerLocation taskManagerLocation = new TaskManagerLocation(resourceId, InetAddress.getLoopbackAddress(), 1234);
    final TestingHighAvailabilityServices haServices = new TestingHighAvailabilityServices();
    final TimerService<AllocationID> timerService = mock(TimerService.class);
    final TaskSlotTable taskSlotTable = new TaskSlotTable(Arrays.asList(mock(ResourceProfile.class), mock(ResourceProfile.class)), timerService);
    final JobManagerTable jobManagerTable = new JobManagerTable();
    final JobLeaderService jobLeaderService = new JobLeaderService(taskManagerLocation);
    final TestingFatalErrorHandler testingFatalErrorHandler = new TestingFatalErrorHandler();
    final String resourceManagerAddress = "rm";
    final UUID resourceManagerLeaderId = UUID.randomUUID();
    final String jobManagerAddress = "jm";
    final UUID jobManagerLeaderId = UUID.randomUUID();
    final LeaderRetrievalService resourceManagerLeaderRetrievalService = new TestingLeaderRetrievalService(resourceManagerAddress, resourceManagerLeaderId);
    final LeaderRetrievalService jobManagerLeaderRetrievalService = new TestingLeaderRetrievalService(jobManagerAddress, jobManagerLeaderId);
    haServices.setResourceManagerLeaderRetriever(resourceManagerLeaderRetrievalService);
    haServices.setJobMasterLeaderRetriever(jobId, jobManagerLeaderRetrievalService);
    final ResourceManagerGateway resourceManagerGateway = mock(ResourceManagerGateway.class);
    final InstanceID registrationId = new InstanceID();
    when(resourceManagerGateway.registerTaskExecutor(eq(resourceManagerLeaderId), any(String.class), eq(resourceId), any(SlotReport.class), any(Time.class))).thenReturn(FlinkCompletableFuture.<RegistrationResponse>completed(new TaskExecutorRegistrationSuccess(registrationId, 1000L)));
    final ResourceID jmResourceId = new ResourceID(jobManagerAddress);
    final int blobPort = 42;
    final AllocationID allocationId1 = new AllocationID();
    final AllocationID allocationId2 = new AllocationID();
    final SlotOffer offer1 = new SlotOffer(allocationId1, 0, ResourceProfile.UNKNOWN);
    final JobMasterGateway jobMasterGateway = mock(JobMasterGateway.class);
    when(jobMasterGateway.registerTaskManager(any(String.class), eq(taskManagerLocation), eq(jobManagerLeaderId), any(Time.class))).thenReturn(FlinkCompletableFuture.<RegistrationResponse>completed(new JMTMRegistrationSuccess(jmResourceId, blobPort)));
    when(jobMasterGateway.getHostname()).thenReturn(jobManagerAddress);
    rpc.registerGateway(resourceManagerAddress, resourceManagerGateway);
    rpc.registerGateway(jobManagerAddress, jobMasterGateway);
    final LibraryCacheManager libraryCacheManager = mock(LibraryCacheManager.class);
    when(libraryCacheManager.getClassLoader(eq(jobId))).thenReturn(getClass().getClassLoader());
    final JobManagerConnection jobManagerConnection = new JobManagerConnection(jobId, jmResourceId, jobMasterGateway, jobManagerLeaderId, mock(TaskManagerActions.class), mock(CheckpointResponder.class), libraryCacheManager, mock(ResultPartitionConsumableNotifier.class), mock(PartitionProducerStateChecker.class));
    jobManagerTable.put(jobId, jobManagerConnection);
    try {
        final TaskExecutor taskManager = new TaskExecutor(taskManagerConfiguration, taskManagerLocation, rpc, mock(MemoryManager.class), mock(IOManager.class), mock(NetworkEnvironment.class), haServices, mock(HeartbeatServices.class, RETURNS_MOCKS), mock(MetricRegistry.class), mock(TaskManagerMetricGroup.class), mock(BroadcastVariableManager.class), mock(FileCache.class), taskSlotTable, jobManagerTable, jobLeaderService, testingFatalErrorHandler);
        taskManager.start();
        taskSlotTable.allocateSlot(0, jobId, allocationId1, Time.milliseconds(10000L));
        taskSlotTable.allocateSlot(1, jobId, allocationId2, Time.milliseconds(10000L));
        final JobVertexID jobVertexId = new JobVertexID();
        JobInformation jobInformation = new JobInformation(jobId, name.getMethodName(), new SerializedValue<>(new ExecutionConfig()), new Configuration(), Collections.<BlobKey>emptyList(), Collections.<URL>emptyList());
        TaskInformation taskInformation = new TaskInformation(jobVertexId, "test task", 1, 1, TestInvokable.class.getName(), new Configuration());
        SerializedValue<JobInformation> serializedJobInformation = new SerializedValue<>(jobInformation);
        SerializedValue<TaskInformation> serializedJobVertexInformation = new SerializedValue<>(taskInformation);
        final TaskDeploymentDescriptor tdd = new TaskDeploymentDescriptor(serializedJobInformation, serializedJobVertexInformation, new ExecutionAttemptID(), allocationId1, 0, 0, 0, null, Collections.<ResultPartitionDeploymentDescriptor>emptyList(), Collections.<InputGateDeploymentDescriptor>emptyList());
        CompletableFuture<Iterable<SlotOffer>> offerResultFuture = new FlinkCompletableFuture<>();
        // submit task first and then return acceptance response
        when(jobMasterGateway.offerSlots(any(ResourceID.class), any(Iterable.class), eq(jobManagerLeaderId), any(Time.class))).thenReturn(offerResultFuture);
        // we have to add the job after starting the TaskExecutor, because otherwise the service
        // has not been properly started yet. This will also offer the slots to the job master
        jobLeaderService.addJob(jobId, jobManagerAddress);
        verify(jobMasterGateway).offerSlots(any(ResourceID.class), any(Iterable.class), eq(jobManagerLeaderId), any(Time.class));
        // submit the task without having acknowledged the offered slots
        taskManager.submitTask(tdd, jobManagerLeaderId);
        // acknowledge the offered slots
        offerResultFuture.complete(Collections.singleton(offer1));
        verify(resourceManagerGateway).notifySlotAvailable(eq(resourceManagerLeaderId), eq(registrationId), eq(new SlotID(resourceId, 1)));
        assertTrue(taskSlotTable.existsActiveSlot(jobId, allocationId1));
        assertFalse(taskSlotTable.existsActiveSlot(jobId, allocationId2));
        assertTrue(taskSlotTable.isSlotFree(1));
        // check if a concurrent error occurred
        testingFatalErrorHandler.rethrowError();
    } finally {
        rpc.stopService();
    }
}
Also used : Configuration(org.apache.flink.configuration.Configuration) TestingLeaderRetrievalService(org.apache.flink.runtime.leaderelection.TestingLeaderRetrievalService) InstanceID(org.apache.flink.runtime.instance.InstanceID) ResourceManagerGateway(org.apache.flink.runtime.resourcemanager.ResourceManagerGateway) TaskManagerActions(org.apache.flink.runtime.taskmanager.TaskManagerActions) TestingHighAvailabilityServices(org.apache.flink.runtime.highavailability.TestingHighAvailabilityServices) BroadcastVariableManager(org.apache.flink.runtime.broadcast.BroadcastVariableManager) TestingSerialRpcService(org.apache.flink.runtime.rpc.TestingSerialRpcService) PartitionProducerStateChecker(org.apache.flink.runtime.io.network.netty.PartitionProducerStateChecker) TaskDeploymentDescriptor(org.apache.flink.runtime.deployment.TaskDeploymentDescriptor) UUID(java.util.UUID) TestingFatalErrorHandler(org.apache.flink.runtime.util.TestingFatalErrorHandler) SlotOffer(org.apache.flink.runtime.taskexecutor.slot.SlotOffer) TaskInformation(org.apache.flink.runtime.executiongraph.TaskInformation) IOManager(org.apache.flink.runtime.io.disk.iomanager.IOManager) TaskManagerLocation(org.apache.flink.runtime.taskmanager.TaskManagerLocation) MetricRegistry(org.apache.flink.runtime.metrics.MetricRegistry) TaskManagerMetricGroup(org.apache.flink.runtime.metrics.groups.TaskManagerMetricGroup) FileCache(org.apache.flink.runtime.filecache.FileCache) TaskSlotTable(org.apache.flink.runtime.taskexecutor.slot.TaskSlotTable) LeaderRetrievalService(org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService) TestingLeaderRetrievalService(org.apache.flink.runtime.leaderelection.TestingLeaderRetrievalService) NetworkEnvironment(org.apache.flink.runtime.io.network.NetworkEnvironment) JobID(org.apache.flink.api.common.JobID) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) Time(org.apache.flink.api.common.time.Time) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) JobMasterGateway(org.apache.flink.runtime.jobmaster.JobMasterGateway) FlinkCompletableFuture(org.apache.flink.runtime.concurrent.impl.FlinkCompletableFuture) ResourceID(org.apache.flink.runtime.clusterframework.types.ResourceID) ResultPartitionConsumableNotifier(org.apache.flink.runtime.io.network.partition.ResultPartitionConsumableNotifier) HeartbeatServices(org.apache.flink.runtime.heartbeat.HeartbeatServices) JMTMRegistrationSuccess(org.apache.flink.runtime.jobmaster.JMTMRegistrationSuccess) JobInformation(org.apache.flink.runtime.executiongraph.JobInformation) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) CheckpointResponder(org.apache.flink.runtime.taskmanager.CheckpointResponder) AllocationID(org.apache.flink.runtime.clusterframework.types.AllocationID) LibraryCacheManager(org.apache.flink.runtime.execution.librarycache.LibraryCacheManager) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) SerializedValue(org.apache.flink.util.SerializedValue) SlotID(org.apache.flink.runtime.clusterframework.types.SlotID) Test(org.junit.Test)
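
The descriptor in this test carries JobInformation and TaskInformation only as SerializedValue wrappers. As a minimal, self-contained sketch (not part of the test above; the class name is invented for illustration), the round trip through SerializedValue looks like this:

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.util.SerializedValue;

// Minimal sketch (not taken from the test above): round-trips an ExecutionConfig
// through SerializedValue, which is how JobInformation/TaskInformation travel
// inside a TaskDeploymentDescriptor.
public class SerializedValueRoundTrip {

    public static void main(String[] args) throws Exception {
        ExecutionConfig config = new ExecutionConfig();
        config.setParallelism(4);

        // The constructor serializes the value eagerly into a byte array, so the
        // wrapper can cross an RPC boundary without dragging user classes along.
        SerializedValue<ExecutionConfig> serialized = new SerializedValue<>(config);

        // Deserialization needs a class loader that can resolve the value's classes;
        // on the TaskExecutor this would be the job's user-code class loader.
        ExecutionConfig restored =
            serialized.deserializeValue(SerializedValueRoundTrip.class.getClassLoader());

        System.out.println(restored.getParallelism()); // prints 4
    }
}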

Example 2 with SerializedValue

use of org.apache.flink.util.SerializedValue in project flink by apache.

the class TaskManagerTest method testSubmitAndExecuteTask.

@Test
public void testSubmitAndExecuteTask() throws IOException {
    new JavaTestKit(system) {

        {
            ActorGateway taskManager = null;
            final ActorGateway jobManager = TestingUtils.createForwardingActor(system, getTestActor(), Option.<String>empty());
            try {
                taskManager = TestingUtils.createTaskManager(system, jobManager, new Configuration(), true, false);
                final ActorGateway tm = taskManager;
                // handle the registration
                new Within(d) {

                    @Override
                    protected void run() {
                        expectMsgClass(RegistrationMessages.RegisterTaskManager.class);
                        final InstanceID iid = new InstanceID();
                        assertEquals(tm.actor(), getLastSender());
                        tm.tell(new RegistrationMessages.AcknowledgeRegistration(iid, 12345), jobManager);
                    }
                };
                final JobID jid = new JobID();
                final JobVertexID vid = new JobVertexID();
                final ExecutionAttemptID eid = new ExecutionAttemptID();
                final SerializedValue<ExecutionConfig> executionConfig = new SerializedValue<>(new ExecutionConfig());
                final TaskDeploymentDescriptor tdd = createTaskDeploymentDescriptor(jid, "TestJob", vid, eid, executionConfig, "TestTask", 7, 2, 7, 0, new Configuration(), new Configuration(), TestInvokableCorrect.class.getName(), Collections.<ResultPartitionDeploymentDescriptor>emptyList(), Collections.<InputGateDeploymentDescriptor>emptyList(), new ArrayList<BlobKey>(), Collections.<URL>emptyList(), 0);
                new Within(d) {

                    @Override
                    protected void run() {
                        tm.tell(new SubmitTask(tdd), jobManager);
                        // TaskManager should acknowledge the submission
                        // heartbeats may be interleaved
                        long deadline = System.currentTimeMillis() + 10000;
                        do {
                            Object message = receiveOne(d);
                            if (message.equals(Acknowledge.get())) {
                                break;
                            }
                        } while (System.currentTimeMillis() < deadline);
                        // task should have switched to running
                        Object toRunning = new TaskMessages.UpdateTaskExecutionState(new TaskExecutionState(jid, eid, ExecutionState.RUNNING));
                        // task should have switched to finished
                        Object toFinished = new TaskMessages.UpdateTaskExecutionState(new TaskExecutionState(jid, eid, ExecutionState.FINISHED));
                        deadline = System.currentTimeMillis() + 10000;
                        do {
                            Object message = receiveOne(d);
                            if (message.equals(toRunning)) {
                                break;
                            } else if (!(message instanceof TaskManagerMessages.Heartbeat)) {
                                fail("Unexpected message: " + message);
                            }
                        } while (System.currentTimeMillis() < deadline);
                        deadline = System.currentTimeMillis() + 10000;
                        do {
                            Object message = receiveOne(d);
                            if (message.equals(toFinished)) {
                                break;
                            } else if (!(message instanceof TaskManagerMessages.Heartbeat)) {
                                fail("Unexpected message: " + message);
                            }
                        } while (System.currentTimeMillis() < deadline);
                    }
                };
            } finally {
                // shut down the actors
                TestingUtils.stopActor(taskManager);
                TestingUtils.stopActor(jobManager);
            }
        }
    };
}
Also used : RegistrationMessages(org.apache.flink.runtime.messages.RegistrationMessages) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) TaskManagerServicesConfiguration(org.apache.flink.runtime.taskexecutor.TaskManagerServicesConfiguration) Configuration(org.apache.flink.configuration.Configuration) InstanceID(org.apache.flink.runtime.instance.InstanceID) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) SerializedValue(org.apache.flink.util.SerializedValue) BlobKey(org.apache.flink.runtime.blob.BlobKey) TestingTaskManagerMessages(org.apache.flink.runtime.testingUtils.TestingTaskManagerMessages) TaskManagerMessages(org.apache.flink.runtime.messages.TaskManagerMessages) ActorGateway(org.apache.flink.runtime.instance.ActorGateway) AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) TaskDeploymentDescriptor(org.apache.flink.runtime.deployment.TaskDeploymentDescriptor) SubmitTask(org.apache.flink.runtime.messages.TaskMessages.SubmitTask) JavaTestKit(akka.testkit.JavaTestKit) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
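
The wait loops in this test all follow the same idiom: poll messages until a deadline, accept the expected message, and tolerate interleaved heartbeats. A generic sketch of that idiom (not Flink or Akka API; the helper name and parameters are invented, and receive stands in for JavaTestKit#receiveOne):

import java.util.function.Predicate;
import java.util.function.Supplier;

// Generic sketch of the wait-loop idiom used in the test above.
public final class MessageWaiter {

    static Object waitFor(long timeoutMillis,
                          Supplier<Object> receive,
                          Predicate<Object> expected,
                          Predicate<Object> ignorable) {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        do {
            Object message = receive.get();
            if (expected.test(message)) {
                return message;                         // e.g. the RUNNING state update
            } else if (!ignorable.test(message)) {
                throw new AssertionError("Unexpected message: " + message);
            }
            // otherwise: an interleaved heartbeat, keep polling
        } while (System.currentTimeMillis() < deadline);
        throw new AssertionError("Timed out waiting for expected message");
    }
}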

Example 3 with SerializedValue

use of org.apache.flink.util.SerializedValue in project flink by apache.

the class TaskManagerTest method createTaskDeploymentDescriptor.

private static TaskDeploymentDescriptor createTaskDeploymentDescriptor(JobID jobId, String jobName, JobVertexID jobVertexId, ExecutionAttemptID executionAttemptId, SerializedValue<ExecutionConfig> serializedExecutionConfig, String taskName, int numberOfKeyGroups, int subtaskIndex, int parallelism, int attemptNumber, Configuration jobConfiguration, Configuration taskConfiguration, String invokableClassName, Collection<ResultPartitionDeploymentDescriptor> producedPartitions, Collection<InputGateDeploymentDescriptor> inputGates, Collection<BlobKey> requiredJarFiles, Collection<URL> requiredClasspaths, int targetSlotNumber) throws IOException {
    JobInformation jobInformation = new JobInformation(jobId, jobName, serializedExecutionConfig, jobConfiguration, requiredJarFiles, requiredClasspaths);
    TaskInformation taskInformation = new TaskInformation(jobVertexId, taskName, parallelism, numberOfKeyGroups, invokableClassName, taskConfiguration);
    SerializedValue<JobInformation> serializedJobInformation = new SerializedValue<>(jobInformation);
    SerializedValue<TaskInformation> serializedJobVertexInformation = new SerializedValue<>(taskInformation);
    return new TaskDeploymentDescriptor(serializedJobInformation, serializedJobVertexInformation, executionAttemptId, new AllocationID(), subtaskIndex, attemptNumber, targetSlotNumber, null, producedPartitions, inputGates);
}
Also used : JobInformation(org.apache.flink.runtime.executiongraph.JobInformation) TaskInformation(org.apache.flink.runtime.executiongraph.TaskInformation) AllocationID(org.apache.flink.runtime.clusterframework.types.AllocationID) TaskDeploymentDescriptor(org.apache.flink.runtime.deployment.TaskDeploymentDescriptor) SerializedValue(org.apache.flink.util.SerializedValue)
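
On the receiving side, the two wrappers built by this helper are unwrapped again with SerializedValue#deserializeValue. A minimal sketch of that round trip, reusing the same JobInformation constructor call as the helper above (the class name is invented; a real TaskExecutor would use the job's user-code class loader from the LibraryCacheManager rather than the system class loader):

import java.net.URL;
import java.util.Collections;

import org.apache.flink.api.common.ExecutionConfig;
import org.apache.flink.api.common.JobID;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.blob.BlobKey;
import org.apache.flink.runtime.executiongraph.JobInformation;
import org.apache.flink.util.SerializedValue;

// Minimal sketch (not part of TaskManagerTest): serialize a JobInformation as the
// helper above does, then deserialize it the way a receiver would.
public class JobInformationRoundTrip {

    public static void main(String[] args) throws Exception {
        JobInformation jobInformation = new JobInformation(
            new JobID(),
            "TestJob",
            new SerializedValue<>(new ExecutionConfig()),
            new Configuration(),
            Collections.<BlobKey>emptyList(),
            Collections.<URL>emptyList());

        SerializedValue<JobInformation> serialized = new SerializedValue<>(jobInformation);

        // In production the user-code class loader would be used here; the system
        // class loader is enough for this sketch because no user classes are involved.
        JobInformation restored =
            serialized.deserializeValue(JobInformationRoundTrip.class.getClassLoader());

        System.out.println(restored.getJobId());
    }
}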

Example 4 with SerializedValue

use of org.apache.flink.util.SerializedValue in project flink by apache.

the class TaskManagerTest method testFailingScheduleOrUpdateConsumersMessage.

/**
	 * Tests that a failing schedule-or-update-consumers call leads to the failure of the
	 * respective task.
	 *
	 * IMPORTANT: We have to make sure that the invokable's cancel method is called, because only
	 * then is the future completed. We do this by not eagerly deploying consumer tasks and by
	 * requiring the invokable to fill one memory segment. The completed memory segment triggers
	 * the scheduling of the downstream operator since the result partition is pipelined. After
	 * the memory segment has been filled, we block the invokable and wait for the task failure
	 * caused by the failed schedule-or-update-consumers call.
	 */
@Test(timeout = 10000L)
public void testFailingScheduleOrUpdateConsumersMessage() throws Exception {
    new JavaTestKit(system) {

        {
            final Configuration configuration = new Configuration();
            // set the memory segment to the smallest size possible, because we have to fill one
            // memory buffer to trigger the schedule or update consumers message to the downstream
            // operators
            configuration.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SEGMENT_SIZE_KEY, 4096);
            final JobID jid = new JobID();
            final JobVertexID vid = new JobVertexID();
            final ExecutionAttemptID eid = new ExecutionAttemptID();
            final SerializedValue<ExecutionConfig> executionConfig = new SerializedValue<>(new ExecutionConfig());
            final ResultPartitionDeploymentDescriptor resultPartitionDeploymentDescriptor = new ResultPartitionDeploymentDescriptor(new IntermediateDataSetID(), new IntermediateResultPartitionID(), ResultPartitionType.PIPELINED, 1, 1, true);
            final TaskDeploymentDescriptor tdd = createTaskDeploymentDescriptor(jid, "TestJob", vid, eid, executionConfig, "TestTask", 1, 0, 1, 0, new Configuration(), new Configuration(), TestInvokableRecordCancel.class.getName(), Collections.singletonList(resultPartitionDeploymentDescriptor), Collections.<InputGateDeploymentDescriptor>emptyList(), new ArrayList<BlobKey>(), Collections.<URL>emptyList(), 0);
            ActorRef jmActorRef = system.actorOf(Props.create(FailingScheduleOrUpdateConsumersJobManager.class, leaderSessionID), "jobmanager");
            ActorGateway jobManager = new AkkaActorGateway(jmActorRef, leaderSessionID);
            final ActorGateway taskManager = TestingUtils.createTaskManager(system, jobManager, configuration, true, true);
            try {
                TestInvokableRecordCancel.resetGotCanceledFuture();
                Future<Object> result = taskManager.ask(new SubmitTask(tdd), timeout);
                Await.result(result, timeout);
                org.apache.flink.runtime.concurrent.Future<Boolean> cancelFuture = TestInvokableRecordCancel.gotCanceled();
                assertEquals(true, cancelFuture.get());
            } finally {
                TestingUtils.stopActor(taskManager);
                TestingUtils.stopActor(jobManager);
            }
        }
    };
}
Also used : AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) ResultPartitionDeploymentDescriptor(org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor) TaskManagerServicesConfiguration(org.apache.flink.runtime.taskexecutor.TaskManagerServicesConfiguration) Configuration(org.apache.flink.configuration.Configuration) ActorRef(akka.actor.ActorRef) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) BlobKey(org.apache.flink.runtime.blob.BlobKey) ActorGateway(org.apache.flink.runtime.instance.ActorGateway) AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) TaskDeploymentDescriptor(org.apache.flink.runtime.deployment.TaskDeploymentDescriptor) SubmitTask(org.apache.flink.runtime.messages.TaskMessages.SubmitTask) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) SerializedValue(org.apache.flink.util.SerializedValue) IntermediateDataSetID(org.apache.flink.runtime.jobgraph.IntermediateDataSetID) JavaTestKit(akka.testkit.JavaTestKit) JobID(org.apache.flink.api.common.JobID) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) Test(org.junit.Test)

Example 5 with SerializedValue

use of org.apache.flink.util.SerializedValue in project flink by apache.

the class ArchivedExecutionGraphBuilder method build.

public ArchivedExecutionGraph build() {
    Preconditions.checkNotNull(tasks, "Tasks must not be null.");
    JobID jobID = this.jobID != null ? this.jobID : new JobID();
    String jobName = this.jobName != null ? this.jobName : "job_" + RANDOM.nextInt();
    return new ArchivedExecutionGraph(
        jobID,
        jobName,
        tasks,
        verticesInCreationOrder != null ? verticesInCreationOrder : new ArrayList<>(tasks.values()),
        stateTimestamps != null ? stateTimestamps : new long[JobStatus.values().length],
        state != null ? state : JobStatus.FINISHED,
        failureCause != null ? failureCause : "(null)",
        jsonPlan != null ? jsonPlan : "{\"jobid\":\"" + jobID + "\", \"name\":\"" + jobName + "\", \"nodes\":[]}",
        archivedUserAccumulators != null ? archivedUserAccumulators : new StringifiedAccumulatorResult[0],
        serializedUserAccumulators != null ? serializedUserAccumulators : Collections.<String, SerializedValue<Object>>emptyMap(),
        archivedExecutionConfig != null ? archivedExecutionConfig : new ArchivedExecutionConfigBuilder().build(),
        isStoppable,
        null,
        null);
}
Also used : ArrayList(java.util.ArrayList) StringifiedAccumulatorResult(org.apache.flink.runtime.accumulators.StringifiedAccumulatorResult) ArchivedExecutionGraph(org.apache.flink.runtime.executiongraph.ArchivedExecutionGraph) SerializedValue(org.apache.flink.util.SerializedValue) JobID(org.apache.flink.api.common.JobID)
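
In the builder above, serializedUserAccumulators is a Map<String, SerializedValue<Object>>. A small sketch of how such a map might be populated and read back (the accumulator name and value are invented for illustration):

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.util.SerializedValue;

// Sketch only: populate a serializedUserAccumulators map as accepted by
// ArchivedExecutionGraph above and read one value back.
public class SerializedAccumulatorsSketch {

    public static void main(String[] args) throws Exception {
        Map<String, SerializedValue<Object>> serializedUserAccumulators = new HashMap<>();
        serializedUserAccumulators.put("records-processed", new SerializedValue<Object>(42L));

        // Accumulator results are user types, so readers deserialize them with a
        // class loader that can see those types; Long is on the system class path.
        Object value = serializedUserAccumulators
            .get("records-processed")
            .deserializeValue(SerializedAccumulatorsSketch.class.getClassLoader());

        System.out.println(value); // 42
    }
}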

Aggregations

SerializedValue (org.apache.flink.util.SerializedValue): 13 usages
JobID (org.apache.flink.api.common.JobID): 11 usages
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 9 usages
Configuration (org.apache.flink.configuration.Configuration): 9 usages
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 8 usages
ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID): 7 usages
Test (org.junit.Test): 7 usages
TaskDeploymentDescriptor (org.apache.flink.runtime.deployment.TaskDeploymentDescriptor): 6 usages
AllocationID (org.apache.flink.runtime.clusterframework.types.AllocationID): 5 usages
JobInformation (org.apache.flink.runtime.executiongraph.JobInformation): 5 usages
TaskInformation (org.apache.flink.runtime.executiongraph.TaskInformation): 5 usages
ArrayList (java.util.ArrayList): 4 usages
BlobKey (org.apache.flink.runtime.blob.BlobKey): 4 usages
ActorGateway (org.apache.flink.runtime.instance.ActorGateway): 4 usages
JavaTestKit (akka.testkit.JavaTestKit): 3 usages
BroadcastVariableManager (org.apache.flink.runtime.broadcast.BroadcastVariableManager): 3 usages
FileCache (org.apache.flink.runtime.filecache.FileCache): 3 usages
IOManager (org.apache.flink.runtime.io.disk.iomanager.IOManager): 3 usages
ActorRef (akka.actor.ActorRef): 2 usages
IOException (java.io.IOException): 2 usages