
Example 61 with ExecutionAttemptID

Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.

From the class CheckpointCoordinatorTest, method testCheckpointAbortsIfTriggerTasksAreNotExecuted.

@Test
public void testCheckpointAbortsIfTriggerTasksAreNotExecuted() {
    try {
        final JobID jid = new JobID();
        final long timestamp = System.currentTimeMillis();
        // create some mock Execution vertices that receive the checkpoint trigger messages
        ExecutionVertex triggerVertex1 = mock(ExecutionVertex.class);
        ExecutionVertex triggerVertex2 = mock(ExecutionVertex.class);
        // create some mock Execution vertices that need to ack the checkpoint
        final ExecutionAttemptID ackAttemptID1 = new ExecutionAttemptID();
        final ExecutionAttemptID ackAttemptID2 = new ExecutionAttemptID();
        ExecutionVertex ackVertex1 = mockExecutionVertex(ackAttemptID1);
        ExecutionVertex ackVertex2 = mockExecutionVertex(ackAttemptID2);
        // set up the coordinator and validate the initial state
        CheckpointCoordinator coord = new CheckpointCoordinator(jid, 600000, 600000, 0, Integer.MAX_VALUE, ExternalizedCheckpointSettings.none(), new ExecutionVertex[] { triggerVertex1, triggerVertex2 }, new ExecutionVertex[] { ackVertex1, ackVertex2 }, new ExecutionVertex[] {}, new StandaloneCheckpointIDCounter(), new StandaloneCompletedCheckpointStore(1), null, Executors.directExecutor());
        // nothing should be happening
        assertEquals(0, coord.getNumberOfPendingCheckpoints());
        assertEquals(0, coord.getNumberOfRetainedSuccessfulCheckpoints());
        // trigger the first checkpoint. this should not succeed
        assertFalse(coord.triggerCheckpoint(timestamp, false));
        // still, nothing should be happening
        assertEquals(0, coord.getNumberOfPendingCheckpoints());
        assertEquals(0, coord.getNumberOfRetainedSuccessfulCheckpoints());
        coord.shutdown(JobStatus.FINISHED);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) JobID(org.apache.flink.api.common.JobID) ExecutionVertex(org.apache.flink.runtime.executiongraph.ExecutionVertex) IOException(java.io.IOException) Test(org.junit.Test)
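
The mockExecutionVertex(...) helper called above is defined elsewhere in CheckpointCoordinatorTest and is not shown on this page. A minimal sketch of what such a helper can look like, assuming Mockito and the standard ExecutionVertex/Execution accessors (an illustration, not the exact Flink source):

// Hypothetical helper: mocks a vertex whose current execution attempt reports
// the given attempt id and is RUNNING, so the coordinator can acknowledge it.
private static ExecutionVertex mockExecutionVertex(ExecutionAttemptID attemptID) {
    Execution execution = mock(Execution.class);
    when(execution.getAttemptId()).thenReturn(attemptID);
    when(execution.getState()).thenReturn(ExecutionState.RUNNING);

    ExecutionVertex vertex = mock(ExecutionVertex.class);
    when(vertex.getCurrentExecutionAttempt()).thenReturn(execution);
    return vertex;
}

By contrast, the two trigger vertices in the test are left as bare mocks with no current execution attempt, which is what makes triggerCheckpoint(...) return false here: the tasks that are supposed to trigger the checkpoint are not in an executed state.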

Example 62 with ExecutionAttemptID

Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.

From the class NettyMessageSerializationTest, method testEncodeDecode.

@Test
public void testEncodeDecode() {
    {
        Buffer buffer = spy(new Buffer(MemorySegmentFactory.allocateUnpooledSegment(1024), mock(BufferRecycler.class)));
        ByteBuffer nioBuffer = buffer.getNioBuffer();
        for (int i = 0; i < 1024; i += 4) {
            nioBuffer.putInt(i);
        }
        NettyMessage.BufferResponse expected = new NettyMessage.BufferResponse(buffer, random.nextInt(), new InputChannelID());
        NettyMessage.BufferResponse actual = encodeAndDecode(expected);
        // Verify recycle has been called on buffer instance
        verify(buffer, times(1)).recycle();
        final ByteBuf retainedSlice = actual.getNettyBuffer();
        // Ensure not recycled and same size as original buffer
        assertEquals(1, retainedSlice.refCnt());
        assertEquals(1024, retainedSlice.readableBytes());
        nioBuffer = retainedSlice.nioBuffer();
        for (int i = 0; i < 1024; i += 4) {
            assertEquals(i, nioBuffer.getInt());
        }
        // Release the retained slice
        actual.releaseBuffer();
        assertEquals(0, retainedSlice.refCnt());
        assertEquals(expected.sequenceNumber, actual.sequenceNumber);
        assertEquals(expected.receiverId, actual.receiverId);
    }
    {
        {
            IllegalStateException expectedError = new IllegalStateException();
            InputChannelID receiverId = new InputChannelID();
            NettyMessage.ErrorResponse expected = new NettyMessage.ErrorResponse(expectedError, receiverId);
            NettyMessage.ErrorResponse actual = encodeAndDecode(expected);
            assertEquals(expected.cause.getClass(), actual.cause.getClass());
            assertEquals(expected.cause.getMessage(), actual.cause.getMessage());
            assertEquals(receiverId, actual.receiverId);
        }
        {
            IllegalStateException expectedError = new IllegalStateException("Illegal illegal illegal");
            InputChannelID receiverId = new InputChannelID();
            NettyMessage.ErrorResponse expected = new NettyMessage.ErrorResponse(expectedError, receiverId);
            NettyMessage.ErrorResponse actual = encodeAndDecode(expected);
            assertEquals(expected.cause.getClass(), actual.cause.getClass());
            assertEquals(expected.cause.getMessage(), actual.cause.getMessage());
            assertEquals(receiverId, actual.receiverId);
        }
        {
            IllegalStateException expectedError = new IllegalStateException("Illegal illegal illegal");
            NettyMessage.ErrorResponse expected = new NettyMessage.ErrorResponse(expectedError);
            NettyMessage.ErrorResponse actual = encodeAndDecode(expected);
            assertEquals(expected.cause.getClass(), actual.cause.getClass());
            assertEquals(expected.cause.getMessage(), actual.cause.getMessage());
            assertNull(actual.receiverId);
            assertTrue(actual.isFatalError());
        }
    }
    {
        NettyMessage.PartitionRequest expected = new NettyMessage.PartitionRequest(new ResultPartitionID(new IntermediateResultPartitionID(), new ExecutionAttemptID()), random.nextInt(), new InputChannelID());
        NettyMessage.PartitionRequest actual = encodeAndDecode(expected);
        assertEquals(expected.partitionId, actual.partitionId);
        assertEquals(expected.queueIndex, actual.queueIndex);
        assertEquals(expected.receiverId, actual.receiverId);
    }
    {
        NettyMessage.TaskEventRequest expected = new NettyMessage.TaskEventRequest(new IntegerTaskEvent(random.nextInt()), new ResultPartitionID(new IntermediateResultPartitionID(), new ExecutionAttemptID()), new InputChannelID());
        NettyMessage.TaskEventRequest actual = encodeAndDecode(expected);
        assertEquals(expected.event, actual.event);
        assertEquals(expected.partitionId, actual.partitionId);
        assertEquals(expected.receiverId, actual.receiverId);
    }
    {
        NettyMessage.CancelPartitionRequest expected = new NettyMessage.CancelPartitionRequest(new InputChannelID());
        NettyMessage.CancelPartitionRequest actual = encodeAndDecode(expected);
        assertEquals(expected.receiverId, actual.receiverId);
    }
    {
        NettyMessage.CloseRequest expected = new NettyMessage.CloseRequest();
        NettyMessage.CloseRequest actual = encodeAndDecode(expected);
        assertEquals(expected.getClass(), actual.getClass());
    }
}
Also used : Buffer(org.apache.flink.runtime.io.network.buffer.Buffer) ByteBuffer(java.nio.ByteBuffer) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) ByteBuf(io.netty.buffer.ByteBuf) IntegerTaskEvent(org.apache.flink.runtime.event.task.IntegerTaskEvent) InputChannelID(org.apache.flink.runtime.io.network.partition.consumer.InputChannelID) BufferRecycler(org.apache.flink.runtime.io.network.buffer.BufferRecycler) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) ResultPartitionID(org.apache.flink.runtime.io.network.partition.ResultPartitionID) Test(org.junit.Test)
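
The encodeAndDecode(...) helper is also defined outside this snippet. A plausible sketch, assuming a Netty EmbeddedChannel (io.netty.channel.embedded.EmbeddedChannel) wired with Flink's NettyMessage encoder and decoder handlers; the handler class names are an assumption here, not taken from this page:

// Sketch of a serialization round trip: write the message outbound so the encoder
// produces a ByteBuf, then feed that buffer back inbound so the decoder rebuilds it.
private final EmbeddedChannel channel = new EmbeddedChannel(
        new NettyMessage.NettyMessageEncoder(),    // assumed encoder handler
        new NettyMessage.NettyMessageDecoder());   // assumed decoder handler

@SuppressWarnings("unchecked")
private <T extends NettyMessage> T encodeAndDecode(T msg) {
    channel.writeOutbound(msg);
    ByteBuf encoded = (ByteBuf) channel.readOutbound();

    assertTrue(channel.writeInbound(encoded));
    return (T) channel.readInbound();
}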

Example 63 with ExecutionAttemptID

Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.

From the class TaskExecutorTest, method testTaskSubmission.

/**
	 * Tests that we can submit a task to the TaskManager given that we've allocated a slot there.
	 */
@Test(timeout = 1000L)
public void testTaskSubmission() throws Exception {
    final Configuration configuration = new Configuration();
    final TestingSerialRpcService rpc = new TestingSerialRpcService();
    final TaskManagerConfiguration taskManagerConfiguration = TaskManagerConfiguration.fromConfiguration(configuration);
    final JobID jobId = new JobID();
    final AllocationID allocationId = new AllocationID();
    final UUID jobManagerLeaderId = UUID.randomUUID();
    final JobVertexID jobVertexId = new JobVertexID();
    JobInformation jobInformation = new JobInformation(jobId, name.getMethodName(), new SerializedValue<>(new ExecutionConfig()), new Configuration(), Collections.<BlobKey>emptyList(), Collections.<URL>emptyList());
    TaskInformation taskInformation = new TaskInformation(jobVertexId, "test task", 1, 1, TestInvokable.class.getName(), new Configuration());
    SerializedValue<JobInformation> serializedJobInformation = new SerializedValue<>(jobInformation);
    SerializedValue<TaskInformation> serializedJobVertexInformation = new SerializedValue<>(taskInformation);
    final TaskDeploymentDescriptor tdd = new TaskDeploymentDescriptor(serializedJobInformation, serializedJobVertexInformation, new ExecutionAttemptID(), allocationId, 0, 0, 0, null, Collections.<ResultPartitionDeploymentDescriptor>emptyList(), Collections.<InputGateDeploymentDescriptor>emptyList());
    final LibraryCacheManager libraryCacheManager = mock(LibraryCacheManager.class);
    when(libraryCacheManager.getClassLoader(eq(jobId))).thenReturn(getClass().getClassLoader());
    final JobManagerConnection jobManagerConnection = new JobManagerConnection(jobId, ResourceID.generate(), mock(JobMasterGateway.class), jobManagerLeaderId, mock(TaskManagerActions.class), mock(CheckpointResponder.class), libraryCacheManager, mock(ResultPartitionConsumableNotifier.class), mock(PartitionProducerStateChecker.class));
    final JobManagerTable jobManagerTable = new JobManagerTable();
    jobManagerTable.put(jobId, jobManagerConnection);
    final TaskSlotTable taskSlotTable = mock(TaskSlotTable.class);
    when(taskSlotTable.existsActiveSlot(eq(jobId), eq(allocationId))).thenReturn(true);
    when(taskSlotTable.addTask(any(Task.class))).thenReturn(true);
    final NetworkEnvironment networkEnvironment = mock(NetworkEnvironment.class);
    when(networkEnvironment.createKvStateTaskRegistry(eq(jobId), eq(jobVertexId))).thenReturn(mock(TaskKvStateRegistry.class));
    final TaskManagerMetricGroup taskManagerMetricGroup = mock(TaskManagerMetricGroup.class);
    when(taskManagerMetricGroup.addTaskForJob(any(JobID.class), anyString(), any(JobVertexID.class), any(ExecutionAttemptID.class), anyString(), anyInt(), anyInt())).thenReturn(mock(TaskMetricGroup.class));
    final HighAvailabilityServices haServices = mock(HighAvailabilityServices.class);
    when(haServices.getResourceManagerLeaderRetriever()).thenReturn(mock(LeaderRetrievalService.class));
    try {
        final TestingFatalErrorHandler testingFatalErrorHandler = new TestingFatalErrorHandler();
        TaskExecutor taskManager = new TaskExecutor(taskManagerConfiguration, mock(TaskManagerLocation.class), rpc, mock(MemoryManager.class), mock(IOManager.class), networkEnvironment, haServices, mock(HeartbeatServices.class, RETURNS_MOCKS), mock(MetricRegistry.class), taskManagerMetricGroup, mock(BroadcastVariableManager.class), mock(FileCache.class), taskSlotTable, jobManagerTable, mock(JobLeaderService.class), testingFatalErrorHandler);
        taskManager.start();
        taskManager.submitTask(tdd, jobManagerLeaderId);
        Future<Boolean> completionFuture = TestInvokable.completableFuture;
        completionFuture.get();
        // check if a concurrent error occurred
        testingFatalErrorHandler.rethrowError();
    } finally {
        rpc.stopService();
    }
}
Also used : Task(org.apache.flink.runtime.taskmanager.Task) Configuration(org.apache.flink.configuration.Configuration) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) TaskKvStateRegistry(org.apache.flink.runtime.query.TaskKvStateRegistry) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) JobMasterGateway(org.apache.flink.runtime.jobmaster.JobMasterGateway) TaskManagerActions(org.apache.flink.runtime.taskmanager.TaskManagerActions) BroadcastVariableManager(org.apache.flink.runtime.broadcast.BroadcastVariableManager) TestingSerialRpcService(org.apache.flink.runtime.rpc.TestingSerialRpcService) TaskDeploymentDescriptor(org.apache.flink.runtime.deployment.TaskDeploymentDescriptor) PartitionProducerStateChecker(org.apache.flink.runtime.io.network.netty.PartitionProducerStateChecker) UUID(java.util.UUID) ResultPartitionConsumableNotifier(org.apache.flink.runtime.io.network.partition.ResultPartitionConsumableNotifier) TestingFatalErrorHandler(org.apache.flink.runtime.util.TestingFatalErrorHandler) HeartbeatServices(org.apache.flink.runtime.heartbeat.HeartbeatServices) JobInformation(org.apache.flink.runtime.executiongraph.JobInformation) TaskInformation(org.apache.flink.runtime.executiongraph.TaskInformation) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) IOManager(org.apache.flink.runtime.io.disk.iomanager.IOManager) CheckpointResponder(org.apache.flink.runtime.taskmanager.CheckpointResponder) TaskMetricGroup(org.apache.flink.runtime.metrics.groups.TaskMetricGroup) TaskManagerLocation(org.apache.flink.runtime.taskmanager.TaskManagerLocation) AllocationID(org.apache.flink.runtime.clusterframework.types.AllocationID) TaskManagerMetricGroup(org.apache.flink.runtime.metrics.groups.TaskManagerMetricGroup) MetricRegistry(org.apache.flink.runtime.metrics.MetricRegistry) LibraryCacheManager(org.apache.flink.runtime.execution.librarycache.LibraryCacheManager) SerializedValue(org.apache.flink.util.SerializedValue) MemoryManager(org.apache.flink.runtime.memory.MemoryManager) FileCache(org.apache.flink.runtime.filecache.FileCache) TaskSlotTable(org.apache.flink.runtime.taskexecutor.slot.TaskSlotTable) HighAvailabilityServices(org.apache.flink.runtime.highavailability.HighAvailabilityServices) TestingHighAvailabilityServices(org.apache.flink.runtime.highavailability.TestingHighAvailabilityServices) LeaderRetrievalService(org.apache.flink.runtime.leaderretrieval.LeaderRetrievalService) TestingLeaderRetrievalService(org.apache.flink.runtime.leaderelection.TestingLeaderRetrievalService) NetworkEnvironment(org.apache.flink.runtime.io.network.NetworkEnvironment) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
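
The TestInvokable referenced via TestInvokable.class.getName() and TestInvokable.completableFuture is a test-local class that does not appear in this snippet. A minimal sketch of such an invokable, assuming the no-argument AbstractInvokable base class of this Flink version and a plain java.util.concurrent.CompletableFuture (the exact future type in the original may differ):

// Hypothetical invokable: completes a static future from invoke() so the test
// can block on completionFuture.get() until the submitted task has actually run.
public static class TestInvokable extends AbstractInvokable {

    static final CompletableFuture<Boolean> completableFuture = new CompletableFuture<>();

    @Override
    public void invoke() throws Exception {
        completableFuture.complete(true);
    }
}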

Example 64 with ExecutionAttemptID

Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.

From the class TaskManagerTest, method testUpdateTaskInputPartitionsFailure.

/**
	 * Tests that the TaskManager sends a proper exception back to the sender if updating the
	 * task's input partition information fails.
	 */
@Test
public void testUpdateTaskInputPartitionsFailure() throws Exception {
    ActorGateway jobManager = null;
    ActorGateway taskManager = null;
    try {
        final ExecutionAttemptID executionAttemptId = new ExecutionAttemptID();
        ActorRef jm = system.actorOf(Props.create(SimpleJobManager.class, leaderSessionID));
        jobManager = new AkkaActorGateway(jm, leaderSessionID);
        taskManager = TestingUtils.createTaskManager(system, jobManager, new Configuration(), true, true);
        TaskDeploymentDescriptor tdd = createTaskDeploymentDescriptor(new JobID(), "test job", new JobVertexID(), executionAttemptId, new SerializedValue<>(new ExecutionConfig()), "test task", 1, 0, 1, 0, new Configuration(), new Configuration(), BlockingNoOpInvokable.class.getName(), Collections.<ResultPartitionDeploymentDescriptor>emptyList(), Collections.<InputGateDeploymentDescriptor>emptyList(), Collections.<BlobKey>emptyList(), Collections.<URL>emptyList(), 0);
        Future<Object> submitResponse = taskManager.ask(new SubmitTask(tdd), timeout);
        Await.result(submitResponse, timeout);
        Future<Object> partitionUpdateResponse = taskManager.ask(new TaskMessages.UpdateTaskSinglePartitionInfo(executionAttemptId, new IntermediateDataSetID(), new InputChannelDeploymentDescriptor(new ResultPartitionID(), ResultPartitionLocation.createLocal())), timeout);
        try {
            Await.result(partitionUpdateResponse, timeout);
            fail("The update task input partitions message should have failed.");
        } catch (Exception e) {
        // expected
        }
    } finally {
        TestingUtils.stopActor(jobManager);
        TestingUtils.stopActor(taskManager);
    }
}
Also used : AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) TaskManagerServicesConfiguration(org.apache.flink.runtime.taskexecutor.TaskManagerServicesConfiguration) Configuration(org.apache.flink.configuration.Configuration) ActorRef(akka.actor.ActorRef) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) TaskMessages(org.apache.flink.runtime.messages.TaskMessages) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) PartitionNotFoundException(org.apache.flink.runtime.io.network.partition.PartitionNotFoundException) IOException(java.io.IOException) BlockingNoOpInvokable(org.apache.flink.runtime.testtasks.BlockingNoOpInvokable) ActorGateway(org.apache.flink.runtime.instance.ActorGateway) InputChannelDeploymentDescriptor(org.apache.flink.runtime.deployment.InputChannelDeploymentDescriptor) TaskDeploymentDescriptor(org.apache.flink.runtime.deployment.TaskDeploymentDescriptor) SubmitTask(org.apache.flink.runtime.messages.TaskMessages.SubmitTask) IntermediateDataSetID(org.apache.flink.runtime.jobgraph.IntermediateDataSetID) ResultPartitionID(org.apache.flink.runtime.io.network.partition.ResultPartitionID) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)

Example 65 with ExecutionAttemptID

Use of org.apache.flink.runtime.executiongraph.ExecutionAttemptID in project flink by apache.

From the class TaskManagerTest, method testGateChannelEdgeMismatch.

@Test
public void testGateChannelEdgeMismatch() {
    new JavaTestKit(system) {

        {
            ActorGateway jobManager = null;
            ActorGateway taskManager = null;
            final ActorGateway testActorGateway = new AkkaActorGateway(getTestActor(), leaderSessionID);
            try {
                ActorRef jm = system.actorOf(Props.create(SimpleJobManager.class, leaderSessionID));
                jobManager = new AkkaActorGateway(jm, leaderSessionID);
                taskManager = TestingUtils.createTaskManager(system, jobManager, new Configuration(), true, true);
                final ActorGateway tm = taskManager;
                final JobID jid = new JobID();
                JobVertexID vid1 = new JobVertexID();
                JobVertexID vid2 = new JobVertexID();
                final ExecutionAttemptID eid1 = new ExecutionAttemptID();
                final ExecutionAttemptID eid2 = new ExecutionAttemptID();
                final TaskDeploymentDescriptor tdd1 = createTaskDeploymentDescriptor(jid, "TestJob", vid1, eid1, new SerializedValue<>(new ExecutionConfig()), "Sender", 1, 0, 1, 0, new Configuration(), new Configuration(), Tasks.Sender.class.getName(), Collections.<ResultPartitionDeploymentDescriptor>emptyList(), Collections.<InputGateDeploymentDescriptor>emptyList(), new ArrayList<BlobKey>(), Collections.<URL>emptyList(), 0);
                final TaskDeploymentDescriptor tdd2 = createTaskDeploymentDescriptor(jid, "TestJob", vid2, eid2, new SerializedValue<>(new ExecutionConfig()), "Receiver", 7, 2, 7, 0, new Configuration(), new Configuration(), Tasks.Receiver.class.getName(), Collections.<ResultPartitionDeploymentDescriptor>emptyList(), Collections.<InputGateDeploymentDescriptor>emptyList(), new ArrayList<BlobKey>(), Collections.<URL>emptyList(), 0);
                new Within(d) {

                    @Override
                    protected void run() {
                        try {
                            tm.tell(new SubmitTask(tdd1), testActorGateway);
                            tm.tell(new SubmitTask(tdd2), testActorGateway);
                            expectMsgEquals(Acknowledge.get());
                            expectMsgEquals(Acknowledge.get());
                            tm.tell(new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid1), testActorGateway);
                            tm.tell(new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid2), testActorGateway);
                            expectMsgEquals(true);
                            expectMsgEquals(true);
                            tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
                            Map<ExecutionAttemptID, Task> tasks = expectMsgClass(TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();
                            assertEquals(0, tasks.size());
                        } catch (Exception e) {
                            e.printStackTrace();
                            fail(e.getMessage());
                        }
                    }
                };
            } catch (Exception e) {
                e.printStackTrace();
                fail(e.getMessage());
            } finally {
                // shut down the actors
                TestingUtils.stopActor(taskManager);
                TestingUtils.stopActor(jobManager);
            }
        }
    };
}
Also used : AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) StopTask(org.apache.flink.runtime.messages.TaskMessages.StopTask) CancelTask(org.apache.flink.runtime.messages.TaskMessages.CancelTask) SubmitTask(org.apache.flink.runtime.messages.TaskMessages.SubmitTask) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) TaskManagerServicesConfiguration(org.apache.flink.runtime.taskexecutor.TaskManagerServicesConfiguration) Configuration(org.apache.flink.configuration.Configuration) ActorRef(akka.actor.ActorRef) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) PartitionNotFoundException(org.apache.flink.runtime.io.network.partition.PartitionNotFoundException) IOException(java.io.IOException) BlobKey(org.apache.flink.runtime.blob.BlobKey) ActorGateway(org.apache.flink.runtime.instance.ActorGateway) TaskDeploymentDescriptor(org.apache.flink.runtime.deployment.TaskDeploymentDescriptor) JavaTestKit(akka.testkit.JavaTestKit) JobID(org.apache.flink.api.common.JobID) TestingTaskManagerMessages(org.apache.flink.runtime.testingUtils.TestingTaskManagerMessages) Test(org.junit.Test)

Aggregations

Types used together with ExecutionAttemptID across these examples, with occurrence counts:

ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID): 81
Test (org.junit.Test): 66
JobID (org.apache.flink.api.common.JobID): 61
ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex): 41
IOException (java.io.IOException): 31
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 30
Configuration (org.apache.flink.configuration.Configuration): 24
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 21
AcknowledgeCheckpoint (org.apache.flink.runtime.messages.checkpoint.AcknowledgeCheckpoint): 19
ActorGateway (org.apache.flink.runtime.instance.ActorGateway): 17
AkkaActorGateway (org.apache.flink.runtime.instance.AkkaActorGateway): 16
TaskDeploymentDescriptor (org.apache.flink.runtime.deployment.TaskDeploymentDescriptor): 15
TaskManagerServicesConfiguration (org.apache.flink.runtime.taskexecutor.TaskManagerServicesConfiguration): 14
ActorRef (akka.actor.ActorRef): 13
SubmitTask (org.apache.flink.runtime.messages.TaskMessages.SubmitTask): 13
JavaTestKit (akka.testkit.JavaTestKit): 12
BlobKey (org.apache.flink.runtime.blob.BlobKey): 10
TriggerStackTraceSample (org.apache.flink.runtime.messages.StackTraceSampleMessages.TriggerStackTraceSample): 10
PartitionNotFoundException (org.apache.flink.runtime.io.network.partition.PartitionNotFoundException): 9
ResultPartitionID (org.apache.flink.runtime.io.network.partition.ResultPartitionID): 9