
Example 11 with IntermediateDataSetID

Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.
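
All of the snippets below create the ID with its no-arg constructor. As a quick orientation, here is a minimal sketch, assuming IntermediateDataSetID behaves like Flink's other AbstractID-based identifiers (each no-arg instance is a freshly generated, unique ID):

IntermediateDataSetID dataSetId = new IntermediateDataSetID();
IntermediateDataSetID other = new IntermediateDataSetID();
// Two independently constructed IDs identify two distinct intermediate data sets,
// so they should not compare as equal.
assert !dataSetId.equals(other);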

Class InputGateFairnessTest, method testFairConsumptionLocalChannelsPreFilled.

@Test
public void testFairConsumptionLocalChannelsPreFilled() throws Exception {
    final int numChannels = 37;
    final int buffersPerChannel = 27;
    final ResultPartition resultPartition = mock(ResultPartition.class);
    final Buffer mockBuffer = createMockBuffer(42);
    // ----- create some source channels and fill them with buffers -----
    final PipelinedSubpartition[] sources = new PipelinedSubpartition[numChannels];
    for (int i = 0; i < numChannels; i++) {
        PipelinedSubpartition partition = new PipelinedSubpartition(0, resultPartition);
        for (int p = 0; p < buffersPerChannel; p++) {
            partition.add(mockBuffer);
        }
        partition.finish();
        sources[i] = partition;
    }
    // ----- create reading side -----
    ResultPartitionManager resultPartitionManager = createResultPartitionManager(sources);
    SingleInputGate gate = new FairnessVerifyingInputGate(
            "Test Task Name",
            new JobID(),
            new IntermediateDataSetID(),
            0,
            numChannels,
            mock(TaskActions.class),
            new UnregisteredTaskMetricsGroup.DummyTaskIOMetricGroup());
    for (int i = 0; i < numChannels; i++) {
        LocalInputChannel channel = new LocalInputChannel(
                gate,
                i,
                new ResultPartitionID(),
                resultPartitionManager,
                mock(TaskEventDispatcher.class),
                new UnregisteredTaskMetricsGroup.DummyTaskIOMetricGroup());
        gate.setInputChannel(new IntermediateResultPartitionID(), channel);
    }
    // read all the buffers and the EOF event
    for (int i = numChannels * (buffersPerChannel + 1); i > 0; --i) {
        assertNotNull(gate.getNextBufferOrEvent());
        int min = Integer.MAX_VALUE;
        int max = 0;
        for (PipelinedSubpartition source : sources) {
            int size = source.getCurrentNumberOfBuffers();
            min = Math.min(min, size);
            max = Math.max(max, size);
        }
        assertTrue(max == min || max == min + 1);
    }
    assertNull(gate.getNextBufferOrEvent());
}
Also used: InputChannelTestUtils.createMockBuffer (org.apache.flink.runtime.io.network.partition.InputChannelTestUtils.createMockBuffer), Buffer (org.apache.flink.runtime.io.network.buffer.Buffer), UnregisteredTaskMetricsGroup (org.apache.flink.runtime.operators.testutils.UnregisteredTaskMetricsGroup), TaskActions (org.apache.flink.runtime.taskmanager.TaskActions), InputChannelTestUtils.createResultPartitionManager (org.apache.flink.runtime.io.network.partition.InputChannelTestUtils.createResultPartitionManager), SingleInputGate (org.apache.flink.runtime.io.network.partition.consumer.SingleInputGate), IntermediateDataSetID (org.apache.flink.runtime.jobgraph.IntermediateDataSetID), IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID), TaskEventDispatcher (org.apache.flink.runtime.io.network.TaskEventDispatcher), LocalInputChannel (org.apache.flink.runtime.io.network.partition.consumer.LocalInputChannel), JobID (org.apache.flink.api.common.JobID), Test (org.junit.Test)
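
The assertion max == min || max == min + 1 after every read is the fairness invariant of this test: if the gate drains its channels round-robin, the remaining queue lengths can never differ by more than one buffer. A minimal sketch of that check pulled out into a helper (the method name and parameter are ours, not part of the Flink test):

/** Returns true if the given queue sizes are consistent with round-robin consumption. */
static boolean queuesDifferByAtMostOne(int[] queueSizes) {
    int min = Integer.MAX_VALUE;
    int max = 0;
    for (int size : queueSizes) {
        min = Math.min(min, size);
        max = Math.max(max, size);
    }
    return max == min || max == min + 1;
}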

Example 12 with IntermediateDataSetID

Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.

Class InputGateFairnessTest, method testFairConsumptionRemoteChannelsPreFilled.

@Test
public void testFairConsumptionRemoteChannelsPreFilled() throws Exception {
    final int numChannels = 37;
    final int buffersPerChannel = 27;
    final Buffer mockBuffer = createMockBuffer(42);
    // ----- create some source channels and fill them with buffers -----
    SingleInputGate gate = new FairnessVerifyingInputGate(
            "Test Task Name",
            new JobID(),
            new IntermediateDataSetID(),
            0,
            numChannels,
            mock(TaskActions.class),
            new UnregisteredTaskMetricsGroup.DummyTaskIOMetricGroup());
    final ConnectionManager connManager = createDummyConnectionManager();
    final RemoteInputChannel[] channels = new RemoteInputChannel[numChannels];
    for (int i = 0; i < numChannels; i++) {
        RemoteInputChannel channel = new RemoteInputChannel(
                gate,
                i,
                new ResultPartitionID(),
                mock(ConnectionID.class),
                connManager,
                0,
                0,
                new UnregisteredTaskMetricsGroup.DummyTaskIOMetricGroup());
        channels[i] = channel;
        for (int p = 0; p < buffersPerChannel; p++) {
            channel.onBuffer(mockBuffer, p);
        }
        channel.onBuffer(EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE), buffersPerChannel);
        gate.setInputChannel(new IntermediateResultPartitionID(), channel);
    }
    // read all the buffers and the EOF event
    for (int i = numChannels * (buffersPerChannel + 1); i > 0; --i) {
        assertNotNull(gate.getNextBufferOrEvent());
        int min = Integer.MAX_VALUE;
        int max = 0;
        for (RemoteInputChannel channel : channels) {
            int size = channel.getNumberOfQueuedBuffers();
            min = Math.min(min, size);
            max = Math.max(max, size);
        }
        assertTrue(max == min || max == min + 1);
    }
    assertNull(gate.getNextBufferOrEvent());
}
Also used: InputChannelTestUtils.createMockBuffer (org.apache.flink.runtime.io.network.partition.InputChannelTestUtils.createMockBuffer), Buffer (org.apache.flink.runtime.io.network.buffer.Buffer), UnregisteredTaskMetricsGroup (org.apache.flink.runtime.operators.testutils.UnregisteredTaskMetricsGroup), TaskActions (org.apache.flink.runtime.taskmanager.TaskActions), SingleInputGate (org.apache.flink.runtime.io.network.partition.consumer.SingleInputGate), RemoteInputChannel (org.apache.flink.runtime.io.network.partition.consumer.RemoteInputChannel), ConnectionID (org.apache.flink.runtime.io.network.ConnectionID), InputChannelTestUtils.createDummyConnectionManager (org.apache.flink.runtime.io.network.partition.InputChannelTestUtils.createDummyConnectionManager), ConnectionManager (org.apache.flink.runtime.io.network.ConnectionManager), IntermediateDataSetID (org.apache.flink.runtime.jobgraph.IntermediateDataSetID), IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID), JobID (org.apache.flink.api.common.JobID), Test (org.junit.Test)
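
The loop bound in both fairness tests follows directly from how each channel is pre-filled: buffersPerChannel data buffers plus one serialized EndOfPartitionEvent. A small sanity check of that arithmetic, with the values used above:

int numChannels = 37;
int buffersPerChannel = 27;
// 27 data buffers + 1 end-of-partition event per channel,
// so the gate must hand out 37 * (27 + 1) = 1036 buffers/events
// before the final getNextBufferOrEvent() call is expected to return null.
int expectedReads = numChannels * (buffersPerChannel + 1);  // 1036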

Example 13 with IntermediateDataSetID

Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.

Class TaskManagerTest, method testCancellingDependentAndStateUpdateFails.

@Test
public void testCancellingDependentAndStateUpdateFails() {
    // This test creates two tasks: the sender sends data and then fails to send the
    // state update back to the job manager; the second task blocks until it is canceled.
    new JavaTestKit(system) {

        {
            ActorGateway jobManager = null;
            ActorGateway taskManager = null;
            final ActorGateway testActorGateway = new AkkaActorGateway(getTestActor(), leaderSessionID);
            try {
                final JobID jid = new JobID();
                JobVertexID vid1 = new JobVertexID();
                JobVertexID vid2 = new JobVertexID();
                final ExecutionAttemptID eid1 = new ExecutionAttemptID();
                final ExecutionAttemptID eid2 = new ExecutionAttemptID();
                ActorRef jm = system.actorOf(Props.create(new SimpleLookupFailingUpdateJobManagerCreator(leaderSessionID, eid2)));
                jobManager = new AkkaActorGateway(jm, leaderSessionID);
                taskManager = TestingUtils.createTaskManager(system, jobManager, new Configuration(), true, true);
                final ActorGateway tm = taskManager;
                IntermediateResultPartitionID partitionId = new IntermediateResultPartitionID();
                List<ResultPartitionDeploymentDescriptor> irpdd = new ArrayList<ResultPartitionDeploymentDescriptor>();
                irpdd.add(new ResultPartitionDeploymentDescriptor(
                        new IntermediateDataSetID(), partitionId, ResultPartitionType.PIPELINED, 1, 1, true));
                InputGateDeploymentDescriptor ircdd = new InputGateDeploymentDescriptor(
                        new IntermediateDataSetID(), ResultPartitionType.PIPELINED, 0,
                        new InputChannelDeploymentDescriptor[] {
                                new InputChannelDeploymentDescriptor(
                                        new ResultPartitionID(partitionId, eid1),
                                        ResultPartitionLocation.createLocal()) });
                final TaskDeploymentDescriptor tdd1 = createTaskDeploymentDescriptor(
                        jid, "TestJob", vid1, eid1,
                        new SerializedValue<>(new ExecutionConfig()),
                        "Sender", 1, 0, 1, 0,
                        new Configuration(), new Configuration(),
                        Tasks.Sender.class.getName(),
                        irpdd,
                        Collections.<InputGateDeploymentDescriptor>emptyList(),
                        new ArrayList<BlobKey>(),
                        Collections.<URL>emptyList(),
                        0);
                final TaskDeploymentDescriptor tdd2 = createTaskDeploymentDescriptor(
                        jid, "TestJob", vid2, eid2,
                        new SerializedValue<>(new ExecutionConfig()),
                        "Receiver", 7, 2, 7, 0,
                        new Configuration(), new Configuration(),
                        Tasks.BlockingReceiver.class.getName(),
                        Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
                        Collections.singletonList(ircdd),
                        new ArrayList<BlobKey>(),
                        Collections.<URL>emptyList(),
                        0);
                new Within(d) {

                    @Override
                    protected void run() {
                        try {
                            Future<Object> t1Running = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskIsRunning(eid1), timeout);
                            Future<Object> t2Running = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskIsRunning(eid2), timeout);
                            tm.tell(new SubmitTask(tdd2), testActorGateway);
                            tm.tell(new SubmitTask(tdd1), testActorGateway);
                            expectMsgEquals(Acknowledge.get());
                            expectMsgEquals(Acknowledge.get());
                            Await.ready(t1Running, d);
                            Await.ready(t2Running, d);
                            tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
                            Map<ExecutionAttemptID, Task> tasks = expectMsgClass(TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();
                            Task t1 = tasks.get(eid1);
                            Task t2 = tasks.get(eid2);
                            tm.tell(new CancelTask(eid2), testActorGateway);
                            expectMsgEquals(Acknowledge.get());
                            if (t2 != null) {
                                Future<Object> response = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid2), timeout);
                                Await.ready(response, d);
                            }
                            if (t1 != null) {
                                if (t1.getExecutionState() == ExecutionState.RUNNING) {
                                    tm.tell(new CancelTask(eid1), testActorGateway);
                                    expectMsgEquals(Acknowledge.get());
                                }
                                Future<Object> response = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid1), timeout);
                                Await.ready(response, d);
                            }
                            tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
                            tasks = expectMsgClass(TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();
                            assertEquals(0, tasks.size());
                        } catch (Exception e) {
                            e.printStackTrace();
                            fail(e.getMessage());
                        }
                    }
                };
            } catch (Exception e) {
                e.printStackTrace();
                fail(e.getMessage());
            } finally {
                // shut down the actors
                TestingUtils.stopActor(taskManager);
                TestingUtils.stopActor(jobManager);
            }
        }
    };
}
Also used: AkkaActorGateway (org.apache.flink.runtime.instance.AkkaActorGateway), StopTask (org.apache.flink.runtime.messages.TaskMessages.StopTask), CancelTask (org.apache.flink.runtime.messages.TaskMessages.CancelTask), SubmitTask (org.apache.flink.runtime.messages.TaskMessages.SubmitTask), ResultPartitionDeploymentDescriptor (org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor), TaskManagerServicesConfiguration (org.apache.flink.runtime.taskexecutor.TaskManagerServicesConfiguration), Configuration (org.apache.flink.configuration.Configuration), ActorRef (akka.actor.ActorRef), JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID), ArrayList (java.util.ArrayList), ExecutionConfig (org.apache.flink.api.common.ExecutionConfig), BlobKey (org.apache.flink.runtime.blob.BlobKey), ActorGateway (org.apache.flink.runtime.instance.ActorGateway), ResultPartitionID (org.apache.flink.runtime.io.network.partition.ResultPartitionID), IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID), TaskDeploymentDescriptor (org.apache.flink.runtime.deployment.TaskDeploymentDescriptor), TestingTaskManagerMessages (org.apache.flink.runtime.testingUtils.TestingTaskManagerMessages), ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID), InputGateDeploymentDescriptor (org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor), PartitionNotFoundException (org.apache.flink.runtime.io.network.partition.PartitionNotFoundException), IOException (java.io.IOException), InputChannelDeploymentDescriptor (org.apache.flink.runtime.deployment.InputChannelDeploymentDescriptor), IntermediateDataSetID (org.apache.flink.runtime.jobgraph.IntermediateDataSetID), JavaTestKit (akka.testkit.JavaTestKit), JobID (org.apache.flink.api.common.JobID), Test (org.junit.Test)
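
The sender and receiver in this test are wired together purely through IDs: the sender publishes its result partition under a fresh IntermediateResultPartitionID, and the receiver's input channel refers to the same partition, qualified by the producing execution attempt. A stripped-down sketch of just that wiring, using the same constructors as the test above (variable names are ours):

IntermediateResultPartitionID partitionId = new IntermediateResultPartitionID();
ExecutionAttemptID producerAttempt = new ExecutionAttemptID();  // eid1 in the test

// Producer side: the partition is deployed under partitionId
// (see the ResultPartitionDeploymentDescriptor above).
// Consumer side: the input channel points at the same partition,
// scoped to the producing execution attempt.
ResultPartitionID consumedPartition = new ResultPartitionID(partitionId, producerAttempt);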

Example 14 with IntermediateDataSetID

Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.

Class TaskManagerTest, method testRemotePartitionNotFound.

/**
 * Tests that repeated remote {@link PartitionNotFoundException}s ultimately fail the receiver.
 */
@Test
public void testRemotePartitionNotFound() throws Exception {
    new JavaTestKit(system) {

        {
            ActorGateway jobManager = null;
            ActorGateway taskManager = null;
            final ActorGateway testActorGateway = new AkkaActorGateway(getTestActor(), leaderSessionID);
            try {
                final IntermediateDataSetID resultId = new IntermediateDataSetID();
                // Create the JM
                ActorRef jm = system.actorOf(Props.create(new SimplePartitionStateLookupJobManagerCreator(leaderSessionID, getTestActor())));
                jobManager = new AkkaActorGateway(jm, leaderSessionID);
                final int dataPort = NetUtils.getAvailablePort();
                Configuration config = new Configuration();
                config.setInteger(ConfigConstants.TASK_MANAGER_DATA_PORT_KEY, dataPort);
                config.setInteger(TaskManagerOptions.NETWORK_REQUEST_BACKOFF_INITIAL, 100);
                config.setInteger(TaskManagerOptions.NETWORK_REQUEST_BACKOFF_MAX, 200);
                taskManager = TestingUtils.createTaskManager(system, jobManager, config, false, true);
                // ---------------------------------------------------------------------------------
                final ActorGateway tm = taskManager;
                final JobID jid = new JobID();
                final JobVertexID vid = new JobVertexID();
                final ExecutionAttemptID eid = new ExecutionAttemptID();
                final ResultPartitionID partitionId = new ResultPartitionID();
                // Remote location (on the same TM though) for the partition
                final ResultPartitionLocation loc = ResultPartitionLocation.createRemote(
                        new ConnectionID(new InetSocketAddress("localhost", dataPort), 0));
                final InputChannelDeploymentDescriptor[] icdd = new InputChannelDeploymentDescriptor[] {
                        new InputChannelDeploymentDescriptor(partitionId, loc) };
                final InputGateDeploymentDescriptor igdd = new InputGateDeploymentDescriptor(resultId, ResultPartitionType.PIPELINED, 0, icdd);
                final TaskDeploymentDescriptor tdd = createTaskDeploymentDescriptor(
                        jid, "TestJob", vid, eid,
                        new SerializedValue<>(new ExecutionConfig()),
                        "Receiver", 1, 0, 1, 0,
                        new Configuration(), new Configuration(),
                        Tasks.AgnosticReceiver.class.getName(),
                        Collections.<ResultPartitionDeploymentDescriptor>emptyList(),
                        Collections.singletonList(igdd),
                        Collections.<BlobKey>emptyList(),
                        Collections.<URL>emptyList(),
                        0);
                new Within(d) {

                    @Override
                    protected void run() {
                        // Submit the task
                        tm.tell(new SubmitTask(tdd), testActorGateway);
                        expectMsgClass(Acknowledge.get().getClass());
                        // Wait to be notified about the final execution state by the mock JM
                        TaskExecutionState msg = expectMsgClass(TaskExecutionState.class);
                        // The task should fail after repeated requests
                        assertEquals(ExecutionState.FAILED, msg.getExecutionState());
                        Throwable t = msg.getError(ClassLoader.getSystemClassLoader());
                        assertEquals("Thrown exception was not a PartitionNotFoundException: " + t.getMessage(), PartitionNotFoundException.class, t.getClass());
                    }
                };
            } catch (Exception e) {
                e.printStackTrace();
                fail(e.getMessage());
            } finally {
                TestingUtils.stopActor(taskManager);
                TestingUtils.stopActor(jobManager);
            }
        }
    };
}
Also used: AkkaActorGateway (org.apache.flink.runtime.instance.AkkaActorGateway), TaskManagerServicesConfiguration (org.apache.flink.runtime.taskexecutor.TaskManagerServicesConfiguration), Configuration (org.apache.flink.configuration.Configuration), ActorRef (akka.actor.ActorRef), InetSocketAddress (java.net.InetSocketAddress), JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID), ExecutionConfig (org.apache.flink.api.common.ExecutionConfig), ResultPartitionLocation (org.apache.flink.runtime.deployment.ResultPartitionLocation), ActorGateway (org.apache.flink.runtime.instance.ActorGateway), ResultPartitionID (org.apache.flink.runtime.io.network.partition.ResultPartitionID), IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID), TaskDeploymentDescriptor (org.apache.flink.runtime.deployment.TaskDeploymentDescriptor), SubmitTask (org.apache.flink.runtime.messages.TaskMessages.SubmitTask), ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID), InputGateDeploymentDescriptor (org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor), PartitionNotFoundException (org.apache.flink.runtime.io.network.partition.PartitionNotFoundException), IOException (java.io.IOException), ConnectionID (org.apache.flink.runtime.io.network.ConnectionID), InputChannelDeploymentDescriptor (org.apache.flink.runtime.deployment.InputChannelDeploymentDescriptor), IntermediateDataSetID (org.apache.flink.runtime.jobgraph.IntermediateDataSetID), JavaTestKit (akka.testkit.JavaTestKit), JobID (org.apache.flink.api.common.JobID), Test (org.junit.Test)
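
The two network options bound how long the receiver keeps retrying its partition request before giving up with a PartitionNotFoundException. A rough sketch of the retry budget implied by the values above, assuming the remote channel doubles its backoff until it would exceed the configured maximum (the doubling is our assumption about this Flink version, not something the test spells out):

int backoff = 100;           // TaskManagerOptions.NETWORK_REQUEST_BACKOFF_INITIAL
final int maxBackoff = 200;  // TaskManagerOptions.NETWORK_REQUEST_BACKOFF_MAX
int requests = 1;            // the initial partition request
while (backoff <= maxBackoff) {
    requests++;              // one retry per backoff step
    backoff *= 2;            // assumed exponential increase
}
// With 100 ms / 200 ms the channel gives up after only a few requests,
// which is why the test observes the FAILED state quickly.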

Example 15 with IntermediateDataSetID

Use of org.apache.flink.runtime.jobgraph.IntermediateDataSetID in project flink by apache.

Class TaskManagerTest, method testFailingScheduleOrUpdateConsumersMessage.

/**
 * Tests that a failing schedule-or-update-consumers call leads to the failure of the
 * respective task.
 *
 * IMPORTANT: We have to make sure that the invokable's cancel method is called, because only
 * then is the future completed. We do this by not eagerly deploying consumer tasks and by
 * requiring the invokable to fill one memory segment. The completed memory segment triggers
 * the scheduling of the downstream operator, since the partition is pipelined. After the
 * memory segment has been filled, the invokable blocks and we wait for the task failure
 * caused by the failed schedule-or-update-consumers call.
 */
@Test(timeout = 10000L)
public void testFailingScheduleOrUpdateConsumersMessage() throws Exception {
    new JavaTestKit(system) {

        {
            final Configuration configuration = new Configuration();
            // set the memory segment size to the smallest value possible, because we have to fill
            // one memory buffer to trigger the schedule-or-update-consumers message to the
            // downstream operators
            configuration.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SEGMENT_SIZE_KEY, 4096);
            final JobID jid = new JobID();
            final JobVertexID vid = new JobVertexID();
            final ExecutionAttemptID eid = new ExecutionAttemptID();
            final SerializedValue<ExecutionConfig> executionConfig = new SerializedValue<>(new ExecutionConfig());
            final ResultPartitionDeploymentDescriptor resultPartitionDeploymentDescriptor =
                    new ResultPartitionDeploymentDescriptor(
                            new IntermediateDataSetID(),
                            new IntermediateResultPartitionID(),
                            ResultPartitionType.PIPELINED,
                            1, 1, true);
            final TaskDeploymentDescriptor tdd = createTaskDeploymentDescriptor(
                    jid, "TestJob", vid, eid,
                    executionConfig,
                    "TestTask", 1, 0, 1, 0,
                    new Configuration(), new Configuration(),
                    TestInvokableRecordCancel.class.getName(),
                    Collections.singletonList(resultPartitionDeploymentDescriptor),
                    Collections.<InputGateDeploymentDescriptor>emptyList(),
                    new ArrayList<BlobKey>(),
                    Collections.<URL>emptyList(),
                    0);
            ActorRef jmActorRef = system.actorOf(Props.create(FailingScheduleOrUpdateConsumersJobManager.class, leaderSessionID), "jobmanager");
            ActorGateway jobManager = new AkkaActorGateway(jmActorRef, leaderSessionID);
            final ActorGateway taskManager = TestingUtils.createTaskManager(system, jobManager, configuration, true, true);
            try {
                TestInvokableRecordCancel.resetGotCanceledFuture();
                Future<Object> result = taskManager.ask(new SubmitTask(tdd), timeout);
                Await.result(result, timeout);
                org.apache.flink.runtime.concurrent.Future<Boolean> cancelFuture = TestInvokableRecordCancel.gotCanceled();
                assertEquals(true, cancelFuture.get());
            } finally {
                TestingUtils.stopActor(taskManager);
                TestingUtils.stopActor(jobManager);
            }
        }
    };
}
Also used: AkkaActorGateway (org.apache.flink.runtime.instance.AkkaActorGateway), ResultPartitionDeploymentDescriptor (org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor), TaskManagerServicesConfiguration (org.apache.flink.runtime.taskexecutor.TaskManagerServicesConfiguration), Configuration (org.apache.flink.configuration.Configuration), ActorRef (akka.actor.ActorRef), JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID), ExecutionConfig (org.apache.flink.api.common.ExecutionConfig), BlobKey (org.apache.flink.runtime.blob.BlobKey), ActorGateway (org.apache.flink.runtime.instance.ActorGateway), TaskDeploymentDescriptor (org.apache.flink.runtime.deployment.TaskDeploymentDescriptor), SubmitTask (org.apache.flink.runtime.messages.TaskMessages.SubmitTask), ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID), SerializedValue (org.apache.flink.util.SerializedValue), IntermediateDataSetID (org.apache.flink.runtime.jobgraph.IntermediateDataSetID), JavaTestKit (akka.testkit.JavaTestKit), JobID (org.apache.flink.api.common.JobID), IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID), Test (org.junit.Test)
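
The configuration pins the memory segment size to 4096 bytes because the schedule-or-update-consumers message is only sent once the producer has filled a complete buffer of its pipelined result partition. A back-of-the-envelope sketch of what the invokable therefore has to emit (the record size is a hypothetical example value, not taken from the test):

int segmentSize = 4096;   // ConfigConstants.TASK_MANAGER_MEMORY_SEGMENT_SIZE_KEY above
int recordSize = 8;       // hypothetical: e.g. one serialized long per record
// The producer must write roughly one full memory segment before the
// downstream consumer is scheduled:
int recordsNeeded = (segmentSize + recordSize - 1) / recordSize;  // 512 records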

Aggregations

IntermediateDataSetID (org.apache.flink.runtime.jobgraph.IntermediateDataSetID): 34 usages
Test (org.junit.Test): 28 usages
JobID (org.apache.flink.api.common.JobID): 25 usages
IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID): 22 usages
ResultPartitionID (org.apache.flink.runtime.io.network.partition.ResultPartitionID): 17 usages
UnregisteredTaskMetricsGroup (org.apache.flink.runtime.operators.testutils.UnregisteredTaskMetricsGroup): 14 usages
TaskActions (org.apache.flink.runtime.taskmanager.TaskActions): 14 usages
SingleInputGate (org.apache.flink.runtime.io.network.partition.consumer.SingleInputGate): 11 usages
InputChannelDeploymentDescriptor (org.apache.flink.runtime.deployment.InputChannelDeploymentDescriptor): 10 usages
IOException (java.io.IOException): 9 usages
ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID): 9 usages
ActorGateway (org.apache.flink.runtime.instance.ActorGateway): 9 usages
AkkaActorGateway (org.apache.flink.runtime.instance.AkkaActorGateway): 9 usages
TaskEventDispatcher (org.apache.flink.runtime.io.network.TaskEventDispatcher): 9 usages
JavaTestKit (akka.testkit.JavaTestKit): 8 usages
TaskDeploymentDescriptor (org.apache.flink.runtime.deployment.TaskDeploymentDescriptor): 8 usages
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 8 usages
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 7 usages
Configuration (org.apache.flink.configuration.Configuration): 7 usages
ActorRef (akka.actor.ActorRef): 6 usages