
Example 16 with BlobKey

Use of org.apache.flink.runtime.blob.BlobKey in project flink by apache.

In class JobGraph, method uploadRequiredJarFiles.

/**
	 * Uploads the previously added user jar files to the job manager through the job manager's BLOB server.
	 *
	 * @param serverAddress
	 *        the network address of the BLOB server
	 * @param blobClientConfig
	 *        the blob client configuration
	 * @throws IOException
	 *         thrown if an I/O error occurs during the upload
	 */
public void uploadRequiredJarFiles(InetSocketAddress serverAddress, Configuration blobClientConfig) throws IOException {
    if (this.userJars.isEmpty()) {
        return;
    }
    BlobClient bc = null;
    try {
        bc = new BlobClient(serverAddress, blobClientConfig);
        for (final Path jar : this.userJars) {
            final FileSystem fs = jar.getFileSystem();
            FSDataInputStream is = null;
            try {
                is = fs.open(jar);
                final BlobKey key = bc.put(is);
                this.userJarBlobKeys.add(key);
            } finally {
                if (is != null) {
                    is.close();
                }
            }
        }
    } finally {
        if (bc != null) {
            bc.close();
        }
    }
}
Also used: Path(org.apache.flink.core.fs.Path) BlobKey(org.apache.flink.runtime.blob.BlobKey) BlobClient(org.apache.flink.runtime.blob.BlobClient) FileSystem(org.apache.flink.core.fs.FileSystem) FSDataInputStream(org.apache.flink.core.fs.FSDataInputStream)
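For context, the upload path above can be exercised in isolation. The following is a minimal sketch, assuming a reachable BLOB server address and a readable jar path; it mirrors the body of uploadRequiredJarFiles for a single jar and uses only the calls shown above (Path#getFileSystem, FileSystem#open, BlobClient#put and #close).

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.FSDataInputStream;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.blob.BlobClient;
import org.apache.flink.runtime.blob.BlobKey;

public class SingleJarUploadSketch {

    /** Uploads one jar to the BLOB server and returns the key identifying the stored blob. */
    public static BlobKey uploadSingleJar(
            InetSocketAddress blobServerAddress,
            Configuration blobClientConfig,
            Path jar) throws IOException {

        final FileSystem fs = jar.getFileSystem();
        BlobClient client = null;
        FSDataInputStream in = null;
        try {
            client = new BlobClient(blobServerAddress, blobClientConfig);
            in = fs.open(jar);
            // put(InputStream) streams the jar to the BLOB server; the returned
            // BlobKey is what JobGraph collects in userJarBlobKeys above.
            return client.put(in);
        } finally {
            if (in != null) {
                in.close();
            }
            if (client != null) {
                client.close();
            }
        }
    }
}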

Example 17 with BlobKey

Use of org.apache.flink.runtime.blob.BlobKey in project flink by apache.

In class ClassLoaderITCase, method testDisposeSavepointWithCustomKvState.

/**
	 * Tests disposal of a savepoint, which contains custom user code KvState.
	 */
@Test
public void testDisposeSavepointWithCustomKvState() throws Exception {
    Deadline deadline = new FiniteDuration(100, TimeUnit.SECONDS).fromNow();
    int port = testCluster.getLeaderRPCPort();
    File checkpointDir = FOLDER.newFolder();
    File outputDir = FOLDER.newFolder();
    final PackagedProgram program = new PackagedProgram(new File(CUSTOM_KV_STATE_JAR_PATH), new String[] { CUSTOM_KV_STATE_JAR_PATH, "localhost", String.valueOf(port), String.valueOf(parallelism), checkpointDir.toURI().toString(), "5000", outputDir.toURI().toString() });
    // Execute detached
    Thread invokeThread = new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                program.invokeInteractiveModeForExecution();
            } catch (ProgramInvocationException ignored) {
                ignored.printStackTrace();
            }
        }
    });
    LOG.info("Starting program invoke thread");
    invokeThread.start();
    // The job ID
    JobID jobId = null;
    ActorGateway jm = testCluster.getLeaderGateway(deadline.timeLeft());
    LOG.info("Waiting for job status running.");
    // Wait for running job
    while (jobId == null && deadline.hasTimeLeft()) {
        Future<Object> jobsFuture = jm.ask(JobManagerMessages.getRequestRunningJobsStatus(), deadline.timeLeft());
        RunningJobsStatus runningJobs = (RunningJobsStatus) Await.result(jobsFuture, deadline.timeLeft());
        for (JobStatusMessage runningJob : runningJobs.getStatusMessages()) {
            jobId = runningJob.getJobId();
            LOG.info("Job running. ID: " + jobId);
            break;
        }
        // Retry if job is not available yet
        if (jobId == null) {
            Thread.sleep(100);
        }
    }
    LOG.info("Wait for all tasks to be running.");
    Future<Object> allRunning = jm.ask(new WaitForAllVerticesToBeRunning(jobId), deadline.timeLeft());
    Await.ready(allRunning, deadline.timeLeft());
    LOG.info("All tasks are running.");
    // Trigger savepoint
    String savepointPath = null;
    for (int i = 0; i < 20; i++) {
        LOG.info("Triggering savepoint (" + (i + 1) + "/20).");
        Future<Object> savepointFuture = jm.ask(new TriggerSavepoint(jobId, Option.<String>empty()), deadline.timeLeft());
        Object savepointResponse = Await.result(savepointFuture, deadline.timeLeft());
        if (savepointResponse.getClass() == TriggerSavepointSuccess.class) {
            savepointPath = ((TriggerSavepointSuccess) savepointResponse).savepointPath();
            LOG.info("Triggered savepoint. Path: " + savepointPath);
        } else if (savepointResponse.getClass() == JobManagerMessages.TriggerSavepointFailure.class) {
            Throwable cause = ((JobManagerMessages.TriggerSavepointFailure) savepointResponse).cause();
            LOG.info("Failed to trigger savepoint. Retrying...", cause);
            // This can fail if the operators are not opened yet
            Thread.sleep(500);
        } else {
            throw new IllegalStateException("Unexpected response to TriggerSavepoint");
        }
    }
    assertNotNull("Failed to trigger savepoint", savepointPath);
    // Upload JAR
    LOG.info("Uploading JAR " + CUSTOM_KV_STATE_JAR_PATH + " for savepoint disposal.");
    List<BlobKey> blobKeys = BlobClient.uploadJarFiles(jm, deadline.timeLeft(), testCluster.userConfiguration(), Collections.singletonList(new Path(CUSTOM_KV_STATE_JAR_PATH)));
    // Dispose savepoint
    LOG.info("Disposing savepoint at " + savepointPath);
    Future<Object> disposeFuture = jm.ask(new DisposeSavepoint(savepointPath), deadline.timeLeft());
    Object disposeResponse = Await.result(disposeFuture, deadline.timeLeft());
    if (disposeResponse.getClass() == JobManagerMessages.getDisposeSavepointSuccess().getClass()) {
        // Success :-)
        LOG.info("Disposed savepoint at " + savepointPath);
    } else if (disposeResponse instanceof DisposeSavepointFailure) {
        throw new IllegalStateException("Failed to dispose savepoint " + disposeResponse);
    } else {
        throw new IllegalStateException("Unexpected response to DisposeSavepoint");
    }
}
Also used: DisposeSavepointFailure(org.apache.flink.runtime.messages.JobManagerMessages.DisposeSavepointFailure) RunningJobsStatus(org.apache.flink.runtime.messages.JobManagerMessages.RunningJobsStatus) PackagedProgram(org.apache.flink.client.program.PackagedProgram) BlobKey(org.apache.flink.runtime.blob.BlobKey) ActorGateway(org.apache.flink.runtime.instance.ActorGateway) Path(org.apache.flink.core.fs.Path) WaitForAllVerticesToBeRunning(org.apache.flink.runtime.testingUtils.TestingJobManagerMessages.WaitForAllVerticesToBeRunning) Deadline(scala.concurrent.duration.Deadline) JobManagerMessages(org.apache.flink.runtime.messages.JobManagerMessages) FiniteDuration(scala.concurrent.duration.FiniteDuration) TriggerSavepoint(org.apache.flink.runtime.messages.JobManagerMessages.TriggerSavepoint) DisposeSavepoint(org.apache.flink.runtime.messages.JobManagerMessages.DisposeSavepoint) JobStatusMessage(org.apache.flink.runtime.client.JobStatusMessage) ProgramInvocationException(org.apache.flink.client.program.ProgramInvocationException) File(java.io.File) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
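The jar-upload step used above for savepoint disposal can also be written on its own. This is a minimal sketch, assuming a leader ActorGateway, a timeout, and the cluster configuration are already available (as they are in the test); it calls BlobClient.uploadJarFiles with the same arguments as the test, and the jar path parameter is a placeholder.

import java.util.Collections;
import java.util.List;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.blob.BlobClient;
import org.apache.flink.runtime.blob.BlobKey;
import org.apache.flink.runtime.instance.ActorGateway;

import scala.concurrent.duration.FiniteDuration;

public class JarUploadSketch {

    /** Uploads a single user jar through the JobManager's BLOB server and returns its keys. */
    public static List<BlobKey> uploadUserJar(
            ActorGateway jobManager,
            FiniteDuration timeout,
            Configuration clusterConfig,
            String jarPath) throws Exception {

        // uploadJarFiles resolves the BLOB server through the given JobManager
        // gateway and returns one BlobKey per uploaded jar.
        return BlobClient.uploadJarFiles(
                jobManager, timeout, clusterConfig, Collections.singletonList(new Path(jarPath)));
    }
}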

Example 18 with BlobKey

Use of org.apache.flink.runtime.blob.BlobKey in project flink by apache.

In class TaskManagerTest, method testGateChannelEdgeMismatch.

/**
	 * Tests that the submitted sender and receiver tasks, whose deployment descriptors
	 * declare no produced result partitions and no input gates, are removed again and
	 * that no tasks remain running on the TaskManager.
	 */
@Test
public void testGateChannelEdgeMismatch() {
    new JavaTestKit(system) {

        {
            ActorGateway jobManager = null;
            ActorGateway taskManager = null;
            final ActorGateway testActorGateway = new AkkaActorGateway(getTestActor(), leaderSessionID);
            try {
                ActorRef jm = system.actorOf(Props.create(SimpleJobManager.class, leaderSessionID));
                jobManager = new AkkaActorGateway(jm, leaderSessionID);
                taskManager = TestingUtils.createTaskManager(system, jobManager, new Configuration(), true, true);
                final ActorGateway tm = taskManager;
                final JobID jid = new JobID();
                JobVertexID vid1 = new JobVertexID();
                JobVertexID vid2 = new JobVertexID();
                final ExecutionAttemptID eid1 = new ExecutionAttemptID();
                final ExecutionAttemptID eid2 = new ExecutionAttemptID();
                final TaskDeploymentDescriptor tdd1 = createTaskDeploymentDescriptor(jid, "TestJob", vid1, eid1, new SerializedValue<>(new ExecutionConfig()), "Sender", 1, 0, 1, 0, new Configuration(), new Configuration(), Tasks.Sender.class.getName(), Collections.<ResultPartitionDeploymentDescriptor>emptyList(), Collections.<InputGateDeploymentDescriptor>emptyList(), new ArrayList<BlobKey>(), Collections.<URL>emptyList(), 0);
                final TaskDeploymentDescriptor tdd2 = createTaskDeploymentDescriptor(jid, "TestJob", vid2, eid2, new SerializedValue<>(new ExecutionConfig()), "Receiver", 7, 2, 7, 0, new Configuration(), new Configuration(), Tasks.Receiver.class.getName(), Collections.<ResultPartitionDeploymentDescriptor>emptyList(), Collections.<InputGateDeploymentDescriptor>emptyList(), new ArrayList<BlobKey>(), Collections.<URL>emptyList(), 0);
                new Within(d) {

                    @Override
                    protected void run() {
                        try {
                            tm.tell(new SubmitTask(tdd1), testActorGateway);
                            tm.tell(new SubmitTask(tdd2), testActorGateway);
                            expectMsgEquals(Acknowledge.get());
                            expectMsgEquals(Acknowledge.get());
                            tm.tell(new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid1), testActorGateway);
                            tm.tell(new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid2), testActorGateway);
                            expectMsgEquals(true);
                            expectMsgEquals(true);
                            tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
                            Map<ExecutionAttemptID, Task> tasks = expectMsgClass(TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();
                            assertEquals(0, tasks.size());
                        } catch (Exception e) {
                            e.printStackTrace();
                            fail(e.getMessage());
                        }
                    }
                };
            } catch (Exception e) {
                e.printStackTrace();
                fail(e.getMessage());
            } finally {
                // shut down the actors
                TestingUtils.stopActor(taskManager);
                TestingUtils.stopActor(jobManager);
            }
        }
    };
}
Also used: AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) StopTask(org.apache.flink.runtime.messages.TaskMessages.StopTask) CancelTask(org.apache.flink.runtime.messages.TaskMessages.CancelTask) SubmitTask(org.apache.flink.runtime.messages.TaskMessages.SubmitTask) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) TaskManagerServicesConfiguration(org.apache.flink.runtime.taskexecutor.TaskManagerServicesConfiguration) Configuration(org.apache.flink.configuration.Configuration) ActorRef(akka.actor.ActorRef) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) PartitionNotFoundException(org.apache.flink.runtime.io.network.partition.PartitionNotFoundException) IOException(java.io.IOException) BlobKey(org.apache.flink.runtime.blob.BlobKey) ActorGateway(org.apache.flink.runtime.instance.ActorGateway) TaskDeploymentDescriptor(org.apache.flink.runtime.deployment.TaskDeploymentDescriptor) JavaTestKit(akka.testkit.JavaTestKit) JobID(org.apache.flink.api.common.JobID) TestingTaskManagerMessages(org.apache.flink.runtime.testingUtils.TestingTaskManagerMessages) Test(org.junit.Test)
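The empty ArrayList<BlobKey> handed to createTaskDeploymentDescriptor above is the slot for required user-code jars; the test invokables need none. As a rough sketch of how that list would be filled outside a test, assuming JobGraph exposes the keys collected by uploadRequiredJarFiles (Example 16) via a getUserJarBlobKeys() accessor (that accessor is an assumption, not shown in the snippets):

import java.util.ArrayList;
import java.util.List;

import org.apache.flink.runtime.blob.BlobKey;
import org.apache.flink.runtime.jobgraph.JobGraph;

public class RequiredJarKeysSketch {

    /** Collects the BLOB keys of previously uploaded user jars for a deployment descriptor. */
    public static List<BlobKey> requiredJarKeys(JobGraph jobGraph) {
        // Empty when the job has no user jars (as in the tests above); otherwise one
        // key per jar uploaded by JobGraph#uploadRequiredJarFiles.
        // NOTE: getUserJarBlobKeys() is assumed here, not shown in the snippets.
        return new ArrayList<>(jobGraph.getUserJarBlobKeys());
    }
}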

Example 19 with BlobKey

Use of org.apache.flink.runtime.blob.BlobKey in project flink by apache.

In class TaskManagerTest, method testJobSubmissionAndStop.

/**
	 * Tests submission of a stoppable and a non-stoppable task: stopping the stoppable
	 * task moves it to FINISHED, while stopping the non-stoppable task fails with a
	 * Status.Failure and leaves it running.
	 */
@Test
public void testJobSubmissionAndStop() throws Exception {
    new JavaTestKit(system) {

        {
            ActorGateway jobManager = null;
            ActorGateway taskManager = null;
            final ActorGateway testActorGateway = new AkkaActorGateway(getTestActor(), leaderSessionID);
            try {
                ActorRef jm = system.actorOf(Props.create(SimpleJobManager.class, leaderSessionID));
                jobManager = new AkkaActorGateway(jm, leaderSessionID);
                taskManager = TestingUtils.createTaskManager(system, jobManager, new Configuration(), true, true);
                final JobID jid1 = new JobID();
                final JobID jid2 = new JobID();
                JobVertexID vid1 = new JobVertexID();
                JobVertexID vid2 = new JobVertexID();
                final ExecutionAttemptID eid1 = new ExecutionAttemptID();
                final ExecutionAttemptID eid2 = new ExecutionAttemptID();
                final SerializedValue<ExecutionConfig> executionConfig = new SerializedValue<>(new ExecutionConfig());
                final TaskDeploymentDescriptor tdd1 = createTaskDeploymentDescriptor(jid1, "TestJob", vid1, eid1, executionConfig, "TestTask1", 5, 1, 5, 0, new Configuration(), new Configuration(), StoppableInvokable.class.getName(), Collections.<ResultPartitionDeploymentDescriptor>emptyList(), Collections.<InputGateDeploymentDescriptor>emptyList(), new ArrayList<BlobKey>(), Collections.<URL>emptyList(), 0);
                final TaskDeploymentDescriptor tdd2 = createTaskDeploymentDescriptor(jid2, "TestJob", vid2, eid2, executionConfig, "TestTask2", 7, 2, 7, 0, new Configuration(), new Configuration(), TestInvokableBlockingCancelable.class.getName(), Collections.<ResultPartitionDeploymentDescriptor>emptyList(), Collections.<InputGateDeploymentDescriptor>emptyList(), new ArrayList<BlobKey>(), Collections.<URL>emptyList(), 0);
                final ActorGateway tm = taskManager;
                new Within(d) {

                    @Override
                    protected void run() {
                        try {
                            Future<Object> t1Running = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskIsRunning(eid1), timeout);
                            Future<Object> t2Running = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskIsRunning(eid2), timeout);
                            tm.tell(new SubmitTask(tdd1), testActorGateway);
                            tm.tell(new SubmitTask(tdd2), testActorGateway);
                            expectMsgEquals(Acknowledge.get());
                            expectMsgEquals(Acknowledge.get());
                            Await.ready(t1Running, d);
                            Await.ready(t2Running, d);
                            tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
                            Map<ExecutionAttemptID, Task> runningTasks = expectMsgClass(TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();
                            assertEquals(2, runningTasks.size());
                            Task t1 = runningTasks.get(eid1);
                            Task t2 = runningTasks.get(eid2);
                            assertNotNull(t1);
                            assertNotNull(t2);
                            assertEquals(ExecutionState.RUNNING, t1.getExecutionState());
                            assertEquals(ExecutionState.RUNNING, t2.getExecutionState());
                            tm.tell(new StopTask(eid1), testActorGateway);
                            expectMsgEquals(Acknowledge.get());
                            Future<Object> response = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid1), timeout);
                            Await.ready(response, d);
                            assertEquals(ExecutionState.FINISHED, t1.getExecutionState());
                            tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
                            runningTasks = expectMsgClass(TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();
                            assertEquals(1, runningTasks.size());
                            tm.tell(new StopTask(eid1), testActorGateway);
                            expectMsgEquals(Acknowledge.get());
                            tm.tell(new StopTask(eid2), testActorGateway);
                            expectMsgClass(Status.Failure.class);
                            assertEquals(ExecutionState.RUNNING, t2.getExecutionState());
                            tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
                            runningTasks = expectMsgClass(TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();
                            assertEquals(1, runningTasks.size());
                        } catch (Exception e) {
                            e.printStackTrace();
                            fail(e.getMessage());
                        }
                    }
                };
            } finally {
                TestingUtils.stopActor(taskManager);
                TestingUtils.stopActor(jobManager);
            }
        }
    };
}
Also used: AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) StopTask(org.apache.flink.runtime.messages.TaskMessages.StopTask) CancelTask(org.apache.flink.runtime.messages.TaskMessages.CancelTask) SubmitTask(org.apache.flink.runtime.messages.TaskMessages.SubmitTask) TaskManagerServicesConfiguration(org.apache.flink.runtime.taskexecutor.TaskManagerServicesConfiguration) Configuration(org.apache.flink.configuration.Configuration) ActorRef(akka.actor.ActorRef) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) BlobKey(org.apache.flink.runtime.blob.BlobKey) ActorGateway(org.apache.flink.runtime.instance.ActorGateway) StoppableInvokable(org.apache.flink.runtime.testutils.StoppableInvokable) TaskDeploymentDescriptor(org.apache.flink.runtime.deployment.TaskDeploymentDescriptor) TestingTaskManagerMessages(org.apache.flink.runtime.testingUtils.TestingTaskManagerMessages) Status(akka.actor.Status) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) SerializedValue(org.apache.flink.util.SerializedValue) PartitionNotFoundException(org.apache.flink.runtime.io.network.partition.PartitionNotFoundException) IOException(java.io.IOException) JavaTestKit(akka.testkit.JavaTestKit) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)

Example 20 with BlobKey

Use of org.apache.flink.runtime.blob.BlobKey in project flink by apache.

In class TaskManagerTest, method testRunJobWithForwardChannel.

/**
	 * Tests deployment of a sender and a receiver task connected through a local
	 * pipelined channel; the receiver is expected to reach FINISHED and no tasks
	 * should remain running afterwards.
	 */
@Test
public void testRunJobWithForwardChannel() {
    new JavaTestKit(system) {

        {
            ActorGateway jobManager = null;
            ActorGateway taskManager = null;
            final ActorGateway testActorGateway = new AkkaActorGateway(getTestActor(), leaderSessionID);
            try {
                final JobID jid = new JobID();
                JobVertexID vid1 = new JobVertexID();
                JobVertexID vid2 = new JobVertexID();
                final ExecutionAttemptID eid1 = new ExecutionAttemptID();
                final ExecutionAttemptID eid2 = new ExecutionAttemptID();
                ActorRef jm = system.actorOf(Props.create(new SimpleLookupJobManagerCreator(leaderSessionID)));
                jobManager = new AkkaActorGateway(jm, leaderSessionID);
                taskManager = TestingUtils.createTaskManager(system, jobManager, new Configuration(), true, true);
                final ActorGateway tm = taskManager;
                IntermediateResultPartitionID partitionId = new IntermediateResultPartitionID();
                List<ResultPartitionDeploymentDescriptor> irpdd = new ArrayList<ResultPartitionDeploymentDescriptor>();
                irpdd.add(new ResultPartitionDeploymentDescriptor(new IntermediateDataSetID(), partitionId, ResultPartitionType.PIPELINED, 1, 1, true));
                InputGateDeploymentDescriptor ircdd = new InputGateDeploymentDescriptor(new IntermediateDataSetID(), ResultPartitionType.PIPELINED, 0, new InputChannelDeploymentDescriptor[] { new InputChannelDeploymentDescriptor(new ResultPartitionID(partitionId, eid1), ResultPartitionLocation.createLocal()) });
                final TaskDeploymentDescriptor tdd1 = createTaskDeploymentDescriptor(jid, "TestJob", vid1, eid1, new SerializedValue<>(new ExecutionConfig()), "Sender", 1, 0, 1, 0, new Configuration(), new Configuration(), Tasks.Sender.class.getName(), irpdd, Collections.<InputGateDeploymentDescriptor>emptyList(), new ArrayList<BlobKey>(), Collections.<URL>emptyList(), 0);
                final TaskDeploymentDescriptor tdd2 = createTaskDeploymentDescriptor(jid, "TestJob", vid2, eid2, new SerializedValue<>(new ExecutionConfig()), "Receiver", 7, 2, 7, 0, new Configuration(), new Configuration(), Tasks.Receiver.class.getName(), Collections.<ResultPartitionDeploymentDescriptor>emptyList(), Collections.singletonList(ircdd), new ArrayList<BlobKey>(), Collections.<URL>emptyList(), 0);
                new Within(d) {

                    @Override
                    protected void run() {
                        try {
                            Future<Object> t1Running = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskIsRunning(eid1), timeout);
                            Future<Object> t2Running = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskIsRunning(eid2), timeout);
                            // submit the sender task
                            tm.tell(new SubmitTask(tdd1), testActorGateway);
                            expectMsgEquals(Acknowledge.get());
                            // wait until the sender task is running
                            Await.ready(t1Running, d);
                            // only now (after the sender is running), submit the receiver task
                            tm.tell(new SubmitTask(tdd2), testActorGateway);
                            expectMsgEquals(Acknowledge.get());
                            // wait until the receiver task is running
                            Await.ready(t2Running, d);
                            tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
                            Map<ExecutionAttemptID, Task> tasks = expectMsgClass(TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();
                            Task t1 = tasks.get(eid1);
                            Task t2 = tasks.get(eid2);
                            // the tasks may have finished and been removed by rare thread races before
                            // we get to the check, so we need to guard the check
                            if (t1 != null) {
                                Future<Object> response = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid1), timeout);
                                Await.ready(response, d);
                            }
                            if (t2 != null) {
                                Future<Object> response = tm.ask(new TestingTaskManagerMessages.NotifyWhenTaskRemoved(eid2), timeout);
                                Await.ready(response, d);
                                assertEquals(ExecutionState.FINISHED, t2.getExecutionState());
                            }
                            tm.tell(TestingTaskManagerMessages.getRequestRunningTasksMessage(), testActorGateway);
                            tasks = expectMsgClass(TestingTaskManagerMessages.ResponseRunningTasks.class).asJava();
                            assertEquals(0, tasks.size());
                        } catch (Exception e) {
                            e.printStackTrace();
                            fail(e.getMessage());
                        }
                    }
                };
            } catch (Exception e) {
                e.printStackTrace();
                fail(e.getMessage());
            } finally {
                // shut down the actors
                TestingUtils.stopActor(taskManager);
                TestingUtils.stopActor(jobManager);
            }
        }
    };
}
Also used: AkkaActorGateway(org.apache.flink.runtime.instance.AkkaActorGateway) StopTask(org.apache.flink.runtime.messages.TaskMessages.StopTask) CancelTask(org.apache.flink.runtime.messages.TaskMessages.CancelTask) SubmitTask(org.apache.flink.runtime.messages.TaskMessages.SubmitTask) ResultPartitionDeploymentDescriptor(org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor) TaskManagerServicesConfiguration(org.apache.flink.runtime.taskexecutor.TaskManagerServicesConfiguration) Configuration(org.apache.flink.configuration.Configuration) ActorRef(akka.actor.ActorRef) JobVertexID(org.apache.flink.runtime.jobgraph.JobVertexID) ArrayList(java.util.ArrayList) ExecutionConfig(org.apache.flink.api.common.ExecutionConfig) BlobKey(org.apache.flink.runtime.blob.BlobKey) ActorGateway(org.apache.flink.runtime.instance.ActorGateway) ResultPartitionID(org.apache.flink.runtime.io.network.partition.ResultPartitionID) IntermediateResultPartitionID(org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID) TaskDeploymentDescriptor(org.apache.flink.runtime.deployment.TaskDeploymentDescriptor) TestingTaskManagerMessages(org.apache.flink.runtime.testingUtils.TestingTaskManagerMessages) ExecutionAttemptID(org.apache.flink.runtime.executiongraph.ExecutionAttemptID) InputGateDeploymentDescriptor(org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor) PartitionNotFoundException(org.apache.flink.runtime.io.network.partition.PartitionNotFoundException) IOException(java.io.IOException) InputChannelDeploymentDescriptor(org.apache.flink.runtime.deployment.InputChannelDeploymentDescriptor) IntermediateDataSetID(org.apache.flink.runtime.jobgraph.IntermediateDataSetID) JavaTestKit(akka.testkit.JavaTestKit) JobID(org.apache.flink.api.common.JobID) Test(org.junit.Test)
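As a reading aid, the descriptor wiring in testRunJobWithForwardChannel can be reduced to the two pieces sketched below: the sender declares a pipelined result partition, and the receiver's input gate points one local channel at that partition. The constructor calls match the ones in the test; the package of ResultPartitionLocation is not listed above and is assumed here to be org.apache.flink.runtime.deployment.

import org.apache.flink.runtime.deployment.InputChannelDeploymentDescriptor;
import org.apache.flink.runtime.deployment.InputGateDeploymentDescriptor;
import org.apache.flink.runtime.deployment.ResultPartitionDeploymentDescriptor;
import org.apache.flink.runtime.deployment.ResultPartitionLocation;
import org.apache.flink.runtime.executiongraph.ExecutionAttemptID;
import org.apache.flink.runtime.io.network.partition.ResultPartitionID;
import org.apache.flink.runtime.io.network.partition.ResultPartitionType;
import org.apache.flink.runtime.jobgraph.IntermediateDataSetID;
import org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID;

public class ForwardChannelWiringSketch {

    /** Sender side: one pipelined result partition (mirrors the single entry added to irpdd above). */
    public static ResultPartitionDeploymentDescriptor senderPartition(
            IntermediateDataSetID dataSetId, IntermediateResultPartitionID partitionId) {
        return new ResultPartitionDeploymentDescriptor(
                dataSetId, partitionId, ResultPartitionType.PIPELINED, 1, 1, true);
    }

    /** Receiver side: an input gate with one local channel consuming the sender's partition. */
    public static InputGateDeploymentDescriptor receiverGate(
            IntermediateDataSetID dataSetId,
            IntermediateResultPartitionID partitionId,
            ExecutionAttemptID senderAttemptId) {
        InputChannelDeploymentDescriptor channel = new InputChannelDeploymentDescriptor(
                new ResultPartitionID(partitionId, senderAttemptId),
                ResultPartitionLocation.createLocal());
        return new InputGateDeploymentDescriptor(
                dataSetId, ResultPartitionType.PIPELINED, 0,
                new InputChannelDeploymentDescriptor[] { channel });
    }
}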

Aggregations

BlobKey (org.apache.flink.runtime.blob.BlobKey): 22
Test (org.junit.Test): 15
IOException (java.io.IOException): 13
JobID (org.apache.flink.api.common.JobID): 13
Configuration (org.apache.flink.configuration.Configuration): 13
ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID): 10
ActorGateway (org.apache.flink.runtime.instance.ActorGateway): 10
ExecutionConfig (org.apache.flink.api.common.ExecutionConfig): 9
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 8
JavaTestKit (akka.testkit.JavaTestKit): 7
TaskDeploymentDescriptor (org.apache.flink.runtime.deployment.TaskDeploymentDescriptor): 7
AkkaActorGateway (org.apache.flink.runtime.instance.AkkaActorGateway): 7
SubmitTask (org.apache.flink.runtime.messages.TaskMessages.SubmitTask): 7
TaskManagerServicesConfiguration (org.apache.flink.runtime.taskexecutor.TaskManagerServicesConfiguration): 7
ActorRef (akka.actor.ActorRef): 6
File (java.io.File): 6
InetSocketAddress (java.net.InetSocketAddress): 6
ArrayList (java.util.ArrayList): 6
TestingTaskManagerMessages (org.apache.flink.runtime.testingUtils.TestingTaskManagerMessages): 6
BlobClient (org.apache.flink.runtime.blob.BlobClient): 5