Search in sources :

Example 56 with JobResult

Use of org.apache.flink.runtime.jobmaster.JobResult in the Apache Flink project.

From class SchedulingITCase, method executeSchedulingTest.

/**
 * Runs a job on a local {@link MiniCluster} with an aggressively short slot idle timeout
 * and asserts that the job still completes without any failure.
 *
 * @param configuration base Flink configuration; REST port, slot idle timeout and
 *     TaskManager memory are overridden here before the cluster is started
 * @throws Exception if cluster startup, job submission, or result retrieval fails
 */
private void executeSchedulingTest(Configuration configuration) throws Exception {
    // Bind the REST endpoint to an ephemeral port so concurrent test runs do not clash.
    // Use the typed set(ConfigOption, value) API consistently instead of the
    // deprecated type-specific setters (setString/setLong).
    configuration.set(RestOptions.BIND_PORT, "0");
    // Very short idle timeout: slots are released while the job is still running,
    // exercising the scheduler's slot re-acquisition path.
    final long slotIdleTimeout = 50L;
    configuration.set(JobManagerOptions.SLOT_IDLE_TIMEOUT, slotIdleTimeout);
    configuration.set(TaskManagerOptions.TOTAL_FLINK_MEMORY, MemorySize.parse("1g"));
    final int parallelism = 4;
    // One slot per TaskManager forces the job to spread across all TaskManagers.
    final MiniClusterConfiguration miniClusterConfiguration =
            new MiniClusterConfiguration.Builder()
                    .setConfiguration(configuration)
                    .setNumTaskManagers(parallelism)
                    .setNumSlotsPerTaskManager(1)
                    .build();
    try (MiniCluster miniCluster = new MiniCluster(miniClusterConfiguration)) {
        miniCluster.start();
        MiniClusterClient miniClusterClient = new MiniClusterClient(configuration, miniCluster);
        // Make the job outlive the slot idle timeout (x2) so idle-slot release actually occurs.
        JobGraph jobGraph = createJobGraph(slotIdleTimeout << 1, parallelism);
        // wait for the submission to succeed
        JobID jobID = miniClusterClient.submitJob(jobGraph).get();
        CompletableFuture<JobResult> resultFuture = miniClusterClient.requestJobResult(jobID);
        // Block until the job reaches a globally terminal state.
        JobResult jobResult = resultFuture.get();
        assertThat(jobResult.getSerializedThrowable().isPresent(), is(false));
    }
}
Also used : JobGraph(org.apache.flink.runtime.jobgraph.JobGraph) JobResult(org.apache.flink.runtime.jobmaster.JobResult) JobGraphBuilder(org.apache.flink.runtime.jobgraph.JobGraphBuilder) MiniClusterConfiguration(org.apache.flink.runtime.minicluster.MiniClusterConfiguration) MiniCluster(org.apache.flink.runtime.minicluster.MiniCluster) MiniClusterClient(org.apache.flink.client.program.MiniClusterClient) JobID(org.apache.flink.api.common.JobID)

Example 57 with JobResult

Use of org.apache.flink.runtime.jobmaster.JobResult in the Apache Flink project.

From class YARNFileReplicationITCase, method deployPerJob.

/**
 * Deploys the given job graph as a per-job YARN application and verifies that the
 * job finishes without a serialized failure cause, then waits for (or kills) the
 * YARN application.
 *
 * @param configuration Flink configuration used to create the cluster descriptor
 * @param jobGraph the job to deploy; the test jar is attached to it here
 * @throws Exception if deployment, result retrieval, or application shutdown fails
 */
private void deployPerJob(Configuration configuration, JobGraph jobGraph) throws Exception {
    try (final YarnClusterDescriptor yarnClusterDescriptor = createYarnClusterDescriptor(configuration)) {
        yarnClusterDescriptor.setLocalJarPath(new Path(flinkUberjar.getAbsolutePath()));
        yarnClusterDescriptor.addShipFiles(Arrays.asList(flinkLibFolder.listFiles()));

        // Attach the test job jar so the job's user code gets shipped to the cluster.
        final File jobJar = TestUtils.findFile("..", new TestUtils.TestJarFinder("flink-yarn-tests"));
        jobGraph.addJar(new org.apache.flink.core.fs.Path(jobJar.toURI()));

        // Size the master from the descriptor's configuration; each worker offers one slot.
        final int masterMemoryMb =
                yarnClusterDescriptor
                        .getFlinkConfiguration()
                        .get(JobManagerOptions.TOTAL_PROCESS_MEMORY)
                        .getMebiBytes();
        final ClusterSpecification clusterSpecification =
                new ClusterSpecification.ClusterSpecificationBuilder()
                        .setMasterMemoryMB(masterMemoryMb)
                        .setTaskManagerMemoryMB(1024)
                        .setSlotsPerTaskManager(1)
                        .createClusterSpecification();

        try (ClusterClient<ApplicationId> clusterClient =
                yarnClusterDescriptor
                        .deployJobCluster(clusterSpecification, jobGraph, false)
                        .getClusterClient()) {
            final ApplicationId applicationId = clusterClient.getClusterId();
            extraVerification(configuration, applicationId);

            // Block until the job reaches a globally terminal state.
            final JobResult jobResult = clusterClient.requestJobResult(jobGraph.getJobID()).get();
            assertThat(jobResult, is(notNullValue()));
            // Surface the deserialized job failure (if any) as a test failure.
            jobResult.getSerializedThrowable().ifPresent(serializedThrowable -> {
                throw new AssertionError("Job failed", serializedThrowable.deserializeError(YARNFileReplicationITCase.class.getClassLoader()));
            });

            waitApplicationFinishedElseKillIt(applicationId, yarnAppTerminateTimeout, yarnClusterDescriptor, sleepIntervalInMS);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) JobResult(org.apache.flink.runtime.jobmaster.JobResult) ClusterSpecification(org.apache.flink.client.deployment.ClusterSpecification) TestUtils(org.apache.flink.yarn.util.TestUtils) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) File(java.io.File)

Example 58 with JobResult

Use of org.apache.flink.runtime.jobmaster.JobResult in the Apache Flink project.

From class YARNITCase, method deployPerJob.

/**
 * Deploys the given streaming job graph on a per-job YARN cluster, verifies that all
 * user artifacts were uploaded to a remote filesystem, and asserts the job finishes
 * without failure before the application terminates.
 *
 * @param configuration Flink configuration used to create the cluster descriptor
 * @param jobGraph the job to deploy; marked as STREAMING and given the test jar here
 * @param withDist whether the descriptor is created with the flink lib directory shipped
 * @throws Exception if deployment, result retrieval, or application shutdown fails
 */
private void deployPerJob(Configuration configuration, JobGraph jobGraph, boolean withDist) throws Exception {
    jobGraph.setJobType(JobType.STREAMING);
    try (final YarnClusterDescriptor yarnClusterDescriptor =
            withDist
                    ? createYarnClusterDescriptor(configuration)
                    : createYarnClusterDescriptorWithoutLibDir(configuration)) {
        // Master memory follows the descriptor's configuration; each worker offers one slot.
        final int masterMemoryMb =
                yarnClusterDescriptor
                        .getFlinkConfiguration()
                        .get(JobManagerOptions.TOTAL_PROCESS_MEMORY)
                        .getMebiBytes();
        final ClusterSpecification clusterSpecification =
                new ClusterSpecification.ClusterSpecificationBuilder()
                        .setMasterMemoryMB(masterMemoryMb)
                        .setTaskManagerMemoryMB(1024)
                        .setSlotsPerTaskManager(1)
                        .createClusterSpecification();

        // Attach the test job jar so the job's user code gets shipped to the cluster.
        final File jobJar = TestUtils.findFile("..", new TestUtils.TestJarFinder("flink-yarn-tests"));
        jobGraph.addJar(new org.apache.flink.core.fs.Path(jobJar.toURI()));

        try (ClusterClient<ApplicationId> clusterClient =
                yarnClusterDescriptor
                        .deployJobCluster(clusterSpecification, jobGraph, false)
                        .getClusterClient()) {
            // Every user artifact must have been uploaded to a remote filesystem.
            for (DistributedCache.DistributedCacheEntry entry : jobGraph.getUserArtifacts().values()) {
                assertTrue(String.format("The user artifacts(%s) should be remote or uploaded to remote filesystem.", entry.filePath), Utils.isRemotePath(entry.filePath));
            }

            final ApplicationId applicationId = clusterClient.getClusterId();

            // Block until the job reaches a globally terminal state and assert success.
            final JobResult jobResult = clusterClient.requestJobResult(jobGraph.getJobID()).get();
            assertThat(jobResult, is(notNullValue()));
            assertThat(jobResult.getSerializedThrowable().isPresent(), is(false));

            checkStagingDirectory(configuration, applicationId);
            waitApplicationFinishedElseKillIt(applicationId, yarnAppTerminateTimeout, yarnClusterDescriptor, sleepIntervalInMS);
        }
    }
}
Also used : JobResult(org.apache.flink.runtime.jobmaster.JobResult) ClusterSpecification(org.apache.flink.client.deployment.ClusterSpecification) TestUtils(org.apache.flink.yarn.util.TestUtils) DistributedCache(org.apache.flink.api.common.cache.DistributedCache) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) File(java.io.File)

Aggregations

JobResult (org.apache.flink.runtime.jobmaster.JobResult)58 Test (org.junit.Test)28 JobGraph (org.apache.flink.runtime.jobgraph.JobGraph)25 JobID (org.apache.flink.api.common.JobID)15 Test (org.junit.jupiter.api.Test)13 MiniCluster (org.apache.flink.runtime.minicluster.MiniCluster)11 ExecutionException (java.util.concurrent.ExecutionException)8 JobSubmissionResult (org.apache.flink.api.common.JobSubmissionResult)7 Deadline (org.apache.flink.api.common.time.Deadline)7 Configuration (org.apache.flink.configuration.Configuration)7 File (java.io.File)5 JobResultStore (org.apache.flink.runtime.highavailability.JobResultStore)5 IOException (java.io.IOException)4 CompletableFuture (java.util.concurrent.CompletableFuture)4 ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService)4 JobVertex (org.apache.flink.runtime.jobgraph.JobVertex)4 Duration (java.time.Duration)3 List (java.util.List)3 Time (org.apache.flink.api.common.time.Time)3 MiniClusterClient (org.apache.flink.client.program.MiniClusterClient)3