Example usage of org.apache.flink.runtime.jobmaster.JobResult in the Apache Flink project.
From class SchedulingITCase, method executeSchedulingTest:
/**
 * Runs a scheduling test job on a local {@link MiniCluster} and asserts that it
 * completes without a failure cause.
 *
 * @param configuration base Flink configuration; mutated in place with test-specific
 *     settings (ephemeral REST port, short slot idle timeout, 1g total Flink memory)
 * @throws Exception if cluster startup, job submission, or result retrieval fails
 */
private void executeSchedulingTest(Configuration configuration) throws Exception {
    // Bind the REST endpoint to an ephemeral port so concurrent test runs do not collide.
    configuration.setString(RestOptions.BIND_PORT, "0");

    // Short idle timeout so slots are released quickly during the test.
    final long slotIdleTimeout = 50L;
    configuration.setLong(JobManagerOptions.SLOT_IDLE_TIMEOUT, slotIdleTimeout);
    configuration.set(TaskManagerOptions.TOTAL_FLINK_MEMORY, MemorySize.parse("1g"));

    // One slot per TaskManager, so parallelism == number of TaskManagers.
    final int parallelism = 4;
    final MiniClusterConfiguration miniClusterConfiguration =
            new MiniClusterConfiguration.Builder()
                    .setConfiguration(configuration)
                    .setNumTaskManagers(parallelism)
                    .setNumSlotsPerTaskManager(1)
                    .build();

    try (MiniCluster miniCluster = new MiniCluster(miniClusterConfiguration)) {
        miniCluster.start();

        // FIX: MiniClusterClient is AutoCloseable but was never closed; manage it with
        // try-with-resources so its resources are released even on assertion failure.
        try (MiniClusterClient miniClusterClient =
                new MiniClusterClient(configuration, miniCluster)) {
            JobGraph jobGraph = createJobGraph(slotIdleTimeout << 1, parallelism);

            // wait for the submission to succeed
            JobID jobID = miniClusterClient.submitJob(jobGraph).get();

            CompletableFuture<JobResult> resultFuture = miniClusterClient.requestJobResult(jobID);
            JobResult jobResult = resultFuture.get();

            // The job must finish without a serialized failure cause.
            assertThat(jobResult.getSerializedThrowable().isPresent(), is(false));
        }
    }
}
Example usage of org.apache.flink.runtime.jobmaster.JobResult in the Apache Flink project.
From class YARNFileReplicationITCase, method deployPerJob:
/**
 * Deploys the given job graph as a per-job YARN cluster, verifies file replication via
 * {@code extraVerification}, and asserts the job finished without a failure cause.
 *
 * @param configuration Flink configuration used to create the YARN cluster descriptor
 * @param jobGraph the job to deploy; the test jar is attached to it here
 * @throws Exception if deployment, verification, or result retrieval fails
 */
private void deployPerJob(Configuration configuration, JobGraph jobGraph) throws Exception {
    try (final YarnClusterDescriptor yarnClusterDescriptor =
            createYarnClusterDescriptor(configuration)) {

        yarnClusterDescriptor.setLocalJarPath(new Path(flinkUberjar.getAbsolutePath()));
        yarnClusterDescriptor.addShipFiles(Arrays.asList(flinkLibFolder.listFiles()));

        // Master memory comes from the configured total process memory; TMs get a fixed 1 GiB.
        final int jobManagerMemoryMb =
                yarnClusterDescriptor
                        .getFlinkConfiguration()
                        .get(JobManagerOptions.TOTAL_PROCESS_MEMORY)
                        .getMebiBytes();
        final ClusterSpecification spec =
                new ClusterSpecification.ClusterSpecificationBuilder()
                        .setMasterMemoryMB(jobManagerMemoryMb)
                        .setTaskManagerMemoryMB(1024)
                        .setSlotsPerTaskManager(1)
                        .createClusterSpecification();

        // Attach the test jar so user code ships with the job.
        File userJar = TestUtils.findFile("..", new TestUtils.TestJarFinder("flink-yarn-tests"));
        jobGraph.addJar(new org.apache.flink.core.fs.Path(userJar.toURI()));

        try (ClusterClient<ApplicationId> clusterClient =
                yarnClusterDescriptor.deployJobCluster(spec, jobGraph, false).getClusterClient()) {

            final ApplicationId applicationId = clusterClient.getClusterId();
            extraVerification(configuration, applicationId);

            final JobResult jobResult =
                    clusterClient.requestJobResult(jobGraph.getJobID()).get();
            assertThat(jobResult, is(notNullValue()));

            // Surface any job-side failure as a test failure with its original cause.
            jobResult
                    .getSerializedThrowable()
                    .ifPresent(
                            serializedThrowable -> {
                                throw new AssertionError(
                                        "Job failed",
                                        serializedThrowable.deserializeError(
                                                YARNFileReplicationITCase.class.getClassLoader()));
                            });

            waitApplicationFinishedElseKillIt(
                    applicationId, yarnAppTerminateTimeout, yarnClusterDescriptor, sleepIntervalInMS);
        }
    }
}
Example usage of org.apache.flink.runtime.jobmaster.JobResult in the Apache Flink project.
From class YARNITCase, method deployPerJob:
/**
 * Deploys the given streaming job graph as a per-job YARN cluster and asserts it
 * finishes cleanly; optionally the descriptor is created without a lib directory.
 *
 * @param configuration Flink configuration used to create the YARN cluster descriptor
 * @param jobGraph the job to deploy; marked as STREAMING and given the test jar here
 * @param withDist whether to ship the Flink distribution lib directory
 * @throws Exception if deployment, artifact checks, or result retrieval fails
 */
private void deployPerJob(Configuration configuration, JobGraph jobGraph, boolean withDist) throws Exception {
    jobGraph.setJobType(JobType.STREAMING);

    try (final YarnClusterDescriptor yarnClusterDescriptor =
            withDist
                    ? createYarnClusterDescriptor(configuration)
                    : createYarnClusterDescriptorWithoutLibDir(configuration)) {

        // Master memory comes from the configured total process memory; TMs get a fixed 1 GiB.
        final int jobManagerMemoryMb =
                yarnClusterDescriptor
                        .getFlinkConfiguration()
                        .get(JobManagerOptions.TOTAL_PROCESS_MEMORY)
                        .getMebiBytes();
        final ClusterSpecification spec =
                new ClusterSpecification.ClusterSpecificationBuilder()
                        .setMasterMemoryMB(jobManagerMemoryMb)
                        .setTaskManagerMemoryMB(1024)
                        .setSlotsPerTaskManager(1)
                        .createClusterSpecification();

        // Attach the test jar so user code ships with the job.
        File userJar = TestUtils.findFile("..", new TestUtils.TestJarFinder("flink-yarn-tests"));
        jobGraph.addJar(new org.apache.flink.core.fs.Path(userJar.toURI()));

        try (ClusterClient<ApplicationId> clusterClient =
                yarnClusterDescriptor.deployJobCluster(spec, jobGraph, false).getClusterClient()) {

            // Every user artifact must have been uploaded to (or already live on) a remote FS.
            for (DistributedCache.DistributedCacheEntry entry :
                    jobGraph.getUserArtifacts().values()) {
                assertTrue(
                        String.format(
                                "The user artifacts(%s) should be remote or uploaded to remote filesystem.",
                                entry.filePath),
                        Utils.isRemotePath(entry.filePath));
            }

            final ApplicationId applicationId = clusterClient.getClusterId();

            final JobResult jobResult =
                    clusterClient.requestJobResult(jobGraph.getJobID()).get();
            assertThat(jobResult, is(notNullValue()));
            assertThat(jobResult.getSerializedThrowable().isPresent(), is(false));

            checkStagingDirectory(configuration, applicationId);
            waitApplicationFinishedElseKillIt(
                    applicationId, yarnAppTerminateTimeout, yarnClusterDescriptor, sleepIntervalInMS);
        }
    }
}
Aggregations