Use of org.apache.flink.client.deployment.ClusterSpecification in project flink by apache.
The class YARNFileReplicationITCase, method deployPerJob.
private void deployPerJob(Configuration configuration, JobGraph jobGraph) throws Exception {
    try (final YarnClusterDescriptor yarnClusterDescriptor = createYarnClusterDescriptor(configuration)) {
        yarnClusterDescriptor.setLocalJarPath(new Path(flinkUberjar.getAbsolutePath()));
        yarnClusterDescriptor.addShipFiles(Arrays.asList(flinkLibFolder.listFiles()));
        final int masterMemory =
                yarnClusterDescriptor.getFlinkConfiguration().get(JobManagerOptions.TOTAL_PROCESS_MEMORY).getMebiBytes();
        final ClusterSpecification clusterSpecification =
                new ClusterSpecification.ClusterSpecificationBuilder()
                        .setMasterMemoryMB(masterMemory)
                        .setTaskManagerMemoryMB(1024)
                        .setSlotsPerTaskManager(1)
                        .createClusterSpecification();
        File testingJar = TestUtils.findFile("..", new TestUtils.TestJarFinder("flink-yarn-tests"));
        jobGraph.addJar(new org.apache.flink.core.fs.Path(testingJar.toURI()));
        try (ClusterClient<ApplicationId> clusterClient =
                yarnClusterDescriptor.deployJobCluster(clusterSpecification, jobGraph, false).getClusterClient()) {
            ApplicationId applicationId = clusterClient.getClusterId();
            extraVerification(configuration, applicationId);
            final CompletableFuture<JobResult> jobResultCompletableFuture =
                    clusterClient.requestJobResult(jobGraph.getJobID());
            final JobResult jobResult = jobResultCompletableFuture.get();
            assertThat(jobResult, is(notNullValue()));
            jobResult.getSerializedThrowable()
                    .ifPresent(
                            serializedThrowable -> {
                                throw new AssertionError(
                                        "Job failed",
                                        serializedThrowable.deserializeError(
                                                YARNFileReplicationITCase.class.getClassLoader()));
                            });
            waitApplicationFinishedElseKillIt(
                    applicationId, yarnAppTerminateTimeout, yarnClusterDescriptor, sleepIntervalInMS);
        }
    }
}
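Since this test exercises YARN file replication, the Configuration handed to deployPerJob presumably carries a replication setting for the files the client ships. A minimal sketch, assuming YarnConfigOptions.FILE_REPLICATION is the option used and with a placeholder value:

Configuration configuration = new Configuration();
// yarn.file-replication controls the HDFS replication factor for files uploaded by the
// YARN client; the value 4 is only an illustrative placeholder, not taken from the test.
configuration.set(YarnConfigOptions.FILE_REPLICATION, 4);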
Use of org.apache.flink.client.deployment.ClusterSpecification in project flink by apache.
The class YARNITCase, method deployPerJob.
private void deployPerJob(Configuration configuration, JobGraph jobGraph, boolean withDist) throws Exception {
    jobGraph.setJobType(JobType.STREAMING);
    try (final YarnClusterDescriptor yarnClusterDescriptor =
            withDist ? createYarnClusterDescriptor(configuration) : createYarnClusterDescriptorWithoutLibDir(configuration)) {
        final int masterMemory =
                yarnClusterDescriptor.getFlinkConfiguration().get(JobManagerOptions.TOTAL_PROCESS_MEMORY).getMebiBytes();
        final ClusterSpecification clusterSpecification =
                new ClusterSpecification.ClusterSpecificationBuilder()
                        .setMasterMemoryMB(masterMemory)
                        .setTaskManagerMemoryMB(1024)
                        .setSlotsPerTaskManager(1)
                        .createClusterSpecification();
        File testingJar = TestUtils.findFile("..", new TestUtils.TestJarFinder("flink-yarn-tests"));
        jobGraph.addJar(new org.apache.flink.core.fs.Path(testingJar.toURI()));
        try (ClusterClient<ApplicationId> clusterClient =
                yarnClusterDescriptor.deployJobCluster(clusterSpecification, jobGraph, false).getClusterClient()) {
            for (DistributedCache.DistributedCacheEntry entry : jobGraph.getUserArtifacts().values()) {
                assertTrue(
                        String.format("The user artifacts(%s) should be remote or uploaded to remote filesystem.", entry.filePath),
                        Utils.isRemotePath(entry.filePath));
            }
            ApplicationId applicationId = clusterClient.getClusterId();
            final CompletableFuture<JobResult> jobResultCompletableFuture =
                    clusterClient.requestJobResult(jobGraph.getJobID());
            final JobResult jobResult = jobResultCompletableFuture.get();
            assertThat(jobResult, is(notNullValue()));
            assertThat(jobResult.getSerializedThrowable().isPresent(), is(false));
            checkStagingDirectory(configuration, applicationId);
            waitApplicationFinishedElseKillIt(
                    applicationId, yarnAppTerminateTimeout, yarnClusterDescriptor, sleepIntervalInMS);
        }
    }
}
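The loop over jobGraph.getUserArtifacts() checks distributed-cache entries, which typically originate from cached-file registrations on the execution environment. A minimal sketch of how such an artifact could be registered, assuming StreamExecutionEnvironment.registerCachedFile and a placeholder path:

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
// Registers a distributed-cache entry under the name "model"; after the pipeline is
// translated to a JobGraph it appears in getUserArtifacts(). The HDFS path is a placeholder.
env.registerCachedFile("hdfs:///tmp/model.bin", "model");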
Use of org.apache.flink.client.deployment.ClusterSpecification in project flink by apache.
The class YARNApplicationITCase, method deployApplication.
private void deployApplication(Configuration configuration) throws Exception {
    try (final YarnClusterDescriptor yarnClusterDescriptor = createYarnClusterDescriptor(configuration)) {
        final int masterMemory =
                yarnClusterDescriptor.getFlinkConfiguration().get(JobManagerOptions.TOTAL_PROCESS_MEMORY).getMebiBytes();
        final ClusterSpecification clusterSpecification =
                new ClusterSpecification.ClusterSpecificationBuilder()
                        .setMasterMemoryMB(masterMemory)
                        .setTaskManagerMemoryMB(1024)
                        .setSlotsPerTaskManager(1)
                        .createClusterSpecification();
        try (ClusterClient<ApplicationId> clusterClient =
                yarnClusterDescriptor
                        .deployApplicationCluster(clusterSpecification, ApplicationConfiguration.fromConfiguration(configuration))
                        .getClusterClient()) {
            ApplicationId applicationId = clusterClient.getClusterId();
            waitApplicationFinishedElseKillIt(
                    applicationId, yarnAppTerminateTimeout, yarnClusterDescriptor, sleepIntervalInMS);
        }
    }
}
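Here deployApplicationCluster reads the program entry point back out of the configuration via ApplicationConfiguration.fromConfiguration. A minimal sketch of how that configuration could be populated beforehand, assuming ApplicationConfiguration's (String[], String) constructor and applyToConfiguration; the main class and arguments are hypothetical placeholders:

Configuration configuration = new Configuration();
ApplicationConfiguration appConfig =
        new ApplicationConfiguration(
                new String[] {"--input", "hdfs:///tmp/input"}, // hypothetical program arguments
                "com.example.MyFlinkJob"); // hypothetical entry-point class
appConfig.applyToConfiguration(configuration);
// ApplicationConfiguration.fromConfiguration(configuration) then recovers these values.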
Use of org.apache.flink.client.deployment.ClusterSpecification in project flink by apache.
The class YarnClusterDescriptorTest, method testConfigOverwrite.
@Test
public void testConfigOverwrite() throws ClusterDeploymentException {
    Configuration configuration = new Configuration();
    // overwrite vcores in config
    configuration.setInteger(YarnConfigOptions.VCORES, Integer.MAX_VALUE);
    YarnClusterDescriptor clusterDescriptor = createYarnClusterDescriptor(configuration);
    clusterDescriptor.setLocalJarPath(new Path(flinkJar.getPath()));
    // configure slots
    ClusterSpecification clusterSpecification =
            new ClusterSpecification.ClusterSpecificationBuilder().createClusterSpecification();
    try {
        clusterDescriptor.deploySessionCluster(clusterSpecification);
        fail("The deploy call should have failed.");
    } catch (ClusterDeploymentException e) {
        // we expect the cause to be an IllegalConfigurationException
        if (!(e.getCause() instanceof IllegalConfigurationException)) {
            throw e;
        }
    } finally {
        clusterDescriptor.close();
    }
}
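The try/catch/fail pattern above can also be expressed with assertThrows (available since JUnit 4.13); a hedged sketch reusing the same fixture:

ClusterDeploymentException e =
        assertThrows(
                ClusterDeploymentException.class,
                () -> clusterDescriptor.deploySessionCluster(clusterSpecification));
// The deployment is expected to be rejected because the configured vcores exceed
// what the YARN cluster can provide.
assertTrue(e.getCause() instanceof IllegalConfigurationException);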
Use of org.apache.flink.client.deployment.ClusterSpecification in project flink by apache.
The class FlinkYarnSessionCliTest, method testMemoryPropertyWithArbitraryUnit.
/**
 * Tests specifying the total process memory with an arbitrary unit for the job manager and
 * task manager.
 */
@Test
public void testMemoryPropertyWithArbitraryUnit() throws Exception {
    final String[] args = new String[] {"-yjm", "1g", "-ytm", "2g"};
    final FlinkYarnSessionCli flinkYarnSessionCli = createFlinkYarnSessionCli();
    final CommandLine commandLine = flinkYarnSessionCli.parseCommandLineOptions(args, false);
    final Configuration executorConfig = flinkYarnSessionCli.toConfiguration(commandLine);
    final ClusterClientFactory<ApplicationId> clientFactory = getClusterClientFactory(executorConfig);
    final ClusterSpecification clusterSpecification = clientFactory.getClusterSpecification(executorConfig);
    assertThat(clusterSpecification.getMasterMemoryMB(), is(1024));
    assertThat(clusterSpecification.getTaskManagerMemoryMB(), is(2048));
}
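The expected values follow from Flink's memory-size parsing, where "1g" is 1024 MiB and "2g" is 2048 MiB. A minimal illustration using org.apache.flink.configuration.MemorySize (not part of the test itself):

// MemorySize.parse understands unit suffixes such as "m" and "g".
int jmMb = MemorySize.parse("1g").getMebiBytes(); // 1024
int tmMb = MemorySize.parse("2g").getMebiBytes(); // 2048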