Usage example of org.apache.flink.yarn.YarnClusterDescriptor in the Apache Flink project.
Taken from the class FlinkYarnSessionCli, method run.
/**
 * Runs the yarn-session CLI: parses the command line, then either prints cluster
 * metrics (-q), attaches to an existing YARN session (-id), or deploys a new
 * Flink session cluster on YARN. In attached mode an interactive monitoring loop
 * runs until the user quits, after which the cluster is shut down.
 *
 * @param args raw command-line arguments
 * @return exit code; 0 on success
 * @throws CliArgsException if the command line cannot be parsed
 * @throws FlinkException if deployment or connection-info persistence fails
 */
public int run(String[] args) throws CliArgsException, FlinkException {
//
// Command Line Options
//
final CommandLine cmd = parseCommandLineOptions(args, true);
if (cmd.hasOption(help.getOpt())) {
printUsage();
return 0;
}
// Layer the configurations: command-line options override the base configuration.
final Configuration effectiveConfiguration = new Configuration(configuration);
final Configuration commandLineConfiguration = toConfiguration(cmd);
effectiveConfiguration.addAll(commandLineConfiguration);
LOG.debug("Effective configuration: {}", effectiveConfiguration);
final ClusterClientFactory<ApplicationId> yarnClusterClientFactory = clusterClientServiceLoader.getClusterClientFactory(effectiveConfiguration);
// Force the deployment target to YARN session mode regardless of what the
// configuration previously contained.
effectiveConfiguration.set(DeploymentOptions.TARGET, YarnDeploymentTarget.SESSION.getName());
final YarnClusterDescriptor yarnClusterDescriptor = (YarnClusterDescriptor) yarnClusterClientFactory.createClusterDescriptor(effectiveConfiguration);
try {
// Query cluster for metrics
if (cmd.hasOption(query.getOpt())) {
final String description = yarnClusterDescriptor.getClusterDescription();
System.out.println(description);
return 0;
} else {
final ClusterClientProvider<ApplicationId> clusterClientProvider;
final ApplicationId yarnApplicationId;
if (cmd.hasOption(applicationId.getOpt())) {
// Attach to an already-running YARN application instead of deploying a new one.
yarnApplicationId = ConverterUtils.toApplicationId(cmd.getOptionValue(applicationId.getOpt()));
clusterClientProvider = yarnClusterDescriptor.retrieve(yarnApplicationId);
} else {
// No application id given: deploy a fresh session cluster on YARN.
final ClusterSpecification clusterSpecification = yarnClusterClientFactory.getClusterSpecification(effectiveConfiguration);
clusterClientProvider = yarnClusterDescriptor.deploySessionCluster(clusterSpecification);
ClusterClient<ApplicationId> clusterClient = clusterClientProvider.getClusterClient();
// ------------------ ClusterClient deployed, handle connection details
yarnApplicationId = clusterClient.getClusterId();
try {
System.out.println("JobManager Web Interface: " + clusterClient.getWebInterfaceURL());
// Persist the connection details so later CLI invocations can find this session.
writeYarnPropertiesFile(yarnApplicationId, dynamicPropertiesEncoded);
} catch (Exception e) {
// Persisting connection info failed: best-effort teardown of the cluster we
// just deployed (close the client, then kill the YARN application), then
// rethrow with the original failure as cause.
try {
clusterClient.close();
} catch (Exception ex) {
LOG.info("Could not properly shutdown cluster client.", ex);
}
try {
yarnClusterDescriptor.killCluster(yarnApplicationId);
} catch (FlinkException fe) {
LOG.info("Could not properly terminate the Flink cluster.", fe);
}
throw new FlinkException("Could not write the Yarn connection information.", e);
}
}
if (!effectiveConfiguration.getBoolean(DeploymentOptions.ATTACHED)) {
// Detached mode: just log how to interact with the cluster and return.
YarnClusterDescriptor.logDetachedClusterInformation(yarnApplicationId, LOG);
} else {
// Attached mode: monitor the YARN application status in the background and
// run the interactive command loop until the user exits.
ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor();
final YarnApplicationStatusMonitor yarnApplicationStatusMonitor = new YarnApplicationStatusMonitor(yarnClusterDescriptor.getYarnClient(), yarnApplicationId, new ScheduledExecutorServiceAdapter(scheduledExecutorService));
// Register a shutdown hook so the cluster is torn down even if the JVM is
// killed while the interactive loop is running.
Thread shutdownHook = ShutdownHookUtil.addShutdownHook(() -> shutdownCluster(clusterClientProvider.getClusterClient(), scheduledExecutorService, yarnApplicationStatusMonitor), getClass().getSimpleName(), LOG);
try {
runInteractiveCli(yarnApplicationStatusMonitor, acceptInteractiveInput);
} finally {
shutdownCluster(clusterClientProvider.getClusterClient(), scheduledExecutorService, yarnApplicationStatusMonitor);
if (shutdownHook != null) {
// we do not need the hook anymore as we have just tried to shutdown the
// cluster.
ShutdownHookUtil.removeShutdownHook(shutdownHook, getClass().getSimpleName(), LOG);
}
tryRetrieveAndLogApplicationReport(yarnClusterDescriptor.getYarnClient(), yarnApplicationId);
}
}
}
} finally {
// Always release the descriptor's resources (e.g. the YARN client connection),
// whatever path was taken above.
try {
yarnClusterDescriptor.close();
} catch (Exception e) {
LOG.info("Could not properly close the yarn cluster descriptor.", e);
}
}
return 0;
}
Aggregations