Usage example of org.apache.flink.kubernetes.configuration.KubernetesDeploymentTarget from the Apache Flink project.
The deployApplicationCluster method of the KubernetesClusterDescriptor class.
@Override
public ClusterClientProvider<String> deployApplicationCluster(final ClusterSpecification clusterSpecification, final ApplicationConfiguration applicationConfiguration) throws ClusterDeploymentException {
    // Refuse to deploy when a cluster with this id already exposes a REST service.
    if (client.getService(KubernetesService.ServiceType.REST_SERVICE, clusterId).isPresent()) {
        throw new ClusterDeploymentException("The Flink cluster " + clusterId + " already exists.");
    }

    checkNotNull(clusterSpecification);
    checkNotNull(applicationConfiguration);

    // This entry point only supports Application mode; reject any other deployment.target.
    final KubernetesDeploymentTarget target = KubernetesDeploymentTarget.fromConfig(flinkConfig);
    if (target != KubernetesDeploymentTarget.APPLICATION) {
        throw new ClusterDeploymentException("Couldn't deploy Kubernetes Application Cluster." + " Expected deployment.target=" + KubernetesDeploymentTarget.APPLICATION.getName() + " but actual one was \"" + target + "\"");
    }

    // Merge the application-specific settings into the cluster configuration.
    applicationConfiguration.applyToConfiguration(flinkConfig);

    // No need to do pipeline-jar validation if it is a PyFlink job.
    final boolean isPyFlinkJob = PackagedProgramUtils.isPython(applicationConfiguration.getApplicationClassName()) || PackagedProgramUtils.isPython(applicationConfiguration.getProgramArguments());
    if (!isPyFlinkJob) {
        final List<File> userJars = KubernetesUtils.checkJarFileForApplicationMode(flinkConfig);
        Preconditions.checkArgument(userJars.size() == 1, "Should only have one jar");
    }

    final ClusterClientProvider<String> provider = deployClusterInternal(KubernetesApplicationClusterEntrypoint.class.getName(), clusterSpecification, false);

    // Obtain a client once to log the web UI endpoint; try-with-resources closes it again.
    try (ClusterClient<String> restClient = provider.getClusterClient()) {
        LOG.info("Create flink application cluster {} successfully, JobManager Web Interface: {}", clusterId, restClient.getWebInterfaceURL());
    }
    return provider;
}
Aggregations