Example usage of org.apache.flink.kubernetes.kubeclient.KubernetesJobManagerSpecification in the Apache Flink project: class KubernetesJobManagerFactoryTest, method testPodSpec.
@Test
public void testPodSpec() throws IOException {
    kubernetesJobManagerSpecification =
            KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
                    flinkPod, kubernetesJobManagerParameters);

    final PodSpec podSpec =
            kubernetesJobManagerSpecification.getDeployment().getSpec().getTemplate().getSpec();

    // Pod-level expectations.
    assertEquals(1, podSpec.getContainers().size());
    assertEquals(SERVICE_ACCOUNT_NAME, podSpec.getServiceAccountName());
    assertEquals(3, podSpec.getVolumes().size());

    // Main-container identity and image settings.
    final Container mainContainer = podSpec.getContainers().get(0);
    assertEquals(Constants.MAIN_CONTAINER_NAME, mainContainer.getName());
    assertEquals(CONTAINER_IMAGE, mainContainer.getImage());
    assertEquals(CONTAINER_IMAGE_PULL_POLICY.name(), mainContainer.getImagePullPolicy());

    // Environment variables and exposed ports.
    assertEquals(3, mainContainer.getEnv().size());
    assertTrue(mainContainer.getEnv().stream().anyMatch(envVar -> envVar.getName().equals("key1")));
    assertEquals(3, mainContainer.getPorts().size());

    // Resource requests derived from the JobManager parameters.
    final Map<String, Quantity> requests = mainContainer.getResources().getRequests();
    assertEquals(Double.toString(JOB_MANAGER_CPU), requests.get("cpu").getAmount());
    assertEquals(String.valueOf(JOB_MANAGER_MEMORY), requests.get("memory").getAmount());

    // Startup command and mounts.
    assertEquals(1, mainContainer.getCommand().size());
    // The args list is [bash, -c, 'java -classpath $FLINK_CLASSPATH ...'].
    assertEquals(3, mainContainer.getArgs().size());
    assertEquals(3, mainContainer.getVolumeMounts().size());
}
Example usage of org.apache.flink.kubernetes.kubeclient.KubernetesJobManagerSpecification in the Apache Flink project: class KubernetesJobManagerFactoryTest, method testExistingHadoopConfigMap.
@Test
public void testExistingHadoopConfigMap() throws IOException {
    flinkConfig.set(KubernetesConfigOptions.HADOOP_CONF_CONFIG_MAP, EXISTING_HADOOP_CONF_CONFIG_MAP);
    kubernetesJobManagerSpecification =
            KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
                    flinkPod, kubernetesJobManagerParameters);

    // When an existing Hadoop-conf ConfigMap is configured, no new one should be generated.
    final String generatedConfigMapName = HadoopConfMountDecorator.getHadoopConfConfigMapName(CLUSTER_ID);
    assertFalse(
            kubernetesJobManagerSpecification.getAccompanyingResources().stream()
                    .anyMatch(resource -> resource.getMetadata().getName().equals(generatedConfigMapName)));

    // The pod should instead mount the pre-existing ConfigMap as a volume.
    final PodSpec podSpec =
            kubernetesJobManagerSpecification.getDeployment().getSpec().getTemplate().getSpec();
    assertTrue(
            podSpec.getVolumes().stream()
                    .anyMatch(volume -> volume.getConfigMap().getName().equals(EXISTING_HADOOP_CONF_CONFIG_MAP)));
}
Example usage of org.apache.flink.kubernetes.kubeclient.KubernetesJobManagerSpecification in the Apache Flink project: class KubernetesJobManagerFactoryTest, method testHadoopConfConfigMap.
@Test
public void testHadoopConfConfigMap() throws IOException {
    setHadoopConfDirEnv();
    generateHadoopConfFileItems();
    kubernetesJobManagerSpecification =
            KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
                    flinkPod, kubernetesJobManagerParameters);

    final String expectedConfigMapName = HadoopConfMountDecorator.getHadoopConfConfigMapName(CLUSTER_ID);
    // Look up the generated Hadoop ConfigMap directly with findAny() instead of collecting
    // the whole filtered stream into a list just to read element 0; if it is absent, fail
    // with a descriptive assertion error rather than an opaque IndexOutOfBoundsException.
    final ConfigMap resultConfigMap =
            kubernetesJobManagerSpecification.getAccompanyingResources().stream()
                    .filter(ConfigMap.class::isInstance)
                    .map(ConfigMap.class::cast)
                    .filter(configMap -> configMap.getMetadata().getName().equals(expectedConfigMapName))
                    .findAny()
                    .orElseThrow(() -> new AssertionError(
                            "Hadoop ConfigMap " + expectedConfigMapName + " was not created."));

    assertEquals(2, resultConfigMap.getMetadata().getLabels().size());

    // The ConfigMap should carry exactly the two generated Hadoop configuration files.
    final Map<String, String> resultData = resultConfigMap.getData();
    assertEquals(2, resultData.size());
    assertEquals("some data", resultData.get("core-site.xml"));
    assertEquals("some data", resultData.get("hdfs-site.xml"));
}
Example usage of org.apache.flink.kubernetes.kubeclient.KubernetesJobManagerSpecification in the Apache Flink project: class KubernetesJobManagerFactoryTest, method testServices.
@Test
public void testServices() throws IOException {
    kubernetesJobManagerSpecification =
            KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
                    flinkPod, kubernetesJobManagerParameters);

    // Exactly two Services are expected among the accompanying resources:
    // the internal (headless) one and the external rest one.
    final List<Service> services =
            this.kubernetesJobManagerSpecification.getAccompanyingResources().stream()
                    .filter(x -> x instanceof Service)
                    .map(x -> (Service) x)
                    .collect(Collectors.toList());
    assertEquals(2, services.size());

    final String internalName = InternalServiceDecorator.getInternalServiceName(CLUSTER_ID);
    final List<Service> internalMatches =
            services.stream()
                    .filter(service -> service.getMetadata().getName().equals(internalName))
                    .collect(Collectors.toList());
    assertEquals(1, internalMatches.size());

    final String externalName = ExternalServiceDecorator.getExternalServiceName(CLUSTER_ID);
    final List<Service> restMatches =
            services.stream()
                    .filter(service -> service.getMetadata().getName().equals(externalName))
                    .collect(Collectors.toList());
    assertEquals(1, restMatches.size());

    // Internal service: headless (no type, fixed clusterIP), two ports, selector labels.
    final Service internalService = internalMatches.get(0);
    assertEquals(2, internalService.getMetadata().getLabels().size());
    assertNull(internalService.getSpec().getType());
    assertEquals(HeadlessClusterIPService.HEADLESS_CLUSTER_IP, internalService.getSpec().getClusterIP());
    assertEquals(2, internalService.getSpec().getPorts().size());
    assertEquals(3, internalService.getSpec().getSelector().size());

    // Rest service: ClusterIP type exposing a single port.
    final Service restService = restMatches.get(0);
    assertEquals(2, restService.getMetadata().getLabels().size());
    assertEquals("ClusterIP", restService.getSpec().getType());
    assertEquals(1, restService.getSpec().getPorts().size());
    assertEquals(3, restService.getSpec().getSelector().size());
}
Example usage of org.apache.flink.kubernetes.kubeclient.KubernetesJobManagerSpecification in the Apache Flink project: class KubernetesClusterDescriptor, method deployClusterInternal.
/**
 * Deploys the JobManager component of a Flink cluster on Kubernetes and returns a provider for
 * a client talking to that cluster.
 *
 * <p>Mutates {@code flinkConfig} (execution mode, entry point, fixed ports, HA cluster id)
 * before building the JobManager specification, so the statement order matters.
 *
 * @param entryPoint fully-qualified class name of the cluster entrypoint to launch
 * @param clusterSpecification resource profile used to build the JobManager parameters
 * @param detached whether the cluster entrypoint runs in DETACHED or NORMAL execution mode
 * @return a provider that creates clients for the deployed cluster
 * @throws ClusterDeploymentException if creating the Kubernetes resources fails; residual
 *     resources are cleaned up on a best-effort basis before the exception is thrown
 */
private ClusterClientProvider<String> deployClusterInternal(String entryPoint, ClusterSpecification clusterSpecification, boolean detached) throws ClusterDeploymentException {
final ClusterEntrypoint.ExecutionMode executionMode = detached ? ClusterEntrypoint.ExecutionMode.DETACHED : ClusterEntrypoint.ExecutionMode.NORMAL;
flinkConfig.setString(ClusterEntrypoint.INTERNAL_CLUSTER_EXECUTION_MODE, executionMode.toString());
flinkConfig.setString(KubernetesConfigOptionsInternal.ENTRY_POINT_CLASS, entryPoint);
// Rpc, blob, rest, taskManagerRpc ports need to be exposed, so update them to fixed values.
KubernetesUtils.checkAndUpdatePortConfigOption(flinkConfig, BlobServerOptions.PORT, Constants.BLOB_SERVER_PORT);
KubernetesUtils.checkAndUpdatePortConfigOption(flinkConfig, TaskManagerOptions.RPC_PORT, Constants.TASK_MANAGER_RPC_PORT);
KubernetesUtils.checkAndUpdatePortConfigOption(flinkConfig, RestOptions.BIND_PORT, Constants.REST_PORT);
// With HA enabled, pin the HA cluster id and the JobManager RPC port as well.
if (HighAvailabilityMode.isHighAvailabilityModeActivated(flinkConfig)) {
flinkConfig.setString(HighAvailabilityOptions.HA_CLUSTER_ID, clusterId);
KubernetesUtils.checkAndUpdatePortConfigOption(flinkConfig, HighAvailabilityOptions.HA_JOB_MANAGER_PORT_RANGE, flinkConfig.get(JobManagerOptions.PORT));
}
try {
final KubernetesJobManagerParameters kubernetesJobManagerParameters = new KubernetesJobManagerParameters(flinkConfig, clusterSpecification);
// Start from the user-supplied pod template if one is configured, otherwise an empty pod.
final FlinkPod podTemplate = kubernetesJobManagerParameters.getPodTemplateFilePath().map(file -> KubernetesUtils.loadPodFromTemplateFile(client, file, Constants.MAIN_CONTAINER_NAME)).orElse(new FlinkPod.Builder().build());
final KubernetesJobManagerSpecification kubernetesJobManagerSpec = KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(podTemplate, kubernetesJobManagerParameters);
client.createJobManagerComponent(kubernetesJobManagerSpec);
return createClusterClientProvider(clusterId);
} catch (Exception e) {
// Best-effort cleanup of partially created resources; the original failure is rethrown
// as the ClusterDeploymentException cause either way.
try {
LOG.warn("Failed to create the Kubernetes cluster \"{}\", try to clean up the residual resources.", clusterId);
client.stopAndCleanupCluster(clusterId);
} catch (Exception e1) {
LOG.info("Failed to stop and clean up the Kubernetes cluster \"{}\".", clusterId, e1);
}
throw new ClusterDeploymentException("Could not create Kubernetes cluster \"" + clusterId + "\".", e);
}
}
Aggregations