Use of org.apache.flink.kubernetes.kubeclient.FlinkPod in project flink by apache.
Class HadoopConfMountDecoratorTest, method testEmptyHadoopConfDirectory.
@Test
public void testEmptyHadoopConfDirectory() throws IOException {
    setHadoopConfDirEnv();
    assertEquals(0, hadoopConfMountDecorator.buildAccompanyingKubernetesResources().size());
    final FlinkPod resultFlinkPod = hadoopConfMountDecorator.decorateFlinkPod(baseFlinkPod);
    assertEquals(baseFlinkPod.getPodWithoutMainContainer(), resultFlinkPod.getPodWithoutMainContainer());
    assertEquals(baseFlinkPod.getMainContainer(), resultFlinkPod.getMainContainer());
}
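The baseFlinkPod decorated above is a plain FlinkPod. A minimal sketch of how such a pod can be assembled with FlinkPod.Builder and the Fabric8 builders follows; the pod and container names are hypothetical placeholders, and withPod/withMainContainer are assumed to be the builder setters used in flink-kubernetes:

// Sketch only (hypothetical helper, not part of the Flink tests): assemble a base
// FlinkPod from an otherwise empty pod and a named main container, using the
// Fabric8 PodBuilder/ContainerBuilder and the FlinkPod.Builder setters.
private FlinkPod createBaseFlinkPod() {
    final Pod pod =
            new PodBuilder()
                    .withNewMetadata()
                    .withName("base-jobmanager-pod") // hypothetical name
                    .endMetadata()
                    .withNewSpec()
                    .endSpec()
                    .build();
    final Container mainContainer =
            new ContainerBuilder().withName("flink-main-container").build(); // hypothetical name
    return new FlinkPod.Builder().withPod(pod).withMainContainer(mainContainer).build();
}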
Use of org.apache.flink.kubernetes.kubeclient.FlinkPod in project flink by apache.
Class KubernetesUtilsTest, method testLoadPodFromTemplateAndCheckMetaData.
@Test
public void testLoadPodFromTemplateAndCheckMetaData() {
    final FlinkPod flinkPod =
            KubernetesUtils.loadPodFromTemplateFile(
                    flinkKubeClient,
                    KubernetesPodTemplateTestUtils.getPodTemplateFile(),
                    KubernetesPodTemplateTestUtils.TESTING_MAIN_CONTAINER_NAME);
    // The pod name is defined in the test/resources/testing-pod-template.yaml.
    final String expectedPodName = "pod-template";
    assertThat(flinkPod.getPodWithoutMainContainer().getMetadata().getName(), is(expectedPodName));
}
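loadPodFromTemplateFile splits the template into the container whose name matches the given main-container name and the remaining pod. A hypothetical follow-up assertion in the same style (not part of the original test) would be:

// Hypothetical extra check, same pattern as the test above: the container selected by
// TESTING_MAIN_CONTAINER_NAME becomes the FlinkPod's main container.
assertThat(
        flinkPod.getMainContainer().getName(),
        is(KubernetesPodTemplateTestUtils.TESTING_MAIN_CONTAINER_NAME));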
Use of org.apache.flink.kubernetes.kubeclient.FlinkPod in project flink by apache.
Class KubernetesUtilsTest, method testLoadPodFromTemplateAndCheckInitContainer.
@Test
public void testLoadPodFromTemplateAndCheckInitContainer() {
    final FlinkPod flinkPod =
            KubernetesUtils.loadPodFromTemplateFile(
                    flinkKubeClient,
                    KubernetesPodTemplateTestUtils.getPodTemplateFile(),
                    KubernetesPodTemplateTestUtils.TESTING_MAIN_CONTAINER_NAME);
    assertThat(flinkPod.getPodWithoutMainContainer().getSpec().getInitContainers().size(), is(1));
    assertThat(
            flinkPod.getPodWithoutMainContainer().getSpec().getInitContainers().get(0),
            is(KubernetesPodTemplateTestUtils.createInitContainer()));
}
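The expected init container is compared by value against KubernetesPodTemplateTestUtils.createInitContainer(). A minimal sketch of how such an init container could be built with the Fabric8 ContainerBuilder follows; the name, image, and command are hypothetical placeholders, not the values used by the actual test utility:

// Sketch only: build an init container for a pod template. Name, image, and command
// are hypothetical, not the actual values from KubernetesPodTemplateTestUtils.
final Container initContainer =
        new ContainerBuilder()
                .withName("artifacts-fetcher")
                .withImage("busybox:latest")
                .withCommand("sh", "-c", "echo fetching artifacts")
                .build();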
Use of org.apache.flink.kubernetes.kubeclient.FlinkPod in project flink by apache.
Class KubernetesClusterDescriptor, method deployClusterInternal.
private ClusterClientProvider<String> deployClusterInternal(
        String entryPoint, ClusterSpecification clusterSpecification, boolean detached)
        throws ClusterDeploymentException {
    final ClusterEntrypoint.ExecutionMode executionMode =
            detached ? ClusterEntrypoint.ExecutionMode.DETACHED : ClusterEntrypoint.ExecutionMode.NORMAL;
    flinkConfig.setString(ClusterEntrypoint.INTERNAL_CLUSTER_EXECUTION_MODE, executionMode.toString());
    flinkConfig.setString(KubernetesConfigOptionsInternal.ENTRY_POINT_CLASS, entryPoint);
    // Rpc, blob, rest, taskManagerRpc ports need to be exposed, so update them to fixed values.
    KubernetesUtils.checkAndUpdatePortConfigOption(flinkConfig, BlobServerOptions.PORT, Constants.BLOB_SERVER_PORT);
    KubernetesUtils.checkAndUpdatePortConfigOption(flinkConfig, TaskManagerOptions.RPC_PORT, Constants.TASK_MANAGER_RPC_PORT);
    KubernetesUtils.checkAndUpdatePortConfigOption(flinkConfig, RestOptions.BIND_PORT, Constants.REST_PORT);
    if (HighAvailabilityMode.isHighAvailabilityModeActivated(flinkConfig)) {
        flinkConfig.setString(HighAvailabilityOptions.HA_CLUSTER_ID, clusterId);
        KubernetesUtils.checkAndUpdatePortConfigOption(
                flinkConfig,
                HighAvailabilityOptions.HA_JOB_MANAGER_PORT_RANGE,
                flinkConfig.get(JobManagerOptions.PORT));
    }
    try {
        final KubernetesJobManagerParameters kubernetesJobManagerParameters =
                new KubernetesJobManagerParameters(flinkConfig, clusterSpecification);
        final FlinkPod podTemplate =
                kubernetesJobManagerParameters
                        .getPodTemplateFilePath()
                        .map(file -> KubernetesUtils.loadPodFromTemplateFile(client, file, Constants.MAIN_CONTAINER_NAME))
                        .orElse(new FlinkPod.Builder().build());
        final KubernetesJobManagerSpecification kubernetesJobManagerSpec =
                KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
                        podTemplate, kubernetesJobManagerParameters);
        client.createJobManagerComponent(kubernetesJobManagerSpec);
        return createClusterClientProvider(clusterId);
    } catch (Exception e) {
        try {
            LOG.warn("Failed to create the Kubernetes cluster \"{}\", try to clean up the residual resources.", clusterId);
            client.stopAndCleanupCluster(clusterId);
        } catch (Exception e1) {
            LOG.info("Failed to stop and clean up the Kubernetes cluster \"{}\".", clusterId, e1);
        }
        throw new ClusterDeploymentException("Could not create Kubernetes cluster \"" + clusterId + "\".", e);
    }
}
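The checkAndUpdatePortConfigOption calls above normalize port options before the JobManager component and its Kubernetes service are created: the idea is to pin an option to one fixed port whenever the configured value is unset, zero, or a range, since the exposed service needs a single deterministic port. A simplified sketch of that idea (a hypothetical helper, not the actual Flink signature or implementation):

// Simplified sketch, not the Flink implementation: pin a port option to a fixed
// fallback when it is unset, "0", or a range such as "50100-50200", so that the
// Kubernetes service can expose one deterministic port.
static void pinPortOption(Configuration config, ConfigOption<String> portOption, int fallbackPort) {
    final String configured = config.getString(portOption);
    if (configured == null || "0".equals(configured) || configured.contains("-")) {
        config.setString(portOption, String.valueOf(fallbackPort));
    }
}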