Use of org.apache.flink.kubernetes.kubeclient.FlinkPod in project flink by apache: class CmdJobManagerDecoratorTest, method testJobManagerCommandsAndArgs.
private void testJobManagerCommandsAndArgs(String target) {
    flinkConfig.set(DeploymentOptions.TARGET, target);
    final FlinkPod resultFlinkPod = cmdJobManagerDecorator.decorateFlinkPod(baseFlinkPod);
    final String entryCommand = flinkConfig.get(KubernetesConfigOptions.KUBERNETES_ENTRY_PATH);
    assertThat(resultFlinkPod.getMainContainer().getCommand(), containsInAnyOrder(entryCommand));
    List<String> flinkCommands =
            KubernetesUtils.getStartCommandWithBashWrapper(
                    Constants.KUBERNETES_JOB_MANAGER_SCRIPT_PATH + " " + target);
    assertThat(resultFlinkPod.getMainContainer().getArgs(), contains(flinkCommands.toArray()));
}
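For context, the behavior this test pins down can be sketched roughly as follows. This is an illustrative rewrite, not the actual CmdJobManagerDecorator source: it uses fabric8's ContainerBuilder and assumes a FlinkPod.Builder copy constructor along the lines used elsewhere in Flink's kubeclient package.

private FlinkPod decorateWithJobManagerStartCommand(FlinkPod flinkPod, String target) {
    // Rebuild the main container: the configured entrypoint becomes the command, and the
    // bash-wrapped JobManager start script plus the deployment target become the args.
    final Container mainContainerWithStartCmd =
            new ContainerBuilder(flinkPod.getMainContainer())
                    .withCommand(flinkConfig.get(KubernetesConfigOptions.KUBERNETES_ENTRY_PATH))
                    .withArgs(
                            KubernetesUtils.getStartCommandWithBashWrapper(
                                    Constants.KUBERNETES_JOB_MANAGER_SCRIPT_PATH + " " + target))
                    .build();
    // FlinkPod.Builder is assumed here; the decorated container replaces the original one.
    return new FlinkPod.Builder(flinkPod).withMainContainer(mainContainerWithStartCmd).build();
}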
Use of org.apache.flink.kubernetes.kubeclient.FlinkPod in project flink by apache: class DecoratorWithPodTemplateTestBase, method onSetup.
@Override
public final void onSetup() throws Exception {
    final FlinkPod podTemplate =
            KubernetesUtils.loadPodFromTemplateFile(
                    flinkKubeClient,
                    KubernetesPodTemplateTestUtils.getPodTemplateFile(),
                    KubernetesPodTemplateTestUtils.TESTING_MAIN_CONTAINER_NAME);
    this.resultPod = getResultPod(podTemplate);
}
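As a usage sketch, a pod loaded this way becomes the base FlinkPod that a step decorator then refines. The podTemplateFile and stepDecorator variables below are hypothetical placeholders, not part of the test base:

// Illustrative flow: load a pod template as the base FlinkPod, then run it through a
// step decorator. 'podTemplateFile' and 'stepDecorator' are placeholders.
final FlinkPod basePod =
        KubernetesUtils.loadPodFromTemplateFile(
                flinkKubeClient,
                podTemplateFile,
                KubernetesPodTemplateTestUtils.TESTING_MAIN_CONTAINER_NAME);
final FlinkPod decoratedPod = stepDecorator.decorateFlinkPod(basePod);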
Use of org.apache.flink.kubernetes.kubeclient.FlinkPod in project flink by apache: class HadoopConfMountDecoratorTest, method testHadoopConfDirectoryUnset.
@Test
public void testHadoopConfDirectoryUnset() throws IOException {
    assertEquals(0, hadoopConfMountDecorator.buildAccompanyingKubernetesResources().size());
    final FlinkPod resultFlinkPod = hadoopConfMountDecorator.decorateFlinkPod(baseFlinkPod);
    assertEquals(
            baseFlinkPod.getPodWithoutMainContainer(), resultFlinkPod.getPodWithoutMainContainer());
    assertEquals(baseFlinkPod.getMainContainer(), resultFlinkPod.getMainContainer());
}
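The no-op case asserted here can be sketched as an early-return guard inside such a decorator. This is illustrative only; hasHadoopConfiguration() is a hypothetical helper, not the decorator's real API:

// If no Hadoop configuration is discovered, hand the input pod back unchanged so the
// equality assertions above hold. hasHadoopConfiguration() is a hypothetical helper.
if (!hasHadoopConfiguration()) {
    return flinkPod;
}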
Use of org.apache.flink.kubernetes.kubeclient.FlinkPod in project flink by apache: class HadoopConfMountDecoratorTest, method testMainContainerWithHadoopConfVolumeMount.
@Test
public void testMainContainerWithHadoopConfVolumeMount() throws IOException {
    setHadoopConfDirEnv();
    generateHadoopConfFileItems();
    final FlinkPod resultFlinkPod = hadoopConfMountDecorator.decorateFlinkPod(baseFlinkPod);

    final List<VolumeMount> resultVolumeMounts = resultFlinkPod.getMainContainer().getVolumeMounts();
    assertEquals(1, resultVolumeMounts.size());
    final VolumeMount resultVolumeMount = resultVolumeMounts.get(0);
    assertEquals(Constants.HADOOP_CONF_VOLUME, resultVolumeMount.getName());
    assertEquals(Constants.HADOOP_CONF_DIR_IN_POD, resultVolumeMount.getMountPath());

    final Map<String, String> expectedEnvs =
            new HashMap<String, String>() {
                {
                    put(Constants.ENV_HADOOP_CONF_DIR, Constants.HADOOP_CONF_DIR_IN_POD);
                }
            };
    final Map<String, String> resultEnvs =
            resultFlinkPod.getMainContainer().getEnv().stream()
                    .collect(Collectors.toMap(EnvVar::getName, EnvVar::getValue));
    assertEquals(expectedEnvs, resultEnvs);
}
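A decorator producing the container shape asserted above could look roughly like this sketch. It is not the actual HadoopConfMountDecorator source; it relies on fabric8's fluent ContainerBuilder and again assumes a FlinkPod.Builder:

private FlinkPod mountHadoopConfIntoMainContainer(FlinkPod flinkPod) {
    // Attach the Hadoop conf volume mount and export HADOOP_CONF_DIR inside the container.
    final Container mainContainerWithHadoopConf =
            new ContainerBuilder(flinkPod.getMainContainer())
                    .addNewVolumeMount()
                        .withName(Constants.HADOOP_CONF_VOLUME)
                        .withMountPath(Constants.HADOOP_CONF_DIR_IN_POD)
                    .endVolumeMount()
                    .addNewEnv()
                        .withName(Constants.ENV_HADOOP_CONF_DIR)
                        .withValue(Constants.HADOOP_CONF_DIR_IN_POD)
                    .endEnv()
                    .build();
    // FlinkPod.Builder is assumed; the mounted container replaces the original one.
    return new FlinkPod.Builder(flinkPod).withMainContainer(mainContainerWithHadoopConf).build();
}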
Use of org.apache.flink.kubernetes.kubeclient.FlinkPod in project flink by apache: class HadoopConfMountDecoratorTest, method testExistingHadoopConfigMap.
@Test
public void testExistingHadoopConfigMap() throws IOException {
    flinkConfig.set(KubernetesConfigOptions.HADOOP_CONF_CONFIG_MAP, EXISTING_HADOOP_CONF_CONFIG_MAP);
    assertEquals(0, hadoopConfMountDecorator.buildAccompanyingKubernetesResources().size());
    final FlinkPod resultFlinkPod = hadoopConfMountDecorator.decorateFlinkPod(baseFlinkPod);
    final List<Volume> volumes = resultFlinkPod.getPodWithoutMainContainer().getSpec().getVolumes();
    assertTrue(
            volumes.stream()
                    .anyMatch(
                            volume ->
                                    volume.getConfigMap().getName().equals(EXISTING_HADOOP_CONF_CONFIG_MAP)));
}
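Wiring an existing ConfigMap into the pod spec as a volume, which is what the assertion checks for, can be sketched with fabric8's PodBuilder. This is illustrative only; existingHadoopConfigMapName is a placeholder for the configured ConfigMap name:

// Add a volume backed by the pre-existing Hadoop ConfigMap to the pod spec.
// 'existingHadoopConfigMapName' is a placeholder, not a field of the real decorator.
final Pod podWithHadoopConfVolume =
        new PodBuilder(flinkPod.getPodWithoutMainContainer())
                .editOrNewSpec()
                    .addNewVolume()
                        .withName(Constants.HADOOP_CONF_VOLUME)
                        .withNewConfigMap()
                            .withName(existingHadoopConfigMapName)
                        .endConfigMap()
                    .endVolume()
                .endSpec()
                .build();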