Example use of org.apache.flink.kubernetes.operator.crd.status.FlinkDeploymentReconciliationStatus in the Apache flink-kubernetes-operator project.
Source: the DefaultValidatorTest class, method testValidationWithoutDefaultConfig.
@Test
public void testValidationWithoutDefaultConfig() {
testSuccess(dep -> {
});
// Test job validation
testError(dep -> dep.getSpec().getJob().setJarURI(null), "Jar URI must be defined");
testError(dep -> dep.getSpec().getJob().setState(JobState.SUSPENDED), "Job must start in running state");
testError(dep -> dep.getSpec().getJob().setParallelism(0), "Job parallelism must be larger than 0");
testError(dep -> dep.getSpec().getJob().setParallelism(-1), "Job parallelism must be larger than 0");
testError(dep -> {
dep.getSpec().setFlinkConfiguration(new HashMap<>());
dep.getSpec().getJob().setUpgradeMode(UpgradeMode.LAST_STATE);
}, "Job could not be upgraded with last-state while Kubernetes HA disabled");
testError(dep -> {
dep.getSpec().getFlinkConfiguration().remove(CheckpointingOptions.SAVEPOINT_DIRECTORY.key());
dep.getSpec().getJob().setUpgradeMode(UpgradeMode.SAVEPOINT);
}, String.format("Job could not be upgraded with savepoint while config key[%s] is not set", CheckpointingOptions.SAVEPOINT_DIRECTORY.key()));
testError(dep -> {
dep.getSpec().getFlinkConfiguration().remove(CheckpointingOptions.CHECKPOINTS_DIRECTORY.key());
dep.getSpec().getJob().setUpgradeMode(UpgradeMode.SAVEPOINT);
}, "Checkpoint directory");
testError(dep -> {
dep.getSpec().getFlinkConfiguration().remove(CheckpointingOptions.CHECKPOINTS_DIRECTORY.key());
dep.getSpec().getJob().setUpgradeMode(UpgradeMode.LAST_STATE);
}, "Checkpoint directory");
testSuccess(dep -> {
dep.getSpec().getFlinkConfiguration().remove(CheckpointingOptions.CHECKPOINTS_DIRECTORY.key());
dep.getSpec().getJob().setUpgradeMode(UpgradeMode.STATELESS);
});
testError(dep -> {
dep.getSpec().setFlinkConfiguration(new HashMap<>());
dep.getSpec().getJob().setSavepointTriggerNonce(ThreadLocalRandom.current().nextLong());
}, String.format("Savepoint could not be manually triggered for the running job while config key[%s] is not set", CheckpointingOptions.SAVEPOINT_DIRECTORY.key()));
testError(dep -> {
dep.getSpec().setFlinkConfiguration(Map.of(KubernetesOperatorConfigOptions.PERIODIC_SAVEPOINT_INTERVAL.key(), "1m"));
}, String.format("Periodic savepoints cannot be enabled when config key[%s] is not set", CheckpointingOptions.SAVEPOINT_DIRECTORY.key()));
// Test conf validation
testSuccess(dep -> dep.getSpec().setFlinkConfiguration(Collections.singletonMap("random", "config")));
testError(dep -> dep.getSpec().setFlinkConfiguration(Collections.singletonMap(KubernetesConfigOptions.NAMESPACE.key(), "myns")), "Forbidden Flink config key");
// Test log config validation
testSuccess(dep -> dep.getSpec().setLogConfiguration(Map.of(Constants.CONFIG_FILE_LOG4J_NAME, "rootLogger.level = INFO")));
testError(dep -> dep.getSpec().setIngress(new IngressSpec()), "Ingress template must be defined");
testError(dep -> dep.getSpec().setIngress(IngressSpec.builder().template("example.com:port").build()), "Unable to process the Ingress template(example.com:port). Error: Error at index 0 in: \"port\"");
testSuccess(dep -> dep.getSpec().setIngress(IngressSpec.builder().template("example.com/{{namespace}}/{{name}}").build()));
testError(dep -> dep.getSpec().setLogConfiguration(Map.of("random", "config")), "Invalid log config key");
testError(dep -> {
dep.getSpec().setFlinkConfiguration(new HashMap<>());
dep.getSpec().getJobManager().setReplicas(2);
}, "Kubernetes High availability should be enabled when starting standby JobManagers.");
testError(dep -> dep.getSpec().getJobManager().setReplicas(0), "JobManager replicas should not be configured less than one.");
// Test resource validation
testSuccess(dep -> dep.getSpec().getTaskManager().getResource().setMemory("1G"));
testSuccess(dep -> dep.getSpec().getTaskManager().getResource().setMemory("100"));
testError(dep -> dep.getSpec().getTaskManager().getResource().setMemory("invalid"), "TaskManager resource memory parse error");
testError(dep -> dep.getSpec().getJobManager().getResource().setMemory("invalid"), "JobManager resource memory parse error");
testError(dep -> dep.getSpec().getTaskManager().getResource().setMemory(null), "TaskManager resource memory must be defined");
testError(dep -> dep.getSpec().getJobManager().getResource().setMemory(null), "JobManager resource memory must be defined");
// Test savepoint restore validation
testSuccess(dep -> {
dep.setStatus(new FlinkDeploymentStatus());
dep.getStatus().setJobStatus(new JobStatus());
dep.getStatus().getJobStatus().getSavepointInfo().setLastSavepoint(Savepoint.of("sp", SavepointTriggerType.UPGRADE));
dep.getStatus().setReconciliationStatus(new FlinkDeploymentReconciliationStatus());
FlinkDeploymentSpec spec = ReconciliationUtils.clone(dep.getSpec());
spec.getJob().setState(JobState.SUSPENDED);
dep.getStatus().getReconciliationStatus().serializeAndSetLastReconciledSpec(spec);
dep.getSpec().getFlinkConfiguration().put(CheckpointingOptions.SAVEPOINT_DIRECTORY.key(), "file:///flink-data/savepoints");
dep.getSpec().getJob().setUpgradeMode(UpgradeMode.SAVEPOINT);
});
// Test cluster type validation
testError(dep -> {
dep.setStatus(new FlinkDeploymentStatus());
dep.getStatus().setJobStatus(new JobStatus());
dep.getStatus().setReconciliationStatus(new FlinkDeploymentReconciliationStatus());
dep.getStatus().getReconciliationStatus().serializeAndSetLastReconciledSpec(ReconciliationUtils.clone(dep.getSpec()));
dep.getSpec().setJob(null);
}, "Cannot switch from job to session cluster");
testError(dep -> {
dep.setStatus(new FlinkDeploymentStatus());
dep.getStatus().setJobStatus(new JobStatus());
dep.getStatus().setReconciliationStatus(new FlinkDeploymentReconciliationStatus());
FlinkDeploymentSpec spec = ReconciliationUtils.clone(dep.getSpec());
spec.setJob(null);
dep.getStatus().getReconciliationStatus().serializeAndSetLastReconciledSpec(spec);
}, "Cannot switch from session to job cluster");
// Test upgrade mode change validation
testError(dep -> {
dep.getSpec().getJob().setUpgradeMode(UpgradeMode.LAST_STATE);
dep.getSpec().getFlinkConfiguration().remove(CheckpointingOptions.SAVEPOINT_DIRECTORY.key());
dep.setStatus(new FlinkDeploymentStatus());
dep.getStatus().setJobStatus(new JobStatus());
dep.getStatus().setReconciliationStatus(new FlinkDeploymentReconciliationStatus());
FlinkDeploymentSpec spec = ReconciliationUtils.clone(dep.getSpec());
spec.getJob().setUpgradeMode(UpgradeMode.STATELESS);
spec.getFlinkConfiguration().remove(HighAvailabilityOptions.HA_MODE.key());
dep.getStatus().getReconciliationStatus().serializeAndSetLastReconciledSpec(spec);
dep.getStatus().setJobManagerDeploymentStatus(JobManagerDeploymentStatus.READY);
}, String.format("Job could not be upgraded to last-state while config key[%s] is not set", CheckpointingOptions.SAVEPOINT_DIRECTORY.key()));
testError(dep -> dep.getSpec().setFlinkVersion(null), "Flink Version must be defined.");
testSuccess(dep -> dep.getSpec().setFlinkVersion(FlinkVersion.v1_15));
testError(dep -> dep.getSpec().setServiceAccount(null), "spec.serviceAccount must be defined. If you use helm, its value should be the same with the name of jobServiceAccount.");
testSuccess(dep -> dep.getSpec().setServiceAccount("flink"));
}
Aggregations