Use of org.apache.flink.kubernetes.operator.crd.spec.FlinkDeploymentSpec in project flink-kubernetes-operator by apache.
Class SessionReconciler, method reconcile:
@Override
public void reconcile(
        String operatorNamespace,
        FlinkDeployment flinkApp,
        Context context,
        Configuration effectiveConfig)
        throws Exception {
    FlinkDeploymentSpec lastReconciledSpec =
            flinkApp.getStatus().getReconciliationStatus().getLastReconciledSpec();
    if (lastReconciledSpec == null) {
        flinkService.submitSessionCluster(flinkApp, effectiveConfig);
        flinkApp.getStatus().setJobManagerDeploymentStatus(JobManagerDeploymentStatus.DEPLOYING);
        IngressUtils.updateIngressRules(
                flinkApp, effectiveConfig, operatorNamespace, kubernetesClient, false);
    }
    boolean specChanged = !flinkApp.getSpec().equals(lastReconciledSpec);
    if (specChanged) {
        upgradeSessionCluster(flinkApp, effectiveConfig);
    }
    ReconciliationUtils.updateForSpecReconciliationSuccess(flinkApp);
}
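The session reconciler hinges on value equality of FlinkDeploymentSpec: any field that differs from the last reconciled spec flips specChanged and triggers upgradeSessionCluster. A minimal sketch of that contract, reusing the TestUtils helper and JUnit assertions that appear elsewhere on this page; it is an illustration, not code from the project, and assumes the spec classes implement field-by-field equals as the reconciler relies on:

    // Illustration only: FlinkDeploymentSpec equality is what drives specChanged above.
    FlinkDeployment a = TestUtils.buildApplicationCluster(); // helper used in the tests below
    FlinkDeployment b = TestUtils.buildApplicationCluster();
    Assertions.assertEquals(a.getSpec(), b.getSpec());         // identical specs -> no upgrade
    b.getSpec().setServiceAccount("another-service-account");  // any field change...
    Assertions.assertNotEquals(a.getSpec(), b.getSpec());      // ...and the next reconcile upgrades the cluster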
Use of org.apache.flink.kubernetes.operator.crd.spec.FlinkDeploymentSpec in project flink-kubernetes-operator by apache.
Class ApplicationReconcilerTest, method testRandomJobResultStorePath:
@Test
public void testRandomJobResultStorePath() throws Exception {
    TestingFlinkService flinkService = new TestingFlinkService();
    ApplicationReconciler reconciler =
            new ApplicationReconciler(kubernetesClient, flinkService, configManager);
    FlinkDeployment flinkApp = TestUtils.buildApplicationCluster();

    final String haStoragePath = "file:///flink-data/ha";
    flinkApp.getSpec()
            .getFlinkConfiguration()
            .put(HighAvailabilityOptions.HA_STORAGE_PATH.key(), haStoragePath);

    ObjectMeta deployMeta = flinkApp.getMetadata();
    FlinkDeploymentStatus status = flinkApp.getStatus();
    FlinkDeploymentSpec spec = flinkApp.getSpec();
    JobSpec jobSpec = spec.getJob();
    Configuration deployConfig = configManager.getDeployConfig(deployMeta, spec);

    status.getJobStatus().setState(org.apache.flink.api.common.JobStatus.FINISHED.name());
    status.setJobManagerDeploymentStatus(JobManagerDeploymentStatus.READY);
    reconciler.deployFlinkJob(deployMeta, jobSpec, status, deployConfig, Optional.empty(), false);
    String path1 = deployConfig.get(JobResultStoreOptions.STORAGE_PATH);
    Assertions.assertTrue(path1.startsWith(haStoragePath));

    status.getJobStatus().setState(org.apache.flink.api.common.JobStatus.FINISHED.name());
    status.setJobManagerDeploymentStatus(JobManagerDeploymentStatus.READY);
    reconciler.deployFlinkJob(deployMeta, jobSpec, status, deployConfig, Optional.empty(), false);
    String path2 = deployConfig.get(JobResultStoreOptions.STORAGE_PATH);
    Assertions.assertTrue(path2.startsWith(haStoragePath));

    assertNotEquals(path1, path2);
}
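The property under test is that every (re)deployment gets a fresh job result store location under the configured HA storage path, so a terminal result recorded by a previous run cannot suppress the next submission. A sketch of such a generator, purely for illustration; the helper name and the exact path layout chosen by ApplicationReconciler are assumptions, only the use of the standard Flink options is taken from the test above:

    // Sketch only: derive a unique job result store path under the HA storage directory.
    private static void setRandomJobResultStorePath(Configuration effectiveConfig) {
        if (effectiveConfig.contains(HighAvailabilityOptions.HA_STORAGE_PATH)) {
            effectiveConfig.set(JobResultStoreOptions.DELETE_ON_COMMIT, false);
            effectiveConfig.set(
                    JobResultStoreOptions.STORAGE_PATH,
                    effectiveConfig.get(HighAvailabilityOptions.HA_STORAGE_PATH)
                            + "/job-result-store/"
                            + UUID.randomUUID());
        }
    }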
Use of org.apache.flink.kubernetes.operator.crd.spec.FlinkDeploymentSpec in project flink-kubernetes-operator by apache.
Class DefaultValidator, method validateDeployment:
@Override
public Optional<String> validateDeployment(FlinkDeployment deployment) {
    FlinkDeploymentSpec spec = deployment.getSpec();
    Map<String, String> effectiveConfig = configManager.getDefaultConfig().toMap();
    if (spec.getFlinkConfiguration() != null) {
        effectiveConfig.putAll(spec.getFlinkConfiguration());
    }
    return firstPresent(
            validateFlinkVersion(spec.getFlinkVersion()),
            validateFlinkDeploymentConfig(effectiveConfig),
            validateIngress(
                    spec.getIngress(),
                    deployment.getMetadata().getName(),
                    deployment.getMetadata().getNamespace()),
            validateLogConfig(spec.getLogConfiguration()),
            validateJobSpec(spec.getJob(), effectiveConfig),
            validateJmSpec(spec.getJobManager(), effectiveConfig),
            validateTmSpec(spec.getTaskManager()),
            validateSpecChange(deployment, effectiveConfig),
            validateServiceAccount(spec.getServiceAccount()));
}
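An empty Optional means the spec passed all checks; a present value carries the first validation error found. A hedged sketch of how a caller might consume the result; the validator and deployment variables and the SLF4J LOG field are assumptions, not taken from the project:

    // Assumed: 'validator' is a DefaultValidator, 'deployment' a FlinkDeployment under validation.
    Optional<String> validationError = validator.validateDeployment(deployment);
    validationError.ifPresent(err -> LOG.warn("Invalid FlinkDeployment spec: {}", err));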
Use of org.apache.flink.kubernetes.operator.crd.spec.FlinkDeploymentSpec in project flink-kubernetes-operator by apache.
Class FlinkOperatorITCase, method buildSessionCluster:
private static FlinkDeployment buildSessionCluster() {
    FlinkDeployment deployment = new FlinkDeployment();
    deployment.setMetadata(
            new ObjectMetaBuilder()
                    .withName("test-session-cluster")
                    .withNamespace(TEST_NAMESPACE)
                    .build());

    FlinkDeploymentSpec spec = new FlinkDeploymentSpec();
    spec.setImage(IMAGE);
    spec.setFlinkVersion(FlinkVersion.v1_15);
    spec.setServiceAccount(SERVICE_ACCOUNT);

    Resource resource = new Resource();
    resource.setMemory("2048m");
    resource.setCpu(1.0);

    JobManagerSpec jm = new JobManagerSpec();
    jm.setResource(resource);
    jm.setReplicas(1);
    spec.setJobManager(jm);

    TaskManagerSpec tm = new TaskManagerSpec();
    tm.setResource(resource);
    spec.setTaskManager(tm);

    deployment.setSpec(spec);
    return deployment;
}
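In an integration test the built FlinkDeployment custom resource is submitted through the Kubernetes client so the operator picks it up. A usage sketch along those lines, assuming a Fabric8 KubernetesClient field named client; it is not copied from the IT case:

    // Usage sketch (assumed 'client' field, Fabric8 typed resource API):
    FlinkDeployment sessionCluster = buildSessionCluster();
    client.resources(FlinkDeployment.class)
            .inNamespace(TEST_NAMESPACE)
            .createOrReplace(sessionCluster);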
Use of org.apache.flink.kubernetes.operator.crd.spec.FlinkDeploymentSpec in project flink-kubernetes-operator by apache.
Class ApplicationReconciler, method reconcile:
@Override
public void reconcile(FlinkDeployment flinkApp, Context context) throws Exception {
    ObjectMeta deployMeta = flinkApp.getMetadata();
    FlinkDeploymentStatus status = flinkApp.getStatus();
    ReconciliationStatus<FlinkDeploymentSpec> reconciliationStatus = status.getReconciliationStatus();
    FlinkDeploymentSpec lastReconciledSpec = reconciliationStatus.deserializeLastReconciledSpec();
    FlinkDeploymentSpec currentDeploySpec = flinkApp.getSpec();

    JobSpec desiredJobSpec = currentDeploySpec.getJob();
    Configuration deployConfig = configManager.getDeployConfig(deployMeta, currentDeploySpec);
    if (lastReconciledSpec == null) {
        LOG.debug("Deploying application for the first time");
        deployFlinkJob(
                deployMeta,
                desiredJobSpec,
                status,
                deployConfig,
                Optional.ofNullable(desiredJobSpec.getInitialSavepointPath()),
                false);
        IngressUtils.updateIngressRules(deployMeta, currentDeploySpec, deployConfig, kubernetesClient);
        ReconciliationUtils.updateForSpecReconciliationSuccess(flinkApp, JobState.RUNNING);
        return;
    }

    if (!deployConfig.getBoolean(KubernetesOperatorConfigOptions.JOB_UPGRADE_IGNORE_PENDING_SAVEPOINT)
            && SavepointUtils.savepointInProgress(status.getJobStatus())) {
        LOG.info("Delaying job reconciliation until pending savepoint is completed.");
        return;
    }

    Configuration observeConfig = configManager.getObserveConfig(flinkApp);
    boolean specChanged = !currentDeploySpec.equals(lastReconciledSpec);
    if (specChanged) {
        if (newSpecIsAlreadyDeployed(flinkApp)) {
            return;
        }
        LOG.debug("Detected spec change, starting upgrade process.");
        JobState currentJobState = lastReconciledSpec.getJob().getState();
        JobState desiredJobState = desiredJobSpec.getState();
        JobState stateAfterReconcile = currentJobState;
        if (currentJobState == JobState.RUNNING) {
            if (desiredJobState == JobState.RUNNING) {
                LOG.info("Upgrading/Restarting running job, suspending first...");
            }
            Optional<UpgradeMode> availableUpgradeMode = getAvailableUpgradeMode(flinkApp, deployConfig);
            if (availableUpgradeMode.isEmpty()) {
                return;
            }
            // We must record the upgrade mode used to the status later
            desiredJobSpec.setUpgradeMode(availableUpgradeMode.get());
            flinkService.cancelJob(flinkApp, availableUpgradeMode.get());
            stateAfterReconcile = JobState.SUSPENDED;
        }
        if (currentJobState == JobState.SUSPENDED && desiredJobState == JobState.RUNNING) {
            // We decide to enforce HA based on how job was previously suspended
            restoreJob(
                    deployMeta,
                    desiredJobSpec,
                    status,
                    deployConfig,
                    lastReconciledSpec.getJob().getUpgradeMode() == UpgradeMode.LAST_STATE);
            stateAfterReconcile = JobState.RUNNING;
        }
        ReconciliationUtils.updateForSpecReconciliationSuccess(flinkApp, stateAfterReconcile);
        IngressUtils.updateIngressRules(deployMeta, currentDeploySpec, deployConfig, kubernetesClient);
    } else if (ReconciliationUtils.shouldRollBack(flinkService, reconciliationStatus, observeConfig)) {
        rollbackApplication(flinkApp);
    } else if (ReconciliationUtils.shouldRecoverDeployment(observeConfig, flinkApp)) {
        recoverJmDeployment(flinkApp, observeConfig);
    } else {
        if (!SavepointUtils.triggerSavepointIfNeeded(flinkService, flinkApp, observeConfig)) {
            LOG.info("Deployment is fully reconciled, nothing to do.");
        }
    }
}
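The spec-change branch above is effectively a per-pass transition on JobState: a running job is cancelled first, with the UpgradeMode that was actually usable recorded into the spec, and only a job already recorded as suspended gets redeployed. A condensed illustration of that transition, not operator code; the real branch also returns early when no UpgradeMode is currently available:

    // Illustration of the per-pass transition in the spec-change branch above.
    JobState stateAfterReconcile(JobState current, JobState desired) {
        if (current == JobState.RUNNING) {
            return JobState.SUSPENDED; // cancel first; the restore runs on a later reconcile pass
        }
        if (current == JobState.SUSPENDED && desired == JobState.RUNNING) {
            return JobState.RUNNING;   // redeploy, enforcing HA when the last upgrade used LAST_STATE
        }
        return current;                // e.g. job suspended and desired state is also SUSPENDED
    }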