use of io.fabric8.kubernetes.api.model.extensions.DeploymentBuilder in project strimzi-kafka-operator by strimzi.
the class BundleResource method buildBundleDeployment.
public DeploymentBuilder buildBundleDeployment() {
    Deployment clusterOperator = DeploymentResource.getDeploymentFromYaml(PATH_TO_CO_CONFIG);
    // Get env from config file
    List<EnvVar> envVars = clusterOperator.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv();
    // Get default CO image
    String coImage = clusterOperator.getSpec().getTemplate().getSpec().getContainers().get(0).getImage();
    // Update images
    for (EnvVar envVar : envVars) {
        switch (envVar.getName()) {
            case "STRIMZI_NAMESPACE":
                envVar.setValue(namespaceToWatch);
                envVar.setValueFrom(null);
                break;
            case "STRIMZI_FULL_RECONCILIATION_INTERVAL_MS":
                envVar.setValue(Long.toString(reconciliationInterval));
                break;
            case "STRIMZI_OPERATION_TIMEOUT_MS":
                envVar.setValue(Long.toString(operationTimeout));
                break;
            case "STRIMZI_FEATURE_GATES":
                envVar.setValue(Environment.STRIMZI_FEATURE_GATES);
                break;
            default:
                if (envVar.getName().contains("KAFKA_BRIDGE_IMAGE")) {
                    envVar.setValue(Environment.useLatestReleasedBridge() ? envVar.getValue() : Environment.BRIDGE_IMAGE);
                } else if (envVar.getName().contains("STRIMZI_DEFAULT")) {
                    envVar.setValue(StUtils.changeOrgAndTag(envVar.getValue()));
                } else if (envVar.getName().contains("IMAGES")) {
                    envVar.setValue(StUtils.changeOrgAndTagInImageMap(envVar.getValue()));
                }
        }
    }
envVars.add(new EnvVar("STRIMZI_IMAGE_PULL_POLICY", Environment.COMPONENTS_IMAGE_PULL_POLICY, null));
envVars.add(new EnvVar("STRIMZI_LOG_LEVEL", Environment.STRIMZI_LOG_LEVEL, null));
if (extraEnvVars != null) {
envVars.forEach(envVar -> extraEnvVars.stream().filter(extraVar -> envVar.getName().equals(extraVar.getName())).findFirst().ifPresent(xVar -> envVar.setValue(xVar.getValue())));
}
if (Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET != null && !Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET.isEmpty()) {
// for strimzi-operator
List<LocalObjectReference> imagePullSecrets = Collections.singletonList(new LocalObjectReference(Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET));
clusterOperator.getSpec().getTemplate().getSpec().setImagePullSecrets(imagePullSecrets);
// for kafka
envVars.add(new EnvVar("STRIMZI_IMAGE_PULL_SECRETS", Environment.SYSTEM_TEST_STRIMZI_IMAGE_PULL_SECRET, null));
}
// adding custom evn vars specified by user in installation
if (extraEnvVars != null) {
envVars.addAll(extraEnvVars);
}
    // Remove duplicates from envVars
    List<EnvVar> envVarsWithoutDuplicates = envVars.stream().distinct().collect(Collectors.toList());
    // Apply updated env variables
    clusterOperator.getSpec().getTemplate().getSpec().getContainers().get(0).setEnv(envVarsWithoutDuplicates);
    return new DeploymentBuilder(clusterOperator)
        .editMetadata()
            .withName(name)
            .withNamespace(namespaceInstallTo)
            .addToLabels(Constants.DEPLOYMENT_TYPE, DeploymentTypes.BundleClusterOperator.name())
        .endMetadata()
        .editSpec()
            .withNewSelector()
                .addToMatchLabels("name", Constants.STRIMZI_DEPLOYMENT_NAME)
                .addToMatchLabels(this.extraLabels)
            .endSelector()
            .editTemplate()
                .editMetadata()
                    .addToLabels(this.extraLabels)
                .endMetadata()
                .editSpec()
                    .editFirstContainer()
                        .withImage(StUtils.changeOrgAndTag(coImage))
                        .withImagePullPolicy(Environment.OPERATOR_IMAGE_PULL_POLICY)
                    .endContainer()
                    .editFirstVolume()
                        .editEmptyDir()
                            .withNewSizeLimit("2Mi")
                        .endEmptyDir()
                    .endVolume()
                .endSpec()
            .endTemplate()
        .endSpec();
}
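The method deliberately returns the DeploymentBuilder rather than a finished Deployment, so callers can layer further customisation on top before materialising the resource. A minimal sketch of consuming such a builder with a plain fabric8 client follows; the bundleResource instance, the client bootstrap, and the create call are assumptions, not part of the original snippet (the Strimzi system tests normally apply this deployment through their own test framework).

import io.fabric8.kubernetes.api.model.apps.Deployment;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;

// Hypothetical caller: 'bundleResource' is an assumed, already-configured BundleResource instance.
Deployment coDeployment = bundleResource.buildBundleDeployment().build();
try (KubernetesClient client = new KubernetesClientBuilder().build()) {
    client.apps().deployments()
          .inNamespace(coDeployment.getMetadata().getNamespace())
          .createOrReplace(coDeployment);
}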
use of io.fabric8.kubernetes.api.model.extensions.DeploymentBuilder in project flink by splunk.
the class KubernetesJobManagerFactory method createJobManagerDeployment.
private static Deployment createJobManagerDeployment(FlinkPod flinkPod, KubernetesJobManagerParameters kubernetesJobManagerParameters) {
    final Container resolvedMainContainer = flinkPod.getMainContainer();
    final Pod resolvedPod = new PodBuilder(flinkPod.getPodWithoutMainContainer())
        .editOrNewSpec()
            .addToContainers(resolvedMainContainer)
        .endSpec()
        .build();
    return new DeploymentBuilder()
        .withApiVersion(Constants.APPS_API_VERSION)
        .editOrNewMetadata()
            .withName(KubernetesUtils.getDeploymentName(kubernetesJobManagerParameters.getClusterId()))
            .withAnnotations(kubernetesJobManagerParameters.getAnnotations())
            .withLabels(kubernetesJobManagerParameters.getLabels())
            .withOwnerReferences(kubernetesJobManagerParameters.getOwnerReference().stream()
                .map(e -> KubernetesOwnerReference.fromMap(e).getInternalResource())
                .collect(Collectors.toList()))
        .endMetadata()
        .editOrNewSpec()
            .withReplicas(kubernetesJobManagerParameters.getReplicas())
            .editOrNewTemplate()
                .withMetadata(resolvedPod.getMetadata())
                .withSpec(resolvedPod.getSpec())
            .endTemplate()
            .editOrNewSelector()
                .addToMatchLabels(kubernetesJobManagerParameters.getSelectors())
            .endSelector()
        .endSpec()
        .build();
}
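One detail worth calling out in the Flink snippet is the use of the editOrNew* accessors: on an empty builder they create the nested object, and on a builder seeded from an existing resource they edit it in place, so the same fluent chain works in both situations. A small, standalone illustration follows; it assumes the apps/v1 model classes from a recent fabric8 client, and the names are placeholders rather than Flink code.

import io.fabric8.kubernetes.api.model.apps.Deployment;
import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder;

// Starting from scratch: editOrNewMetadata() creates the metadata block.
Deployment fresh = new DeploymentBuilder()
        .editOrNewMetadata().withName("jobmanager").endMetadata()
        .build();
// Starting from an existing object: the same accessor edits the metadata that is already there.
Deployment relabelled = new DeploymentBuilder(fresh)
        .editOrNewMetadata().addToLabels("app", "jobmanager").endMetadata()
        .build();
// 'relabelled' keeps the name "jobmanager" and additionally carries the new label.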
use of io.fabric8.kubernetes.api.model.extensions.DeploymentBuilder in project keycloak by keycloak.
the class KeycloakDeployment method getReconciledResource.
@Override
public Optional<HasMetadata> getReconciledResource() {
    // clone so that the base template is not changed
    Deployment baseDeployment = new DeploymentBuilder(this.baseDeployment).build();
    Deployment reconciledDeployment;
    if (existingDeployment == null) {
        Log.info("No existing Deployment found, using the default");
        reconciledDeployment = baseDeployment;
    } else {
        Log.info("Existing Deployment found, updating specs");
        reconciledDeployment = new DeploymentBuilder(existingDeployment).build();
        // don't overwrite metadata, just specs
        reconciledDeployment.setSpec(baseDeployment.getSpec());
        // don't overwrite annotations in pod templates to support rolling restarts
        if (existingDeployment.getSpec() != null && existingDeployment.getSpec().getTemplate() != null) {
            mergeMaps(
                Optional.ofNullable(reconciledDeployment.getSpec().getTemplate().getMetadata()).map(m -> m.getAnnotations()).orElse(null),
                Optional.ofNullable(existingDeployment.getSpec().getTemplate().getMetadata()).map(m -> m.getAnnotations()).orElse(null),
                annotations -> reconciledDeployment.getSpec().getTemplate().getMetadata().setAnnotations(annotations));
        }
    }
    return Optional.of(reconciledDeployment);
}
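The mergeMaps helper is referenced above but not part of the snippet. Purely as an illustration, here is a minimal sketch of what such a helper could look like, inferred only from the call site; the name, parameter roles, and exact merge semantics of the real Keycloak implementation are assumptions.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

// Hypothetical helper, inferred from the call site above; the real Keycloak code may differ.
// Combines two possibly-null maps and hands the result to the setter, letting entries from the
// second map (the existing pod-template annotations) win so rolling-restart annotations survive.
static void mergeMaps(Map<String, String> base, Map<String, String> overrides, Consumer<Map<String, String>> setter) {
    Map<String, String> merged = new HashMap<>();
    if (base != null) {
        merged.putAll(base);
    }
    if (overrides != null) {
        merged.putAll(overrides);
    }
    setter.accept(merged);
}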
use of io.fabric8.kubernetes.api.model.extensions.DeploymentBuilder in project quarkus-operator-sdk by quarkiverse.
the class DeploymentDependent method desired.
@SuppressWarnings("unchecked")
public Deployment desired(ExposedApp exposedApp, Context context) {
final var labels = (Map<String, String>) context.managedDependentResourceContext().getMandatory(LABELS_CONTEXT_KEY, Map.class);
final var name = exposedApp.getMetadata().getName();
final var spec = exposedApp.getSpec();
final var imageRef = spec.getImageRef();
final var env = spec.getEnv();
var containerBuilder = new DeploymentBuilder().withMetadata(createMetadata(exposedApp, labels)).withNewSpec().withNewSelector().withMatchLabels(labels).endSelector().withNewTemplate().withNewMetadata().withLabels(labels).endMetadata().withNewSpec().addNewContainer().withName(name).withImage(imageRef);
// add env variables
if (env != null) {
env.forEach((key, value) -> containerBuilder.addNewEnv().withName(key.toUpperCase()).withValue(value).endEnv());
}
return containerBuilder.addNewPort().withName("http").withProtocol("TCP").withContainerPort(8080).endPort().endContainer().endSpec().endTemplate().endSpec().build();
}
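The createMetadata helper is likewise not shown in the snippet. A minimal, hypothetical sketch based only on the call site follows; the real DeploymentDependent implementation may additionally set the namespace or owner references on the returned ObjectMeta.

import io.fabric8.kubernetes.api.model.ObjectMeta;
import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
import java.util.Map;

// Hypothetical helper: builds metadata for the Deployment from the custom resource and shared labels.
static ObjectMeta createMetadata(ExposedApp exposedApp, Map<String, String> labels) {
    return new ObjectMetaBuilder()
            .withName(exposedApp.getMetadata().getName())
            .withLabels(labels)
            .build();
}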
use of io.fabric8.kubernetes.api.model.extensions.DeploymentBuilder in project strimzi-kafka-operator by strimzi.
the class KafkaClientsTemplates method kafkaClients.
public static DeploymentBuilder kafkaClients(String namespaceName, boolean tlsListener, String kafkaClientsName, boolean hostnameVerification, String listenerName, String secretPrefix, KafkaUser... kafkaUsers) {
    Map<String, String> label = new HashMap<>();
    label.put(Constants.KAFKA_CLIENTS_LABEL_KEY, Constants.KAFKA_CLIENTS_LABEL_VALUE);
    label.put(Constants.DEPLOYMENT_TYPE, DeploymentTypes.KafkaClients.name());
    DeploymentBuilder kafkaClient = new DeploymentBuilder()
        .withNewMetadata()
            .withName(kafkaClientsName)
            .withLabels(label)
            .withNamespace(namespaceName)
        .endMetadata()
        .withNewSpec()
            .withNewSelector()
                .addToMatchLabels("app", kafkaClientsName)
                .addToMatchLabels(label)
            .endSelector()
            .withReplicas(1)
            .withNewTemplate()
                .withNewMetadata()
                    .addToLabels("app", kafkaClientsName)
                    .addToLabels(label)
                .endMetadata()
                .withSpec(createClientSpec(namespaceName, tlsListener, kafkaClientsName, hostnameVerification, listenerName, secretPrefix, kafkaUsers))
            .endTemplate()
        .endSpec();
    return kafkaClient;
}
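Because kafkaClients also returns a DeploymentBuilder, a test can still tweak the template before building it. A rough sketch of materialising the deployment and waiting for it with a plain fabric8 client follows; the argument values and the client bootstrap are placeholders, the pod spec comes from the createClientSpec helper not shown here, and the Strimzi system tests normally manage these resources through their own test framework rather than a raw client.

import io.fabric8.kubernetes.api.model.apps.Deployment;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;
import java.util.concurrent.TimeUnit;

// Placeholder arguments: namespace, no TLS, deployment name, no hostname verification, listener name, no secret prefix.
Deployment clients = kafkaClients("my-namespace", false, "my-kafka-clients", false, "plain", null).build();
try (KubernetesClient client = new KubernetesClientBuilder().build()) {
    client.apps().deployments().inNamespace("my-namespace").createOrReplace(clients);
    client.apps().deployments().inNamespace("my-namespace").withName("my-kafka-clients")
          .waitUntilReady(5, TimeUnit.MINUTES);
}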