Use of io.fabric8.kubernetes.api.model.apps.DeploymentBuilder in project che by eclipse.
From the class OpenShiftConnector, method createOpenShiftDeployment.
private String createOpenShiftDeployment(String workspaceID,
                                         String imageName,
                                         String sanitizedContainerName,
                                         Set<String> exposedPorts,
                                         String[] envVariables,
                                         String[] volumes,
                                         boolean runContainerAsRoot) {
    String deploymentName = CHE_OPENSHIFT_RESOURCES_PREFIX + workspaceID;
    LOG.info("Creating OpenShift deployment {}", deploymentName);

    Map<String, String> selector = Collections.singletonMap(OPENSHIFT_DEPLOYMENT_LABEL, deploymentName);

    LOG.info("Adding container {} to OpenShift deployment {}", sanitizedContainerName, deploymentName);
    Long UID = runContainerAsRoot ? UID_ROOT : UID_USER;
    Container container = new ContainerBuilder()
            .withName(sanitizedContainerName)
            .withImage(imageName)
            .withEnv(KubernetesEnvVar.getEnvFrom(envVariables))
            .withPorts(KubernetesContainer.getContainerPortsFrom(exposedPorts))
            .withImagePullPolicy(OPENSHIFT_IMAGE_PULL_POLICY_IFNOTPRESENT)
            .withNewSecurityContext()
                .withRunAsUser(UID)
                .withPrivileged(true)
            .endSecurityContext()
            .withLivenessProbe(getLivenessProbeFrom(exposedPorts))
            .withVolumeMounts(getVolumeMountsFrom(volumes, workspaceID))
            .build();

    PodSpec podSpec = new PodSpecBuilder()
            .withContainers(container)
            .withVolumes(getVolumesFrom(volumes, workspaceID))
            .withServiceAccountName(this.openShiftCheServiceAccount)
            .build();

    Deployment deployment = new DeploymentBuilder()
            .withNewMetadata()
                .withName(deploymentName)
                .withNamespace(this.openShiftCheProjectName)
            .endMetadata()
            .withNewSpec()
                .withReplicas(1)
                .withNewSelector()
                    .withMatchLabels(selector)
                .endSelector()
                .withNewTemplate()
                    .withNewMetadata()
                        .withLabels(selector)
                    .endMetadata()
                    .withSpec(podSpec)
                .endTemplate()
            .endSpec()
            .build();

    deployment = openShiftClient.extensions()
            .deployments()
            .inNamespace(this.openShiftCheProjectName)
            .create(deployment);

    LOG.info("OpenShift deployment {} created", deploymentName);
    return deployment.getMetadata().getName();
}
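For reference, the same labelled single-replica Deployment shape can be written as a small standalone sketch against the fabric8 client. This is not Che's code: the name, namespace, label and image below are illustrative placeholders, and it targets the apps API group instead of the extensions endpoint used above.

import java.util.Collections;
import java.util.Map;

import io.fabric8.kubernetes.api.model.apps.Deployment;
import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder;
import io.fabric8.kubernetes.client.DefaultKubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClient;

public class DeploymentSketch {

    public static void main(String[] args) {
        // Illustrative values only -- not Che's actual prefix, namespace or image.
        String deploymentName = "workspace-example";
        String namespace = "my-project";
        Map<String, String> selector = Collections.singletonMap("deployment", deploymentName);

        // Same builder pattern as above: metadata, replica count, selector and pod template.
        Deployment deployment = new DeploymentBuilder()
                .withNewMetadata()
                    .withName(deploymentName)
                    .withNamespace(namespace)
                .endMetadata()
                .withNewSpec()
                    .withReplicas(1)
                    .withNewSelector()
                        .withMatchLabels(selector)
                    .endSelector()
                    .withNewTemplate()
                        .withNewMetadata()
                            .withLabels(selector)
                        .endMetadata()
                        .withNewSpec()
                            .addNewContainer()
                                .withName("main")
                                .withImage("example/workspace-image:latest")
                            .endContainer()
                        .endSpec()
                    .endTemplate()
                .endSpec()
                .build();

        // The apps() DSL matches the apps.DeploymentBuilder model; createOrReplace is idempotent.
        try (KubernetesClient client = new DefaultKubernetesClient()) {
            client.apps().deployments().inNamespace(namespace).createOrReplace(deployment);
        }
    }
}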
Use of io.fabric8.kubernetes.api.model.apps.DeploymentBuilder in project curiostack by curioswitch.
From the class DeployPodTask, method exec.
@TaskAction
public void exec() {
    ImmutableDeploymentExtension config =
            getProject().getExtensions().getByType(DeploymentExtension.class);
    final ImmutableDeploymentConfiguration deploymentConfig = config.getTypes().getByName(type);
    ImmutableGcloudExtension gcloud =
            getProject().getRootProject().getExtensions().getByType(GcloudExtension.class);

    ImmutableList.Builder<EnvVar> envVars = ImmutableList.<EnvVar>builder()
            .addAll(deploymentConfig.envVars().entrySet().stream()
                    .map((entry) -> new EnvVar(entry.getKey(), entry.getValue(), null))
                    ::iterator)
            .addAll(deploymentConfig.secretEnvVars().entrySet().stream()
                    .map((entry) -> new EnvVar(
                            entry.getKey(),
                            null,
                            new EnvVarSourceBuilder()
                                    .withSecretKeyRef(new SecretKeySelectorBuilder()
                                            .withName(entry.getValue().get(0))
                                            .withKey(entry.getValue().get(1))
                                            .build())
                                    .build()))
                    ::iterator);

    if (!deploymentConfig.envVars().containsKey("JAVA_OPTS")) {
        int heapSize = deploymentConfig.jvmHeapMb();
        StringBuilder javaOpts = new StringBuilder();
        javaOpts.append("--add-opens java.base/jdk.internal.misc=ALL-UNNAMED ")
                .append("--add-opens jdk.unsupported/sun.misc=ALL-UNNAMED ")
                .append("-Xms").append(heapSize).append("m ")
                .append("-Xmx").append(heapSize).append("m ")
                .append("-Dconfig.resource=application-").append(type).append(".conf ")
                .append("-Dmonitoring.stackdriverProjectId=").append(gcloud.clusterProject()).append(" ")
                .append("-Dmonitoring.serverName=").append(deploymentConfig.deploymentName()).append(" ");
        if (!deploymentConfig.request()) {
            int numCpus = (int) Math.ceil(Double.parseDouble(deploymentConfig.cpu()));
            int numWorkers = numCpus * 2;
            javaOpts.append("-XX:ParallelGCThreads=").append(numCpus).append(" ")
                    .append("-Dcom.linecorp.armeria.numCommonWorkers=").append(numWorkers).append(" ")
                    .append("-Dio.netty.availableProcessors=").append(numCpus).append(" ");
        }
        if (!type.equals("prod")) {
            javaOpts.append("-Dcom.linecorp.armeria.verboseExceptions=true ");
        }
        envVars.add(new EnvVar("JAVA_OPTS", javaOpts.toString(), null));
    }

    Map<String, Quantity> resources = ImmutableMap.of(
            "cpu", new Quantity(deploymentConfig.cpu()),
            "memory", new Quantity(deploymentConfig.memoryMb() + "Mi"));

    Deployment deployment = new DeploymentBuilder()
            .withMetadata(new ObjectMetaBuilder()
                    .withNamespace(deploymentConfig.namespace())
                    .withName(deploymentConfig.deploymentName())
                    .build())
            .withSpec(new DeploymentSpecBuilder()
                    .withReplicas(deploymentConfig.replicas())
                    .withStrategy(new DeploymentStrategyBuilder()
                            .withType("RollingUpdate")
                            .withRollingUpdate(new RollingUpdateDeploymentBuilder()
                                    .withNewMaxUnavailable(0)
                                    .build())
                            .build())
                    .withSelector(new LabelSelectorBuilder()
                            .withMatchLabels(ImmutableMap.of("name", deploymentConfig.deploymentName()))
                            .build())
                    .withTemplate(new PodTemplateSpecBuilder()
                            .withMetadata(new ObjectMetaBuilder()
                                    .withLabels(ImmutableMap.of(
                                            "name", deploymentConfig.deploymentName(),
                                            "revision", System.getenv().getOrDefault("REVISION_ID", "none")))
                                    .withAnnotations(ImmutableMap.<String, String>builder()
                                            .put("prometheus.io/scrape", "true")
                                            .put("prometheus.io/scheme", "https")
                                            .put("prometheus.io/path", "/internal/metrics")
                                            .put("prometheus.io/port", String.valueOf(deploymentConfig.containerPort()))
                                            .build())
                                    .build())
                            .withSpec(new PodSpecBuilder()
                                    .withContainers(new ContainerBuilder()
                                            .withResources(new ResourceRequirementsBuilder()
                                                    .withLimits(!deploymentConfig.request() ? resources : ImmutableMap.of())
                                                    .withRequests(deploymentConfig.request() ? resources : ImmutableMap.of())
                                                    .build())
                                            .withImage(deploymentConfig.image())
                                            .withName(deploymentConfig.deploymentName())
                                            .withEnv(envVars.build())
                                            .withImagePullPolicy("Always")
                                            .withReadinessProbe(createProbe(deploymentConfig, Duration.ofSeconds(5)))
                                            .withLivenessProbe(createProbe(deploymentConfig, Duration.ofSeconds(15)))
                                            .withPorts(ImmutableList.of(new ContainerPortBuilder()
                                                    .withContainerPort(deploymentConfig.containerPort())
                                                    .withName("http")
                                                    .build()))
                                            .withVolumeMounts(
                                                    new VolumeMountBuilder()
                                                            .withName("tls")
                                                            .withMountPath("/etc/tls")
                                                            .withReadOnly(true)
                                                            .build(),
                                                    new VolumeMountBuilder()
                                                            .withName("rpcacls")
                                                            .withMountPath("/etc/rpcacls")
                                                            .withReadOnly(true)
                                                            .build())
                                            .build())
                                    .withVolumes(
                                            new VolumeBuilder()
                                                    .withName("tls")
                                                    .withSecret(new SecretVolumeSourceBuilder()
                                                            .withSecretName("server-tls")
                                                            .build())
                                                    .build(),
                                            new VolumeBuilder()
                                                    .withName("rpcacls")
                                                    .withConfigMap(new ConfigMapVolumeSourceBuilder()
                                                            .withName("rpcacls")
                                                            .build())
                                                    .build())
                                    .build())
                            .build())
                    .build())
            .build();

    KubernetesClient client = new DefaultKubernetesClient();

    Service service = new ServiceBuilder()
            .withMetadata(new ObjectMetaBuilder()
                    .withName(deploymentConfig.deploymentName())
                    .withNamespace(deploymentConfig.namespace())
                    .withAnnotations(ImmutableMap.<String, String>builder()
                            .put("service.alpha.kubernetes.io/app-protocols", "{\"https\":\"HTTPS\"}")
                            .put("prometheus.io/scrape", "true")
                            .put("prometheus.io/scheme", "https")
                            .put("prometheus.io/path", "/internal/metrics")
                            .put("prometheus.io/port", String.valueOf(deploymentConfig.containerPort()))
                            .put("prometheus.io/probe", "true")
                            .build())
                    .build())
            .withSpec(createServiceSpec(deploymentConfig))
            .build();

    Map<String, Service> additionalServices = new HashMap<>();
    for (String path : deploymentConfig.additionalServicePaths()) {
        String sanitizedPath = path;
        if (sanitizedPath.endsWith("/*")) {
            sanitizedPath = sanitizedPath.substring(0, path.length() - 2);
        }
        String serviceName = deploymentConfig.deploymentName() + sanitizedPath.replace('/', '-');
        additionalServices.put(path, new ServiceBuilder()
                .withMetadata(new ObjectMetaBuilder()
                        .withName(serviceName)
                        .withNamespace(deploymentConfig.namespace())
                        .withAnnotations(ImmutableMap.of(
                                "service.alpha.kubernetes.io/app-protocols", "{\"https\":\"HTTPS\"}"))
                        .build())
                .withSpec(createServiceSpec(deploymentConfig))
                .build());
    }

    client.resource(deployment).createOrReplace();
    deployService(service, client);
    additionalServices.values().forEach(s -> deployService(s, client));

    if (deploymentConfig.externalHost() != null) {
        List<HTTPIngressPath> ingressPaths = new ArrayList<>();
        additionalServices.forEach((path, s) ->
                ingressPaths.add(createIngressPath(path, s.getMetadata().getName(), deploymentConfig)));
        ingressPaths.add(createIngressPath("/*", deploymentConfig.deploymentName(), deploymentConfig));

        Ingress ingress = new IngressBuilder()
                .withMetadata(new ObjectMetaBuilder()
                        .withNamespace(deploymentConfig.namespace())
                        .withName(deploymentConfig.deploymentName())
                        .withAnnotations(ImmutableMap.of(
                                "kubernetes.io/tls-acme", "true",
                                "kubernetes.io/ingress.class", "gce"))
                        .build())
                .withSpec(new IngressSpecBuilder()
                        .withTls(new IngressTLSBuilder()
                                .withSecretName(deploymentConfig.deploymentName() + "-tls")
                                .withHosts(deploymentConfig.externalHost())
                                .build())
                        .withRules(new IngressRuleBuilder()
                                .withHost(deploymentConfig.externalHost())
                                .withHttp(new HTTPIngressRuleValueBuilder()
                                        .withPaths(ingressPaths)
                                        .build())
                                .build())
                        .build())
                .build();
        client.resource(ingress).createOrReplace();
    }
}
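The createProbe and createServiceSpec helpers referenced above belong to the project and are not part of this excerpt. As a rough idea of what the probe helper could look like, here is a minimal sketch against fabric8's ProbeBuilder; the health path, HTTPS scheme, timing choices and the DeploymentConfigView interface are assumptions for illustration, not the real curiostack implementation.

import java.time.Duration;

import io.fabric8.kubernetes.api.model.IntOrString;
import io.fabric8.kubernetes.api.model.Probe;
import io.fabric8.kubernetes.api.model.ProbeBuilder;

final class ProbeSketch {

    // Hypothetical stand-in for the ImmutableDeploymentConfiguration used above.
    interface DeploymentConfigView {
        int containerPort();
    }

    // Sketch of what a createProbe(deploymentConfig, timeout) helper could return:
    // an HTTPS GET against a health endpoint, with the Duration driving the probe timing.
    static Probe createProbe(DeploymentConfigView config, Duration timeout) {
        return new ProbeBuilder()
                .withNewHttpGet()
                    .withPath("/internal/health")                    // illustrative path
                    .withPort(new IntOrString(config.containerPort()))
                    .withScheme("HTTPS")
                .endHttpGet()
                .withInitialDelaySeconds((int) timeout.getSeconds())
                .withTimeoutSeconds((int) timeout.getSeconds())
                .build();
    }

    private ProbeSketch() {}
}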
Use of io.fabric8.kubernetes.api.model.apps.DeploymentBuilder in project kubernetes by ballerinax.
From the class DeploymentHandler, method generate.
/**
 * Generate a Kubernetes deployment definition from annotations.
 *
 * @return Generated Kubernetes {@link Deployment} definition
 * @throws KubernetesPluginException If an error occurs while generating the artifact.
 */
public String generate() throws KubernetesPluginException {
    List<ContainerPort> containerPorts = null;
    if (deploymentModel.getPorts() != null) {
        containerPorts = populatePorts(deploymentModel.getPorts());
    }
    Container container = generateContainer(deploymentModel, containerPorts);
    Deployment deployment = new DeploymentBuilder()
            .withNewMetadata()
                .withName(deploymentModel.getName())
                .withNamespace(deploymentModel.getNamespace())
                .withLabels(deploymentModel.getLabels())
            .endMetadata()
            .withNewSpec()
                .withReplicas(deploymentModel.getReplicas())
                .withNewTemplate()
                    .withNewMetadata()
                        .addToLabels(deploymentModel.getLabels())
                    .endMetadata()
                    .withNewSpec()
                        .withContainers(container)
                        .withVolumes(populateVolume(deploymentModel))
                    .endSpec()
                .endTemplate()
            .endSpec()
            .build();
    try {
        return SerializationUtils.dumpWithoutRuntimeStateAsYaml(deployment);
    } catch (JsonProcessingException e) {
        String errorMessage = "Error while parsing yaml file for deployment: " + deploymentModel.getName();
        throw new KubernetesPluginException(errorMessage, e);
    }
}
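To get a feel for what the YAML dump step does, here is a minimal, self-contained sketch that serializes a tiny Deployment with the same utility, assuming the SerializationUtils from the fabric8 kubernetes-client (io.fabric8.kubernetes.client.internal); the name, label and image are placeholders rather than anything the Ballerina plugin actually generates.

import io.fabric8.kubernetes.api.model.apps.Deployment;
import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder;
import io.fabric8.kubernetes.client.internal.SerializationUtils;

public class DeploymentYamlSketch {

    public static void main(String[] args) throws Exception {
        // Placeholder values only; in the plugin these come from the annotation model.
        Deployment deployment = new DeploymentBuilder()
                .withNewMetadata()
                    .withName("hello-deployment")
                .endMetadata()
                .withNewSpec()
                    .withReplicas(1)
                    .withNewTemplate()
                        .withNewMetadata()
                            .addToLabels("app", "hello")
                        .endMetadata()
                        .withNewSpec()
                            .addNewContainer()
                                .withName("hello")
                                .withImage("hello:latest")
                            .endContainer()
                        .endSpec()
                    .endTemplate()
                .endSpec()
                .build();

        // Dumps the object as YAML while omitting runtime state such as status,
        // which keeps the output suitable for committing or re-applying.
        System.out.println(SerializationUtils.dumpWithoutRuntimeStateAsYaml(deployment));
    }
}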
Use of io.fabric8.kubernetes.api.model.apps.DeploymentBuilder in project fabric8-maven-plugin by fabric8io.
From the class DefaultControllerEnricher, method addMissingResources.
@Override
public void addMissingResources(KubernetesListBuilder builder) {
    final String name = getConfig(Config.name, MavenUtil.createDefaultResourceName(getProject()));
    final ResourceConfig config = new ResourceConfig.Builder()
            .controllerName(name)
            .imagePullPolicy(getConfig(Config.pullPolicy))
            .withReplicas(Configs.asInt(getConfig(Config.replicaCount)))
            .build();
    final List<ImageConfiguration> images = getImages();

    // Check if at least a replica set is added. If not add a default one
    if (!KubernetesResourceUtil.checkForKind(builder, POD_CONTROLLER_KINDS)) {
        // At least one image must be present, otherwise the resulting config will be invalid
        if (!Lists.isNullOrEmpty(images)) {
            String type = getConfig(Config.type);
            if ("deployment".equalsIgnoreCase(type)) {
                log.info("Adding a default Deployment");
                builder.addToDeploymentItems(deployHandler.getDeployment(config, images));
            } else if ("statefulSet".equalsIgnoreCase(type)) {
                log.info("Adding a default StatefulSet");
                builder.addToStatefulSetItems(statefulSetHandler.getStatefulSet(config, images));
            } else if ("daemonSet".equalsIgnoreCase(type)) {
                log.info("Adding a default DaemonSet");
                builder.addToDaemonSetItems(daemonSetHandler.getDaemonSet(config, images));
            } else if ("replicaSet".equalsIgnoreCase(type)) {
                log.info("Adding a default ReplicaSet");
                builder.addToReplicaSetItems(rsHandler.getReplicaSet(config, images));
            } else if ("replicationController".equalsIgnoreCase(type)) {
                log.info("Adding a default ReplicationController");
                builder.addToReplicationControllerItems(rcHandler.getReplicationController(config, images));
            } else if ("job".equalsIgnoreCase(type)) {
                log.info("Adding a default Job");
                builder.addToJobItems(jobHandler.getJob(config, images));
            }
        }
    } else if (KubernetesResourceUtil.checkForKind(builder, "StatefulSet")) {
        final StatefulSetSpec spec = statefulSetHandler.getStatefulSet(config, images).getSpec();
        if (spec != null) {
            builder.accept(new TypedVisitor<StatefulSetBuilder>() {
                @Override
                public void visit(StatefulSetBuilder statefulSetBuilder) {
                    statefulSetBuilder.editOrNewSpec().editOrNewTemplate().editOrNewSpec()
                            .endSpec().endTemplate().endSpec();
                    mergeStatefulSetSpec(statefulSetBuilder, spec);
                }
            });
            if (spec.getTemplate() != null && spec.getTemplate().getSpec() != null) {
                final PodSpec podSpec = spec.getTemplate().getSpec();
                builder.accept(new TypedVisitor<PodSpecBuilder>() {
                    @Override
                    public void visit(PodSpecBuilder builder) {
                        KubernetesResourceUtil.mergePodSpec(builder, podSpec, name);
                    }
                });
            }
        }
    } else {
        final DeploymentSpec spec = deployHandler.getDeployment(config, images).getSpec();
        if (spec != null) {
            builder.accept(new TypedVisitor<DeploymentBuilder>() {
                @Override
                public void visit(DeploymentBuilder deploymentBuilder) {
                    deploymentBuilder.editOrNewSpec().editOrNewTemplate().editOrNewSpec()
                            .endSpec().endTemplate().endSpec();
                    mergeDeploymentSpec(deploymentBuilder, spec);
                }
            });
            if (spec.getTemplate() != null && spec.getTemplate().getSpec() != null) {
                final PodSpec podSpec = spec.getTemplate().getSpec();
                builder.accept(new TypedVisitor<PodSpecBuilder>() {
                    @Override
                    public void visit(PodSpecBuilder builder) {
                        KubernetesResourceUtil.mergePodSpec(builder, podSpec, name);
                    }
                });
            }
        }
    }
}
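The TypedVisitor pattern used above is general: any builder nested anywhere inside a KubernetesListBuilder can be edited in place. Below is a minimal, self-contained sketch of the same mechanism; the list contents and the "enriched" label are made up for illustration and are not part of the plugin.

import io.fabric8.kubernetes.api.builder.TypedVisitor;
import io.fabric8.kubernetes.api.model.KubernetesList;
import io.fabric8.kubernetes.api.model.KubernetesListBuilder;
import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder;

public class VisitorSketch {

    public static void main(String[] args) {
        // A list with one bare Deployment; in the enricher the list is assembled by the plugin.
        KubernetesListBuilder listBuilder = new KubernetesListBuilder()
                .addToItems(new DeploymentBuilder()
                        .withNewMetadata()
                            .withName("example")
                        .endMetadata()
                        .build());

        // The visitor runs against every DeploymentBuilder found in the list,
        // mirroring how the enricher merges specs into existing controllers.
        listBuilder.accept(new TypedVisitor<DeploymentBuilder>() {
            @Override
            public void visit(DeploymentBuilder deploymentBuilder) {
                deploymentBuilder.editOrNewMetadata()
                        .addToLabels("enriched", "true")   // illustrative label
                        .endMetadata();
            }
        });

        KubernetesList list = listBuilder.build();
        System.out.println(list.getItems().get(0).getMetadata().getLabels());
    }
}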
Use of io.fabric8.kubernetes.api.model.apps.DeploymentBuilder in project fabric8-maven-plugin by fabric8io.
From the class MergeResourceTest, method testMergeDeploymentTemplateMetadata.
@Test
public void testMergeDeploymentTemplateMetadata() throws Exception {
    Deployment resource = new DeploymentBuilder()
            .withNewMetadata()
                .withName("cheese")
            .endMetadata()
            .withNewSpec()
                .withNewTemplate()
                    .withNewSpec()
                        .addNewContainer()
                            .withImage("cheese-image")
                        .endContainer()
                    .endSpec()
                    .withNewMetadata()
                        .addToAnnotations("overwriteKey", "originalValue")
                        .addToAnnotations("unchangedKey", "shouldNotChange")
                        .addToAnnotations("unchangedBlankKey", "")
                        .addToAnnotations("deletedKey", "shouldBeDeleted")
                    .endMetadata()
                .endTemplate()
            .endSpec()
            .build();

    Deployment override = new DeploymentBuilder()
            .withNewMetadata()
                .withName("cheese")
            .endMetadata()
            .withNewSpec()
                .withNewTemplate()
                    .withNewSpec()
                        .addNewContainer()
                            .addToEnv(new EnvVarBuilder()
                                    .withName("ENV_FOO")
                                    .withValue("FOO_VALUE")
                                    .build())
                        .endContainer()
                    .endSpec()
                    .withNewMetadata()
                        .addToAnnotations("overwriteKey", "newValue")
                        .addToAnnotations("deletedKey", "")
                    .endMetadata()
                .endTemplate()
            .endSpec()
            .build();

    HasMetadata answer = KubernetesResourceUtil.mergeResources(resource, override, log, false);
    assertNotNull(answer);
    log.info("Override metadata on Deployment generated: " + KubernetesHelper.toYaml(answer));
    assertThat(answer).describedAs("mergeResult").isInstanceOf(Deployment.class);

    Deployment deployment = (Deployment) answer;
    Map<String, String> annotations = deployment.getSpec().getTemplate().getMetadata().getAnnotations();
    assertDataModified(annotations, "Deployment.spec.template.metadata.annotations");
    assertDataNotModified(resource.getSpec().getTemplate().getMetadata().getAnnotations(),
            "Original Deployment.spec.template.metadata.annotations");
}
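The assertDataModified and assertDataNotModified helpers are not shown in this excerpt. Judging purely from the annotation key names in the fixtures (an inference for illustration, not the helpers' actual implementation), the merged annotations would be expected to look roughly like this sketch asserts with plain AssertJ:

import static org.assertj.core.api.Assertions.assertThat;

import java.util.Map;

final class ExpectedMergeSemantics {

    // Inferred from the fixture key names above, not from the real helper implementations:
    // - a non-blank override value replaces the original ("overwriteKey"),
    // - keys absent from the override are kept ("unchangedKey", "unchangedBlankKey"),
    // - a blank override value removes the key ("deletedKey").
    static void check(Map<String, String> mergedAnnotations) {
        assertThat(mergedAnnotations)
                .containsEntry("overwriteKey", "newValue")
                .containsEntry("unchangedKey", "shouldNotChange")
                .containsEntry("unchangedBlankKey", "")
                .doesNotContainKey("deletedKey");
    }

    private ExpectedMergeSemantics() {}
}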