Use of io.fabric8.kubernetes.api.model.PodSpecBuilder in project che by eclipse.
Class OpenShiftConnector, method createOpenShiftDeployment.
private String createOpenShiftDeployment(String workspaceID, String imageName, String sanitizedContainerName,
    Set<String> exposedPorts, String[] envVariables, String[] volumes, boolean runContainerAsRoot) {
  String deploymentName = CHE_OPENSHIFT_RESOURCES_PREFIX + workspaceID;
  LOG.info("Creating OpenShift deployment {}", deploymentName);
  Map<String, String> selector = Collections.singletonMap(OPENSHIFT_DEPLOYMENT_LABEL, deploymentName);
  LOG.info("Adding container {} to OpenShift deployment {}", sanitizedContainerName, deploymentName);
  // Decide which UID the workspace container runs under.
  Long UID = runContainerAsRoot ? UID_ROOT : UID_USER;
  // Single workspace container with env vars, ports, security context, liveness probe and volume mounts.
  Container container = new ContainerBuilder()
      .withName(sanitizedContainerName)
      .withImage(imageName)
      .withEnv(KubernetesEnvVar.getEnvFrom(envVariables))
      .withPorts(KubernetesContainer.getContainerPortsFrom(exposedPorts))
      .withImagePullPolicy(OPENSHIFT_IMAGE_PULL_POLICY_IFNOTPRESENT)
      .withNewSecurityContext()
          .withRunAsUser(UID)
          .withPrivileged(true)
      .endSecurityContext()
      .withLivenessProbe(getLivenessProbeFrom(exposedPorts))
      .withVolumeMounts(getVolumeMountsFrom(volumes, workspaceID))
      .build();
  // Pod spec wrapping the container, its volumes and the Che service account.
  PodSpec podSpec = new PodSpecBuilder()
      .withContainers(container)
      .withVolumes(getVolumesFrom(volumes, workspaceID))
      .withServiceAccountName(this.openShiftCheServiceAccount)
      .build();
  // Deployment with a single replica selected by the workspace-specific label.
  Deployment deployment = new DeploymentBuilder()
      .withNewMetadata()
          .withName(deploymentName)
          .withNamespace(this.openShiftCheProjectName)
      .endMetadata()
      .withNewSpec()
          .withReplicas(1)
          .withNewSelector()
              .withMatchLabels(selector)
          .endSelector()
          .withNewTemplate()
              .withNewMetadata()
                  .withLabels(selector)
              .endMetadata()
              .withSpec(podSpec)
          .endTemplate()
      .endSpec()
      .build();
  deployment = openShiftClient.extensions().deployments()
      .inNamespace(this.openShiftCheProjectName)
      .create(deployment);
  LOG.info("OpenShift deployment {} created", deploymentName);
  return deployment.getMetadata().getName();
}
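The liveness probe wired in above comes from a getLivenessProbeFrom helper that is not shown here. A minimal sketch of what such a helper could look like with the same io.fabric8.kubernetes.api.model builders, assuming exposed ports arrive in Docker's "port/protocol" form; the real OpenShiftConnector helper may differ:

private Probe getLivenessProbeFrom(Set<String> exposedPorts) {
  // No exposed ports means nothing to probe.
  if (exposedPorts.isEmpty()) {
    return null;
  }
  // Probe the first exposed port over TCP (assumed "8080/tcp"-style keys).
  int port = Integer.parseInt(exposedPorts.iterator().next().split("/")[0]);
  return new ProbeBuilder()
      .withTcpSocket(new TCPSocketActionBuilder().withPort(new IntOrString(port)).build())
      .withInitialDelaySeconds(10)
      .withTimeoutSeconds(5)
      .build();
}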
Use of io.fabric8.kubernetes.api.model.PodSpecBuilder in project curiostack by curioswitch.
Class DeployPodTask, method exec.
@TaskAction
public void exec() {
  ImmutableDeploymentExtension config = getProject().getExtensions().getByType(DeploymentExtension.class);
  final ImmutableDeploymentConfiguration deploymentConfig = config.getTypes().getByName(type);
  ImmutableGcloudExtension gcloud =
      getProject().getRootProject().getExtensions().getByType(GcloudExtension.class);
  // Plain env vars come straight from the config; secret env vars are wired to Kubernetes secret keys.
  ImmutableList.Builder<EnvVar> envVars =
      ImmutableList.<EnvVar>builder()
          .addAll(
              deploymentConfig.envVars().entrySet().stream()
                      .map((entry) -> new EnvVar(entry.getKey(), entry.getValue(), null))
                  ::iterator)
          .addAll(
              deploymentConfig.secretEnvVars().entrySet().stream()
                      .map(
                          (entry) ->
                              new EnvVar(
                                  entry.getKey(),
                                  null,
                                  new EnvVarSourceBuilder()
                                      .withSecretKeyRef(
                                          new SecretKeySelectorBuilder()
                                              .withName(entry.getValue().get(0))
                                              .withKey(entry.getValue().get(1))
                                              .build())
                                      .build()))
                  ::iterator);
  // Provide a default JAVA_OPTS unless the deployment config overrides it.
  if (!deploymentConfig.envVars().containsKey("JAVA_OPTS")) {
    int heapSize = deploymentConfig.jvmHeapMb();
    StringBuilder javaOpts = new StringBuilder();
    javaOpts
        .append("--add-opens java.base/jdk.internal.misc=ALL-UNNAMED ")
        .append("--add-opens jdk.unsupported/sun.misc=ALL-UNNAMED ")
        .append("-Xms").append(heapSize).append("m ")
        .append("-Xmx").append(heapSize).append("m ")
        .append("-Dconfig.resource=application-").append(type).append(".conf ")
        .append("-Dmonitoring.stackdriverProjectId=").append(gcloud.clusterProject()).append(" ")
        .append("-Dmonitoring.serverName=").append(deploymentConfig.deploymentName()).append(" ");
    if (!deploymentConfig.request()) {
      int numCpus = (int) Math.ceil(Double.parseDouble(deploymentConfig.cpu()));
      int numWorkers = numCpus * 2;
      javaOpts
          .append("-XX:ParallelGCThreads=").append(numCpus).append(" ")
          .append("-Dcom.linecorp.armeria.numCommonWorkers=").append(numWorkers).append(" ")
          .append("-Dio.netty.availableProcessors=").append(numCpus).append(" ");
    }
    if (!type.equals("prod")) {
      javaOpts.append("-Dcom.linecorp.armeria.verboseExceptions=true ");
    }
    envVars.add(new EnvVar("JAVA_OPTS", javaOpts.toString(), null));
  }
  Map<String, Quantity> resources =
      ImmutableMap.of(
          "cpu", new Quantity(deploymentConfig.cpu()),
          "memory", new Quantity(deploymentConfig.memoryMb() + "Mi"));
  // The same resource map is applied as limits for non-request deployments and as requests otherwise.
  Deployment deployment =
      new DeploymentBuilder()
          .withMetadata(
              new ObjectMetaBuilder()
                  .withNamespace(deploymentConfig.namespace())
                  .withName(deploymentConfig.deploymentName())
                  .build())
          .withSpec(
              new DeploymentSpecBuilder()
                  .withReplicas(deploymentConfig.replicas())
                  .withStrategy(
                      new DeploymentStrategyBuilder()
                          .withType("RollingUpdate")
                          .withRollingUpdate(
                              new RollingUpdateDeploymentBuilder().withNewMaxUnavailable(0).build())
                          .build())
                  .withSelector(
                      new LabelSelectorBuilder()
                          .withMatchLabels(ImmutableMap.of("name", deploymentConfig.deploymentName()))
                          .build())
                  .withTemplate(
                      new PodTemplateSpecBuilder()
                          .withMetadata(
                              new ObjectMetaBuilder()
                                  .withLabels(
                                      ImmutableMap.of(
                                          "name", deploymentConfig.deploymentName(),
                                          "revision", System.getenv().getOrDefault("REVISION_ID", "none")))
                                  .withAnnotations(
                                      ImmutableMap.<String, String>builder()
                                          .put("prometheus.io/scrape", "true")
                                          .put("prometheus.io/scheme", "https")
                                          .put("prometheus.io/path", "/internal/metrics")
                                          .put("prometheus.io/port", String.valueOf(deploymentConfig.containerPort()))
                                          .build())
                                  .build())
                          .withSpec(
                              new PodSpecBuilder()
                                  .withContainers(
                                      new ContainerBuilder()
                                          .withResources(
                                              new ResourceRequirementsBuilder()
                                                  .withLimits(!deploymentConfig.request() ? resources : ImmutableMap.of())
                                                  .withRequests(deploymentConfig.request() ? resources : ImmutableMap.of())
                                                  .build())
                                          .withImage(deploymentConfig.image())
                                          .withName(deploymentConfig.deploymentName())
                                          .withEnv(envVars.build())
                                          .withImagePullPolicy("Always")
                                          .withReadinessProbe(createProbe(deploymentConfig, Duration.ofSeconds(5)))
                                          .withLivenessProbe(createProbe(deploymentConfig, Duration.ofSeconds(15)))
                                          .withPorts(
                                              ImmutableList.of(
                                                  new ContainerPortBuilder()
                                                      .withContainerPort(deploymentConfig.containerPort())
                                                      .withName("http")
                                                      .build()))
                                          .withVolumeMounts(
                                              new VolumeMountBuilder()
                                                  .withName("tls")
                                                  .withMountPath("/etc/tls")
                                                  .withReadOnly(true)
                                                  .build(),
                                              new VolumeMountBuilder()
                                                  .withName("rpcacls")
                                                  .withMountPath("/etc/rpcacls")
                                                  .withReadOnly(true)
                                                  .build())
                                          .build())
                                  .withVolumes(
                                      new VolumeBuilder()
                                          .withName("tls")
                                          .withSecret(
                                              new SecretVolumeSourceBuilder().withSecretName("server-tls").build())
                                          .build(),
                                      new VolumeBuilder()
                                          .withName("rpcacls")
                                          .withConfigMap(new ConfigMapVolumeSourceBuilder().withName("rpcacls").build())
                                          .build())
                                  .build())
                          .build())
                  .build())
          .build();
  KubernetesClient client = new DefaultKubernetesClient();
  Service service =
      new ServiceBuilder()
          .withMetadata(
              new ObjectMetaBuilder()
                  .withName(deploymentConfig.deploymentName())
                  .withNamespace(deploymentConfig.namespace())
                  .withAnnotations(
                      ImmutableMap.<String, String>builder()
                          .put("service.alpha.kubernetes.io/app-protocols", "{\"https\":\"HTTPS\"}")
                          .put("prometheus.io/scrape", "true")
                          .put("prometheus.io/scheme", "https")
                          .put("prometheus.io/path", "/internal/metrics")
                          .put("prometheus.io/port", String.valueOf(deploymentConfig.containerPort()))
                          .put("prometheus.io/probe", "true")
                          .build())
                  .build())
          .withSpec(createServiceSpec(deploymentConfig))
          .build();
  // One extra Service per additional path so the ingress below can route to it.
  Map<String, Service> additionalServices = new HashMap<>();
  for (String path : deploymentConfig.additionalServicePaths()) {
    String sanitizedPath = path;
    if (sanitizedPath.endsWith("/*")) {
      sanitizedPath = sanitizedPath.substring(0, path.length() - 2);
    }
    String serviceName = deploymentConfig.deploymentName() + sanitizedPath.replace('/', '-');
    additionalServices.put(
        path,
        new ServiceBuilder()
            .withMetadata(
                new ObjectMetaBuilder()
                    .withName(serviceName)
                    .withNamespace(deploymentConfig.namespace())
                    .withAnnotations(
                        ImmutableMap.of("service.alpha.kubernetes.io/app-protocols", "{\"https\":\"HTTPS\"}"))
                    .build())
            .withSpec(createServiceSpec(deploymentConfig))
            .build());
  }
  client.resource(deployment).createOrReplace();
  deployService(service, client);
  additionalServices.values().forEach(s -> deployService(s, client));
  // Only deployments with an external host get an ingress with TLS and per-path routing.
  if (deploymentConfig.externalHost() != null) {
    List<HTTPIngressPath> ingressPaths = new ArrayList<>();
    additionalServices.forEach(
        (path, s) -> ingressPaths.add(createIngressPath(path, s.getMetadata().getName(), deploymentConfig)));
    ingressPaths.add(createIngressPath("/*", deploymentConfig.deploymentName(), deploymentConfig));
    Ingress ingress =
        new IngressBuilder()
            .withMetadata(
                new ObjectMetaBuilder()
                    .withNamespace(deploymentConfig.namespace())
                    .withName(deploymentConfig.deploymentName())
                    .withAnnotations(
                        ImmutableMap.of(
                            "kubernetes.io/tls-acme", "true",
                            "kubernetes.io/ingress.class", "gce"))
                    .build())
            .withSpec(
                new IngressSpecBuilder()
                    .withTls(
                        new IngressTLSBuilder()
                            .withSecretName(deploymentConfig.deploymentName() + "-tls")
                            .withHosts(deploymentConfig.externalHost())
                            .build())
                    .withRules(
                        new IngressRuleBuilder()
                            .withHost(deploymentConfig.externalHost())
                            .withHttp(new HTTPIngressRuleValueBuilder().withPaths(ingressPaths).build())
                            .build())
                    .build())
            .build();
    client.resource(ingress).createOrReplace();
  }
}
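The createProbe helper used for both the readiness and liveness checks is not part of this snippet. One plausible shape, assuming an HTTPS GET health check against the serving container port; the path and the way the Duration is applied are illustrative, not taken from the project:

private static Probe createProbe(ImmutableDeploymentConfiguration deploymentConfig, Duration period) {
  // HTTPS GET against the serving port; the supplied Duration drives how often the probe fires.
  return new ProbeBuilder()
      .withHttpGet(
          new HTTPGetActionBuilder()
              .withScheme("HTTPS")
              .withPath("/internal/health") // assumed path, not the project's actual value
              .withPort(new IntOrString(deploymentConfig.containerPort()))
              .build())
      .withPeriodSeconds((int) period.getSeconds())
      .build();
}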
Use of io.fabric8.kubernetes.api.model.PodSpecBuilder in project curiostack by curioswitch.
Class DeployDevDbPodTask, method exec.
@TaskAction
public void exec() {
  ImmutableDatabaseExtension config = getProject().getExtensions().getByType(DatabaseExtension.class);
  // 5Gi claim that backs the dev database's data directory.
  PersistentVolumeClaim volumeClaim =
      new PersistentVolumeClaimBuilder()
          .withMetadata(
              new ObjectMetaBuilder()
                  .withName(config.devDbPodName() + "-pvc")
                  .withNamespace(config.devDbPodNamespace())
                  .build())
          .withSpec(
              new PersistentVolumeClaimSpecBuilder()
                  .withAccessModes("ReadWriteOnce")
                  .withResources(
                      new ResourceRequirementsBuilder()
                          .withRequests(ImmutableMap.of("storage", new Quantity("5Gi")))
                          .build())
                  .build())
          .build();
  // Single MySQL container mounting the claim at /var/lib/mysql.
  Pod pod =
      new PodBuilder()
          .withMetadata(
              new ObjectMetaBuilder()
                  .withName(config.devDbPodName())
                  .withLabels(ImmutableMap.of("name", config.devDbPodName()))
                  .withNamespace(config.devDbPodNamespace())
                  .build())
          .withSpec(
              new PodSpecBuilder()
                  .withContainers(
                      new ContainerBuilder()
                          .withResources(
                              new ResourceRequirementsBuilder()
                                  .withLimits(
                                      ImmutableMap.of(
                                          "cpu", new Quantity("0.1"),
                                          "memory", new Quantity("512Mi")))
                                  .build())
                          .withImage(config.devDockerImageTag())
                          .withName(config.devDbPodName())
                          .withImagePullPolicy("Always")
                          .withPorts(new ContainerPortBuilder().withContainerPort(3306).withName("mysql").build())
                          .withVolumeMounts(
                              new VolumeMountBuilder()
                                  .withName(config.devDbPodName() + "-data")
                                  .withMountPath("/var/lib/mysql")
                                  .build())
                          .withArgs("--ignore-db-dir=lost+found")
                          .build())
                  .withVolumes(
                      new VolumeBuilder()
                          .withName(config.devDbPodName() + "-data")
                          .withPersistentVolumeClaim(
                              new PersistentVolumeClaimVolumeSourceBuilder()
                                  .withClaimName(volumeClaim.getMetadata().getName())
                                  .build())
                          .build())
                  .build())
          .build();
  // LoadBalancer service exposing MySQL only to the configured IP ranges.
  Service service =
      new ServiceBuilder()
          .withMetadata(
              new ObjectMetaBuilder()
                  .withName(config.devDbPodName())
                  .withNamespace(config.devDbPodNamespace())
                  .build())
          .withSpec(
              new ServiceSpecBuilder()
                  .withPorts(new ServicePortBuilder().withPort(3306).withTargetPort(new IntOrString(3306)).build())
                  .withSelector(ImmutableMap.of("name", config.devDbPodName()))
                  .withType("LoadBalancer")
                  .withLoadBalancerSourceRanges(config.devDbIpRestrictions())
                  .build())
          .build();
  KubernetesClient client = new DefaultKubernetesClient();
  try {
    client.resource(volumeClaim).createOrReplace();
  } catch (Exception e) {
    // TODO(choko): Find a better way to idempotently setup.
    // Ignore
  }
  try {
    client.resourceList(pod).createOrReplace();
  } catch (Exception e) {
    // TODO(choko): Find a better way to idempotently setup.
    // Ignore
  }
  client.resource(service).createOrReplace();
}
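The two try/catch blocks above swallow failures so that re-running the task against an already provisioned claim or pod does not abort the build. A sketch of one alternative hinted at by the TODO, checking for the claim before creating it with the same fabric8 client; this is not the project's code, only an illustration:

PersistentVolumeClaim existing =
    client.persistentVolumeClaims()
        .inNamespace(config.devDbPodNamespace())
        .withName(volumeClaim.getMetadata().getName())
        .get();
if (existing == null) {
  // Only create the claim on first run; later runs leave the already bound volume untouched.
  client.persistentVolumeClaims().inNamespace(config.devDbPodNamespace()).create(volumeClaim);
}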
Use of io.fabric8.kubernetes.api.model.PodSpecBuilder in project fabric8-maven-plugin by fabric8io.
Class DefaultControllerEnricher, method addMissingResources.
@Override
public void addMissingResources(KubernetesListBuilder builder) {
  final String name = getConfig(Config.name, MavenUtil.createDefaultResourceName(getProject()));
  final ResourceConfig config =
      new ResourceConfig.Builder()
          .controllerName(name)
          .imagePullPolicy(getConfig(Config.pullPolicy))
          .withReplicas(Configs.asInt(getConfig(Config.replicaCount)))
          .build();
  final List<ImageConfiguration> images = getImages();
  // Check if at least a replica set is added. If not add a default one
  if (!KubernetesResourceUtil.checkForKind(builder, POD_CONTROLLER_KINDS)) {
    // At least one image must be present, otherwise the resulting config will be invalid
    if (!Lists.isNullOrEmpty(images)) {
      String type = getConfig(Config.type);
      if ("deployment".equalsIgnoreCase(type)) {
        log.info("Adding a default Deployment");
        builder.addToDeploymentItems(deployHandler.getDeployment(config, images));
      } else if ("statefulSet".equalsIgnoreCase(type)) {
        log.info("Adding a default StatefulSet");
        builder.addToStatefulSetItems(statefulSetHandler.getStatefulSet(config, images));
      } else if ("daemonSet".equalsIgnoreCase(type)) {
        log.info("Adding a default DaemonSet");
        builder.addToDaemonSetItems(daemonSetHandler.getDaemonSet(config, images));
      } else if ("replicaSet".equalsIgnoreCase(type)) {
        log.info("Adding a default ReplicaSet");
        builder.addToReplicaSetItems(rsHandler.getReplicaSet(config, images));
      } else if ("replicationController".equalsIgnoreCase(type)) {
        log.info("Adding a default ReplicationController");
        builder.addToReplicationControllerItems(rcHandler.getReplicationController(config, images));
      } else if ("job".equalsIgnoreCase(type)) {
        log.info("Adding a default Job");
        builder.addToJobItems(jobHandler.getJob(config, images));
      }
    }
  } else if (KubernetesResourceUtil.checkForKind(builder, "StatefulSet")) {
    final StatefulSetSpec spec = statefulSetHandler.getStatefulSet(config, images).getSpec();
    if (spec != null) {
      builder.accept(new TypedVisitor<StatefulSetBuilder>() {
        @Override
        public void visit(StatefulSetBuilder statefulSetBuilder) {
          statefulSetBuilder.editOrNewSpec().editOrNewTemplate().editOrNewSpec().endSpec().endTemplate().endSpec();
          mergeStatefulSetSpec(statefulSetBuilder, spec);
        }
      });
      if (spec.getTemplate() != null && spec.getTemplate().getSpec() != null) {
        final PodSpec podSpec = spec.getTemplate().getSpec();
        builder.accept(new TypedVisitor<PodSpecBuilder>() {
          @Override
          public void visit(PodSpecBuilder builder) {
            KubernetesResourceUtil.mergePodSpec(builder, podSpec, name);
          }
        });
      }
    }
  } else {
    final DeploymentSpec spec = deployHandler.getDeployment(config, images).getSpec();
    if (spec != null) {
      builder.accept(new TypedVisitor<DeploymentBuilder>() {
        @Override
        public void visit(DeploymentBuilder deploymentBuilder) {
          deploymentBuilder.editOrNewSpec().editOrNewTemplate().editOrNewSpec().endSpec().endTemplate().endSpec();
          mergeDeploymentSpec(deploymentBuilder, spec);
        }
      });
      if (spec.getTemplate() != null && spec.getTemplate().getSpec() != null) {
        final PodSpec podSpec = spec.getTemplate().getSpec();
        builder.accept(new TypedVisitor<PodSpecBuilder>() {
          @Override
          public void visit(PodSpecBuilder builder) {
            KubernetesResourceUtil.mergePodSpec(builder, podSpec, name);
          }
        });
      }
    }
  }
}
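The TypedVisitor<PodSpecBuilder> passes above let the enricher rewrite every pod spec nested anywhere inside the KubernetesListBuilder, whatever controller kind wraps it. A minimal standalone sketch of the same visitor pattern, applying a made-up default service account to each visited pod spec (the account name is purely illustrative):

builder.accept(new TypedVisitor<PodSpecBuilder>() {
  @Override
  public void visit(PodSpecBuilder podSpecBuilder) {
    // Only fill in a value when the enriched resource left it unset.
    if (podSpecBuilder.getServiceAccountName() == null) {
      podSpecBuilder.withServiceAccountName("default-deployer");
    }
  }
});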
Use of io.fabric8.kubernetes.api.model.PodSpecBuilder in project fabric8-maven-plugin by fabric8io.
Class KubernetesResourceUtil, method mergeDeployments.
protected static HasMetadata mergeDeployments(Deployment resource1, Deployment resource2, Logger log, boolean switchOnLocalCustomisation) {
  Deployment resource1OrCopy = resource1;
  if (!switchOnLocalCustomisation) {
    // lets copy the original to avoid modifying it
    resource1OrCopy = new DeploymentBuilder(resource1OrCopy).build();
  }
  HasMetadata answer = resource1OrCopy;
  DeploymentSpec spec1 = resource1OrCopy.getSpec();
  DeploymentSpec spec2 = resource2.getSpec();
  if (spec1 == null) {
    resource1OrCopy.setSpec(spec2);
  } else {
    PodTemplateSpec template1 = spec1.getTemplate();
    PodTemplateSpec template2 = null;
    if (spec2 != null) {
      template2 = spec2.getTemplate();
    }
    if (template1 != null && template2 != null) {
      mergeMetadata(template1, template2);
    }
    if (template1 == null) {
      spec1.setTemplate(template2);
    } else {
      PodSpec podSpec1 = template1.getSpec();
      PodSpec podSpec2 = null;
      if (template2 != null) {
        podSpec2 = template2.getSpec();
      }
      if (podSpec1 == null) {
        template1.setSpec(podSpec2);
      } else {
        String defaultName = null;
        PodTemplateSpec updateTemplate = template1;
        if (switchOnLocalCustomisation) {
          HasMetadata override = resource2;
          if (isLocalCustomisation(podSpec1)) {
            updateTemplate = template2;
            PodSpec tmp = podSpec1;
            podSpec1 = podSpec2;
            podSpec2 = tmp;
          } else {
            answer = resource2;
            override = resource1OrCopy;
          }
          mergeMetadata(answer, override);
        } else {
          mergeMetadata(resource1OrCopy, resource2);
        }
        if (updateTemplate != null) {
          if (podSpec2 == null) {
            updateTemplate.setSpec(podSpec1);
          } else {
            PodSpecBuilder podSpecBuilder = new PodSpecBuilder(podSpec1);
            mergePodSpec(podSpecBuilder, podSpec2, defaultName);
            updateTemplate.setSpec(podSpecBuilder.build());
          }
        }
        return answer;
      }
    }
  }
  log.info("Merging 2 resources for " + getKind(resource1OrCopy) + " " + getName(resource1OrCopy) + " from "
      + getSourceUrlAnnotation(resource1OrCopy) + " and " + getSourceUrlAnnotation(resource2) + " and removing "
      + getSourceUrlAnnotation(resource1OrCopy));
  return resource1OrCopy;
}
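The final branch hands both pod specs to mergePodSpec via a PodSpecBuilder seeded from the first spec. A much simplified illustration of the builder calls involved, only appending the second spec's containers and volumes rather than performing the field-by-field merge that KubernetesResourceUtil actually does:

// Not the real merge logic: just shows how a PodSpecBuilder can start from one spec
// and absorb pieces of another before building the combined PodSpec.
PodSpecBuilder podSpecBuilder = new PodSpecBuilder(podSpec1);
podSpecBuilder.addAllToContainers(podSpec2.getContainers());
podSpecBuilder.addAllToVolumes(podSpec2.getVolumes());
PodSpec merged = podSpecBuilder.build();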