use of io.fabric8.kubernetes.api.model.extensions.Deployment in project che by eclipse.
the class OpenShiftConnector method createContainer.
/**
* Creates the OpenShift Service and Deployment that together represent a single Che container.
*
* @param createContainerParams the container configuration (image, exposed ports, env variables, volume binds, labels)
* @return a {@link ContainerCreated} holding the ID of the container running in the OpenShift pod
* @throws IOException if the OpenShift resources cannot be created
*/
@Override
public ContainerCreated createContainer(CreateContainerParams createContainerParams) throws IOException {
String containerName = KubernetesStringUtils.convertToContainerName(createContainerParams.getContainerName());
String workspaceID = getCheWorkspaceId(createContainerParams);
// Generate workspaceID if CHE_WORKSPACE_ID env var does not exist
workspaceID = workspaceID.isEmpty() ? KubernetesStringUtils.generateWorkspaceID() : workspaceID;
// imageForDocker is the docker version of the image repository. It's needed for other
// OpenShiftConnector API methods, but is not acceptable as an OpenShift name
String imageForDocker = createContainerParams.getContainerConfig().getImage();
// imageStreamTagName is imageForDocker converted into a form that can be used
// in OpenShift
String imageStreamTagName = KubernetesStringUtils.convertPullSpecToTagName(imageForDocker);
// imageStreamTagName is not enough to fill out a pull spec; it is only the tag, so we
// have to look up the ImageStreamTag for that tag and then read the full ImageStreamTag
// name from it. This works because the tags used in Che are unique.
ImageStreamTag imageStreamTag = getImageStreamTagFromRepo(imageStreamTagName);
String imageStreamTagPullSpec = imageStreamTag.getMetadata().getName();
// Next we need to get the address of the registry where the ImageStreamTag is stored
String imageStreamName = KubernetesStringUtils.getImageStreamNameFromPullSpec(imageStreamTagPullSpec);
ImageStream imageStream = openShiftClient.imageStreams().inNamespace(openShiftCheProjectName).withName(imageStreamName).get();
if (imageStream == null) {
throw new OpenShiftException("ImageStream not found");
}
String registryAddress = imageStream.getStatus().getDockerImageRepository().split("/")[0];
// The above needs to be combined to form a pull spec that will work when defining a container.
String dockerPullSpec = String.format("%s/%s/%s", registryAddress, openShiftCheProjectName, imageStreamTagPullSpec);
Set<String> containerExposedPorts = createContainerParams.getContainerConfig().getExposedPorts().keySet();
Set<String> imageExposedPorts = inspectImage(InspectImageParams.create(imageForDocker)).getConfig().getExposedPorts().keySet();
Set<String> exposedPorts = getExposedPorts(containerExposedPorts, imageExposedPorts);
boolean runContainerAsRoot = runContainerAsRoot(imageForDocker);
String[] envVariables = createContainerParams.getContainerConfig().getEnv();
String[] volumes = createContainerParams.getContainerConfig().getHostConfig().getBinds();
Map<String, String> additionalLabels = createContainerParams.getContainerConfig().getLabels();
String containerID;
try {
createOpenShiftService(workspaceID, exposedPorts, additionalLabels);
String deploymentName = createOpenShiftDeployment(workspaceID, dockerPullSpec, containerName, exposedPorts, envVariables, volumes, runContainerAsRoot);
containerID = waitAndRetrieveContainerID(deploymentName);
if (containerID == null) {
throw new OpenShiftException("Failed to get the ID of the container running in the OpenShift pod");
}
} catch (IOException e) {
// Make sure we clean up deployment and service in case of an error -- otherwise Che can end up
// in an inconsistent state.
LOG.info("Error while creating Pod, removing deployment");
String deploymentName = CHE_OPENSHIFT_RESOURCES_PREFIX + workspaceID;
cleanUpWorkspaceResources(deploymentName);
openShiftClient.resource(imageStreamTag).delete();
throw e;
}
return new ContainerCreated(containerID, null);
}
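The helper waitAndRetrieveContainerID called above is not part of this snippet. A minimal sketch of how such a lookup could poll the deployment's pods with the same fabric8 client (the reuse of the OPENSHIFT_WAIT_POD_* constants and the docker:// prefix handling are assumptions, not the verified Che implementation):
private String waitAndRetrieveContainerID(String deploymentName) throws IOException {
    // Poll until a pod created for the deployment reports a running container.
    for (int i = 0; i < OPENSHIFT_WAIT_POD_TIMEOUT; i++) {
        try {
            Thread.sleep(OPENSHIFT_WAIT_POD_DELAY);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        List<Pod> pods = openShiftClient.pods()
                .inNamespace(openShiftCheProjectName)
                .withLabel(OPENSHIFT_DEPLOYMENT_LABEL, deploymentName)
                .list().getItems();
        for (Pod pod : pods) {
            if ("Running".equals(pod.getStatus().getPhase())) {
                for (ContainerStatus status : pod.getStatus().getContainerStatuses()) {
                    String containerID = status.getContainerID();
                    if (containerID != null && !containerID.isEmpty()) {
                        // Kubernetes reports container IDs as "<runtime>://<id>"; strip the docker:// scheme.
                        return containerID.replaceFirst("^docker://", "");
                    }
                }
            }
        }
    }
    return null;
}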
use of io.fabric8.kubernetes.api.model.extensions.Deployment in project che by eclipse.
the class OpenShiftConnector method createOpenShiftDeployment.
private String createOpenShiftDeployment(String workspaceID, String imageName, String sanitizedContainerName, Set<String> exposedPorts, String[] envVariables, String[] volumes, boolean runContainerAsRoot) {
String deploymentName = CHE_OPENSHIFT_RESOURCES_PREFIX + workspaceID;
LOG.info("Creating OpenShift deployment {}", deploymentName);
Map<String, String> selector = Collections.singletonMap(OPENSHIFT_DEPLOYMENT_LABEL, deploymentName);
LOG.info("Adding container {} to OpenShift deployment {}", sanitizedContainerName, deploymentName);
Long UID = runContainerAsRoot ? UID_ROOT : UID_USER;
Container container = new ContainerBuilder()
        .withName(sanitizedContainerName)
        .withImage(imageName)
        .withEnv(KubernetesEnvVar.getEnvFrom(envVariables))
        .withPorts(KubernetesContainer.getContainerPortsFrom(exposedPorts))
        .withImagePullPolicy(OPENSHIFT_IMAGE_PULL_POLICY_IFNOTPRESENT)
        .withNewSecurityContext()
            .withRunAsUser(UID)
            .withPrivileged(true)
        .endSecurityContext()
        .withLivenessProbe(getLivenessProbeFrom(exposedPorts))
        .withVolumeMounts(getVolumeMountsFrom(volumes, workspaceID))
        .build();
PodSpec podSpec = new PodSpecBuilder()
        .withContainers(container)
        .withVolumes(getVolumesFrom(volumes, workspaceID))
        .withServiceAccountName(this.openShiftCheServiceAccount)
        .build();
Deployment deployment = new DeploymentBuilder()
        .withNewMetadata()
            .withName(deploymentName)
            .withNamespace(this.openShiftCheProjectName)
        .endMetadata()
        .withNewSpec()
            .withReplicas(1)
            .withNewSelector()
                .withMatchLabels(selector)
            .endSelector()
            .withNewTemplate()
                .withNewMetadata()
                    .withLabels(selector)
                .endMetadata()
                .withSpec(podSpec)
            .endTemplate()
        .endSpec()
        .build();
deployment = openShiftClient.extensions().deployments().inNamespace(this.openShiftCheProjectName).create(deployment);
LOG.info("OpenShift deployment {} created", deploymentName);
return deployment.getMetadata().getName();
}
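createOpenShiftDeployment relies on several small helpers (KubernetesEnvVar.getEnvFrom, KubernetesContainer.getContainerPortsFrom, getLivenessProbeFrom, getVolumeMountsFrom, getVolumesFrom) that are not shown in this snippet. As an illustration, a minimal sketch of what an env-var conversion such as getEnvFrom could look like, assuming the input is Docker-style "NAME=value" strings:
public static List<EnvVar> getEnvFrom(String[] envVariables) {
    List<EnvVar> env = new ArrayList<>();
    if (envVariables == null) {
        return env;
    }
    for (String variable : envVariables) {
        // Docker-style entries are "NAME=value"; everything after the first '=' is the value.
        String[] nameAndValue = variable.split("=", 2);
        env.add(new EnvVarBuilder()
                .withName(nameAndValue[0])
                .withValue(nameAndValue.length > 1 ? nameAndValue[1] : "")
                .build());
    }
    return env;
}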
use of io.fabric8.kubernetes.api.model.extensions.Deployment in project che by eclipse.
the class OpenShiftConnector method cleanUpWorkspaceResources.
private void cleanUpWorkspaceResources(String deploymentName) throws IOException {
Deployment deployment = getDeploymentByName(deploymentName);
Service service = getCheServiceBySelector(OPENSHIFT_DEPLOYMENT_LABEL, deploymentName);
if (service != null) {
LOG.info("Removing OpenShift Service {}", service.getMetadata().getName());
openShiftClient.resource(service).delete();
}
if (deployment != null) {
LOG.info("Removing OpenShift Deployment {}", deployment.getMetadata().getName());
openShiftClient.resource(deployment).delete();
}
// Wait for all pods to terminate before returning.
try {
for (int waitCount = 0; waitCount < OPENSHIFT_WAIT_POD_TIMEOUT; waitCount++) {
List<Pod> pods = openShiftClient.pods().inNamespace(openShiftCheProjectName).withLabel(OPENSHIFT_DEPLOYMENT_LABEL, deploymentName).list().getItems();
if (pods.size() == 0) {
return;
}
Thread.sleep(OPENSHIFT_WAIT_POD_DELAY);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
LOG.info("Thread interrupted while cleaning up workspace");
}
throw new OpenShiftException("Timeout while waiting for pods to terminate");
}
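Neither getDeploymentByName nor getCheServiceBySelector is included in this snippet. A minimal sketch of how both lookups could be done with the same fabric8 client used above (matching services by their spec selector is an assumption about how Che wires its resources together):
private Deployment getDeploymentByName(String deploymentName) {
    // Direct lookup by name in the Che project namespace.
    return openShiftClient.extensions().deployments()
            .inNamespace(openShiftCheProjectName)
            .withName(deploymentName)
            .get();
}

private Service getCheServiceBySelector(String selectorKey, String selectorValue) {
    // Services target pods via spec.selector, so filter on that map rather than on labels.
    List<Service> services = openShiftClient.services()
            .inNamespace(openShiftCheProjectName)
            .list().getItems();
    return services.stream()
            .filter(s -> s.getSpec().getSelector() != null
                    && selectorValue.equals(s.getSpec().getSelector().get(selectorKey)))
            .findFirst()
            .orElse(null);
}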
use of io.fabric8.kubernetes.api.model.extensions.Deployment in project curiostack by curioswitch.
the class DeployPodTask method exec.
@TaskAction
public void exec() {
ImmutableDeploymentExtension config = getProject().getExtensions().getByType(DeploymentExtension.class);
final ImmutableDeploymentConfiguration deploymentConfig = config.getTypes().getByName(type);
ImmutableGcloudExtension gcloud = getProject().getRootProject().getExtensions().getByType(GcloudExtension.class);
ImmutableList.Builder<EnvVar> envVars = ImmutableList.<EnvVar>builder()
        .addAll(deploymentConfig.envVars().entrySet().stream()
                .map((entry) -> new EnvVar(entry.getKey(), entry.getValue(), null))::iterator)
        .addAll(deploymentConfig.secretEnvVars().entrySet().stream()
                .map((entry) -> new EnvVar(entry.getKey(), null,
                        new EnvVarSourceBuilder()
                                .withSecretKeyRef(new SecretKeySelectorBuilder()
                                        .withName(entry.getValue().get(0))
                                        .withKey(entry.getValue().get(1))
                                        .build())
                                .build()))::iterator);
if (!deploymentConfig.envVars().containsKey("JAVA_OPTS")) {
int heapSize = deploymentConfig.jvmHeapMb();
StringBuilder javaOpts = new StringBuilder();
javaOpts.append("--add-opens java.base/jdk.internal.misc=ALL-UNNAMED ").append("--add-opens jdk.unsupported/sun.misc=ALL-UNNAMED ").append("-Xms").append(heapSize).append("m ").append("-Xmx").append(heapSize).append("m ").append("-Dconfig.resource=application-").append(type).append(".conf ").append("-Dmonitoring.stackdriverProjectId=").append(gcloud.clusterProject()).append(" ").append("-Dmonitoring.serverName=").append(deploymentConfig.deploymentName()).append(" ");
if (!deploymentConfig.request()) {
int numCpus = (int) Math.ceil(Double.parseDouble(deploymentConfig.cpu()));
int numWorkers = numCpus * 2;
javaOpts.append("-XX:ParallelGCThreads=").append(numCpus).append(" ").append("-Dcom.linecorp.armeria.numCommonWorkers=").append(numWorkers).append(" ").append("-Dio.netty.availableProcessors=").append(numCpus).append(" ");
}
if (!type.equals("prod")) {
javaOpts.append("-Dcom.linecorp.armeria.verboseExceptions=true ");
}
envVars.add(new EnvVar("JAVA_OPTS", javaOpts.toString(), null));
}
Map<String, Quantity> resources = ImmutableMap.of("cpu", new Quantity(deploymentConfig.cpu()), "memory", new Quantity(deploymentConfig.memoryMb() + "Mi"));
Deployment deployment = new DeploymentBuilder()
        .withMetadata(new ObjectMetaBuilder()
                .withNamespace(deploymentConfig.namespace())
                .withName(deploymentConfig.deploymentName())
                .build())
        .withSpec(new DeploymentSpecBuilder()
                .withReplicas(deploymentConfig.replicas())
                .withStrategy(new DeploymentStrategyBuilder()
                        .withType("RollingUpdate")
                        .withRollingUpdate(new RollingUpdateDeploymentBuilder()
                                .withNewMaxUnavailable(0)
                                .build())
                        .build())
                .withSelector(new LabelSelectorBuilder()
                        .withMatchLabels(ImmutableMap.of("name", deploymentConfig.deploymentName()))
                        .build())
                .withTemplate(new PodTemplateSpecBuilder()
                        .withMetadata(new ObjectMetaBuilder()
                                .withLabels(ImmutableMap.of(
                                        "name", deploymentConfig.deploymentName(),
                                        "revision", System.getenv().getOrDefault("REVISION_ID", "none")))
                                .withAnnotations(ImmutableMap.<String, String>builder()
                                        .put("prometheus.io/scrape", "true")
                                        .put("prometheus.io/scheme", "https")
                                        .put("prometheus.io/path", "/internal/metrics")
                                        .put("prometheus.io/port", String.valueOf(deploymentConfig.containerPort()))
                                        .build())
                                .build())
                        .withSpec(new PodSpecBuilder()
                                .withContainers(new ContainerBuilder()
                                        .withResources(new ResourceRequirementsBuilder()
                                                .withLimits(!deploymentConfig.request() ? resources : ImmutableMap.of())
                                                .withRequests(deploymentConfig.request() ? resources : ImmutableMap.of())
                                                .build())
                                        .withImage(deploymentConfig.image())
                                        .withName(deploymentConfig.deploymentName())
                                        .withEnv(envVars.build())
                                        .withImagePullPolicy("Always")
                                        .withReadinessProbe(createProbe(deploymentConfig, Duration.ofSeconds(5)))
                                        .withLivenessProbe(createProbe(deploymentConfig, Duration.ofSeconds(15)))
                                        .withPorts(ImmutableList.of(new ContainerPortBuilder()
                                                .withContainerPort(deploymentConfig.containerPort())
                                                .withName("http")
                                                .build()))
                                        .withVolumeMounts(
                                                new VolumeMountBuilder()
                                                        .withName("tls")
                                                        .withMountPath("/etc/tls")
                                                        .withReadOnly(true)
                                                        .build(),
                                                new VolumeMountBuilder()
                                                        .withName("rpcacls")
                                                        .withMountPath("/etc/rpcacls")
                                                        .withReadOnly(true)
                                                        .build())
                                        .build())
                                .withVolumes(
                                        new VolumeBuilder()
                                                .withName("tls")
                                                .withSecret(new SecretVolumeSourceBuilder()
                                                        .withSecretName("server-tls")
                                                        .build())
                                                .build(),
                                        new VolumeBuilder()
                                                .withName("rpcacls")
                                                .withConfigMap(new ConfigMapVolumeSourceBuilder()
                                                        .withName("rpcacls")
                                                        .build())
                                                .build())
                                .build())
                        .build())
                .build())
        .build();
KubernetesClient client = new DefaultKubernetesClient();
Service service = new ServiceBuilder()
        .withMetadata(new ObjectMetaBuilder()
                .withName(deploymentConfig.deploymentName())
                .withNamespace(deploymentConfig.namespace())
                .withAnnotations(ImmutableMap.<String, String>builder()
                        .put("service.alpha.kubernetes.io/app-protocols", "{\"https\":\"HTTPS\"}")
                        .put("prometheus.io/scrape", "true")
                        .put("prometheus.io/scheme", "https")
                        .put("prometheus.io/path", "/internal/metrics")
                        .put("prometheus.io/port", String.valueOf(deploymentConfig.containerPort()))
                        .put("prometheus.io/probe", "true")
                        .build())
                .build())
        .withSpec(createServiceSpec(deploymentConfig))
        .build();
Map<String, Service> additionalServices = new HashMap<>();
for (String path : deploymentConfig.additionalServicePaths()) {
String sanitizedPath = path;
if (sanitizedPath.endsWith("/*")) {
sanitizedPath = sanitizedPath.substring(0, path.length() - 2);
}
String serviceName = deploymentConfig.deploymentName() + sanitizedPath.replace('/', '-');
additionalServices.put(path, new ServiceBuilder()
        .withMetadata(new ObjectMetaBuilder()
                .withName(serviceName)
                .withNamespace(deploymentConfig.namespace())
                .withAnnotations(ImmutableMap.of("service.alpha.kubernetes.io/app-protocols", "{\"https\":\"HTTPS\"}"))
                .build())
        .withSpec(createServiceSpec(deploymentConfig))
        .build());
}
client.resource(deployment).createOrReplace();
deployService(service, client);
additionalServices.values().forEach(s -> deployService(s, client));
if (deploymentConfig.externalHost() != null) {
List<HTTPIngressPath> ingressPaths = new ArrayList<>();
additionalServices.forEach((path, s) -> ingressPaths.add(createIngressPath(path, s.getMetadata().getName(), deploymentConfig)));
ingressPaths.add(createIngressPath("/*", deploymentConfig.deploymentName(), deploymentConfig));
Ingress ingress = new IngressBuilder()
        .withMetadata(new ObjectMetaBuilder()
                .withNamespace(deploymentConfig.namespace())
                .withName(deploymentConfig.deploymentName())
                .withAnnotations(ImmutableMap.of(
                        "kubernetes.io/tls-acme", "true",
                        "kubernetes.io/ingress.class", "gce"))
                .build())
        .withSpec(new IngressSpecBuilder()
                .withTls(new IngressTLSBuilder()
                        .withSecretName(deploymentConfig.deploymentName() + "-tls")
                        .withHosts(deploymentConfig.externalHost())
                        .build())
                .withRules(new IngressRuleBuilder()
                        .withHost(deploymentConfig.externalHost())
                        .withHttp(new HTTPIngressRuleValueBuilder()
                                .withPaths(ingressPaths)
                                .build())
                        .build())
                .build())
        .build();
client.resource(ingress).createOrReplace();
}
}
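createProbe and createServiceSpec are defined elsewhere in the plugin and are not shown here. A minimal sketch of an HTTPS readiness/liveness probe such a helper could return (the health-check path is an assumption; only the port and scheme follow from the annotations above):
private static Probe createProbe(ImmutableDeploymentConfiguration deploymentConfig, Duration timeout) {
    return new ProbeBuilder()
            .withHttpGet(new HTTPGetActionBuilder()
                    // Hypothetical health endpoint; the real path is not shown in this snippet.
                    .withPath("/internal/health")
                    .withPort(new IntOrString(deploymentConfig.containerPort()))
                    .withScheme("HTTPS")
                    .build())
            .withTimeoutSeconds((int) timeout.getSeconds())
            .build();
}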
use of io.fabric8.kubernetes.api.model.extensions.Deployment in project carbon-apimgt by wso2.
the class ServiceDiscovererKubernetes method listServices.
/**
* {@inheritDoc}
*/
@Override
public List<Endpoint> listServices(String namespace, Map<String, String> criteria) throws ServiceDiscoveryException {
List<Endpoint> endpointList = new ArrayList<>();
if (client != null) {
log.debug("Looking for services, with the specified labels, in namespace {}", namespace);
try {
List<Service> serviceList = client.services().inNamespace(namespace).withLabels(criteria).list().getItems();
addServicesToEndpointList(serviceList, endpointList);
} catch (KubernetesClientException | MalformedURLException e) {
String msg = "Error occurred while trying to list services using Kubernetes client";
throw new ServiceDiscoveryException(msg, e, ExceptionCodes.ERROR_WHILE_TRYING_TO_DISCOVER_SERVICES);
} catch (NoSuchMethodError e) {
String msg = "Filtering criteria in the deployment yaml includes unwanted characters";
throw new ServiceDiscoveryException(msg, e, ExceptionCodes.ERROR_WHILE_TRYING_TO_DISCOVER_SERVICES);
}
}
return endpointList;
}
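A short usage sketch, assuming serviceDiscoverer is an already-initialized ServiceDiscovererKubernetes instance (the namespace and label values are placeholders; only listServices(namespace, criteria) is taken from the snippet above):
// Discover all services labelled app=foo-service in the "dev" namespace.
Map<String, String> criteria = new HashMap<>();
criteria.put("app", "foo-service");
List<Endpoint> endpoints = serviceDiscoverer.listServices("dev", criteria);
System.out.println("Discovered " + endpoints.size() + " endpoints");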