use of io.fabric8.kubernetes.api.model.extensions.Deployment in project flink by apache.
the class KubernetesJobManagerFactoryTest method testDeploymentMetadata.
@Test
public void testDeploymentMetadata() throws IOException {
    kubernetesJobManagerSpecification =
        KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
            flinkPod, kubernetesJobManagerParameters);
    final Deployment resultDeployment = this.kubernetesJobManagerSpecification.getDeployment();

    assertEquals(Constants.APPS_API_VERSION, resultDeployment.getApiVersion());
    assertEquals(KubernetesUtils.getDeploymentName(CLUSTER_ID), resultDeployment.getMetadata().getName());

    final Map<String, String> expectedLabels = getCommonLabels();
    expectedLabels.put(Constants.LABEL_COMPONENT_KEY, Constants.LABEL_COMPONENT_JOB_MANAGER);
    expectedLabels.putAll(userLabels);
    assertEquals(expectedLabels, resultDeployment.getMetadata().getLabels());

    assertThat(resultDeployment.getMetadata().getAnnotations(), equalTo(userAnnotations));
    assertThat(resultDeployment.getMetadata().getOwnerReferences(), Matchers.containsInAnyOrder(OWNER_REFERENCES.toArray()));
}
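For orientation, the expected label map built above combines Flink's common labels with the JobManager component label. A minimal sketch of what it resolves to, assuming the standard Constants values and ignoring the test's userLabels fixture:

// Sketch only: assumes Constants.LABEL_TYPE_KEY = "type", LABEL_APP_KEY = "app",
// LABEL_COMPONENT_KEY = "component"; CLUSTER_ID is a test fixture.
Map<String, String> expectedLabels = new HashMap<>();
expectedLabels.put("type", "flink-native-kubernetes");
expectedLabels.put("app", CLUSTER_ID);
expectedLabels.put("component", "jobmanager");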
use of io.fabric8.kubernetes.api.model.extensions.Deployment in project flink by apache.
the class Fabric8FlinkKubeClient method createTaskManagerPod.
@Override
public CompletableFuture<Void> createTaskManagerPod(KubernetesPod kubernetesPod) {
    return CompletableFuture.runAsync(
        () -> {
            final Deployment masterDeployment =
                this.internalClient
                    .apps()
                    .deployments()
                    .withName(KubernetesUtils.getDeploymentName(clusterId))
                    .get();

            if (masterDeployment == null) {
                throw new RuntimeException(
                    "Failed to find Deployment named " + clusterId + " in namespace " + this.namespace);
            }

            // Note that we should use the uid of the master Deployment for the OwnerReference.
            setOwnerReference(masterDeployment, Collections.singletonList(kubernetesPod.getInternalResource()));

            LOG.debug(
                "Start to create pod with spec {}{}",
                System.lineSeparator(),
                KubernetesUtils.tryToGetPrettyPrintYaml(kubernetesPod.getInternalResource()));

            this.internalClient.pods().create(kubernetesPod.getInternalResource());
        },
        kubeClientExecutorService);
}
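Because the pod creation runs asynchronously on the kube-client executor, a caller typically attaches failure handling to the returned future. A minimal sketch; the kubeClient and taskManagerPod variables are hypothetical stand-ins:

// Hypothetical caller: surface async failures instead of losing them.
kubeClient.createTaskManagerPod(taskManagerPod)
    .whenComplete(
        (ignored, throwable) -> {
            if (throwable != null) {
                LOG.error("Could not create the TaskManager pod.", throwable);
            }
        });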
use of io.fabric8.kubernetes.api.model.extensions.Deployment in project flink by apache.
the class Fabric8FlinkKubeClient method setOwnerReference.
private void setOwnerReference(Deployment deployment, List<HasMetadata> resources) {
    final OwnerReference deploymentOwnerReference =
        new OwnerReferenceBuilder()
            .withName(deployment.getMetadata().getName())
            .withApiVersion(deployment.getApiVersion())
            .withUid(deployment.getMetadata().getUid())
            .withKind(deployment.getKind())
            .withController(true)
            .withBlockOwnerDeletion(true)
            .build();
    resources.forEach(
        resource ->
            resource.getMetadata().setOwnerReferences(Collections.singletonList(deploymentOwnerReference)));
}
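The effect is that every resource passed in is garbage-collected by Kubernetes when the JobManager Deployment is deleted. A sketch of the metadata the builder writes onto each resource; the name and values shown in comments are illustrative:

// Illustrative check of the ownerReference set above; "my-flink-cluster" is a made-up name.
OwnerReference ref = resource.getMetadata().getOwnerReferences().get(0);
// ref.getKind()               -> "Deployment"
// ref.getName()               -> "my-flink-cluster"
// ref.getController()         -> true  (the Deployment manages this resource)
// ref.getBlockOwnerDeletion() -> true  (foreground deletion waits for this resource)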
use of io.fabric8.kubernetes.api.model.extensions.Deployment in project syndesis-qe by syndesisio.
the class Syndesis method deployOperator.
public void deployOperator() {
    List<HasMetadata> resourceList = getOperatorResources();
    final String operatorResourcesName = "syndesis-operator";

    Optional<HasMetadata> serviceAccount = resourceList.stream()
        .filter(resource -> "ServiceAccount".equals(resource.getKind())
            && operatorResourcesName.equals(resource.getMetadata().getName()))
        .findFirst();
    if (serviceAccount.isPresent()) {
        ((ServiceAccount) serviceAccount.get()).getImagePullSecrets()
            .add(new LocalObjectReference(TestConfiguration.syndesisPullSecretName()));
    } else {
        log.error("Service account not found in resources");
    }
    OpenShiftUtils.getInstance().serviceAccounts().withName("default").edit()
        .addToImagePullSecrets(new LocalObjectReference(TestConfiguration.syndesisPullSecretName()))
        .done();

    List<EnvVar> envVarsToAdd = new ArrayList<>();
    envVarsToAdd.add(new EnvVar("TEST_SUPPORT", "true", null));

    // For upgrade, we want to override images only for the "current" version
    if (operatorImage.equals(TestConfiguration.syndesisOperatorImage())) {
        Set<Image> images = EnumSet.allOf(Image.class);
        for (Image image : images) {
            if (TestConfiguration.image(image) != null) {
                // Override the image, e.g. from BUILD_PROPERTIES
                log.info("Overriding " + image.name().toLowerCase() + " image with " + TestConfiguration.image(image));
                envVarsToAdd.add(new EnvVar("RELATED_IMAGE_" + image.name(), TestConfiguration.image(image), null));
            } else {
                // Use images from Quay instead of DockerHub for Syndesis components
                switch (image) {
                    case META:
                    case S2I:
                    case UI:
                    case SERVER:
                    case UPGRADE:
                        log.info("Overriding " + image.name().toLowerCase() + " image with quay variant");
                        String version = TestConfiguration.syndesisInstallVersion() != null
                            ? TestConfiguration.syndesisInstallVersion() : "latest";
                        envVarsToAdd.add(new EnvVar(
                            "RELATED_IMAGE_" + image.name(),
                            String.format("%s/syndesis/syndesis-%s:%s",
                                TestConfiguration.get().readValue(SYNDESIS_DOCKER_REGISTRY),
                                image.name().toLowerCase(), version),
                            null));
                }
            }
        }
    }

    if ((TestUtils.isProdBuild() && getOperatorImage().contains("1.8")) || getOperatorImage().contains("1.11")) {
        // Hotfix for the 1.8 prod version; needed on OSD because it cannot reach the proxied eng repo
        if (TestUtils.isProdBuild()) {
            envVarsToAdd.add(new EnvVar("RELATED_IMAGE_PSQL_EXPORTER",
                "registry.redhat.io/fuse7/fuse-postgres-exporter-rhel7:1.8", null));
        }
        // Needed for the upgrade test when the previous version is 1.11
        DeploymentConfig dc = (DeploymentConfig) resourceList.stream()
            .filter(r -> "DeploymentConfig".equals(r.getKind())
                && operatorResourcesName.equals(r.getMetadata().getName()))
            .findFirst()
            .orElseThrow(() -> new RuntimeException("Unable to find deployment config in operator resources"));
        dc.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv().addAll(envVarsToAdd);
    } else {
        Deployment deployment = (Deployment) resourceList.stream()
            .filter(r -> "Deployment".equals(r.getKind())
                && operatorResourcesName.equals(r.getMetadata().getName()))
            .findFirst()
            .orElseThrow(() -> new RuntimeException("Unable to find deployment in operator resources"));
        deployment.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv().addAll(envVarsToAdd);
    }

    OpenShiftUtils.asRegularUser(() -> OpenShiftUtils.getInstance().resourceList(resourceList).createOrReplace());

    log.info("Waiting for syndesis-operator to be ready");
    try {
        OpenShiftUtils.getInstance().waiters()
            .areExactlyNPodsReady(1, "syndesis.io/component", operatorResourcesName)
            .interval(TimeUnit.SECONDS, 20)
            .timeout(TimeUnit.MINUTES, 10)
            .waitFor();
    } catch (WaiterException e) {
        InfraFail.fail("Unable to find operator pod in 10 minutes");
    }
}
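The image overrides are injected as RELATED_IMAGE_<NAME> environment variables on the operator's container, one per loop iteration. A sketch of what the Quay branch above produces for the SERVER image; the registry host and version are illustrative values, not taken from the configuration:

// Illustrative only: assumes SYNDESIS_DOCKER_REGISTRY resolves to "quay.io"
// and no explicit install version is set, so "latest" is used.
new EnvVar("RELATED_IMAGE_SERVER", "quay.io/syndesis/syndesis-server:latest", null);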
use of io.fabric8.kubernetes.api.model.extensions.Deployment in project syndesis-qe by syndesisio.
the class OpenshiftValidationSteps method setResourceValues.
@When("set resources for deployment config {string}")
public void setResourceValues(String dcName, DataTable dataTable) {
DeploymentConfig dc = OpenShiftUtils.getInstance().getDeploymentConfig(dcName);
Map<String, Quantity> currentLimits = dc.getSpec().getTemplate().getSpec().getContainers().get(0).getResources().getLimits();
Map<String, Quantity> currentRequests = dc.getSpec().getTemplate().getSpec().getContainers().get(0).getResources().getRequests();
Map<String, Quantity> limits = currentLimits == null ? new HashMap<>() : new HashMap<>(currentLimits);
Map<String, Quantity> requests = currentRequests == null ? new HashMap<>() : new HashMap<>(currentRequests);
for (List<String> l : dataTable.asLists()) {
if ("limits".equals(l.get(0))) {
limits.put(l.get(1), new Quantity(l.get(2)));
} else {
requests.put(l.get(1), new Quantity(l.get(2)));
}
}
// @formatter:off
OpenShiftUtils.getInstance().deploymentConfigs().withName(dcName).edit().editSpec().editTemplate().editSpec().editFirstContainer().editResources().withLimits(limits).withRequests(requests).endResources().endContainer().endSpec().endTemplate().endSpec().done();
// @formatter:on
}
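Each DataTable row is read positionally as (limits|requests, resource name, quantity). A hypothetical feature-file invocation of this step, shown as comments; the deployment config name and quantities are made-up values:

// Hypothetical Cucumber usage of the @When step above:
// When set resources for deployment config "syndesis-server"
//   | limits   | memory | 512Mi |
//   | requests | cpu    | 250m  |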