Use of io.fabric8.openshift.api.model.DeploymentConfig in project syndesis by syndesisio.
In the class OpenShiftServiceImplTest, the method testDeploy:
@SuppressWarnings({ "PMD.ExcessiveMethodLength", "PMD.JUnitTestsShouldIncludeAssert" })
@Test
public void testDeploy() {
String name = "test-deployment";
OpenShiftConfigurationProperties config = new OpenShiftConfigurationProperties();
OpenShiftServiceImpl service = new OpenShiftServiceImpl(client, config);
DeploymentData deploymentData = new DeploymentData.Builder()
    .addAnnotation("testName", testName.getMethodName())
    .addLabel("type", "test")
    .addSecretEntry("secret-key", "secret-val")
    .withImage("testimage")
    .build();
ImageStream expectedImageStream = new ImageStreamBuilder().withNewMetadata().withName(name).endMetadata().build();
Secret expectedSecret = new SecretBuilder()
    .withNewMetadata()
        .withName(name)
        .addToAnnotations(deploymentData.getAnnotations())
        .addToLabels(deploymentData.getLabels())
    .endMetadata()
    .withStringData(deploymentData.getSecret())
    .build();
DeploymentConfig expectedDeploymentConfig = new DeploymentConfigBuilder()
    .withNewMetadata()
        .withName(OpenShiftServiceImpl.openshiftName(name))
        .addToAnnotations(deploymentData.getAnnotations())
        .addToLabels(deploymentData.getLabels())
    .endMetadata()
    .withNewSpec()
        .withReplicas(1)
        .addToSelector("integration", name)
        .withNewStrategy()
            .withType("Recreate")
            .withNewResources()
                .addToLimits("memory", new Quantity(config.getDeploymentMemoryLimitMi() + "Mi"))
                .addToRequests("memory", new Quantity(config.getDeploymentMemoryRequestMi() + "Mi"))
            .endResources()
        .endStrategy()
        .withRevisionHistoryLimit(0)
        .withNewTemplate()
            .withNewMetadata()
                .addToLabels("integration", name)
                .addToLabels(OpenShiftServiceImpl.COMPONENT_LABEL, "integration")
                .addToLabels(deploymentData.getLabels())
                .addToAnnotations(deploymentData.getAnnotations())
                .addToAnnotations("prometheus.io/scrape", "true")
                .addToAnnotations("prometheus.io/port", "9779")
            .endMetadata()
            .withNewSpec()
                .addNewContainer()
                    .withImage(deploymentData.getImage())
                    .withImagePullPolicy("Always")
                    .withName(name)
                    .addToEnv(new EnvVarBuilder().withName("LOADER_HOME").withValue(config.getIntegrationDataPath()).build())
                    .addToEnv(new EnvVarBuilder().withName("AB_JMX_EXPORTER_CONFIG").withValue("/tmp/src/prometheus-config.yml").build())
                    .addNewPort().withName("jolokia").withContainerPort(8778).endPort()
                    .addNewVolumeMount().withName("secret-volume").withMountPath("/deployments/config").withReadOnly(false).endVolumeMount()
                .endContainer()
                .addNewVolume()
                    .withName("secret-volume")
                    .withNewSecret().withSecretName(expectedSecret.getMetadata().getName()).endSecret()
                .endVolume()
            .endSpec()
        .endTemplate()
        .addNewTrigger().withType("ConfigChange").endTrigger()
    .endSpec()
    .withNewStatus().withLatestVersion(1L).endStatus()
    .build();
server.expect().withPath("/oapi/v1/namespaces/test/imagestreams").andReturn(200, expectedImageStream).always();
server.expect().withPath("/api/v1/namespaces/test/secrets").andReturn(200, expectedSecret).always();
server.expect().withPath("/oapi/v1/namespaces/test/deploymentconfigs").andReturn(200, expectedDeploymentConfig).always();
server.expect().withPath("/oapi/v1/namespaces/test/deploymentconfigs/i-test-deployment").andReturn(200, expectedDeploymentConfig).always();
server.expect().withPath("/oapi/v1/namespaces/test/deploymentconfigs/test-deployment").andReturn(200, expectedDeploymentConfig).always();
service.deploy(name, deploymentData);
}
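The server and client fields referenced above are not part of this excerpt; below is a minimal sketch of how a fabric8 mock server is commonly wired for such a test (the field names and the use of OpenShiftMockServer are assumptions, not taken from the Syndesis source):
// Assumed test fixture wiring; the actual Syndesis test class may set this up differently.
OpenShiftMockServer server = new OpenShiftMockServer();
// init() starts the mock HTTP server so that the expectations registered above can be served.
server.init();
NamespacedOpenShiftClient client = server.createOpenShiftClient();
Note that expectations are registered for both test-deployment and i-test-deployment, covering the raw name as well as the prefixed name produced by OpenShiftServiceImpl.openshiftName(name) used in the expected DeploymentConfig above.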
Use of io.fabric8.openshift.api.model.DeploymentConfig in project fabric8 by fabric8io.
In the class DeploymentConfigPodsAssert, the method pods:
public PodSelectionAssert pods() {
spec().isNotNull().selector().isNotNull();
DeploymentConfigSpec spec = this.actual.getSpec();
Integer replicas = spec.getReplicas();
Map<String, String> matchLabels = spec.getSelector();
List<LabelSelectorRequirement> matchExpressions = new ArrayList<>();
return new PodSelectionAssert(client, replicas, matchLabels, matchExpressions, "DeploymentConfig " + KubernetesHelper.getName(actual));
}
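The assertion above reads only the replica count and the selector from the DeploymentConfig spec. A minimal sketch of a DeploymentConfig carrying just those fields, built with the same fabric8 builder used elsewhere in this listing (all names are illustrative):
DeploymentConfig dc = new DeploymentConfigBuilder()
    .withNewMetadata().withName("example-dc").endMetadata()
    .withNewSpec()
        .withReplicas(2)
        .addToSelector("app", "example")
    .endSpec()
    .build();
// pods() on an assert wrapping this object would select pods labelled app=example and expect 2 replicas;
// how the DeploymentConfigPodsAssert instance itself is constructed is not shown in this excerpt.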
Use of io.fabric8.openshift.api.model.DeploymentConfig in project fabric8 by fabric8io.
In the class KubernetesAssert, the method deployments:
/**
* Finds all the resources that create pod selections (Deployment, DeploymentConfig, ReplicaSet, ReplicationController)
* and creates a {@link HasPodSelectionAssert} for making assertions on their pods (for example, that they start up correctly).
*
* @return the assertion object for the deployment
*/
public HasPodSelectionAssert deployments() {
List<HasPodSelectionAssert> asserters = new ArrayList<>();
List<HasMetadata> resources = new ArrayList<>();
try {
resources = KubernetesHelper.findKubernetesResourcesOnClasspath(new Controller(client));
} catch (IOException e) {
fail("Failed to load kubernetes resources on the classpath: " + e, e);
}
for (HasMetadata resource : resources) {
HasPodSelectionAssert asserter = createPodSelectionAssert(resource);
if (asserter != null) {
asserters.add(asserter);
}
}
String message = "No pod selection kinds found on the classpath such as Deployment, DeploymentConfig, ReplicaSet, ReplicationController";
// TODO we don't yet support size > 1
assertThat(asserters).describedAs(message).isNotEmpty();
if (asserters.size() == 1) {
return asserters.get(0);
}
return new MultiHasPodSelectionAssert(asserters);
}
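A typical call site chains this method off the fabric8 assertions entry point; the exact chain below is a sketch based on the common kubernetes-assertions usage pattern, not code taken from this project:
// assertThat(KubernetesClient) is the static entry point in io.fabric8.kubernetes.assertions.Assertions.
assertThat(client).deployments().pods().isPodReadyForPeriod();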
Use of io.fabric8.openshift.api.model.DeploymentConfig in project fabric8 by fabric8io.
In the class Controller, the method applyEntity:
/**
* Applies the given DTO onto the Kubernetes master.
*/
public void applyEntity(Object dto, String sourceName) throws Exception {
if (dto instanceof Pod) {
applyPod((Pod) dto, sourceName);
} else if (dto instanceof ReplicationController) {
applyReplicationController((ReplicationController) dto, sourceName);
} else if (dto instanceof Service) {
applyService((Service) dto, sourceName);
} else if (dto instanceof Namespace) {
applyNamespace((Namespace) dto);
} else if (dto instanceof Route) {
applyRoute((Route) dto, sourceName);
} else if (dto instanceof BuildConfig) {
applyBuildConfig((BuildConfig) dto, sourceName);
} else if (dto instanceof DeploymentConfig) {
DeploymentConfig resource = (DeploymentConfig) dto;
OpenShiftClient openShiftClient = getOpenShiftClientOrNull();
if (openShiftClient != null && openShiftClient.supportsOpenShiftAPIGroup(OpenShiftAPIGroups.APPS)) {
applyResource(resource, sourceName, openShiftClient.deploymentConfigs());
} else {
LOG.warn("Not connected to OpenShift cluster so cannot apply entity " + dto);
}
} else if (dto instanceof PolicyBinding) {
applyPolicyBinding((PolicyBinding) dto, sourceName);
} else if (dto instanceof RoleBinding) {
applyRoleBinding((RoleBinding) dto, sourceName);
} else if (dto instanceof Role) {
Role resource = (Role) dto;
OpenShiftClient openShiftClient = getOpenShiftClientOrNull();
if (openShiftClient != null && openShiftClient.supportsOpenShiftAPIGroup(OpenShiftAPIGroups.AUTHORIZATION)) {
applyResource(resource, sourceName, openShiftClient.roles());
} else {
LOG.warn("Not connected to OpenShift cluster so cannot apply entity " + dto);
}
} else if (dto instanceof ImageStream) {
applyImageStream((ImageStream) dto, sourceName);
} else if (dto instanceof OAuthClient) {
applyOAuthClient((OAuthClient) dto, sourceName);
} else if (dto instanceof Template) {
applyTemplate((Template) dto, sourceName);
} else if (dto instanceof ServiceAccount) {
applyServiceAccount((ServiceAccount) dto, sourceName);
} else if (dto instanceof Secret) {
applySecret((Secret) dto, sourceName);
} else if (dto instanceof ConfigMap) {
applyResource((ConfigMap) dto, sourceName, kubernetesClient.configMaps());
} else if (dto instanceof DaemonSet) {
applyResource((DaemonSet) dto, sourceName, kubernetesClient.extensions().daemonSets());
} else if (dto instanceof Deployment) {
applyResource((Deployment) dto, sourceName, kubernetesClient.extensions().deployments());
} else if (dto instanceof ReplicaSet) {
applyResource((ReplicaSet) dto, sourceName, kubernetesClient.extensions().replicaSets());
} else if (dto instanceof StatefulSet) {
applyResource((StatefulSet) dto, sourceName, kubernetesClient.apps().statefulSets());
} else if (dto instanceof Ingress) {
applyResource((Ingress) dto, sourceName, kubernetesClient.extensions().ingresses());
} else if (dto instanceof PersistentVolumeClaim) {
applyPersistentVolumeClaim((PersistentVolumeClaim) dto, sourceName);
} else if (dto instanceof HasMetadata) {
HasMetadata entity = (HasMetadata) dto;
try {
String namespace = getNamespace();
String resourceNamespace = getNamespace(entity);
if (Strings.isNotBlank(namespace) && Strings.isNullOrBlank(resourceNamespace)) {
getOrCreateMetadata(entity).setNamespace(namespace);
}
LOG.info("Applying " + getKind(entity) + " " + getName(entity) + " from " + sourceName);
kubernetesClient.resource(entity).inNamespace(namespace).createOrReplace();
} catch (Exception e) {
onApplyError("Failed to create " + getKind(entity) + " from " + sourceName + ". " + e, e);
}
} else {
throw new IllegalArgumentException("Unknown entity type " + dto);
}
}
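Combined with the Controller construction shown in the deployments() snippet above (new Controller(client)), applying a DeploymentConfig programmatically might look like the following sketch; the resource name and source name are illustrative:
Controller controller = new Controller(client);
DeploymentConfig dc = new DeploymentConfigBuilder()
    .withNewMetadata().withName("example-dc").endMetadata()
    .build();
// Dispatches to the DeploymentConfig branch above, which requires an OpenShift client
// supporting the APPS API group; applyEntity is declared to throw Exception, so the caller must handle it.
controller.applyEntity(dc, "example-source.yml");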
Use of io.fabric8.openshift.api.model.DeploymentConfig in project kie-wb-common by kiegroup.
In the class OpenShiftClient, the method getDeploymentConfigResource:
private DeployableScalableResource<DeploymentConfig, DoneableDeploymentConfig> getDeploymentConfigResource(Service service) {
if (service != null) {
String prjName = service.getMetadata().getNamespace();
Map<String, String> selector = service.getSpec().getSelector();
String dcName = selector.get("deploymentconfig");
if (dcName == null) {
dcName = selector.get("deploymentConfig");
}
if (dcName != null) {
return delegate.deploymentConfigs().inNamespace(prjName).withName(dcName);
}
}
return null;
}
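The lookup above only succeeds when the Service selector carries a deploymentconfig (or deploymentConfig) key, as is typical for services generated alongside a DeploymentConfig. A minimal sketch of such a Service, with illustrative names:
Service service = new ServiceBuilder()
    .withNewMetadata().withName("example-svc").withNamespace("example-project").endMetadata()
    .withNewSpec().addToSelector("deploymentconfig", "example-dc").endSpec()
    .build();
// getDeploymentConfigResource(service) would then resolve to
// delegate.deploymentConfigs().inNamespace("example-project").withName("example-dc").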