Use of io.fabric8.kubernetes.client.dsl.Resource in project fabric8-maven-plugin by fabric8io.
The convert method of the class DeploymentConfigOpenShiftConverter.
@Override
public HasMetadata convert(HasMetadata item, boolean trimImageInContainerSpec, boolean enableAutomaticTrigger) {
    if (item instanceof DeploymentConfig) {
        DeploymentConfig resource = (DeploymentConfig) item;
        if (openshiftDeployTimeoutSeconds != null && openshiftDeployTimeoutSeconds > 0) {
            DeploymentConfigBuilder builder = new DeploymentConfigBuilder(resource);
            DeploymentConfigFluent.SpecNested<DeploymentConfigBuilder> specBuilder;
            // Edit the existing spec if present, otherwise start a new one
            if (resource.getSpec() != null) {
                specBuilder = builder.editSpec();
            } else {
                specBuilder = builder.withNewSpec();
            }
            // Apply a Rolling strategy with the configured deploy timeout
            specBuilder.withNewStrategy()
                    .withType("Rolling")
                    .withNewRollingParams().withTimeoutSeconds(openshiftDeployTimeoutSeconds).endRollingParams()
                .endStrategy();
            specBuilder.endSpec();
            return builder.build();
        }
    }
    return item;
}
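A minimal usage sketch of the converter, assuming a constructor that simply takes the timeout in seconds; the constructor and the sample DeploymentConfig below are assumptions, only the convert signature comes from the snippet above:

// Sketch only: constructor argument and sample resource are assumptions
DeploymentConfig dc = new DeploymentConfigBuilder()
        .withNewMetadata().withName("my-app").endMetadata()
        .withNewSpec().withReplicas(1).endSpec()
        .build();
DeploymentConfigOpenShiftConverter converter = new DeploymentConfigOpenShiftConverter(600L);
HasMetadata converted = converter.convert(dc, false, true);
// converted now carries spec.strategy.type = "Rolling" with rollingParams.timeoutSeconds = 600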
Use of io.fabric8.kubernetes.client.dsl.Resource in project fabric8-maven-plugin by fabric8io.
The updateImageName method of the class DockerImageWatcher.
private void updateImageName(KubernetesClient kubernetes, String namespace, HasMetadata entity, String imagePrefix, String imageName) {
    String name = KubernetesHelper.getName(entity);
    if (entity instanceof Deployment) {
        Deployment resource = (Deployment) entity;
        DeploymentSpec spec = resource.getSpec();
        if (spec != null) {
            if (updateImageName(entity, spec.getTemplate(), imagePrefix, imageName)) {
                kubernetes.extensions().deployments().inNamespace(namespace).withName(name).replace(resource);
            }
        }
    } else if (entity instanceof ReplicaSet) {
        ReplicaSet resource = (ReplicaSet) entity;
        ReplicaSetSpec spec = resource.getSpec();
        if (spec != null) {
            if (updateImageName(entity, spec.getTemplate(), imagePrefix, imageName)) {
                kubernetes.extensions().replicaSets().inNamespace(namespace).withName(name).replace(resource);
            }
        }
    } else if (entity instanceof ReplicationController) {
        ReplicationController resource = (ReplicationController) entity;
        ReplicationControllerSpec spec = resource.getSpec();
        if (spec != null) {
            if (updateImageName(entity, spec.getTemplate(), imagePrefix, imageName)) {
                kubernetes.replicationControllers().inNamespace(namespace).withName(name).replace(resource);
            }
        }
    } else if (entity instanceof DeploymentConfig) {
        DeploymentConfig resource = (DeploymentConfig) entity;
        DeploymentConfigSpec spec = resource.getSpec();
        if (spec != null) {
            if (updateImageName(entity, spec.getTemplate(), imagePrefix, imageName)) {
                OpenShiftClient openshiftClient = new Controller(kubernetes).getOpenShiftClientOrNull();
                if (openshiftClient == null) {
                    // Bail out instead of dereferencing a null client
                    log.warn("Ignoring DeploymentConfig %s as not connected to an OpenShift cluster", name);
                    return;
                }
                openshiftClient.deploymentConfigs().inNamespace(namespace).withName(name).replace(resource);
            }
        }
    }
}
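The overloaded updateImageName(entity, template, imagePrefix, imageName) helper is not shown above; a minimal sketch of what such a pod-template update could look like, assuming it rewrites matching container images and reports whether anything changed (the name, matching rule, and return convention are assumptions, not the plugin's actual code):

// Sketch of the pod-template-level helper (assumed behaviour)
private boolean updateImageName(HasMetadata entity, PodTemplateSpec template, String imagePrefix, String imageName) {
    boolean changed = false;
    if (template != null && template.getSpec() != null) {
        for (Container container : template.getSpec().getContainers()) {
            String image = container.getImage();
            // Only rewrite containers whose current image matches the watched prefix (assumed rule)
            if (image != null && image.startsWith(imagePrefix)) {
                container.setImage(imageName);
                changed = true;
            }
        }
    }
    return changed;
}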
Use of io.fabric8.kubernetes.client.dsl.Resource in project strimzi by strimzi.
The deleteWhenResourceExistsStillDeletes method of the class AbstractResourceOperatorTest.
@Test
public void deleteWhenResourceExistsStillDeletes(TestContext context) {
    T resource = resource();
    // Mock the fluent DSL chain: operation().inNamespace(...).withName(...) -> Resource
    Resource mockResource = mock(resourceType());
    when(mockResource.get()).thenReturn(resource);
    NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class);
    when(mockNameable.withName(matches(RESOURCE_NAME))).thenReturn(mockResource);
    MixedOperation mockCms = mock(MixedOperation.class);
    when(mockCms.inNamespace(matches(NAMESPACE))).thenReturn(mockNameable);
    C mockClient = mock(clientType());
    mocker(mockClient, mockCms);
    AbstractResourceOperator<C, T, L, D, R, P> op = createResourceOperations(vertx, mockClient);
    Async async = context.async();
    // Reconciling an existing resource to a null desired state should delete it
    op.reconcile(resource.getMetadata().getNamespace(), resource.getMetadata().getName(), null).setHandler(ar -> {
        assertTrue(ar.succeeded());
        verify(mockResource).delete();
        async.complete();
    });
}
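The mocked chain stands in for the client's real fluent DSL; for a concrete resource type such as ConfigMap, the calls being simulated would look roughly like this (a sketch for illustration, not part of the test):

// Equivalent real DSL calls that the mocks simulate, using ConfigMap as an example
ConfigMap existing = client.configMaps().inNamespace(NAMESPACE).withName(RESOURCE_NAME).get();
if (existing != null) {
    // reconcile(namespace, name, null) maps a null desired state to a delete
    client.configMaps().inNamespace(NAMESPACE).withName(RESOURCE_NAME).delete();
}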
Use of io.fabric8.kubernetes.client.dsl.Resource in project strimzi by strimzi.
The fromAssembly method of the class KafkaCluster.
/**
 * Create a Kafka cluster from the deployed StatefulSet resource.
 *
 * @param ss The StatefulSet from which the cluster state should be recovered.
 * @param namespace Kubernetes/OpenShift namespace to which the cluster resources belong.
 * @param cluster Overall cluster name.
 * @return Kafka cluster instance
 */
public static KafkaCluster fromAssembly(StatefulSet ss, String namespace, String cluster) {
    KafkaCluster kafka = new KafkaCluster(namespace, cluster, Labels.fromResource(ss));
    kafka.setReplicas(ss.getSpec().getReplicas());
    // Recover image and health-check settings from the first container of the pod template
    Container container = ss.getSpec().getTemplate().getSpec().getContainers().get(0);
    kafka.setImage(container.getImage());
    kafka.setHealthCheckInitialDelay(container.getReadinessProbe().getInitialDelaySeconds());
    kafka.setHealthCheckTimeout(container.getReadinessProbe().getTimeoutSeconds());
    // Recover broker configuration from the container's environment variables
    Map<String, String> vars = containerEnvVars(container);
    kafka.setZookeeperConnect(vars.getOrDefault(KEY_KAFKA_ZOOKEEPER_CONNECT, ss.getMetadata().getName() + "-zookeeper:2181"));
    kafka.setDefaultReplicationFactor(Integer.parseInt(vars.getOrDefault(KEY_KAFKA_DEFAULT_REPLICATION_FACTOR,
            String.valueOf(DEFAULT_KAFKA_DEFAULT_REPLICATION_FACTOR))));
    kafka.setOffsetsTopicReplicationFactor(Integer.parseInt(vars.getOrDefault(KEY_KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR,
            String.valueOf(DEFAULT_KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR))));
    kafka.setTransactionStateLogReplicationFactor(Integer.parseInt(vars.getOrDefault(KEY_KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR,
            String.valueOf(DEFAULT_KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR))));
    kafka.setMetricsEnabled(Boolean.parseBoolean(vars.getOrDefault(KEY_KAFKA_METRICS_ENABLED, String.valueOf(DEFAULT_KAFKA_METRICS_ENABLED))));
    if (kafka.isMetricsEnabled()) {
        kafka.setMetricsConfigName(metricConfigsName(cluster));
    }
    // Storage: persistent if a volume claim template exists, ephemeral otherwise
    if (!ss.getSpec().getVolumeClaimTemplates().isEmpty()) {
        Storage storage = Storage.fromPersistentVolumeClaim(ss.getSpec().getVolumeClaimTemplates().get(0));
        if (ss.getMetadata().getAnnotations() != null) {
            String deleteClaimAnnotation = String.format("%s/%s", ClusterController.STRIMZI_CLUSTER_CONTROLLER_DOMAIN, Storage.DELETE_CLAIM_FIELD);
            storage.withDeleteClaim(Boolean.valueOf(ss.getMetadata().getAnnotations().computeIfAbsent(deleteClaimAnnotation, s -> "false")));
        }
        kafka.setStorage(storage);
    } else {
        kafka.setStorage(new Storage(Storage.StorageType.EPHEMERAL));
    }
    return kafka;
}
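The containerEnvVars helper referenced above is not included in this excerpt; a sketch of a helper with the assumed behaviour of flattening the container's env entries into a name-to-value map:

// Assumed behaviour, not Strimzi's exact implementation
protected static Map<String, String> containerEnvVars(Container container) {
    Map<String, String> vars = new HashMap<>();
    if (container.getEnv() != null) {
        for (EnvVar envVar : container.getEnv()) {
            vars.put(envVar.getName(), envVar.getValue());
        }
    }
    return vars;
}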
Use of io.fabric8.kubernetes.client.dsl.Resource in project strimzi by strimzi.
The generateBuildConfig method of the class KafkaConnectS2ICluster.
/**
 * Generate a new BuildConfig.
 *
 * @return BuildConfig resource definition
 */
public BuildConfig generateBuildConfig() {
    // Rebuild whenever the BuildConfig changes or the source image stream tag is updated
    BuildTriggerPolicy triggerConfigChange = new BuildTriggerPolicy();
    triggerConfigChange.setType("ConfigChange");
    BuildTriggerPolicy triggerImageChange = new BuildTriggerPolicy();
    triggerImageChange.setType("ImageChange");
    triggerImageChange.setImageChange(new ImageChangeTrigger());
    BuildConfig build = new BuildConfigBuilder()
            .withNewMetadata().withName(name).withLabels(getLabelsWithName()).withNamespace(namespace).endMetadata()
            .withNewSpec()
                .withFailedBuildsHistoryLimit(5)
                .withNewOutput().withNewTo().withKind("ImageStreamTag").withName(image).endTo().endOutput()
                .withRunPolicy("Serial")
                .withNewSource().withType("Binary").withBinary(new BinaryBuildSource()).endSource()
                .withNewStrategy().withType("Source").withNewSourceStrategy()
                    .withNewFrom().withKind("ImageStreamTag").withName(getSourceImageStreamName() + ":" + sourceImageTag).endFrom()
                .endSourceStrategy().endStrategy()
                .withTriggers(triggerConfigChange, triggerImageChange)
            .endSpec()
            .build();
    return build;
}
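The generated BuildConfig would then be created or updated through the OpenShift client DSL, roughly as follows (a sketch; the client and cluster variables are assumptions, not part of the class above):

// Sketch: apply the generated BuildConfig with an OpenShiftClient (variable names assumed)
BuildConfig desired = cluster.generateBuildConfig();
client.buildConfigs().inNamespace(namespace).createOrReplace(desired);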