Example 76 with HasMetadata

use of io.fabric8.kubernetes.api.model.HasMetadata in project vertx-openshift-it by cescoffier.

the class CircuitBreakerIT method deployApp.

/**
 * Deploys the application described by the given template and returns the URL of its route.
 *
 * @param name the name under which the deployment is tracked
 * @param templatePath path to the OpenShift template file to deploy
 * @return the app route
 * @throws IOException if the template file cannot be read
 */
private static String deployApp(String name, String templatePath) throws IOException {
    List<? extends HasMetadata> entities = OPENSHIFT.deploy(name, new File(templatePath));
    // The route is created under the same name as the application's DeploymentConfig.
    String appName = entities.stream()
            .filter(hm -> hm instanceof DeploymentConfig)
            .map(hm -> (DeploymentConfig) hm)
            .map(dc -> dc.getMetadata().getName())
            .findFirst()
            .orElseThrow(() -> new IllegalStateException("Application deployment config not found"));
    Route route = OPENSHIFT.client().routes().inNamespace(OPENSHIFT.project()).withName(appName).get();
    assertThat(route).isNotNull();
    return "http://" + route.getSpec().getHost();
}
Also used : CircuitBreakerState(io.vertx.circuitbreaker.CircuitBreakerState) Awaitility.await(org.awaitility.Awaitility.await) AfterClass(org.junit.AfterClass) BeforeClass(org.junit.BeforeClass) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) IsEqual.equalTo(org.hamcrest.core.IsEqual.equalTo) Pod(io.fabric8.kubernetes.api.model.Pod) DeploymentConfig(io.fabric8.openshift.api.model.DeploymentConfig) Test(org.junit.Test) IOException(java.io.IOException) Collectors(java.util.stream.Collectors) HasMetadata(io.fabric8.kubernetes.api.model.HasMetadata) File(java.io.File) OpenShiftTestAssistant(io.vertx.it.openshift.utils.OpenShiftTestAssistant) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) Route(io.fabric8.openshift.api.model.Route) Response(io.restassured.response.Response) Optional(java.util.Optional) JsonObject(io.vertx.core.json.JsonObject) RestAssured.get(io.restassured.RestAssured.get) RestAssured(io.restassured.RestAssured) Before(org.junit.Before)
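
A test in this style would typically deploy once per class and then poll the returned route until the app answers. A minimal usage sketch, assuming a hypothetical @BeforeClass hook; the template path, app name, and expected status code are illustrative, not from the original source:

private static String route;

@BeforeClass
public static void deploy() throws IOException {
    // Hypothetical template location; the real path comes from the test resources.
    route = deployApp("circuit-breaker", "src/test/resources/template.json");
}

@Test
public void testAppIsServing() {
    // Deployment is asynchronous, so poll the route until it responds.
    await().atMost(5, TimeUnit.MINUTES).untilAsserted(() ->
        RestAssured.get(route).then().statusCode(200));
}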

Example 77 with HasMetadata

use of io.fabric8.kubernetes.api.model.HasMetadata in project vertx-openshift-it by cescoffier.

the class OpenShiftTestAssistant method deploy.

public List<? extends HasMetadata> deploy(String name, File template) throws IOException {
    try (FileInputStream fis = new FileInputStream(template)) {
        // Load the template and create (or replace) every resource it defines.
        List<HasMetadata> entities = client.load(fis).createOrReplace();
        // Track the created resources under the given name so they can be cleaned up later.
        created.put(name, entities);
        System.out.println(name + " deployed, " + entities.size() + " object(s) created.");
        return entities;
    }
}
Also used : HasMetadata(io.fabric8.kubernetes.api.model.HasMetadata) FileInputStream(java.io.FileInputStream)
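
Callers can then inspect the returned entities, for example to report what the template produced. A minimal sketch, assuming an already-constructed OpenShiftTestAssistant named assistant and an illustrative template file:

List<? extends HasMetadata> entities = assistant.deploy("my-app", new File("target/template.yml"));
// Each entity exposes its kind and name through the HasMetadata interface.
for (HasMetadata entity : entities) {
    System.out.println(entity.getKind() + "/" + entity.getMetadata().getName());
}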

Example 78 with HasMetadata

use of io.fabric8.kubernetes.api.model.HasMetadata in project strimzi by strimzi.

the class AbstractAssemblyOperator method reconcileAll.

/**
 * Reconcile assembly resources in the given namespace having the given selector.
 * Reconciliation works by getting the assembly ConfigMaps in the given namespace with the given selector and
 * comparing with the corresponding {@linkplain #getResources(String) resource}.
 * <ul>
 * <li>An assembly will be {@linkplain #createOrUpdate(Reconciliation, ConfigMap, Handler) created} for all ConfigMaps without same-named resources</li>
 * <li>An assembly will be {@linkplain #delete(Reconciliation, Handler) deleted} for all resources without same-named ConfigMaps</li>
 * </ul>
 *
 * @param trigger A description of the triggering event (timer or watch), used for logging
 * @param namespace The namespace
 * @param selector The selector
 */
public final CountDownLatch reconcileAll(String trigger, String namespace, Labels selector) {
    Labels selectorWithCluster = selector.withType(assemblyType);
    // get ConfigMaps with kind=cluster&type=kafka (or connect, or connect-s2i) for the corresponding cluster type
    List<ConfigMap> cms = configMapOperations.list(namespace, selectorWithCluster);
    Set<String> cmsNames = cms.stream().map(cm -> cm.getMetadata().getName()).collect(Collectors.toSet());
    log.debug("reconcileAll({}, {}): ConfigMaps with labels {}: {}", assemblyType, trigger, selectorWithCluster, cmsNames);
    // get resources with kind=cluster&type=kafka (or connect, or connect-s2i)
    List<? extends HasMetadata> resources = getResources(namespace);
    // now extract the cluster name from those
    Set<String> resourceNames = resources.stream()
            // exclude the Cluster ConfigMap, which won't have a cluster label
            .filter(r -> Labels.kind(r) == null)
            .map(Labels::cluster)
            .collect(Collectors.toSet());
    log.debug("reconcileAll({}, {}): Other resources with labels {}: {}", assemblyType, trigger, selectorWithCluster, resourceNames);
    cmsNames.addAll(resourceNames);
    // We use a latch so that callers (specifically, test callers) know when the reconciliation is complete
    // Using futures would be more complex for no benefit
    CountDownLatch latch = new CountDownLatch(cmsNames.size());
    for (String name : cmsNames) {
        Reconciliation reconciliation = new Reconciliation(trigger, assemblyType, namespace, name);
        reconcileAssembly(reconciliation, result -> {
            if (result.succeeded()) {
                log.info("{}: Assembly reconciled", reconciliation);
            } else {
                log.error("{}: Failed to reconcile", reconciliation);
            }
            latch.countDown();
        });
    }
    return latch;
}
Also used : AssemblyType(io.strimzi.controller.cluster.model.AssemblyType) Logger(org.slf4j.Logger) Vertx(io.vertx.core.Vertx) LoggerFactory(org.slf4j.LoggerFactory) Set(java.util.Set) ConfigMapOperator(io.strimzi.controller.cluster.operator.resource.ConfigMapOperator) HasMetadata(io.fabric8.kubernetes.api.model.HasMetadata) Labels(io.strimzi.controller.cluster.model.Labels) Future(io.vertx.core.Future) Collectors(java.util.stream.Collectors) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) Lock(io.vertx.core.shareddata.Lock) ObjectMeta(io.fabric8.kubernetes.api.model.ObjectMeta) Reconciliation(io.strimzi.controller.cluster.Reconciliation) AsyncResult(io.vertx.core.AsyncResult) Handler(io.vertx.core.Handler)
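
Because reconcileAll returns the latch rather than blocking, a test caller decides how long to wait for the pass to finish. A minimal sketch, assuming a concrete operator instance, JUnit's assertTrue, and illustrative namespace and selector values:

@Test
public void testPeriodicReconciliation() throws InterruptedException {
    // "timer" is only a description of the triggering event, used in log messages.
    CountDownLatch latch = operator.reconcileAll("timer", "my-namespace", selector);
    // The latch counts down once per assembly named by a ConfigMap or existing resource.
    assertTrue(latch.await(60, TimeUnit.SECONDS));
}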

Example 79 with HasMetadata

use of io.fabric8.kubernetes.api.model.HasMetadata in project strimzi by strimzi.

the class LabelPredicateTest method testTest.

@Test
public void testTest() {
    LabelPredicate p = new LabelPredicate("foo", "1", "bar", "2");
    // Matches: both required labels are present with the expected values.
    HasMetadata h = new ConfigMapBuilder().editOrNewMetadata().addToLabels("foo", "1").addToLabels("bar", "2").endMetadata().build();
    assertTrue(p.test(h));
    // Matches: extra labels beyond the required ones are ignored.
    h = new ConfigMapBuilder().editOrNewMetadata().addToLabels("foo", "1").addToLabels("bar", "2").addToLabels("baz", "3").endMetadata().build();
    assertTrue(p.test(h));
    // No match: "foo" carries the wrong value.
    h = new ConfigMapBuilder().editOrNewMetadata().addToLabels("foo", "2").addToLabels("bar", "2").endMetadata().build();
    assertFalse(p.test(h));
    // No match: the required label "bar" is missing.
    h = new ConfigMapBuilder().editOrNewMetadata().addToLabels("foo", "1").endMetadata().build();
    assertFalse(p.test(h));
    // No match: no labels at all.
    h = new ConfigMapBuilder().editOrNewMetadata().endMetadata().build();
    assertFalse(p.test(h));
}
Also used : HasMetadata(io.fabric8.kubernetes.api.model.HasMetadata) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) Test(org.junit.Test)
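
Since the test exercises LabelPredicate purely through test(HasMetadata), it composes directly with stream filtering. A minimal sketch, assuming an illustrative list of resources and the usual java.util imports:

LabelPredicate selector = new LabelPredicate("foo", "1", "bar", "2");
// Keep only resources that carry both required labels with matching values.
List<HasMetadata> matching = resources.stream()
        .filter(selector::test)
        .collect(Collectors.toList());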

Example 80 with HasMetadata

use of io.fabric8.kubernetes.api.model.HasMetadata in project strimzi by strimzi.

the class Controller method update3Way.

private void update3Way(HasMetadata involvedObject, Topic k8sTopic, Topic kafkaTopic, Topic privateTopic, Handler<AsyncResult<Void>> reconciliationResultHandler) {
    if (!privateTopic.getMapName().equals(k8sTopic.getMapName())) {
        reconciliationResultHandler.handle(Future.failedFuture(new ControllerException(involvedObject,
                "Topic '" + kafkaTopic.getTopicName() + "' is already managed via ConfigMap '" + privateTopic.getMapName()
                + "'; it cannot also be managed via the ConfigMap '" + k8sTopic.getMapName() + "'")));
        return;
    }
    TopicDiff oursKafka = TopicDiff.diff(privateTopic, kafkaTopic);
    LOGGER.debug("topicStore->kafkaTopic: {}", oursKafka);
    TopicDiff oursK8s = TopicDiff.diff(privateTopic, k8sTopic);
    LOGGER.debug("topicStore->k8sTopic: {}", oursK8s);
    String conflict = oursKafka.conflict(oursK8s);
    if (conflict != null) {
        final String message = "ConfigMap and Topic both changed in a conflicting way: " + conflict;
        LOGGER.error(message);
        enqueue(new Event(involvedObject, message, EventType.INFO, eventResult -> {
        }));
        reconciliationResultHandler.handle(Future.failedFuture(new Exception(message)));
    } else {
        TopicDiff merged = oursKafka.merge(oursK8s);
        LOGGER.debug("Diffs do not conflict, merged diff: {}", merged);
        if (merged.isEmpty()) {
            LOGGER.info("All three topics are identical");
            reconciliationResultHandler.handle(Future.succeededFuture());
        } else {
            Topic result = merged.apply(privateTopic);
            int partitionsDelta = merged.numPartitionsDelta();
            if (partitionsDelta < 0) {
                final String message = "Number of partitions cannot be decreased";
                LOGGER.error(message);
                enqueue(new Event(involvedObject, message, EventType.INFO, eventResult -> {
                }));
                reconciliationResultHandler.handle(Future.failedFuture(new Exception(message)));
            } else {
                if (merged.changesReplicationFactor()) {
                    LOGGER.error("Changes replication factor");
                    enqueue(new ChangeReplicationFactor(result, involvedObject, null));
                }
                // TODO What if we increase min.in.sync.replicas and the number of replicas,
                // such that the old number of replicas < the new min isr? But likewise
                // we could decrease, so order of tasks in the queue will need to change
                // depending on what the diffs are.
                LOGGER.debug("Updating cm, kafka topic and topicStore");
                // TODO replace this with compose
                enqueue(new UpdateConfigMap(result, ar -> {
                    Handler<Void> topicStoreHandler = ignored -> enqueue(new UpdateInTopicStore(result, involvedObject, reconciliationResultHandler));
                    Handler<Void> partitionsHandler;
                    if (partitionsDelta > 0) {
                        partitionsHandler = ar4 -> enqueue(new IncreaseKafkaPartitions(result, involvedObject, ar2 -> topicStoreHandler.handle(null)));
                    } else {
                        partitionsHandler = topicStoreHandler;
                    }
                    if (merged.changesConfig()) {
                        enqueue(new UpdateKafkaConfig(result, involvedObject, ar2 -> partitionsHandler.handle(null)));
                    } else {
                        enqueue(partitionsHandler);
                    }
                }));
            }
        }
    }
}
Also used : Logger(org.slf4j.Logger) Vertx(io.vertx.core.Vertx) LoggerFactory(org.slf4j.LoggerFactory) Collections.disjoint(java.util.Collections.disjoint) HashMap(java.util.HashMap) HasMetadata(io.fabric8.kubernetes.api.model.HasMetadata) Future(io.vertx.core.Future) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) CompositeFuture(io.vertx.core.CompositeFuture) EventBuilder(io.fabric8.kubernetes.api.model.EventBuilder) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException) Map(java.util.Map) AsyncResult(io.vertx.core.AsyncResult) Handler(io.vertx.core.Handler)
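
Stripped of the event queueing, update3Way is a textbook 3-way merge: diff both divergent copies against the common base, fail on conflict, otherwise combine the diffs and replay them onto the base. A condensed sketch of just that pattern, using only the TopicDiff calls seen above (variable names are illustrative):

// privateTopic is the last-known common base; the other two copies have diverged from it.
TopicDiff baseToKafka = TopicDiff.diff(privateTopic, kafkaTopic);
TopicDiff baseToK8s = TopicDiff.diff(privateTopic, k8sTopic);
String conflict = baseToKafka.conflict(baseToK8s);
if (conflict != null) {
    // The same property changed differently on each side; no safe merge exists.
    throw new IllegalStateException("Conflicting changes: " + conflict);
}
// Non-conflicting changes are combined and applied to the base.
Topic result = baseToKafka.merge(baseToK8s).apply(privateTopic);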

Aggregations

HasMetadata (io.fabric8.kubernetes.api.model.HasMetadata) 90
Test (org.junit.Test) 27
ArrayList (java.util.ArrayList) 25
File (java.io.File) 24
IOException (java.io.IOException) 24
Template (io.fabric8.openshift.api.model.Template) 19
Deployment (io.fabric8.kubernetes.api.model.extensions.Deployment) 18
KubernetesClientException (io.fabric8.kubernetes.client.KubernetesClientException) 18
OpenShiftClient (io.fabric8.openshift.client.OpenShiftClient) 18
KubernetesList (io.fabric8.kubernetes.api.model.KubernetesList) 17
DeploymentConfig (io.fabric8.openshift.api.model.DeploymentConfig) 16
ConfigMap (io.fabric8.kubernetes.api.model.ConfigMap) 14
ReplicationController (io.fabric8.kubernetes.api.model.ReplicationController) 14
Service (io.fabric8.kubernetes.api.model.Service) 14
URL (java.net.URL) 11
HashMap (java.util.HashMap) 11
ReplicaSet (io.fabric8.kubernetes.api.model.extensions.ReplicaSet) 10
Map (java.util.Map) 10
MojoExecutionException (org.apache.maven.plugin.MojoExecutionException) 10
JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException) 9