Example 41 with Condition

Use of io.strimzi.api.kafka.model.status.Condition in project strimzi by strimzi.

From class OperatorMetricsTest, method testPauseReconcile.

@Test
public void testPauseReconcile(VertxTestContext context) {
    MetricsProvider metrics = createCleanMetricsProvider();
    AbstractWatchableStatusedResourceOperator resourceOperator = resourceOperatorWithExistingPausedResource();
    AbstractOperator operator = new AbstractOperator(vertx, "TestResource", resourceOperator, metrics, null) {

        @Override
        protected Future createOrUpdate(Reconciliation reconciliation, CustomResource resource) {
            return Future.succeededFuture();
        }

        @Override
        public Set<Condition> validate(Reconciliation reconciliation, CustomResource resource) {
            return new HashSet<>();
        }

        @Override
        protected Future<Boolean> delete(Reconciliation reconciliation) {
            return null;
        }

        @Override
        protected Status createStatus() {
            return new Status() {
            };
        }
    };
    Checkpoint async = context.checkpoint();
    operator.reconcile(new Reconciliation("test", "TestResource", "my-namespace", "my-resource")).onComplete(context.succeeding(v -> context.verify(() -> {
        MeterRegistry registry = metrics.meterRegistry();
        Tag selectorTag = Tag.of("selector", "");
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations").meter().getId().getTags().get(2), is(selectorTag));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations").tag("kind", "TestResource").counter().count(), is(1.0));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.successful").meter().getId().getTags().get(2), is(selectorTag));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.successful").tag("kind", "TestResource").counter().count(), is(1.0));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "resources.paused").meter().getId().getTags().get(2), is(selectorTag));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "resources.paused").tag("kind", "TestResource").gauge().value(), is(1.0));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").meter().getId().getTags().get(2), is(selectorTag));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "TestResource").timer().count(), is(1L));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "reconciliations.duration").tag("kind", "TestResource").timer().totalTime(TimeUnit.MILLISECONDS), greaterThan(0.0));
        assertThat(registry.get(AbstractOperator.METRICS_PREFIX + "resource.state").tag("kind", "TestResource").tag("name", "my-resource").tag("resource-namespace", "my-namespace").gauge().value(), is(1.0));
        async.flag();
    })));
}
Also used : Condition(io.strimzi.api.kafka.model.status.Condition) Status(io.strimzi.api.kafka.model.status.Status) VertxTestContext(io.vertx.junit5.VertxTestContext) CoreMatchers.is(org.hamcrest.CoreMatchers.is) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) MixedOperation(io.fabric8.kubernetes.client.dsl.MixedOperation) AfterAll(org.junit.jupiter.api.AfterAll) HashSet(java.util.HashSet) MicrometerMetricsOptions(io.vertx.micrometer.MicrometerMetricsOptions) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) BeforeAll(org.junit.jupiter.api.BeforeAll) Status(io.strimzi.api.kafka.model.status.Status) Collections.singletonMap(java.util.Collections.singletonMap) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) Version(io.fabric8.kubernetes.model.annotation.Version) Tag(io.micrometer.core.instrument.Tag) Collections.emptyMap(java.util.Collections.emptyMap) MeterNotFoundException(io.micrometer.core.instrument.search.MeterNotFoundException) Collections.emptySet(java.util.Collections.emptySet) Promise(io.vertx.core.Promise) Vertx(io.vertx.core.Vertx) VertxOptions(io.vertx.core.VertxOptions) Set(java.util.Set) VertxPrometheusOptions(io.vertx.micrometer.VertxPrometheusOptions) VertxExtension(io.vertx.junit5.VertxExtension) HasMetadata(io.fabric8.kubernetes.api.model.HasMetadata) Spec(io.strimzi.api.kafka.model.Spec) Future(io.vertx.core.Future) Test(org.junit.jupiter.api.Test) TimeUnit(java.util.concurrent.TimeUnit) NamespaceAndName(io.strimzi.operator.common.model.NamespaceAndName) Labels(io.strimzi.operator.common.model.Labels) MeterRegistry(io.micrometer.core.instrument.MeterRegistry) ObjectMeta(io.fabric8.kubernetes.api.model.ObjectMeta) AbstractWatchableStatusedResourceOperator(io.strimzi.operator.common.operator.resource.AbstractWatchableStatusedResourceOperator) Group(io.fabric8.kubernetes.model.annotation.Group) Checkpoint(io.vertx.junit5.Checkpoint) Matchers.greaterThan(org.hamcrest.Matchers.greaterThan) Condition(io.strimzi.api.kafka.model.status.Condition) Collections(java.util.Collections) CustomResource(io.fabric8.kubernetes.client.CustomResource) CustomResource(io.fabric8.kubernetes.client.CustomResource) MeterRegistry(io.micrometer.core.instrument.MeterRegistry) Checkpoint(io.vertx.junit5.Checkpoint) AbstractWatchableStatusedResourceOperator(io.strimzi.operator.common.operator.resource.AbstractWatchableStatusedResourceOperator) Tag(io.micrometer.core.instrument.Tag) HashSet(java.util.HashSet) Test(org.junit.jupiter.api.Test)
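A note on how a resource ends up counted as paused (a minimal sketch, not the helper used by this test): the resourceOperatorWithExistingPausedResource() fixture presumably returns a resource carrying the pause annotation. The annotation literal below follows the Strimzi documentation and should be read as an assumption here; the helper class is hypothetical and for illustration only.

import io.fabric8.kubernetes.api.model.HasMetadata;

import java.util.HashMap;
import java.util.Map;

public class PauseReconciliationSketch {

    // Annotation name as documented by Strimzi; treat the exact literal as an assumption.
    private static final String PAUSE_ANNOTATION = "strimzi.io/pause-reconciliation";

    public static void pause(HasMetadata resource) {
        Map<String, String> annotations = resource.getMetadata().getAnnotations();
        if (annotations == null) {
            annotations = new HashMap<>();
            resource.getMetadata().setAnnotations(annotations);
        }
        // A paused resource is still reconciled, but the operator only refreshes its status,
        // which is what drives the resources.paused gauge asserted in the test above.
        annotations.put(PAUSE_ANNOTATION, "true");
    }
}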

Example 42 with Condition

Use of io.strimzi.api.kafka.model.status.Condition in project strimzi by strimzi.

From class ValidationVisitorTest, method testValidationErrorsAreLogged.

@Test
public void testValidationErrorsAreLogged() {
    Kafka k = TestUtils.fromYaml("/example.yaml", Kafka.class, true);
    assertThat(k, is(notNullValue()));
    TestLogger logger = TestLogger.create(ValidationVisitorTest.class);
    HasMetadata resource = new KafkaBuilder().withNewMetadata().withName("testname").withNamespace("testnamespace").endMetadata().withApiVersion("v1beta2").build();
    Set<Condition> warningConditions = new HashSet<>();
    ResourceVisitor.visit(Reconciliation.DUMMY_RECONCILIATION, k, new ValidationVisitor(resource, logger, warningConditions));
    List<String> warningMessages = warningConditions.stream().map(Condition::getMessage).collect(Collectors.toList());
    assertThat(warningMessages, hasItem("Contains object at path spec.kafka with an unknown property: foo"));
    assertThat(warningMessages, hasItem("In API version v1beta2 the enableECDSA property at path spec.kafka.listeners.auth.enableECDSA has been deprecated."));
    assertThat(warningMessages, hasItem("In API version v1beta2 the service property at path spec.kafkaExporter.template.service has been deprecated. " + "The Kafka Exporter service has been removed."));
    logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): " + "Contains object at path spec.kafka with an unknown property: foo"));
    logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): " + "In API version v1beta2 the enableECDSA property at path spec.kafka.listeners.auth.enableECDSA has been deprecated."));
    logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): " + "In API version v1beta2 the service property at path spec.kafkaExporter.template.service has been deprecated. " + "The Kafka Exporter service has been removed."));
}
Also used : Condition(io.strimzi.api.kafka.model.status.Condition) HasMetadata(io.fabric8.kubernetes.api.model.HasMetadata) Kafka(io.strimzi.api.kafka.model.Kafka) KafkaBuilder(io.strimzi.api.kafka.model.KafkaBuilder) TestLogger(io.strimzi.test.logging.TestLogger) HashSet(java.util.HashSet) Test(org.junit.jupiter.api.Test)
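For readers unfamiliar with the Condition objects collected above, here is a minimal sketch of building one warning condition by hand. ConditionBuilder is the generated fluent builder for io.strimzi.api.kafka.model.status.Condition; the reason string and the helper class name are assumptions made for illustration, not values taken from the ValidationVisitor.

import io.strimzi.api.kafka.model.status.Condition;
import io.strimzi.api.kafka.model.status.ConditionBuilder;

public class WarningConditionSketch {

    public static Condition deprecationWarning(String path) {
        return new ConditionBuilder()
            .withType("Warning")
            .withStatus("True")
            .withReason("DeprecatedFields")   // assumed reason, for the sketch only
            .withMessage("In API version v1beta2 the property at path " + path + " has been deprecated.")
            .build();
    }
}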

Example 43 with Condition

Use of io.strimzi.api.kafka.model.status.Condition in project strimzi by strimzi.

From class LoggingChangeST, method testNotExistingCMSetsDefaultLogging.

@ParallelNamespaceTest
void testNotExistingCMSetsDefaultLogging(ExtensionContext extensionContext) {
    final String namespaceName = StUtils.getNamespaceBasedOnRbac(namespace, extensionContext);
    final String defaultProps = TestUtils.getFileAsString(TestUtils.USER_PATH + "/../cluster-operator/src/main/resources/kafkaDefaultLoggingProperties");
    final String clusterName = mapWithClusterNames.get(extensionContext.getDisplayName());
    final LabelSelector kafkaSelector = KafkaResource.getLabelSelector(clusterName, KafkaResources.kafkaStatefulSetName(clusterName));
    String cmData = "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender\n" +
        "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout\n" +
        "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} %p %m (%c) [%t]%n\n" +
        "log4j.rootLogger=INFO, CONSOLE\n" +
        "log4j.logger.org.I0Itec.zkclient.ZkClient=INFO\n" +
        "log4j.logger.org.apache.zookeeper=INFO\n" +
        "log4j.logger.kafka=INFO\n" +
        "log4j.logger.org.apache.kafka=INFO";
    String existingCmName = "external-cm";
    String nonExistingCmName = "non-existing-cm-name";
    ConfigMap configMap = new ConfigMapBuilder().withNewMetadata().withName(existingCmName).withNamespace(namespaceName).endMetadata().withData(Collections.singletonMap("log4j.properties", cmData)).build();
    kubeClient().getClient().configMaps().inNamespace(namespaceName).createOrReplace(configMap);
    LOGGER.info("Deploying Kafka with custom logging");
    resourceManager.createResource(extensionContext, KafkaTemplates.kafkaPersistent(clusterName, 3, 1)
        .editOrNewSpec()
            .editKafka()
                .withExternalLogging(new ExternalLoggingBuilder()
                    .withNewValueFrom()
                        .withConfigMapKeyRef(new ConfigMapKeySelectorBuilder()
                            .withKey("log4j.properties")
                            .withName(existingCmName)
                            .withOptional(false)
                            .build())
                    .endValueFrom()
                    .build())
            .endKafka()
        .endSpec()
        .build());
    String kafkaSsName = KafkaResources.kafkaStatefulSetName(clusterName);
    Map<String, String> kafkaPods = PodUtils.podSnapshot(namespaceName, kafkaSelector);
    String log4jFile = cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.DEBUG, KafkaResources.kafkaPodName(clusterName, 0), "kafka", "/bin/bash", "-c", "cat custom-config/log4j.properties").out();
    assertTrue(log4jFile.contains(cmData));
    LOGGER.info("Changing external logging's CM to not existing one");
    KafkaResource.replaceKafkaResourceInSpecificNamespace(clusterName, kafka -> kafka.getSpec().getKafka().setLogging(
        new ExternalLoggingBuilder()
            .withNewValueFrom()
                .withConfigMapKeyRef(new ConfigMapKeySelectorBuilder()
                    .withKey("log4j.properties")
                    .withName(nonExistingCmName)
                    .withOptional(false)
                    .build())
            .endValueFrom()
            .build()), namespaceName);
    RollingUpdateUtils.waitForNoRollingUpdate(namespaceName, kafkaSelector, kafkaPods);
    LOGGER.info("Checking that log4j.properties in custom-config isn't empty and configuration is default");
    log4jFile = cmdKubeClient().namespace(namespaceName).execInPodContainer(Level.DEBUG, KafkaResources.kafkaPodName(clusterName, 0), "kafka", "/bin/bash", "-c", "cat custom-config/log4j.properties").out();
    assertFalse(log4jFile.isEmpty());
    assertTrue(log4jFile.contains(cmData));
    assertFalse(log4jFile.contains(defaultProps));
    LOGGER.info("Checking if Kafka:{} contains error about non-existing CM", clusterName);
    Condition condition = KafkaResource.kafkaClient().inNamespace(namespaceName).withName(clusterName).get().getStatus().getConditions().get(0);
    assertThat(condition.getType(), is(CustomResourceStatus.NotReady.toString()));
    assertTrue(condition.getMessage().matches("ConfigMap " + nonExistingCmName + " with external logging configuration does not exist .*"));
}
Also used : ExternalLoggingBuilder(io.strimzi.api.kafka.model.ExternalLoggingBuilder) Condition(io.strimzi.api.kafka.model.status.Condition) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) ConfigMapKeySelectorBuilder(io.fabric8.kubernetes.api.model.ConfigMapKeySelectorBuilder) ConfigMapBuilder(io.fabric8.kubernetes.api.model.ConfigMapBuilder) LabelSelector(io.fabric8.kubernetes.api.model.LabelSelector) ParallelNamespaceTest(io.strimzi.systemtest.annotations.ParallelNamespaceTest)
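The test above reads the first entry of getConditions() directly. A slightly more defensive pattern, sketched below under the assumption that only the standard Condition getters are available, is to filter the condition list by type; the helper class and method names are hypothetical.

import io.strimzi.api.kafka.model.status.Condition;

import java.util.List;
import java.util.Optional;

public class ConditionLookupSketch {

    // Returns the first condition whose type matches, e.g. "NotReady".
    public static Optional<Condition> findByType(List<Condition> conditions, String type) {
        return conditions.stream()
            .filter(c -> type.equals(c.getType()))
            .findFirst();
    }
}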

Example 44 with Condition

Use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.

From class KafkaMirrorMaker2AssemblyOperator, method createOrUpdate.

@Override
protected Future<KafkaMirrorMaker2Status> createOrUpdate(Reconciliation reconciliation, KafkaMirrorMaker2 kafkaMirrorMaker2) {
    KafkaMirrorMaker2Cluster mirrorMaker2Cluster;
    KafkaMirrorMaker2Status kafkaMirrorMaker2Status = new KafkaMirrorMaker2Status();
    try {
        mirrorMaker2Cluster = KafkaMirrorMaker2Cluster.fromCrd(reconciliation, kafkaMirrorMaker2, versions);
    } catch (Exception e) {
        LOGGER.warnCr(reconciliation, e);
        StatusUtils.setStatusConditionAndObservedGeneration(kafkaMirrorMaker2, kafkaMirrorMaker2Status, Future.failedFuture(e));
        return Future.failedFuture(new ReconciliationException(kafkaMirrorMaker2Status, e));
    }
    Promise<KafkaMirrorMaker2Status> createOrUpdatePromise = Promise.promise();
    String namespace = reconciliation.namespace();
    Map<String, String> annotations = new HashMap<>(1);
    final AtomicReference<String> desiredLogging = new AtomicReference<>();
    boolean mirrorMaker2HasZeroReplicas = mirrorMaker2Cluster.getReplicas() == 0;
    String initCrbName = KafkaMirrorMaker2Resources.initContainerClusterRoleBindingName(kafkaMirrorMaker2.getMetadata().getName(), namespace);
    ClusterRoleBinding initCrb = mirrorMaker2Cluster.generateClusterRoleBinding();
    LOGGER.debugCr(reconciliation, "Updating Kafka MirrorMaker 2.0 cluster");
    connectServiceAccount(reconciliation, namespace, KafkaMirrorMaker2Resources.serviceAccountName(mirrorMaker2Cluster.getCluster()), mirrorMaker2Cluster)
        .compose(i -> connectInitClusterRoleBinding(reconciliation, initCrbName, initCrb))
        .compose(i -> connectNetworkPolicy(reconciliation, namespace, mirrorMaker2Cluster, true))
        .compose(i -> deploymentOperations.scaleDown(reconciliation, namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster.getReplicas()))
        .compose(i -> serviceOperations.reconcile(reconciliation, namespace, mirrorMaker2Cluster.getServiceName(), mirrorMaker2Cluster.generateService()))
        .compose(i -> generateMetricsAndLoggingConfigMap(reconciliation, namespace, mirrorMaker2Cluster))
        .compose(logAndMetricsConfigMap -> {
            String logging = logAndMetricsConfigMap.getData().get(AbstractModel.ANCILLARY_CM_KEY_LOG_CONFIG);
            annotations.put(Annotations.ANNO_STRIMZI_LOGGING_DYNAMICALLY_UNCHANGEABLE_HASH, Util.hashStub(Util.getLoggingDynamicallyUnmodifiableEntries(logging)));
            desiredLogging.set(logging);
            return configMapOperations.reconcile(reconciliation, namespace, mirrorMaker2Cluster.getAncillaryConfigMapName(), logAndMetricsConfigMap);
        })
        .compose(i -> kafkaConnectJmxSecret(reconciliation, namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster))
        .compose(i -> pfa.hasPodDisruptionBudgetV1()
            ? podDisruptionBudgetOperator.reconcile(reconciliation, namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster.generatePodDisruptionBudget())
            : Future.succeededFuture())
        .compose(i -> !pfa.hasPodDisruptionBudgetV1()
            ? podDisruptionBudgetV1Beta1Operator.reconcile(reconciliation, namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster.generatePodDisruptionBudgetV1Beta1())
            : Future.succeededFuture())
        .compose(i -> generateAuthHash(namespace, kafkaMirrorMaker2.getSpec()))
        .compose(hash -> {
            if (hash != null) {
                annotations.put(Annotations.ANNO_STRIMZI_AUTH_HASH, Integer.toString(hash));
            }
            Deployment deployment = mirrorMaker2Cluster.generateDeployment(annotations, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets);
            return deploymentOperations.reconcile(reconciliation, namespace, mirrorMaker2Cluster.getName(), deployment);
        })
        .compose(i -> deploymentOperations.scaleUp(reconciliation, namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster.getReplicas()))
        .compose(i -> deploymentOperations.waitForObserved(reconciliation, namespace, mirrorMaker2Cluster.getName(), 1_000, operationTimeoutMs))
        .compose(i -> mirrorMaker2HasZeroReplicas ? Future.succeededFuture() : deploymentOperations.readiness(reconciliation, namespace, mirrorMaker2Cluster.getName(), 1_000, operationTimeoutMs))
        .compose(i -> mirrorMaker2HasZeroReplicas ? Future.succeededFuture() : reconcileConnectors(reconciliation, kafkaMirrorMaker2, mirrorMaker2Cluster, kafkaMirrorMaker2Status, desiredLogging.get()))
        .map((Void) null)
        .onComplete(reconciliationResult -> {
            List<Condition> conditions = kafkaMirrorMaker2Status.getConditions();
            StatusUtils.setStatusConditionAndObservedGeneration(kafkaMirrorMaker2, kafkaMirrorMaker2Status, reconciliationResult);
            if (!mirrorMaker2HasZeroReplicas) {
                kafkaMirrorMaker2Status.setUrl(KafkaMirrorMaker2Resources.url(mirrorMaker2Cluster.getCluster(), namespace, KafkaMirrorMaker2Cluster.REST_API_PORT));
            }
            if (conditions != null && !conditions.isEmpty()) {
                kafkaMirrorMaker2Status.addConditions(conditions);
            }
            kafkaMirrorMaker2Status.setReplicas(mirrorMaker2Cluster.getReplicas());
            kafkaMirrorMaker2Status.setLabelSelector(mirrorMaker2Cluster.getSelectorLabels().toSelectorString());
            if (reconciliationResult.succeeded()) {
                createOrUpdatePromise.complete(kafkaMirrorMaker2Status);
            } else {
                createOrUpdatePromise.fail(new ReconciliationException(kafkaMirrorMaker2Status, reconciliationResult.cause()));
            }
        });
    return createOrUpdatePromise.future();
}
Also used : ReconciliationException(io.strimzi.operator.common.ReconciliationException) KafkaMirrorMaker2Builder(io.strimzi.api.kafka.model.KafkaMirrorMaker2Builder) Annotations(io.strimzi.operator.common.Annotations) ANNO_STRIMZI_IO_RESTART_CONNECTOR_TASK_PATTERN_TASK(io.strimzi.operator.common.Annotations.ANNO_STRIMZI_IO_RESTART_CONNECTOR_TASK_PATTERN_TASK) Resource(io.fabric8.kubernetes.client.dsl.Resource) Matcher(java.util.regex.Matcher) Map(java.util.Map) KafkaConnectorSpec(io.strimzi.api.kafka.model.KafkaConnectorSpec) ReconciliationException(io.strimzi.operator.common.ReconciliationException) ResourceOperatorSupplier(io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier) AbstractModel(io.strimzi.operator.cluster.model.AbstractModel) ModelUtils(io.strimzi.operator.cluster.model.ModelUtils) StatusUtils(io.strimzi.operator.common.operator.resource.StatusUtils) ANNO_STRIMZI_IO_RESTART_CONNECTOR(io.strimzi.operator.common.Annotations.ANNO_STRIMZI_IO_RESTART_CONNECTOR) CertSecretSource(io.strimzi.api.kafka.model.CertSecretSource) KafkaMirrorMaker2ConnectorSpec(io.strimzi.api.kafka.model.KafkaMirrorMaker2ConnectorSpec) KafkaMirrorMaker2Spec(io.strimzi.api.kafka.model.KafkaMirrorMaker2Spec) DeploymentOperator(io.strimzi.operator.common.operator.resource.DeploymentOperator) KafkaVersion(io.strimzi.operator.cluster.model.KafkaVersion) Collectors(java.util.stream.Collectors) Future(io.vertx.core.Future) Serializable(java.io.Serializable) KafkaMirrorMaker2Resources(io.strimzi.api.kafka.model.KafkaMirrorMaker2Resources) KafkaClientAuthenticationScramSha256(io.strimzi.api.kafka.model.authentication.KafkaClientAuthenticationScramSha256) KafkaMirrorMaker2List(io.strimzi.api.kafka.KafkaMirrorMaker2List) KafkaMirrorMaker2(io.strimzi.api.kafka.model.KafkaMirrorMaker2) KafkaMirrorMaker2Status(io.strimzi.api.kafka.model.status.KafkaMirrorMaker2Status) List(java.util.List) KafkaConnectCluster(io.strimzi.operator.cluster.model.KafkaConnectCluster) Stream(java.util.stream.Stream) Condition(io.strimzi.api.kafka.model.status.Condition) PlatformFeaturesAvailability(io.strimzi.operator.PlatformFeaturesAvailability) ANNO_STRIMZI_IO_RESTART_CONNECTOR_TASK_PATTERN(io.strimzi.operator.common.Annotations.ANNO_STRIMZI_IO_RESTART_CONNECTOR_TASK_PATTERN) ClusterOperatorConfig(io.strimzi.operator.cluster.ClusterOperatorConfig) CustomResource(io.fabric8.kubernetes.client.CustomResource) ClusterRoleBinding(io.fabric8.kubernetes.api.model.rbac.ClusterRoleBinding) AuthenticationUtils(io.strimzi.operator.cluster.model.AuthenticationUtils) HashMap(java.util.HashMap) KafkaClientAuthenticationPlain(io.strimzi.api.kafka.model.authentication.KafkaClientAuthenticationPlain) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) ANNO_STRIMZI_IO_RESTART_CONNECTOR_TASK(io.strimzi.operator.common.Annotations.ANNO_STRIMZI_IO_RESTART_CONNECTOR_TASK) CompositeFuture(io.vertx.core.CompositeFuture) KafkaClientAuthenticationOAuth(io.strimzi.api.kafka.model.authentication.KafkaClientAuthenticationOAuth) KafkaClientAuthenticationTls(io.strimzi.api.kafka.model.authentication.KafkaClientAuthenticationTls) SaslConfigs(org.apache.kafka.common.config.SaslConfigs) SslConfigs(org.apache.kafka.common.config.SslConfigs) ReconciliationLogger(io.strimzi.operator.common.ReconciliationLogger) Collections.emptyMap(java.util.Collections.emptyMap) KafkaMirrorMaker2ClusterSpec(io.strimzi.api.kafka.model.KafkaMirrorMaker2ClusterSpec) InvalidResourceException(io.strimzi.operator.cluster.model.InvalidResourceException) AdminClientConfig(org.apache.kafka.clients.admin.AdminClientConfig) Promise(io.vertx.core.Promise) Vertx(io.vertx.core.Vertx) KafkaMirrorMaker2MirrorSpec(io.strimzi.api.kafka.model.KafkaMirrorMaker2MirrorSpec) KafkaMirrorMaker2Cluster(io.strimzi.operator.cluster.model.KafkaMirrorMaker2Cluster) ANNO_STRIMZI_IO_RESTART_CONNECTOR_TASK_PATTERN_CONNECTOR(io.strimzi.operator.common.Annotations.ANNO_STRIMZI_IO_RESTART_CONNECTOR_TASK_PATTERN_CONNECTOR) KafkaClientAuthenticationScramSha512(io.strimzi.api.kafka.model.authentication.KafkaClientAuthenticationScramSha512) Reconciliation(io.strimzi.operator.common.Reconciliation) Util(io.strimzi.operator.common.Util) KubernetesClient(io.fabric8.kubernetes.client.KubernetesClient) Comparator(java.util.Comparator) Collections(java.util.Collections) Deployment(io.fabric8.kubernetes.api.model.apps.Deployment) KafkaConnectorSpecBuilder(io.strimzi.api.kafka.model.KafkaConnectorSpecBuilder) Condition(io.strimzi.api.kafka.model.status.Condition) HashMap(java.util.HashMap) ClusterRoleBinding(io.fabric8.kubernetes.api.model.rbac.ClusterRoleBinding) KafkaMirrorMaker2Cluster(io.strimzi.operator.cluster.model.KafkaMirrorMaker2Cluster) KafkaMirrorMaker2Status(io.strimzi.api.kafka.model.status.KafkaMirrorMaker2Status) Deployment(io.fabric8.kubernetes.api.model.apps.Deployment) AtomicReference(java.util.concurrent.atomic.AtomicReference) ReconciliationException(io.strimzi.operator.common.ReconciliationException) InvalidResourceException(io.strimzi.operator.cluster.model.InvalidResourceException)
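The error handling at the top of createOrUpdate follows a pattern worth calling out: the partially filled status, including any conditions set so far, is wrapped into a ReconciliationException so the caller can still patch the custom resource's status before surfacing the failure. A minimal sketch of that pattern, using the same two-argument ReconciliationException constructor seen above; the helper class name is hypothetical.

import io.strimzi.api.kafka.model.status.KafkaMirrorMaker2Status;
import io.strimzi.operator.common.ReconciliationException;
import io.vertx.core.Future;

public class StatusFailureSketch {

    public static Future<KafkaMirrorMaker2Status> fail(KafkaMirrorMaker2Status status, Throwable cause) {
        // The status carries whatever conditions were gathered before the failure;
        // wrapping it keeps both the status and the cause available to the reconcile loop.
        return Future.failedFuture(new ReconciliationException(status, cause));
    }
}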

Example 45 with Condition

Use of io.strimzi.api.kafka.model.status.Condition in project strimzi-kafka-operator by strimzi.

From class KafkaRebalanceAssemblyOperator, method buildRebalanceStatus.

private MapAndStatus<ConfigMap, KafkaRebalanceStatus> buildRebalanceStatus(KafkaRebalance kafkaRebalance, String sessionID, KafkaRebalanceState cruiseControlState, JsonObject proposalJson, Set<Condition> validation) {
    List<Condition> conditions = new ArrayList<>();
    conditions.add(StatusUtils.buildRebalanceCondition(cruiseControlState.toString()));
    conditions.addAll(validation);
    MapAndStatus<ConfigMap, Map<String, Object>> optimizationProposalMapAndStatus = processOptimizationProposal(kafkaRebalance, proposalJson);
    return new MapAndStatus<>(optimizationProposalMapAndStatus.getLoadMap(), new KafkaRebalanceStatusBuilder().withSessionId(sessionID).withConditions(conditions).withOptimizationResult(optimizationProposalMapAndStatus.getStatus()).build());
}
Also used : Condition(io.strimzi.api.kafka.model.status.Condition) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap) ArrayList(java.util.ArrayList) KafkaRebalanceStatusBuilder(io.strimzi.api.kafka.model.status.KafkaRebalanceStatusBuilder) Map(java.util.Map) HashMap(java.util.HashMap) ConfigMap(io.fabric8.kubernetes.api.model.ConfigMap)
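buildRebalanceStatus merges one condition describing the rebalance state with the validation warnings. The sketch below reproduces that merge by hand with ConditionBuilder instead of StatusUtils.buildRebalanceCondition; the "ProposalReady" example value and the helper class name are assumptions for illustration.

import io.strimzi.api.kafka.model.status.Condition;
import io.strimzi.api.kafka.model.status.ConditionBuilder;

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

public class RebalanceConditionsSketch {

    public static List<Condition> merge(String state, Set<Condition> validationWarnings) {
        List<Condition> conditions = new ArrayList<>();
        // One condition for the current KafkaRebalance state, e.g. "ProposalReady" (assumed example value).
        conditions.add(new ConditionBuilder()
            .withType(state)
            .withStatus("True")
            .build());
        // Plus any warnings produced while validating the KafkaRebalance spec.
        conditions.addAll(validationWarnings);
        return conditions;
    }
}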

Aggregations

Condition (io.strimzi.api.kafka.model.status.Condition): 150
Test (org.junit.jupiter.api.Test): 70
Kafka (io.strimzi.api.kafka.model.Kafka): 61
HashMap (java.util.HashMap): 49
EphemeralStorage (io.strimzi.api.kafka.model.storage.EphemeralStorage): 42
MatcherAssert.assertThat (org.hamcrest.MatcherAssert.assertThat): 38
Collections (java.util.Collections): 37
Vertx (io.vertx.core.Vertx): 36
Map (java.util.Map): 36
KafkaBuilder (io.strimzi.api.kafka.model.KafkaBuilder): 34
Labels (io.strimzi.operator.common.model.Labels): 34
Future (io.vertx.core.Future): 34
BeforeAll (org.junit.jupiter.api.BeforeAll): 32
Reconciliation (io.strimzi.operator.common.Reconciliation): 26
Checkpoint (io.vertx.junit5.Checkpoint): 26
VertxExtension (io.vertx.junit5.VertxExtension): 26
VertxTestContext (io.vertx.junit5.VertxTestContext): 26
AfterAll (org.junit.jupiter.api.AfterAll): 26
ExtendWith (org.junit.jupiter.api.extension.ExtendWith): 26
KubernetesClient (io.fabric8.kubernetes.client.KubernetesClient): 25