
Example 6 with Status

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

the class StrimziManager method getStrimziVersions.

/**
 * @return the list of installed Strimzi versions with their readiness status. The list will
 * not include versions that may be removed, nor non-common versions that are pending
 * installation; common versions are those found in both the old and the new CSV.
 */
public List<StrimziVersionStatus> getStrimziVersions() {
    Map<String, StrimziVersionStatus> nextVersions = new HashMap<>(strimziPendingInstallationVersions);
    Map<String, StrimziVersionStatus> result = this.strimziVersions;
    // if there are pending versions, merge with the current map, keeping only the next versions that are still valid
    if (!nextVersions.isEmpty()) {
        result = nextVersions;
        for (Iterator<Map.Entry<String, StrimziVersionStatus>> iter = result.entrySet().iterator(); iter.hasNext(); ) {
            Map.Entry<String, StrimziVersionStatus> entry = iter.next();
            StrimziVersionStatus live = this.strimziVersions.get(entry.getKey());
            if (live != null) {
                entry.setValue(live);
            } else if (entry.getValue() == EMPTY_STATUS) {
                iter.remove();
            }
        }
    }
    return new ArrayList<>(result.values());
}
Also used : StrimziVersionStatus(org.bf2.operator.resources.v1alpha1.StrimziVersionStatus) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ArrayList(java.util.ArrayList) Map(java.util.Map)
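
For illustration, a minimal sketch of how a caller might consume this list, assuming StrimziVersionStatus exposes getVersion() and isReady() accessors (an assumption, they are not shown above):

    List<StrimziVersionStatus> versions = strimziManager.getStrimziVersions();
    // collect only the versions reported as ready, e.g. to pick an upgrade target
    List<String> readyVersions = versions.stream()
            .filter(StrimziVersionStatus::isReady)     // assumed accessor
            .map(StrimziVersionStatus::getVersion)     // assumed accessor
            .collect(Collectors.toList());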

Example 7 with Status

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

the class Exec method exec.

/**
 * Executes an external command.
 *
 * @param input       input provided to the executed command (may be null)
 * @param command     the command and its arguments
 * @param envVars     additional environment variables for the command
 * @param timeout     timeout for the execution
 * @param logToOutput whether to log the command output
 * @param throwErrors look for errors in the output and throw an exception if true
 * @return execution results
 */
public static ExecResult exec(String input, List<String> command, Set<EnvVar> envVars, int timeout, boolean logToOutput, boolean throwErrors) {
    int ret = 1;
    ExecResult execResult;
    try {
        Exec executor = new Exec();
        LOGGER.info("Command: {}", String.join(" ", command));
        ret = executor.execute(input, command, envVars, timeout);
        synchronized (LOCK) {
            if (logToOutput) {
                LOGGER.info("RETURN code: {}", ret);
                if (!executor.out().isEmpty()) {
                    LOGGER.info("======STDOUT START=======");
                    LOGGER.info("{}", cutExecutorLog(executor.out()));
                    LOGGER.info("======STDOUT END======");
                }
                if (!executor.err().isEmpty()) {
                    LOGGER.info("======STDERR START=======");
                    LOGGER.info("{}", cutExecutorLog(executor.err()));
                    LOGGER.info("======STDERR END======");
                }
            }
        }
        execResult = new ExecResult(ret, executor.out(), executor.err());
        if (throwErrors && ret != 0) {
            String msg = "`" + join(" ", command) + "` got status code " + ret + " and stderr:\n------\n" + executor.stdErr + "\n------\nand stdout:\n------\n" + executor.stdOut + "\n------";
            Matcher matcher = ERROR_PATTERN.matcher(executor.err());
            KubeClusterException t = null;
            if (matcher.find()) {
                switch(matcher.group(1)) {
                    case "NotFound":
                        t = new KubeClusterException.NotFound(execResult, msg);
                        break;
                    case "AlreadyExists":
                        t = new KubeClusterException.AlreadyExists(execResult, msg);
                        break;
                    default:
                        break;
                }
            }
            matcher = INVALID_PATTERN.matcher(executor.err());
            if (matcher.find()) {
                t = new KubeClusterException.InvalidResource(execResult, msg);
            }
            if (t == null) {
                t = new KubeClusterException(execResult, msg);
            }
            throw t;
        }
        return new ExecResult(ret, executor.out(), executor.err());
    } catch (IOException | ExecutionException e) {
        throw new KubeClusterException(e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new KubeClusterException(e);
    }
}
Also used : Matcher(java.util.regex.Matcher) IOException(java.io.IOException) KubeClusterException(org.bf2.test.k8s.KubeClusterException) ExecutionException(java.util.concurrent.ExecutionException)
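
A hedged usage sketch, based only on the signature above (the timeout unit is whatever Exec.execute expects, and the EnvVar set is left empty so no particular EnvVar type is assumed):

    // run `kubectl get pods`, log the output, and throw a KubeClusterException subtype on a non-zero exit
    ExecResult result = Exec.exec(
            null,                                  // no stdin input
            List.of("kubectl", "get", "pods"),
            Set.of(),                              // no extra environment variables
            60,                                    // timeout, unit as interpreted by Exec.execute
            true,                                  // logToOutput
            true);                                 // throwErrors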

Example 8 with Status

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

the class ManagedKafkaAgentController method buildStatus.

/**
 * TODO: this needs to be replaced with actual metrics
 * @return the ManagedKafkaAgentStatus built for the given agent resource
 */
private ManagedKafkaAgentStatus buildStatus(ManagedKafkaAgent resource) {
    ManagedKafkaAgentStatus status = resource.getStatus();
    ManagedKafkaCondition readyCondition = null;
    if (status != null) {
        readyCondition = ConditionUtils.findManagedKafkaCondition(status.getConditions(), Type.Ready).orElse(null);
    }
    List<StrimziVersionStatus> strimziVersions = this.strimziManager.getStrimziVersions();
    log.debugf("Strimzi versions %s", strimziVersions);
    // consider the fleetshard operator ready when observability is running and a Strimzi bundle is installed (aka at least one available version)
    Status statusValue = this.observabilityManager.isObservabilityRunning() && !strimziVersions.isEmpty() ? ManagedKafkaCondition.Status.True : ManagedKafkaCondition.Status.False;
    if (readyCondition == null) {
        readyCondition = ConditionUtils.buildCondition(ManagedKafkaCondition.Type.Ready, statusValue);
    } else {
        ConditionUtils.updateConditionStatus(readyCondition, statusValue, null, null);
    }
    ClusterCapacity total = new ClusterCapacityBuilder().withConnections(10000).withDataRetentionSize(Quantity.parse("40Gi")).withIngressEgressThroughputPerSec(Quantity.parse("40Gi")).withPartitions(10000).build();
    ClusterCapacity remaining = new ClusterCapacityBuilder().withConnections(10000).withDataRetentionSize(Quantity.parse("40Gi")).withIngressEgressThroughputPerSec(Quantity.parse("40Gi")).withPartitions(10000).build();
    ClusterCapacity delta = new ClusterCapacityBuilder().withConnections(10000).withDataRetentionSize(Quantity.parse("40Gi")).withIngressEgressThroughputPerSec(Quantity.parse("40Gi")).withPartitions(10000).build();
    NodeCounts nodeInfo = new NodeCountsBuilder().withCeiling(0).withCurrent(0).withCurrentWorkLoadMinimum(0).withFloor(0).build();
    ClusterResizeInfo resize = new ClusterResizeInfoBuilder().withDelta(delta).withNodeDelta(3).build();
    return new ManagedKafkaAgentStatusBuilder().withConditions(status == null ? Arrays.asList(readyCondition) : status.getConditions()).withTotal(total).withRemaining(remaining).withNodeInfo(nodeInfo).withResizeInfo(resize).withUpdatedTimestamp(ConditionUtils.iso8601Now()).withStrimzi(strimziVersions).build();
}
Also used : Status(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status) StrimziVersionStatus(org.bf2.operator.resources.v1alpha1.StrimziVersionStatus) ManagedKafkaAgentStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus) ManagedKafkaAgentStatusBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatusBuilder) ClusterResizeInfoBuilder(org.bf2.operator.resources.v1alpha1.ClusterResizeInfoBuilder) ClusterCapacity(org.bf2.operator.resources.v1alpha1.ClusterCapacity) ClusterCapacityBuilder(org.bf2.operator.resources.v1alpha1.ClusterCapacityBuilder) NodeCountsBuilder(org.bf2.operator.resources.v1alpha1.NodeCountsBuilder) NodeCounts(org.bf2.operator.resources.v1alpha1.NodeCounts) ManagedKafkaCondition(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition) ClusterResizeInfo(org.bf2.operator.resources.v1alpha1.ClusterResizeInfo)
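
A minimal sketch of reading the readiness back out of the built status, mirroring the condition handling above; it assumes ManagedKafkaCondition.getStatus() returns the condition status as a String:

    ManagedKafkaAgentStatus agentStatus = buildStatus(resource);   // the status built above
    boolean ready = ConditionUtils.findManagedKafkaCondition(agentStatus.getConditions(), ManagedKafkaCondition.Type.Ready)
            // compare against the enum name; getStatus() returning a String is an assumption
            .map(condition -> ManagedKafkaCondition.Status.True.name().equals(condition.getStatus()))
            .orElse(false);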

Example 9 with Status

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

the class OperatorST method testResizeAndCapacity.

@ParallelTest
void testResizeAndCapacity(ExtensionContext extensionContext) throws Exception {
    String mkAppName = "mk-test-resize-capacity";
    LOGGER.info("Create namespace");
    resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
    LOGGER.info("Create managedkafka");
    ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, null, latestStrimziVersion, latestKafkaVersion);
    Quantity quantity = Quantity.parse("100Gi");
    // for values below 270Gi, the logic reports a slightly larger value
    Quantity reportedQuantity = Quantity.parse("103Gi");
    mk.getSpec().getCapacity().setMaxDataRetentionSize(quantity);
    mk = resourceManager.createResource(extensionContext, mk);
    Resource<ManagedKafka> mkResource = kube.client().resources(ManagedKafka.class).inNamespace(mk.getMetadata().getNamespace()).withName(mk.getMetadata().getName());
    assertEquals(reportedQuantity, mk.getStatus().getCapacity().getMaxDataRetentionSize());
    LOGGER.info("Trying to shrink");
    mk.getSpec().getCapacity().setMaxDataRetentionSize(Quantity.parse("50Gi"));
    mk = mkResource.createOrReplace(mk);
    String currentVersion = mk.getMetadata().getResourceVersion();
    // wait until the status is updated
    mk = mkResource.waitUntilCondition(m -> !Objects.equals(currentVersion, m.getMetadata().getResourceVersion()), 2, TimeUnit.MINUTES);
    // should be the same, as we can't resize down
    assertEquals(reportedQuantity, mk.getStatus().getCapacity().getMaxDataRetentionSize());
    LOGGER.info("Trying to grow");
    mk.getSpec().getCapacity().setMaxDataRetentionSize(Quantity.parse("200Gi"));
    mk = mkResource.createOrReplace(mk);
    // should grow - again the size is a little off
    Quantity endReportedQuantity = Quantity.parse("202Gi");
    mk = mkResource.waitUntilCondition(m -> endReportedQuantity.equals(m.getStatus().getCapacity().getMaxDataRetentionSize()), 5, TimeUnit.MINUTES);
}
Also used : Quantity(io.fabric8.kubernetes.api.model.Quantity) StrimziOperatorManager(org.bf2.systemtest.operator.StrimziOperatorManager) SyncApiClient(org.bf2.systemtest.api.sync.SyncApiClient) SequentialTest(org.bf2.systemtest.framework.SequentialTest) ManagedKafkaAgentBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentBuilder) CompletableFuture(java.util.concurrent.CompletableFuture) ManagedKafkaAgentSpecBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentSpecBuilder) ExtensionContext(org.junit.jupiter.api.extension.ExtensionContext) AfterAll(org.junit.jupiter.api.AfterAll) Resource(io.fabric8.kubernetes.client.dsl.Resource) AbstractST(org.bf2.systemtest.integration.AbstractST) BeforeAll(org.junit.jupiter.api.BeforeAll) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) ManagedKafkaAgentResourceClient(org.bf2.common.ManagedKafkaAgentResourceClient) FleetShardOperatorManager(org.bf2.systemtest.operator.FleetShardOperatorManager) ManagedKafkaResourceType(org.bf2.systemtest.framework.resource.ManagedKafkaResourceType) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) Logger(org.apache.logging.log4j.Logger) ManagedKafkaCondition(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) ParallelTest(org.bf2.systemtest.framework.ParallelTest) LogManager(org.apache.logging.log4j.LogManager) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka)
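
The same waitUntilCondition pattern works for any status field; a hedged sketch that waits for the Ready condition instead of the reported capacity (the condition accessor names are assumptions):

    mk = mkResource.waitUntilCondition(m -> m.getStatus() != null
            && m.getStatus().getConditions() != null
            && m.getStatus().getConditions().stream()
                    // getType()/getStatus() returning plain Strings is an assumption
                    .anyMatch(c -> "Ready".equals(c.getType()) && "True".equals(c.getStatus())),
            5, TimeUnit.MINUTES);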

Example 10 with Status

use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

the class ControlPlane method updateAgentStatus.

private void updateAgentStatus() {
    log.debug("Updating agent status");
    executorService.execute(() -> {
        ManagedKafkaAgent localManagedKafkaAgent = localLookup.getLocalManagedKafkaAgent();
        if (localManagedKafkaAgent == null) {
            return;
        }
        ManagedKafkaAgentStatus status = localManagedKafkaAgent.getStatus();
        if (status == null) {
            // no status to report yet; the control plane is not yet relying on this sync as a heartbeat, so skip the update
            return;
        }
        controlPlaneClient.updateStatus(id, status);
    });
}
Also used : ManagedKafkaAgent(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgent) ManagedKafkaAgentStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus)
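
As a hypothetical sketch only (no scheduler is shown in this example), the same method could be driven by a periodic resync so the control plane keeps receiving the latest agent status:

    // hypothetical periodic resync; the interval and scheduler are illustrative, not part of the example above
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    scheduler.scheduleAtFixedRate(this::updateAgentStatus, 0, 60, TimeUnit.SECONDS);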

Aggregations

Test (org.junit.jupiter.api.Test) 14
ManagedKafka (org.bf2.operator.resources.v1alpha1.ManagedKafka) 12
Objects (java.util.Objects) 11
QuarkusTest (io.quarkus.test.junit.QuarkusTest) 9
ManagedConnectorBuilder (org.bf2.cos.fleetshard.api.ManagedConnectorBuilder) 7
ManagedConnectorSpecBuilder (org.bf2.cos.fleetshard.api.ManagedConnectorSpecBuilder) 7
Map (java.util.Map) 6
Inject (javax.inject.Inject) 6
OperatorSelectorBuilder (org.bf2.cos.fleetshard.api.OperatorSelectorBuilder) 6
RegistryData (org.bf2.srs.fleetmanager.storage.sqlPanacheImpl.model.RegistryData) 6
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest) 6
ObjectMetaBuilder (io.fabric8.kubernetes.api.model.ObjectMetaBuilder) 5
List (java.util.List) 5
ConnectorDeploymentStatus (org.bf2.cos.fleet.manager.model.ConnectorDeploymentStatus) 5
Status (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status) 5
Quantity (io.fabric8.kubernetes.api.model.Quantity) 4
KafkaConnectorBuilder (io.strimzi.api.kafka.model.KafkaConnectorBuilder) 4
Transactional (javax.transaction.Transactional) 4
ManagedKafkaCondition (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition) 4
ManagedKafkaStatus (org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus) 4