Example 46 with Status

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

The class ObservabilityManagerTest, method testObservabilitySecret.

@Test
public void testObservabilitySecret() {
    client.getConfiguration().setNamespace("test");
    ObservabilityConfiguration config = new ObservabilityConfigurationBuilder()
            .withAccessToken("test-token")
            .withChannel("test")
            .withRepository("test-repo")
            .withTag("tag")
            .build();
    String ownerName = "SampleOwner";
    Secret owner = client.secrets().inNamespace(client.getNamespace()).withName(ownerName)
            .create(new SecretBuilder()
                    .withNewMetadata()
                        .withNamespace(client.getNamespace())
                        .withName(ownerName)
                    .endMetadata()
                    .addToData("key", "value")
                    .build());
    this.observabilityManager.createOrUpdateObservabilitySecret(config, owner);
    // let's call the event handler
    Secret secret = observabilityManager.observabilitySecretResource().get();
    assertNotNull(secret);
    // the mock InformerManager should be updated immediately, but it should
    // not be seen as running
    assertNotNull(observabilityManager.cachedObservabilitySecret());
    assertFalse(observabilityManager.isObservabilityRunning());
    assertFalse(secret.getMetadata().getOwnerReferences().isEmpty());
    ObservabilityConfiguration secretConfig = new ObservabilityConfigurationBuilder()
            .withAccessToken(new String(decoder.decode(secret.getData().get(ObservabilityManager.OBSERVABILITY_ACCESS_TOKEN))))
            .withChannel(new String(decoder.decode(secret.getData().get(ObservabilityManager.OBSERVABILITY_CHANNEL))))
            .withTag(new String(decoder.decode(secret.getData().get(ObservabilityManager.OBSERVABILITY_TAG))))
            .withRepository(new String(decoder.decode(secret.getData().get(ObservabilityManager.OBSERVABILITY_REPOSITORY))))
            .build();
    // secret verification
    assertEquals(secretConfig, config);
    assertEquals("observability-operator", secret.getMetadata().getLabels().get("configures"));
    // status verification; Informers do not run in the test framework, so verify directly
    secret = ObservabilityManager.createObservabilitySecretBuilder(client.getNamespace(), config)
            .editMetadata()
                .addToAnnotations(ObservabilityManager.OBSERVABILITY_OPERATOR_STATUS, ObservabilityManager.ACCEPTED)
            .endMetadata()
            .build();
    observabilityManager.observabilitySecretResource().createOrReplace(secret);
    secret = observabilityManager.observabilitySecretResource().get();
    assertTrue(ObservabilityManager.isObservabilityStatusAccepted(secret));
    // repeat the update as a no-op and make sure the accepted flag is not flipped back
    this.observabilityManager.createOrUpdateObservabilitySecret(config, owner);
    secret = observabilityManager.observabilitySecretResource().get();
    assertTrue(ObservabilityManager.isObservabilityStatusAccepted(secret));
}
Also used : Secret(io.fabric8.kubernetes.api.model.Secret) SecretBuilder(io.fabric8.kubernetes.api.model.SecretBuilder) ObservabilityConfiguration(org.bf2.operator.resources.v1alpha1.ObservabilityConfiguration) ObservabilityConfigurationBuilder(org.bf2.operator.resources.v1alpha1.ObservabilityConfigurationBuilder) QuarkusTest(io.quarkus.test.junit.QuarkusTest) Test(org.junit.jupiter.api.Test)
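
The acceptance check exercised above reads the operator's reply off the secret itself. Below is a minimal sketch of what such a check could look like, assuming only what the test shows (the OBSERVABILITY_OPERATOR_STATUS annotation key and the ACCEPTED constant); the real ObservabilityManager.isObservabilityStatusAccepted may be implemented differently.

import io.fabric8.kubernetes.api.model.Secret;

// Hedged sketch, not the project's implementation: a secret counts as accepted
// once the observability operator has annotated it with the ACCEPTED status.
static boolean statusAccepted(Secret secret) {
    if (secret == null || secret.getMetadata() == null || secret.getMetadata().getAnnotations() == null) {
        return false;
    }
    return ObservabilityManager.ACCEPTED.equals(
            secret.getMetadata().getAnnotations().get(ObservabilityManager.OBSERVABILITY_OPERATOR_STATUS));
}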

Example 47 with Status

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

The class OperatorST, method deploy.

@BeforeAll
void deploy() throws Exception {
    strimziOperatorManager = new OlmBasedStrimziOperatorManager(kube, StrimziOperatorManager.OPERATOR_NS);
    CompletableFuture.allOf(strimziOperatorManager.deployStrimziOperator(), FleetShardOperatorManager.deployFleetShardOperator(kube)).join();
    // since sync is not installed, manually create the agent resource
    var agentResource = kube.client().resource(new ManagedKafkaAgentBuilder()
            .withNewMetadata()
                .withName(ManagedKafkaAgentResourceClient.RESOURCE_NAME)
                .withNamespace(FleetShardOperatorManager.OPERATOR_NS)
            .endMetadata()
            .withSpec(new ManagedKafkaAgentSpecBuilder()
                    .withNewObservability()
                        .withAccessToken("")
                        .withChannel("")
                        .withRepository("")
                        .withTag("")
                    .endObservability()
                    .build())
            .build());
    agentResource.createOrReplace();
    // the operator will update the status after a while
    strimziVersions = SyncApiClient.getSortedAvailableStrimziVersions(() -> agentResource.fromServer().get().getStatus()).collect(Collectors.toList());
    assertTrue(strimziVersions.size() > 1);
    latestStrimziVersion = strimziVersions.get(strimziVersions.size() - 1);
    latestKafkaVersion = SyncApiClient.getLatestAvailableKafkaVersion(() -> agentResource.fromServer().get().getStatus(), latestStrimziVersion);
}
Also used : ManagedKafkaAgentSpecBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentSpecBuilder) ManagedKafkaAgentBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentBuilder) BeforeAll(org.junit.jupiter.api.BeforeAll)
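
The comment in deploy() notes that the operator fills in the agent status only after a while, so reading it too early can race the operator. A hedged sketch of guarding the read with Awaitility (which these examples use elsewhere); the timeout and poll interval are arbitrary assumptions, and SyncApiClient may already retry internally, in which case no guard is needed:

import static java.util.concurrent.TimeUnit.SECONDS;
import org.awaitility.Awaitility;

// Hypothetical guard: block until the ManagedKafkaAgent status has been reported
// before deriving the Strimzi and Kafka versions from it.
Awaitility.await("agent status populated")
        .atMost(60, SECONDS)
        .pollInterval(5, SECONDS)
        .until(() -> agentResource.fromServer().get().getStatus() != null);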

Example 48 with Status

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

The class ManagedKafkaSync, method syncKafkaClusters.

/**
 * Update the local state based upon the remote ManagedKafkas.
 * The strategy here is to take a pass over the list and find any deferred work,
 * then execute that deferred work using the {@link ManagedExecutor}, but with
 * a refresh of the state to ensure we're still acting appropriately.
 */
@Timed(value = "sync.poll", extraTags = { "resource", "ManagedKafka" }, description = "The time spent processing polling calls")
@Counted(value = "sync.poll", extraTags = { "resource", "ManagedKafka" }, description = "The number of polling calls")
public void syncKafkaClusters() {
    Map<String, ManagedKafka> remotes = new HashMap<>();
    for (ManagedKafka remoteManagedKafka : controlPlane.getKafkaClusters()) {
        Objects.requireNonNull(remoteManagedKafka.getId());
        Objects.requireNonNull(remoteManagedKafka.getMetadata().getNamespace());
        remotes.put(ControlPlane.managedKafkaKey(remoteManagedKafka), remoteManagedKafka);
        ManagedKafkaSpec remoteSpec = remoteManagedKafka.getSpec();
        Objects.requireNonNull(remoteSpec);
        String localKey = Cache.namespaceKeyFunc(remoteManagedKafka.getMetadata().getNamespace(), remoteManagedKafka.getMetadata().getName());
        ManagedKafka existing = lookup.getLocalManagedKafka(localKey);
        if (existing == null) {
            if (!remoteSpec.isDeleted()) {
                reconcileAsync(ControlPlane.managedKafkaKey(remoteManagedKafka), localKey);
            } else {
                // we've successfully removed locally, but control plane is not aware
                // we need to send another status update to let them know
                ManagedKafkaStatusBuilder statusBuilder = new ManagedKafkaStatusBuilder();
                statusBuilder.withConditions(ConditionUtils.buildCondition(Type.Ready, Status.False).reason(Reason.Deleted));
                // fire and forget the async call - if it fails, we'll retry on the next poll
                controlPlane.updateKafkaClusterStatus(() -> {
                    return Map.of(remoteManagedKafka.getId(), statusBuilder.build());
                });
            }
        } else {
            final String localNamespace = existing.getMetadata().getNamespace();
            final String managedKafkaId = existing.getMetadata().getAnnotations() == null ? null : existing.getMetadata().getAnnotations().get(MANAGEDKAFKA_ID_LABEL);
            Namespace n = kubeClient.namespaces().withName(localNamespace).get();
            if (n != null) {
                String namespaceLabel = Optional.ofNullable(n.getMetadata().getLabels()).map(m -> m.get(MANAGEDKAFKA_ID_NAMESPACE_LABEL)).orElse("");
                if (managedKafkaId != null && !namespaceLabel.equals(managedKafkaId)) {
                    kubeClient.namespaces().withName(localNamespace).edit(namespace -> new NamespaceBuilder(namespace)
                            .editMetadata()
                                .addToLabels(MANAGEDKAFKA_ID_NAMESPACE_LABEL, managedKafkaId)
                            .endMetadata()
                            .build());
                }
            }
            if (specChanged(remoteSpec, existing) || !Objects.equals(existing.getPlacementId(), remoteManagedKafka.getPlacementId())) {
                reconcileAsync(ControlPlane.managedKafkaKey(remoteManagedKafka), localKey);
            }
        }
    }
    // process final removals
    for (ManagedKafka local : lookup.getLocalManagedKafkas()) {
        if (remotes.get(ControlPlane.managedKafkaKey(local)) != null || !deleteAllowed(local)) {
            continue;
        }
        reconcileAsync(null, Cache.metaNamespaceKeyFunc(local));
    }
}
Also used : ManagedKafkaResourceClient(org.bf2.common.ManagedKafkaResourceClient) HttpURLConnection(java.net.HttpURLConnection) Status(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status) Timed(io.micrometer.core.annotation.Timed) Logger(org.jboss.logging.Logger) Cache(io.fabric8.kubernetes.client.informers.cache.Cache) HashMap(java.util.HashMap) Inject(javax.inject.Inject) ControlPlane(org.bf2.sync.controlplane.ControlPlane) Map(java.util.Map) ExecutorService(java.util.concurrent.ExecutorService) KubernetesClientException(io.fabric8.kubernetes.client.KubernetesClientException) LocalLookup(org.bf2.sync.informer.LocalLookup) Type(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type) Scheduled(io.quarkus.scheduler.Scheduled) OperandUtils(org.bf2.common.OperandUtils) NDC(org.jboss.logging.NDC) ManagedKafkaStatusBuilder(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatusBuilder) ConditionUtils(org.bf2.common.ConditionUtils) Reason(org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Reason) Objects(java.util.Objects) Counted(io.micrometer.core.annotation.Counted) Namespace(io.fabric8.kubernetes.api.model.Namespace) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) KubernetesClient(io.fabric8.kubernetes.client.KubernetesClient) ManagedExecutor(org.eclipse.microprofile.context.ManagedExecutor) Optional(java.util.Optional) ApplicationScoped(javax.enterprise.context.ApplicationScoped) ManagedKafkaSpec(org.bf2.operator.resources.v1alpha1.ManagedKafkaSpec) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) ConcurrentExecution(io.quarkus.scheduler.Scheduled.ConcurrentExecution)
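
The Javadoc above describes the pattern only in prose: defer work found during the polling pass, then run it on the ManagedExecutor against freshly re-read state. A minimal sketch of that shape, assuming only the LocalLookup and ManagedExecutor collaborators shown in the snippet; it is not the project's actual reconcileAsync, and the refreshed-remote step is illustrative.

import org.bf2.operator.resources.v1alpha1.ManagedKafka;
import org.bf2.sync.informer.LocalLookup;
import org.eclipse.microprofile.context.ManagedExecutor;

// Hedged sketch of "defer, then refresh, then act".
class DeferredReconcileSketch {
    private final ManagedExecutor executor;
    private final LocalLookup lookup;

    DeferredReconcileSketch(ManagedExecutor executor, LocalLookup lookup) {
        this.executor = executor;
        this.lookup = lookup;
    }

    void reconcileAsync(String remoteKey, String localKey) {
        executor.execute(() -> {
            // Re-read the local side so we act on current state, not the state
            // captured when this work was deferred during the polling pass.
            ManagedKafka local = lookup.getLocalManagedKafka(localKey);
            // ... re-fetch the remote copy by remoteKey the same way, then create,
            // update, or delete the local resource based on the refreshed pair.
        });
    }
}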

Example 49 with Status

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project srs-fleet-manager by bf2fc6cc711aee1a0c2a.

The class RegistryDeprovisioningIT, method testDeprovisionRegistryBasic.

@Test
void testDeprovisionRegistryBasic() {
    FleetManagerApi.verifyApiIsSecured();
    var alice = new AccountInfo("testDeprovisionRegistry", "alice", false, 10L);
    var registry1 = new RegistryCreate();
    registry1.setName("registry1");
    var createdRegistry1 = FleetManagerApi.createRegistry(registry1, alice);
    assertNotEquals(RegistryStatusValue.failed, createdRegistry1.getStatus());
    Awaitility.await("registry1 available").atMost(30, SECONDS).pollInterval(5, SECONDS).until(() -> {
        var reg = FleetManagerApi.getRegistry(createdRegistry1.getId(), alice);
        return reg.getStatus().equals(RegistryStatusValue.ready);
    });
    Registry registry = FleetManagerApi.getRegistry(createdRegistry1.getId(), alice);
    TenantManagerClient tenantManager = Utils.createTenantManagerClient();
    var internalTenant = tenantManager.getTenant(registry.getId());
    assertEquals(TenantStatusValue.READY, internalTenant.getStatus());
    FleetManagerApi.deleteRegistry(createdRegistry1.getId(), alice);
    // We don't have to wait for the status to be RegistryStatusValueRest.deleting, since that happens almost immediately now.
    Awaitility.await("registry1 deleting initiated").atMost(5, SECONDS).pollInterval(1, SECONDS).until(() -> {
        var tenant1 = tenantManager.getTenant(registry.getId());
        return TenantStatusValue.TO_BE_DELETED.equals(tenant1.getStatus());
    });
    var req = new UpdateRegistryTenantRequest();
    req.setStatus(TenantStatusValue.DELETED);
    tenantManager.updateTenant(registry.getId(), req);
    Awaitility.await("registry1 deleted").atMost(5, SECONDS).pollInterval(1, SECONDS).until(() -> {
        try {
            FleetManagerApi.verifyRegistryNotExists(createdRegistry1.getId(), alice);
            return true;
        } catch (AssertionError ex) {
            return false;
        }
    });
}
Also used : TenantManagerClient(io.apicurio.multitenant.client.TenantManagerClient) Registry(org.bf2.srs.fleetmanager.rest.publicapi.beans.Registry) RegistryCreate(org.bf2.srs.fleetmanager.rest.publicapi.beans.RegistryCreate) AccountInfo(org.bf2.srs.fleetmanager.spi.common.model.AccountInfo) UpdateRegistryTenantRequest(io.apicurio.multitenant.api.datamodel.UpdateRegistryTenantRequest) Test(org.junit.jupiter.api.Test)

Example 50 with Status

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status in project srs-fleet-manager by bf2fc6cc711aee1a0c2a.

The class UsageMetrics, method init.

public synchronized void init() {
    expirationPeriod = Duration.ofSeconds(expirationPeriodSeconds);
    int stagger = 0;
    // Only stagger if the expiration period is at least 1 minute (testing support).
    if (expirationPeriod.compareTo(Duration.ofMinutes(1)) >= 0) {
        stagger = new Random().nextInt(expirationPeriodSeconds) + 1;
        log.debug("Staggering usage metrics cache expiration by {} seconds", stagger);
    }
    nextExpiration = Instant.now().plus(Duration.ofSeconds(stagger));
    for (RegistryStatusValueDto status : RegistryStatusValueDto.values()) {
        Gauge.builder(USAGE_STATISTICS_REGISTRIES_STATUS, () -> {
            Arc.initialize();
            var ctx = Arc.container().requestContext();
            ctx.activate();
            try {
                return getUsageStatisticsCached().getRegistryCountPerStatus().get(status);
            } finally {
                ctx.deactivate();
            }
        }).tags(Tags.of(TAG_USAGE_STATISTICS_STATUS, status.value())).register(metrics);
    }
    for (RegistryInstanceTypeValueDto type : RegistryInstanceTypeValueDto.values()) {
        Gauge.builder(USAGE_STATISTICS_REGISTRIES_TYPE, () -> {
            Arc.initialize();
            var ctx = Arc.container().requestContext();
            ctx.activate();
            try {
                return getUsageStatisticsCached().getRegistryCountPerType().get(type);
            } finally {
                ctx.deactivate();
            }
        }).tags(Tags.of(TAG_USAGE_STATISTICS_TYPE, type.value())).register(metrics);
    }
    Gauge.builder(USAGE_STATISTICS_ACTIVE_USERS, () -> {
        Arc.initialize();
        var ctx = Arc.container().requestContext();
        ctx.activate();
        try {
            return getUsageStatisticsCached().getActiveUserCount();
        } finally {
            ctx.deactivate();
        }
    }).register(metrics);
    Gauge.builder(USAGE_STATISTICS_ACTIVE_ORGANISATIONS, () -> {
        Arc.initialize();
        var ctx = Arc.container().requestContext();
        ctx.activate();
        try {
            return getUsageStatisticsCached().getActiveOrganisationCount();
        } finally {
            ctx.deactivate();
        }
    }).register(metrics);
}
Also used : RegistryStatusValueDto(org.bf2.srs.fleetmanager.rest.service.model.RegistryStatusValueDto) Random(java.util.Random) RegistryInstanceTypeValueDto(org.bf2.srs.fleetmanager.rest.service.model.RegistryInstanceTypeValueDto)
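
Each gauge registered in init() repeats the same request-context dance around its supplier. A hedged sketch (not the project's code) of factoring that into one helper, using only the Arc calls already shown above:

import java.util.function.Supplier;
import io.quarkus.arc.Arc;

// Wraps a supplier so it runs inside an activated request context, mirroring the
// try/finally blocks repeated for every gauge in init().
static Supplier<Number> inRequestContext(Supplier<Number> delegate) {
    return () -> {
        Arc.initialize();
        var ctx = Arc.container().requestContext();
        ctx.activate();
        try {
            return delegate.get();
        } finally {
            ctx.deactivate();
        }
    };
}

With that helper, a registration such as the active-user gauge could read, for example, Gauge.builder(USAGE_STATISTICS_ACTIVE_USERS, inRequestContext(() -> getUsageStatisticsCached().getActiveUserCount())).register(metrics).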

Aggregations

Test (org.junit.jupiter.api.Test): 14
ManagedKafka (org.bf2.operator.resources.v1alpha1.ManagedKafka): 12
Objects (java.util.Objects): 11
QuarkusTest (io.quarkus.test.junit.QuarkusTest): 9
ManagedConnectorBuilder (org.bf2.cos.fleetshard.api.ManagedConnectorBuilder): 7
ManagedConnectorSpecBuilder (org.bf2.cos.fleetshard.api.ManagedConnectorSpecBuilder): 7
Map (java.util.Map): 6
Inject (javax.inject.Inject): 6
OperatorSelectorBuilder (org.bf2.cos.fleetshard.api.OperatorSelectorBuilder): 6
RegistryData (org.bf2.srs.fleetmanager.storage.sqlPanacheImpl.model.RegistryData): 6
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 6
ObjectMetaBuilder (io.fabric8.kubernetes.api.model.ObjectMetaBuilder): 5
List (java.util.List): 5
ConnectorDeploymentStatus (org.bf2.cos.fleet.manager.model.ConnectorDeploymentStatus): 5
Status (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status): 5
Quantity (io.fabric8.kubernetes.api.model.Quantity): 4
KafkaConnectorBuilder (io.strimzi.api.kafka.model.KafkaConnectorBuilder): 4
Transactional (javax.transaction.Transactional): 4
ManagedKafkaCondition (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition): 4
ManagedKafkaStatus (org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus): 4