Search in sources :

Example 51 with ManagedKafka

use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

In the class AbstractKafkaCluster, the method isStrimziUpdating:

/**
 * Reports whether the Strimzi operator backing the given ManagedKafka is
 * currently being updated. An update in progress is signalled by the Kafka
 * resource carrying the Strimzi-update pause-reason annotation while
 * reconciliation of the ManagedKafka is paused.
 *
 * @param managedKafka the ManagedKafka instance to inspect
 * @return {@code true} when a Strimzi update is in progress; {@code false}
 *         otherwise, including when no Kafka resource is cached
 */
public boolean isStrimziUpdating(ManagedKafka managedKafka) {
    Kafka cached = cachedKafka(managedKafka);
    if (cached == null) {
        return false;
    }
    Map<String, String> kafkaAnnotations = cached.getMetadata().getAnnotations();
    if (kafkaAnnotations == null) {
        kafkaAnnotations = Collections.emptyMap();
    }
    return StrimziManager.isPauseReasonStrimziUpdate(kafkaAnnotations)
            && isReconciliationPaused(managedKafka);
}
Also used : Kafka(io.strimzi.api.kafka.model.Kafka) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka)

Example 52 with ManagedKafka

use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

In the class SecuritySecretManager, the method buildSecretFrom:

/**
 * Builds (or updates) a Kubernetes Secret containing the supplied data,
 * base64-encoding every value, and marks the ManagedKafka as its owner.
 *
 * @param name         name to give the Secret
 * @param type         Kubernetes Secret type
 * @param managedKafka owning ManagedKafka; also determines the target namespace
 * @param current      existing Secret to start from, or {@code null} to build fresh
 * @param dataSource   raw (unencoded) key/value entries for the Secret data
 * @return the assembled Secret with the ManagedKafka set as owner
 */
private static Secret buildSecretFrom(String name, String type, ManagedKafka managedKafka, Secret current, Map<String, String> dataSource) {
    SecretBuilder builder;
    if (current == null) {
        builder = new SecretBuilder();
    } else {
        builder = new SecretBuilder(current);
    }
    Map<String, String> encodedData = dataSource.entrySet()
            .stream()
            .collect(Collectors.toMap(Map.Entry::getKey, entry -> encode(entry.getValue())));
    Secret secret = builder
            .editOrNewMetadata()
                .withNamespace(kafkaClusterNamespace(managedKafka))
                .withName(name)
                .withLabels(OperandUtils.getDefaultLabels())
            .endMetadata()
            .withType(type)
            .withData(encodedData)
            .build();
    // Setting the ManagedKafka as owner of the Secret resource is needed
    // by the operator SDK to handle events on the Secret resource properly.
    OperandUtils.setAsOwner(managedKafka, secret);
    return secret;
}
Also used : SecretBuilder(io.fabric8.kubernetes.api.model.SecretBuilder) OperandUtils(org.bf2.common.OperandUtils) MessageDigest(java.security.MessageDigest) ServiceAccount(org.bf2.operator.resources.v1alpha1.ServiceAccount) Collectors(java.util.stream.Collectors) StandardCharsets(java.nio.charset.StandardCharsets) Inject(javax.inject.Inject) Objects(java.util.Objects) Resource(io.fabric8.kubernetes.client.dsl.Resource) Base64(java.util.Base64) List(java.util.List) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) NoSuchAlgorithmException(java.security.NoSuchAlgorithmException) Map(java.util.Map) KubernetesClient(io.fabric8.kubernetes.client.KubernetesClient) Secret(io.fabric8.kubernetes.api.model.Secret) ApplicationScoped(javax.enterprise.context.ApplicationScoped) BigInteger(java.math.BigInteger) SecretBuilder(io.fabric8.kubernetes.api.model.SecretBuilder) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) Secret(io.fabric8.kubernetes.api.model.Secret) Map(java.util.Map)

Example 53 with ManagedKafka

use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

In the class SecuritySecretManager, the method digestSecretsVersions:

/**
 * Computes a stable SHA-1 digest over the UID and resourceVersion of each
 * named Secret, used to detect when any of the Secrets has changed. Secrets
 * that cannot be resolved ({@code cachedOrRemoteSecret} returns null) are
 * silently skipped.
 *
 * @param managedKafka the ManagedKafka whose Secrets are digested
 * @param secretNames  names of the Secrets to include in the digest
 * @return a 40-character, zero-padded, lowercase hex SHA-1 digest
 */
public String digestSecretsVersions(ManagedKafka managedKafka, List<String> secretNames) {
    final MessageDigest secretsDigest;
    try {
        secretsDigest = MessageDigest.getInstance("SHA-1");
    } catch (NoSuchAlgorithmException e) {
        // SHA-1 is mandated on every conforming JRE, so this is unreachable in
        // practice; surface it with context instead of a bare RuntimeException.
        throw new IllegalStateException("SHA-1 MessageDigest not available", e);
    }
    secretNames.stream()
            .map(name -> cachedOrRemoteSecret(managedKafka, name))
            .filter(Objects::nonNull)
            .map(Secret::getMetadata)
            .forEach(secretMetadata -> {
                secretsDigest.update(secretMetadata.getUid().getBytes(StandardCharsets.UTF_8));
                secretsDigest.update(secretMetadata.getResourceVersion().getBytes(StandardCharsets.UTF_8));
            });
    // %040x zero-pads to the full 20-byte (40 hex char) SHA-1 width even when
    // the leading digest bytes are zero.
    return String.format("%040x", new BigInteger(1, secretsDigest.digest()));
}
Also used : OperandUtils(org.bf2.common.OperandUtils) MessageDigest(java.security.MessageDigest) ServiceAccount(org.bf2.operator.resources.v1alpha1.ServiceAccount) Collectors(java.util.stream.Collectors) StandardCharsets(java.nio.charset.StandardCharsets) Inject(javax.inject.Inject) Objects(java.util.Objects) Resource(io.fabric8.kubernetes.client.dsl.Resource) Base64(java.util.Base64) List(java.util.List) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) NoSuchAlgorithmException(java.security.NoSuchAlgorithmException) Map(java.util.Map) KubernetesClient(io.fabric8.kubernetes.client.KubernetesClient) Secret(io.fabric8.kubernetes.api.model.Secret) ApplicationScoped(javax.enterprise.context.ApplicationScoped) BigInteger(java.math.BigInteger) SecretBuilder(io.fabric8.kubernetes.api.model.SecretBuilder) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) Objects(java.util.Objects) BigInteger(java.math.BigInteger) NoSuchAlgorithmException(java.security.NoSuchAlgorithmException) MessageDigest(java.security.MessageDigest)

Example 54 with ManagedKafka

use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

In the class MockManagedKafkaFactory, the method loop:

@Scheduled(every = "{mock.factory.interval}")
void loop() {
    // One simulation tick: seed an initial set of mock clusters, then randomly
    // delete and/or add a cluster, and finally log the current state.
    // NOTE: the no-arg Random constructor seeds from a high-entropy uniquifier;
    // the previous currentTimeMillis() seed could repeat across closely-spaced
    // scheduled invocations and replay the same "random" sequence.
    Random random = new Random();
    log.info("Mock ManagedKafka Factory:: Running Simulation");
    // feed the start of clusters
    if (this.kafkas.isEmpty()) {
        // nextInt(bound) already returns a value in [0, bound); Math.abs was redundant
        int max = random.nextInt(maxKafkas);
        for (int i = 0; i < max; i++) {
            ManagedKafka k = ManagedKafka.getDummyInstance(this.clusterIdGenerator.getAndIncrement());
            log.infof("Mock ManagedKafka Factory::marking %s for addition", k.getId());
            this.kafkas.put(k.getId(), k);
            mkClient.create(k);
        }
    }
    // delete an instance at random, but always keep at least one around
    if (this.kafkas.size() > 1 && random.nextBoolean()) {
        int idx = random.nextInt(this.kafkas.size());
        int i = 0;
        for (ManagedKafka k : kafkas.values()) {
            if (i++ == idx) {
                markForDeletion(k.getId());
                break;
            }
        }
    }
    // selectively add a new cluster, up to the configured maximum
    if (this.kafkas.size() < maxKafkas && random.nextBoolean()) {
        ManagedKafka k = ManagedKafka.getDummyInstance(this.clusterIdGenerator.getAndIncrement());
        log.infof("Mock ManagedKafka Factory:: creating a new cluster %s ", k.getId());
        this.kafkas.put(k.getId(), k);
        mkClient.create(k);
    }
    log.info("--------------------------------------------------");
    for (ManagedKafka mk : this.kafkas.values()) {
        log.infof("ManagedKafka: %s, delete requested: %s", mk.getId(), mk.getSpec().isDeleted());
    }
    log.info("--------------------------------------------------");
}
Also used : ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) Random(java.util.Random) Scheduled(io.quarkus.scheduler.Scheduled)

Example 55 with ManagedKafka

use of org.bf2.operator.resources.v1alpha1.ManagedKafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

In the class ManagedKafkaST, the method testCreateManagedKafkaRestartKubeApi:

@SequentialTest
void testCreateManagedKafkaRestartKubeApi(ExtensionContext extensionContext) throws Exception {
    // System test: a ManagedKafka is created and later deleted through the
    // sync API while the Kubernetes API server is being restarted in the
    // background, verifying the fleetshard components tolerate apiserver churn.
    ExecutorService executor = Executors.newFixedThreadPool(1);
    try {
        String mkAppName = "mk-test-restart-kubeapi";
        ManagedKafka mk = ManagedKafkaResourceType.getDefault(mkAppName, mkAppName, keycloak, latestStrimziVersion, latestKafkaVersion);
        // start restarting kubeapi in the background, then give the restart
        // loop a head start before issuing the create request
        executor.execute(TestUtils::restartKubeApi);
        Thread.sleep(5_000);
        // Create mk using api (namespace is registered with the resource
        // manager first so it is cleaned up after the test)
        resourceManager.addResource(extensionContext, new NamespaceBuilder().withNewMetadata().withName(mkAppName).endMetadata().build());
        resourceManager.addResource(extensionContext, mk);
        HttpResponse<String> res = SyncApiClient.createManagedKafka(mk, syncEndpoint);
        assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
        // stop restarting kubeapi before waiting for readiness
        executor.shutdownNow();
        resourceManager.waitResourceCondition(mk, m -> ManagedKafkaResourceType.hasConditionStatus(m, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True), TimeUnit.MINUTES.toMillis(15));
        LOGGER.info("ManagedKafka {} created", mkAppName);
        // wait for the sync to be up-to-date (poll every 1s, up to 60s)
        TestUtils.waitFor("Managed kafka status sync", 1_000, 60_000, () -> {
            try {
                String statusBody = SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body();
                if (statusBody.isEmpty()) {
                    return false;
                }
                ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(statusBody, ManagedKafkaStatus.class);
                return ManagedKafkaResourceType.hasConditionStatus(apiStatus, ManagedKafkaCondition.Type.Ready, ManagedKafkaCondition.Status.True);
            } catch (Exception e) {
                throw new AssertionError(e);
            }
        });
        // Get status from the sync API and compare with the CR status
        ManagedKafkaStatus apiStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaStatus(mk.getId(), syncEndpoint).body(), ManagedKafkaStatus.class);
        ManagedKafka managedKafka = ManagedKafkaResourceType.getOperation().inNamespace(mkAppName).withName(mkAppName).get();
        AssertUtils.assertManagedKafkaStatus(managedKafka, apiStatus);
        // Get agent status
        ManagedKafkaAgentStatus agentStatus = Serialization.jsonMapper().readValue(SyncApiClient.getManagedKafkaAgentStatus(syncEndpoint).body(), ManagedKafkaAgentStatus.class);
        AssertUtils.assertManagedKafkaAgentStatus(agentStatus);
        // Check if managed kafka deployed all components
        AssertUtils.assertManagedKafka(mk);
        // start restarting kubeapi again for the deletion phase (the previous
        // executor was shut down, so a fresh one is needed)
        executor = Executors.newFixedThreadPool(1);
        executor.execute(TestUtils::restartKubeApi);
        Thread.sleep(5_000);
        // delete mk using api
        res = SyncApiClient.deleteManagedKafka(mk.getId(), syncEndpoint);
        assertEquals(HttpURLConnection.HTTP_NO_CONTENT, res.statusCode());
        // stop restarting kubeapi
        executor.shutdownNow();
        ManagedKafkaResourceType.isDeleted(mk);
        LOGGER.info("ManagedKafka {} deleted", mkAppName);
    } finally {
        // ensure the background restart loop is stopped even on failure
        executor.shutdownNow();
    }
}
Also used : TestUtils(org.bf2.test.TestUtils) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka) ExecutorService(java.util.concurrent.ExecutorService) NamespaceBuilder(io.fabric8.kubernetes.api.model.NamespaceBuilder) ManagedKafkaAgentStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus) ManagedKafkaStatus(org.bf2.operator.resources.v1alpha1.ManagedKafkaStatus) SequentialTest(org.bf2.systemtest.framework.SequentialTest)

Aggregations

ManagedKafka (org.bf2.operator.resources.v1alpha1.ManagedKafka)67 Kafka (io.strimzi.api.kafka.model.Kafka)30 Test (org.junit.jupiter.api.Test)24 QuarkusTest (io.quarkus.test.junit.QuarkusTest)23 List (java.util.List)16 Map (java.util.Map)15 Inject (javax.inject.Inject)15 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)15 Objects (java.util.Objects)14 Quantity (io.fabric8.kubernetes.api.model.Quantity)11 Optional (java.util.Optional)11 Collectors (java.util.stream.Collectors)10 ApplicationScoped (javax.enterprise.context.ApplicationScoped)9 StrimziManager (org.bf2.operator.managers.StrimziManager)9 Logger (org.jboss.logging.Logger)9 KubernetesClient (io.fabric8.kubernetes.client.KubernetesClient)8 ArrayList (java.util.ArrayList)8 Reason (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Reason)8 Status (org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status)8 ManagedKafkaUtils.exampleManagedKafka (org.bf2.operator.utils.ManagedKafkaUtils.exampleManagedKafka)8