Example 11 with Type

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in project cos-fleetshard by bf2fc6cc711aee1a0c2a.

From the class ClusterStatusUpdaterWithOperatorTest, the method statusIsUpdated:

@Test
void statusIsUpdated() {
    final String statusUrl = "/api/connector_mgmt/v1/agent/kafka_connector_clusters/" + config.cluster().id() + "/status";
    final String operatorId = uid();
    kubernetesClient.resources(ManagedConnectorOperator.class)
        .inNamespace(ns)
        .create(new ManagedConnectorOperatorBuilder()
            .withNewMetadata()
                .withName(operatorId)
            .endMetadata()
            .withSpec(new ManagedConnectorOperatorSpecBuilder()
                .withType("operator-type")
                .withVersion("999")
                .withRuntime("operator-runtime")
                .build())
            .build());
    RestAssured.given().contentType(MediaType.TEXT_PLAIN).post("/test/provisioner/all");
    untilAsserted(() -> {
        server.verify(putRequestedFor(urlEqualTo(statusUrl))
            .withHeader(ContentTypeHeader.KEY, equalTo(APPLICATION_JSON))
            .withRequestBody(jp("$.phase", "ready"))
            .withRequestBody(jp("$.operators.size()", "1"))
            .withRequestBody(jp("$.operators[0].namespace", ns))
            .withRequestBody(jp("$.operators[0].status", "ready"))
            .withRequestBody(jp("$.operators[0].operator.id", operatorId))
            .withRequestBody(jp("$.operators[0].operator.type", "operator-type"))
            .withRequestBody(jp("$.operators[0].operator.version", "999")));
    });
}
Also used: ManagedConnectorOperatorBuilder(org.bf2.cos.fleetshard.api.ManagedConnectorOperatorBuilder) ManagedConnectorOperatorSpecBuilder(org.bf2.cos.fleetshard.api.ManagedConnectorOperatorSpecBuilder) QuarkusTest(io.quarkus.test.junit.QuarkusTest) Test(org.junit.jupiter.api.Test)
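
The jp(...) matcher used in the verification is not shown in this snippet. It presumably wraps WireMock's JSONPath request-body matching; a minimal sketch under that assumption (the helper name and its exact shape are hypothetical):

import com.github.tomakehurst.wiremock.client.WireMock;
import com.github.tomakehurst.wiremock.matching.StringValuePattern;

// Hypothetical helper: matches a request body where the JSONPath
// expression resolves to the expected string value.
static StringValuePattern jp(String expression, String expected) {
    return WireMock.matchingJsonPath(expression, WireMock.equalTo(expected));
}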

Example 12 with Type

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in project cos-fleetshard by bf2fc6cc711aee1a0c2a.

From the class CamelOperandController, the method doReify:

@SuppressFBWarnings("HARD_CODE_PASSWORD")
@Override
protected List<HasMetadata> doReify(
        ManagedConnector connector,
        CamelShardMetadata shardMetadata,
        ConnectorConfiguration<ObjectNode, ObjectNode> connectorConfiguration,
        ServiceAccountSpec serviceAccountSpec) {
    final Map<String, String> properties = createSecretsData(connector, connectorConfiguration, serviceAccountSpec, configuration);
    final ObjectNode errorHandler = createErrorHandler(shardMetadata, connector, connectorConfiguration.getErrorHandlerSpec());
    final List<KameletEndpoint> stepDefinitions;
    final KameletEndpoint source;
    final KameletEndpoint sink;
    // Wire the source and sink endpoints according to the connector direction declared in the shard metadata.
    switch(shardMetadata.getConnectorType()) {
        case CONNECTOR_TYPE_SOURCE:
            source = KameletEndpoint.kamelet(shardMetadata.getKamelets().getAdapter().getName());
            source.getProperties().put("id", connector.getSpec().getDeploymentId() + "-source");
            configureKameletProperties(source.getProperties(), connectorConfiguration.getConnectorSpec(), shardMetadata.getKamelets().getAdapter());
            sink = KameletEndpoint.kamelet(shardMetadata.getKamelets().getKafka().getName());
            sink.getProperties().put("id", connector.getSpec().getDeploymentId() + "-sink");
            sink.getProperties().put("bootstrapServers", connector.getSpec().getDeployment().getKafka().getUrl());
            sink.getProperties().put("user", SA_CLIENT_ID_PLACEHOLDER);
            sink.getProperties().put("password", SA_CLIENT_SECRET_PLACEHOLDER);
            configureKameletProperties(sink.getProperties(), connectorConfiguration.getConnectorSpec(), shardMetadata.getKamelets().getKafka());
            if (hasSchemaRegistry(connector)) {
                sink.getProperties().put("registryUrl", connector.getSpec().getDeployment().getSchemaRegistry().getUrl());
            }
            stepDefinitions = createSteps(connector, connectorConfiguration, shardMetadata, sink);
            break;
        case CONNECTOR_TYPE_SINK:
            source = KameletEndpoint.kamelet(shardMetadata.getKamelets().getKafka().getName());
            source.getProperties().put("id", connector.getSpec().getDeploymentId() + "-source");
            source.getProperties().put("consumerGroup", connector.getSpec().getDeploymentId());
            source.getProperties().put("bootstrapServers", connector.getSpec().getDeployment().getKafka().getUrl());
            source.getProperties().put("user", SA_CLIENT_ID_PLACEHOLDER);
            source.getProperties().put("password", SA_CLIENT_SECRET_PLACEHOLDER);
            configureKameletProperties(source.getProperties(), connectorConfiguration.getConnectorSpec(), shardMetadata.getKamelets().getKafka());
            if (hasSchemaRegistry(connector)) {
                source.getProperties().put("registryUrl", connector.getSpec().getDeployment().getSchemaRegistry().getUrl());
            }
            sink = KameletEndpoint.kamelet(shardMetadata.getKamelets().getAdapter().getName());
            sink.getProperties().put("id", connector.getSpec().getDeploymentId() + "-sink");
            configureKameletProperties(sink.getProperties(), connectorConfiguration.getConnectorSpec(), shardMetadata.getKamelets().getAdapter());
            stepDefinitions = createSteps(connector, connectorConfiguration, shardMetadata, source);
            break;
        default:
            throw new IllegalArgumentException("Unknown connector type: " + shardMetadata.getConnectorType());
    }
    final Secret secret = new Secret();
    secret.setMetadata(new ObjectMeta());
    secret.getMetadata().setName(connector.getMetadata().getName() + Resources.CONNECTOR_SECRET_SUFFIX);
    secret.setData(Map.of(APPLICATION_PROPERTIES, asBytesBase64(properties)));
    final ObjectNode integration = createIntegrationSpec(
        secret.getMetadata().getName(),
        configuration,
        Map.of(
            "CONNECTOR_SECRET_NAME", secret.getMetadata().getName(),
            "CONNECTOR_SECRET_CHECKSUM", Secrets.computeChecksum(secret),
            "CONNECTOR_ID", connector.getSpec().getConnectorId(),
            "CONNECTOR_DEPLOYMENT_ID", connector.getSpec().getDeploymentId()));
    final KameletBinding binding = new KameletBinding();
    binding.setMetadata(new ObjectMeta());
    binding.getMetadata().setName(connector.getMetadata().getName());
    binding.getMetadata().setAnnotations(new TreeMap<>());
    binding.setSpec(new KameletBindingSpec());
    binding.getSpec().setSource(source);
    binding.getSpec().setSink(sink);
    binding.getSpec().setErrorHandler(errorHandler);
    binding.getSpec().setSteps(stepDefinitions);
    binding.getSpec().setIntegration(integration);
    Map<String, String> annotations = binding.getMetadata().getAnnotations();
    if (shardMetadata.getAnnotations() != null) {
        annotations.putAll(shardMetadata.getAnnotations());
    }
    if (configuration.labelSelection().enabled()) {
        Operator assigned = connector.getStatus().getConnectorStatus().getAssignedOperator();
        if (assigned != null && assigned.getId() != null) {
            annotations.putIfAbsent(KAMEL_OPERATOR_ID, assigned.getId());
        }
    }
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_CONTAINER_IMAGE, shardMetadata.getConnectorImage());
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_KAMELETS_ENABLED, "false");
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_JVM_ENABLED, "false");
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_LOGGING_JSON, "false");
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_OWNER_TARGET_LABELS, LABELS_TO_TRANSFER);
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_PROMETHEUS_ENABLED, "true");
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_PROMETHEUS_POD_MONITOR, "false");
    // health check annotations
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_ENABLED, "true");
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_PROBE_ENABLED, "true");
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_PROBE_ENABLED, "true");
    CamelOperandConfiguration.Health health = configuration.health();
    if (health != null) {
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_SUCCESS_THRESHOLD, health.readinessSuccessThreshold());
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_FAILURE_THRESHOLD, health.readinessFailureThreshold());
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_PERIOD, health.readinessPeriodSeconds());
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_TIMEOUT, health.readinessTimeoutSeconds());
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_SUCCESS_THRESHOLD, health.livenessSuccessThreshold());
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_FAILURE_THRESHOLD, health.livenessFailureThreshold());
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_PERIOD, health.livenessPeriodSeconds());
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_TIMEOUT, health.livenessTimeoutSeconds());
    }
    if (configuration.connectors() != null) {
        if (configuration.connectors().traits() != null) {
            annotations.putAll(configuration.connectors().traits());
        }
        if (configuration.connectors().types() != null) {
            final String typeId = connector.getSpec().getDeployment().getConnectorTypeId();
            final CamelOperandConfiguration.ConnectorConfiguration typeConfig = configuration.connectors().types().get(typeId);
            if (typeConfig != null && typeConfig.traits() != null) {
                annotations.putAll(typeConfig.traits());
            }
        }
    }
    return List.of(secret, binding);
}
Also used: Operator(org.bf2.cos.fleetshard.api.Operator) ObjectMeta(io.fabric8.kubernetes.api.model.ObjectMeta) KameletBindingSpec(org.bf2.cos.fleetshard.operator.camel.model.KameletBindingSpec) ObjectNode(com.fasterxml.jackson.databind.node.ObjectNode) KameletEndpoint(org.bf2.cos.fleetshard.operator.camel.model.KameletEndpoint) Secret(io.fabric8.kubernetes.api.model.Secret) KameletBinding(org.bf2.cos.fleetshard.operator.camel.model.KameletBinding) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings)
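
The asBytesBase64(...) helper used when populating the Secret is not shown above. A plausible minimal sketch, assuming it renders the map as a .properties document and Base64-encodes it (Kubernetes Secret data values must be Base64 strings); this is an illustration, not the project's verified implementation:

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.Map;
import java.util.stream.Collectors;

// Hypothetical: serialize key=value pairs, one per line, then Base64-encode the UTF-8 bytes.
static String asBytesBase64(Map<String, String> properties) {
    String doc = properties.entrySet().stream()
        .map(e -> e.getKey() + "=" + e.getValue())
        .collect(Collectors.joining("\n"));
    return Base64.getEncoder().encodeToString(doc.getBytes(StandardCharsets.UTF_8));
}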

Example 13 with Type

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in project cos-fleetshard by bf2fc6cc711aee1a0c2a.

From the class DebeziumOperandControllerTest, the method computeStatus:

@ParameterizedTest
@MethodSource
void computeStatus(String connectorState, List<Condition> connectorConditions, List<Condition> connectConditions, String expectedManagedConnectorState, String expectedReason) {
    ConnectorStatusSpec status = new ConnectorStatusSpec();
    DebeziumOperandSupport.computeStatus(
        status,
        new KafkaConnectBuilder()
            .withStatus(new KafkaConnectStatusBuilder()
                .addAllToConditions(connectConditions)
                .build())
            .build(),
        new KafkaConnectorBuilder()
            .withStatus(new KafkaConnectorStatusBuilder()
                .addAllToConditions(connectorConditions)
                .addToConnectorStatus("connector", new org.bf2.cos.fleetshard.operator.debezium.model.KafkaConnectorStatusBuilder()
                    .withState(connectorState)
                    .build())
                .build())
            .build());
    assertThat(status.getPhase()).isEqualTo(expectedManagedConnectorState);
    assertThat(status.getConditions()).anySatisfy(condition -> assertThat(condition)
        .hasFieldOrPropertyWithValue("type", "Ready")
        .hasFieldOrPropertyWithValue("status", null == expectedReason ? "True" : "False")
        .hasFieldOrPropertyWithValue("reason", expectedReason));
}
Also used: KafkaConnectBuilder(io.strimzi.api.kafka.model.KafkaConnectBuilder) KafkaConnectorBuilder(io.strimzi.api.kafka.model.KafkaConnectorBuilder) KafkaConnectStatusBuilder(io.strimzi.api.kafka.model.status.KafkaConnectStatusBuilder) ConnectorStatusSpec(org.bf2.cos.fleetshard.api.ConnectorStatusSpec) KafkaConnectorStatusBuilder(io.strimzi.api.kafka.model.status.KafkaConnectorStatusBuilder) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MethodSource(org.junit.jupiter.params.provider.MethodSource)
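
JUnit 5 resolves the bare @MethodSource here to a static factory method with the same name as the test. A hypothetical provider shape (the concrete states, phases, and reasons below are illustrative assumptions, not values taken from the project):

import java.util.List;
import java.util.stream.Stream;
import org.junit.jupiter.params.provider.Arguments;

static Stream<Arguments> computeStatus() {
    // connectorState, connectorConditions, connectConditions, expected phase, expected reason
    return Stream.of(
        Arguments.of("RUNNING", List.of(), List.of(), "ready", null),
        Arguments.of("FAILED", List.of(), List.of(), "failed", "ConnectorFailed"));
}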

Example 14 with Type

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in project kafka-admin-api by bf2fc6cc711aee1a0c2a.

From the class ConsumerGroupOperations, the method resetGroupOffset:

@SuppressWarnings({ "checkstyle:JavaNCSS", "checkstyle:MethodLength" })
public static CompletionStage<PagedResponse<TopicPartitionResetResult>> resetGroupOffset(KafkaAdminClient ac, Types.ConsumerGroupOffsetResetParameters parameters) {
    Promise<PagedResponse<TopicPartitionResetResult>> prom = Promise.promise();
    switch(parameters.getOffset()) {
        case EARLIEST:
        case LATEST:
            break;
        default:
            if (parameters.getValue() == null) {
                throw new InvalidRequestException("Value has to be set when " + parameters.getOffset().getValue() + " offset is used.");
            }
    }
    Set<TopicPartition> topicPartitionsToReset = new HashSet<>();
    // CompositeFuture#join requires raw type
    @SuppressWarnings("rawtypes") List<Future> promises = new ArrayList<>();
    if (parameters.getTopics() == null || parameters.getTopics().isEmpty()) {
        // reset everything
        Promise<Void> promise = Promise.promise();
        promises.add(promise.future());
        ac.listConsumerGroupOffsets(parameters.getGroupId()).onSuccess(consumerGroupOffsets -> {
            consumerGroupOffsets.entrySet().forEach(offset -> {
                topicPartitionsToReset.add(offset.getKey());
            });
            promise.complete();
        }).onFailure(promise::fail);
    } else {
        parameters.getTopics().forEach(paramPartition -> {
            Promise<Void> promise = Promise.promise();
            promises.add(promise.future());
            if (paramPartition.getPartitions() == null || paramPartition.getPartitions().isEmpty()) {
                ac.describeTopics(Collections.singletonList(paramPartition.getTopic())).compose(topicsDesc -> {
                    topicsDesc.entrySet().forEach(topicEntry -> {
                        topicsDesc.get(topicEntry.getKey()).getPartitions().forEach(partition -> {
                            topicPartitionsToReset.add(new TopicPartition(topicEntry.getKey(), partition.getPartition()));
                        });
                    });
                    promise.complete();
                    return Future.succeededFuture(topicPartitionsToReset);
                }).onFailure(promise::fail);
            } else {
                paramPartition.getPartitions().forEach(numPartition -> {
                    topicPartitionsToReset.add(new TopicPartition(paramPartition.getTopic(), numPartition));
                });
                promise.complete();
            }
        });
    }
    // get the set of partitions we want to reset
    CompositeFuture.join(promises).compose(i -> {
        if (i.failed()) {
            return Future.failedFuture(i.cause());
        } else {
            return Future.succeededFuture();
        }
    }).compose(nothing -> {
        return validatePartitionsResettable(ac, parameters.getGroupId(), topicPartitionsToReset);
    }).compose(nothing -> {
        Map<TopicPartition, OffsetSpec> partitionsToFetchOffset = new HashMap<>();
        topicPartitionsToReset.forEach(topicPartition -> {
            OffsetSpec offsetSpec;
            // for ABSOLUTE we still fetch LATEST, only to warn when the requested offset lies beyond it
            switch(parameters.getOffset()) {
                case LATEST:
                    offsetSpec = OffsetSpec.LATEST;
                    break;
                case EARLIEST:
                    offsetSpec = OffsetSpec.EARLIEST;
                    break;
                case TIMESTAMP:
                    try {
                        offsetSpec = OffsetSpec.TIMESTAMP(ZonedDateTime.parse(parameters.getValue(), DATE_TIME_FORMATTER).toInstant().toEpochMilli());
                    } catch (DateTimeParseException e) {
                        throw new InvalidRequestException("Timestamp must be in format 'yyyy-MM-dd'T'HH:mm:ssz'" + e.getMessage());
                    }
                    break;
                case ABSOLUTE:
                    // fetch LATEST so the requested absolute offset can be sanity-checked against it below
                    offsetSpec = OffsetSpec.LATEST;
                    break;
                default:
                    throw new InvalidRequestException("Offset can be 'absolute', 'latest', 'earliest' or 'timestamp' only");
            }
            partitionsToFetchOffset.put(topicPartition, offsetSpec);
        });
        return Future.succeededFuture(partitionsToFetchOffset);
    }).compose(partitionsToFetchOffset -> {
        Promise<Map<TopicPartition, ListOffsetsResultInfo>> promise = Promise.promise();
        ac.listOffsets(partitionsToFetchOffset, partitionsOffsets -> {
            if (partitionsOffsets.failed()) {
                promise.fail(partitionsOffsets.cause());
                return;
            }
            if (parameters.getOffset() == OffsetType.ABSOLUTE) {
                // numeric offset provided; check whether x > latest
                promise.complete(partitionsOffsets.result().entrySet().stream()
                    .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
                        if (entry.getValue().getOffset() < Long.parseLong(parameters.getValue())) {
                            log.warnf("Selected offset %s is larger than latest %d", parameters.getValue(), entry.getValue().getOffset());
                        }
                        return new ListOffsetsResultInfo(Long.parseLong(parameters.getValue()), entry.getValue().getTimestamp(), entry.getValue().getLeaderEpoch());
                    })));
            } else {
                Map<TopicPartition, ListOffsetsResultInfo> adjustedOffsets = partitionsOffsets.result().entrySet().stream()
                    .collect(Collectors.toMap(Map.Entry::getKey, entry -> new ListOffsetsResultInfo(
                        entry.getValue().getOffset(), entry.getValue().getTimestamp(), entry.getValue().getLeaderEpoch())));
                promise.complete(adjustedOffsets);
            }
        });
        return promise.future();
    }).compose(newOffsets -> {
        // assemble new offsets object
        Promise<Map<TopicPartition, OffsetAndMetadata>> promise = Promise.promise();
        ac.listConsumerGroupOffsets(parameters.getGroupId(), list -> {
            if (list.failed()) {
                promise.fail(list.cause());
                return;
            }
            if (list.result().isEmpty()) {
                promise.fail(new InvalidRequestException("Consumer Group " + parameters.getGroupId() + " does not consume any topics/partitions"));
                return;
            }
            promise.complete(newOffsets.entrySet().stream()
                .collect(Collectors.toMap(Map.Entry::getKey, entry -> new OffsetAndMetadata(
                    newOffsets.get(entry.getKey()).getOffset(),
                    list.result().get(entry.getKey()) == null ? null : list.result().get(entry.getKey()).getMetadata()))));
        });
        return promise.future();
    }).compose(newOffsets -> {
        Promise<Void> promise = Promise.promise();
        ac.alterConsumerGroupOffsets(parameters.getGroupId(), newOffsets, res -> {
            if (res.failed()) {
                promise.fail(res.cause());
                return;
            }
            log.info("Consumer group offsets reset");
            promise.complete();
        });
        return promise.future();
    }).compose(i -> {
        Promise<PagedResponse<TopicPartitionResetResult>> promise = Promise.promise();
        ac.listConsumerGroupOffsets(parameters.getGroupId(), res -> {
            if (res.failed()) {
                promise.fail(res.cause());
                return;
            }
            var result = res.result().entrySet().stream().map(entry -> {
                Types.TopicPartitionResetResult reset = new Types.TopicPartitionResetResult();
                reset.setTopic(entry.getKey().getTopic());
                reset.setPartition(entry.getKey().getPartition());
                reset.setOffset(entry.getValue().getOffset());
                return reset;
            }).collect(Collectors.toList());
            Types.PagedResponse.forItems(result).onSuccess(promise::complete).onFailure(promise::fail);
        });
        return promise.future();
    }).onComplete(res -> {
        if (res.succeeded()) {
            prom.complete(res.result());
        } else {
            prom.fail(res.cause());
        }
        ac.close();
    });
    return prom.future().toCompletionStage();
}
Also used: CommonHandler(org.bf2.admin.kafka.admin.handlers.CommonHandler) Logger(org.jboss.logging.Logger) ZonedDateTime(java.time.ZonedDateTime) HashMap(java.util.HashMap) Function(java.util.function.Function) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) CompositeFuture(io.vertx.core.CompositeFuture) ConsumerGroupDescription(io.vertx.kafka.admin.ConsumerGroupDescription) ConsumerGroupListing(io.vertx.kafka.admin.ConsumerGroupListing) TopicPartitionResetResult(org.bf2.admin.kafka.admin.model.Types.TopicPartitionResetResult) Map(java.util.Map) PagedResponse(org.bf2.admin.kafka.admin.model.Types.PagedResponse) MemberDescription(io.vertx.kafka.admin.MemberDescription) ToLongFunction(java.util.function.ToLongFunction) ListOffsetsResultInfo(io.vertx.kafka.admin.ListOffsetsResultInfo) Promise(io.vertx.core.Promise) OffsetAndMetadata(io.vertx.kafka.client.consumer.OffsetAndMetadata) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) Future(io.vertx.core.Future) Collectors(java.util.stream.Collectors) TopicPartition(io.vertx.kafka.client.common.TopicPartition) Objects(java.util.Objects) GroupIdNotFoundException(org.apache.kafka.common.errors.GroupIdNotFoundException) OffsetType(org.bf2.admin.kafka.admin.model.Types.ConsumerGroupOffsetResetParameters.OffsetType) DateTimeParseException(java.time.format.DateTimeParseException) List(java.util.List) CompletionStage(java.util.concurrent.CompletionStage) Stream(java.util.stream.Stream) PagedResponseDeprecated(org.bf2.admin.kafka.admin.model.Types.PagedResponseDeprecated) DateTimeFormatter(java.time.format.DateTimeFormatter) ConsumerGroupState(org.apache.kafka.common.ConsumerGroupState) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) InvalidRequestException(org.apache.kafka.common.errors.InvalidRequestException) Pattern(java.util.regex.Pattern) Comparator(java.util.Comparator) Types(org.bf2.admin.kafka.admin.model.Types) Collections(java.util.Collections) KafkaAdminClient(io.vertx.kafka.admin.KafkaAdminClient) OffsetSpec(io.vertx.kafka.admin.OffsetSpec)
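
The TIMESTAMP branch above converts the request's timestamp string into epoch milliseconds for OffsetSpec.TIMESTAMP. A self-contained sketch of that conversion using only JDK time APIs (the project's DATE_TIME_FORMATTER is assumed here to be an ISO-8601 zoned formatter; the sample timestamp is illustrative):

import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;

public class TimestampToEpochMillis {
    public static void main(String[] args) {
        // Parse an ISO-8601 timestamp with zone and convert it to epoch milliseconds.
        long epochMillis = ZonedDateTime
            .parse("2022-03-01T10:15:30Z", DateTimeFormatter.ISO_DATE_TIME)
            .toInstant()
            .toEpochMilli();
        System.out.println(epochMillis); // 1646129730000
    }
}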

Example 15 with Type

Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in project kas-fleetshard by bf2fc6cc711aee1a0c2a.

From the class SecuritySecretManager, the method buildSecretFrom:

private static Secret buildSecretFrom(String name, String type, ManagedKafka managedKafka, Secret current, Map<String, String> dataSource) {
    SecretBuilder builder = current != null ? new SecretBuilder(current) : new SecretBuilder();
    Map<String, String> data = dataSource.entrySet().stream()
        .map(entry -> Map.entry(entry.getKey(), encode(entry.getValue())))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    Secret secret = builder
        .editOrNewMetadata()
            .withNamespace(kafkaClusterNamespace(managedKafka))
            .withName(name)
            .withLabels(OperandUtils.getDefaultLabels())
        .endMetadata()
        .withType(type)
        .withData(data)
        .build();
    // Setting the ManagedKafka as the owner of the Secret resource is needed
    // by the Operator SDK to handle events on the Secret properly.
    OperandUtils.setAsOwner(managedKafka, secret);
    return secret;
}
Also used: SecretBuilder(io.fabric8.kubernetes.api.model.SecretBuilder) OperandUtils(org.bf2.common.OperandUtils) MessageDigest(java.security.MessageDigest) ServiceAccount(org.bf2.operator.resources.v1alpha1.ServiceAccount) Collectors(java.util.stream.Collectors) StandardCharsets(java.nio.charset.StandardCharsets) Inject(javax.inject.Inject) Objects(java.util.Objects) Resource(io.fabric8.kubernetes.client.dsl.Resource) Base64(java.util.Base64) List(java.util.List) KafkaResources(io.strimzi.api.kafka.model.KafkaResources) NoSuchAlgorithmException(java.security.NoSuchAlgorithmException) Map(java.util.Map) KubernetesClient(io.fabric8.kubernetes.client.KubernetesClient) Secret(io.fabric8.kubernetes.api.model.Secret) ApplicationScoped(javax.enterprise.context.ApplicationScoped) BigInteger(java.math.BigInteger) ManagedKafka(org.bf2.operator.resources.v1alpha1.ManagedKafka)
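
The encode(...) helper is not shown above. Given that the class imports java.util.Base64 and StandardCharsets, a minimal sketch of its likely shape (an assumption, not the verified implementation):

import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Hypothetical: Kubernetes Secret data values must be Base64-encoded strings.
private static String encode(String value) {
    return Base64.getEncoder().encodeToString(value.getBytes(StandardCharsets.UTF_8));
}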

Aggregations

ManagedConnector (org.bf2.cos.fleetshard.api.ManagedConnector): 5
Secret (io.fabric8.kubernetes.api.model.Secret): 4
Objects (java.util.Objects): 4
Collectors (java.util.stream.Collectors): 4
Operator (org.bf2.cos.fleetshard.api.Operator): 4
ObjectMetaBuilder (io.fabric8.kubernetes.api.model.ObjectMetaBuilder): 3
QuarkusTest (io.quarkus.test.junit.QuarkusTest): 3
List (java.util.List): 3
Map (java.util.Map): 3
ApplicationScoped (javax.enterprise.context.ApplicationScoped): 3
ConnectorDeploymentStatus (org.bf2.cos.fleet.manager.model.ConnectorDeploymentStatus): 3
MetaV1Condition (org.bf2.cos.fleet.manager.model.MetaV1Condition): 3
Resources (org.bf2.cos.fleetshard.support.resources.Resources): 3
Test (org.junit.jupiter.api.Test): 3
ArrayNode (com.fasterxml.jackson.databind.node.ArrayNode): 2
ObjectNode (com.fasterxml.jackson.databind.node.ObjectNode): 2
Condition (io.fabric8.kubernetes.api.model.Condition): 2
HasMetadata (io.fabric8.kubernetes.api.model.HasMetadata): 2
ObjectMeta (io.fabric8.kubernetes.api.model.ObjectMeta): 2
SecretBuilder (io.fabric8.kubernetes.api.model.SecretBuilder): 2