Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class ClusterStatusUpdaterWithOperatorTest, method statusIsUpdated.
@Test
void statusIsUpdated() {
final String statusUrl = "/api/connector_mgmt/v1/agent/kafka_connector_clusters/" + config.cluster().id() + "/status";
final String operatorId = uid();
kubernetesClient.resources(ManagedConnectorOperator.class)
    .inNamespace(ns)
    .create(new ManagedConnectorOperatorBuilder()
        .withNewMetadata()
            .withName(operatorId)
        .endMetadata()
        .withSpec(new ManagedConnectorOperatorSpecBuilder()
            .withType("operator-type")
            .withVersion("999")
            .withRuntime("operator-runtime")
            .build())
        .build());
RestAssured.given().contentType(MediaType.TEXT_PLAIN).post("/test/provisioner/all");
untilAsserted(() -> {
server.verify(putRequestedFor(urlEqualTo(statusUrl))
    .withHeader(ContentTypeHeader.KEY, equalTo(APPLICATION_JSON))
    .withRequestBody(jp("$.phase", "ready"))
    .withRequestBody(jp("$.operators.size()", "1"))
    .withRequestBody(jp("$.operators[0].namespace", ns))
    .withRequestBody(jp("$.operators[0].status", "ready"))
    .withRequestBody(jp("$.operators[0].operator.id", operatorId))
    .withRequestBody(jp("$.operators[0].operator.type", "operator-type"))
    .withRequestBody(jp("$.operators[0].operator.version", "999")));
});
}
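The jp(...) helper used in the request-body verification above is not shown in this snippet. A minimal sketch, assuming it wraps WireMock's JSONPath matcher (the helper name comes from the test; its body is an assumption):

import static com.github.tomakehurst.wiremock.client.WireMock.equalTo;
import static com.github.tomakehurst.wiremock.client.WireMock.matchingJsonPath;
import com.github.tomakehurst.wiremock.matching.StringValuePattern;

// Hypothetical reconstruction: match a JSONPath expression against an expected literal value.
private static StringValuePattern jp(String path, String value) {
    return matchingJsonPath(path, equalTo(value));
}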
Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class CamelOperandController, method doReify.
@SuppressFBWarnings("HARD_CODE_PASSWORD")
@Override
protected List<HasMetadata> doReify(
        ManagedConnector connector,
        CamelShardMetadata shardMetadata,
        ConnectorConfiguration<ObjectNode, ObjectNode> connectorConfiguration,
        ServiceAccountSpec serviceAccountSpec) {
final Map<String, String> properties = createSecretsData(connector, connectorConfiguration, serviceAccountSpec, configuration);
final ObjectNode errorHandler = createErrorHandler(shardMetadata, connector, connectorConfiguration.getErrorHandlerSpec());
final List<KameletEndpoint> stepDefinitions;
final KameletEndpoint source;
final KameletEndpoint sink;
//
switch(shardMetadata.getConnectorType()) {
case CONNECTOR_TYPE_SOURCE:
source = KameletEndpoint.kamelet(shardMetadata.getKamelets().getAdapter().getName());
source.getProperties().put("id", connector.getSpec().getDeploymentId() + "-source");
configureKameletProperties(source.getProperties(), connectorConfiguration.getConnectorSpec(), shardMetadata.getKamelets().getAdapter());
sink = KameletEndpoint.kamelet(shardMetadata.getKamelets().getKafka().getName());
sink.getProperties().put("id", connector.getSpec().getDeploymentId() + "-sink");
sink.getProperties().put("bootstrapServers", connector.getSpec().getDeployment().getKafka().getUrl());
sink.getProperties().put("user", SA_CLIENT_ID_PLACEHOLDER);
sink.getProperties().put("password", SA_CLIENT_SECRET_PLACEHOLDER);
configureKameletProperties(sink.getProperties(), connectorConfiguration.getConnectorSpec(), shardMetadata.getKamelets().getKafka());
if (hasSchemaRegistry(connector)) {
sink.getProperties().put("registryUrl", connector.getSpec().getDeployment().getSchemaRegistry().getUrl());
}
stepDefinitions = createSteps(connector, connectorConfiguration, shardMetadata, sink);
break;
case CONNECTOR_TYPE_SINK:
source = KameletEndpoint.kamelet(shardMetadata.getKamelets().getKafka().getName());
source.getProperties().put("id", connector.getSpec().getDeploymentId() + "-source");
source.getProperties().put("consumerGroup", connector.getSpec().getDeploymentId());
source.getProperties().put("bootstrapServers", connector.getSpec().getDeployment().getKafka().getUrl());
source.getProperties().put("user", SA_CLIENT_ID_PLACEHOLDER);
source.getProperties().put("password", SA_CLIENT_SECRET_PLACEHOLDER);
configureKameletProperties(source.getProperties(), connectorConfiguration.getConnectorSpec(), shardMetadata.getKamelets().getKafka());
if (hasSchemaRegistry(connector)) {
source.getProperties().put("registryUrl", connector.getSpec().getDeployment().getSchemaRegistry().getUrl());
}
sink = KameletEndpoint.kamelet(shardMetadata.getKamelets().getAdapter().getName());
sink.getProperties().put("id", connector.getSpec().getDeploymentId() + "-sink");
configureKameletProperties(sink.getProperties(), connectorConfiguration.getConnectorSpec(), shardMetadata.getKamelets().getAdapter());
stepDefinitions = createSteps(connector, connectorConfiguration, shardMetadata, source);
break;
default:
throw new IllegalArgumentException("Unknown connector type: " + shardMetadata.getConnectorType());
}
final Secret secret = new Secret();
secret.setMetadata(new ObjectMeta());
secret.getMetadata().setName(connector.getMetadata().getName() + Resources.CONNECTOR_SECRET_SUFFIX);
secret.setData(Map.of(APPLICATION_PROPERTIES, asBytesBase64(properties)));
final ObjectNode integration = createIntegrationSpec(
    secret.getMetadata().getName(),
    configuration,
    Map.of(
        "CONNECTOR_SECRET_NAME", secret.getMetadata().getName(),
        "CONNECTOR_SECRET_CHECKSUM", Secrets.computeChecksum(secret),
        "CONNECTOR_ID", connector.getSpec().getConnectorId(),
        "CONNECTOR_DEPLOYMENT_ID", connector.getSpec().getDeploymentId()));
final KameletBinding binding = new KameletBinding();
binding.setMetadata(new ObjectMeta());
binding.getMetadata().setName(connector.getMetadata().getName());
binding.getMetadata().setAnnotations(new TreeMap<>());
binding.setSpec(new KameletBindingSpec());
binding.getSpec().setSource(source);
binding.getSpec().setSink(sink);
binding.getSpec().setErrorHandler(errorHandler);
binding.getSpec().setSteps(stepDefinitions);
binding.getSpec().setIntegration(integration);
Map<String, String> annotations = binding.getMetadata().getAnnotations();
if (shardMetadata.getAnnotations() != null) {
annotations.putAll(shardMetadata.getAnnotations());
}
if (configuration.labelSelection().enabled()) {
Operator assigned = connector.getStatus().getConnectorStatus().getAssignedOperator();
if (assigned != null && assigned.getId() != null) {
annotations.putIfAbsent(KAMEL_OPERATOR_ID, assigned.getId());
}
}
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_CONTAINER_IMAGE, shardMetadata.getConnectorImage());
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_KAMELETS_ENABLED, "false");
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_JVM_ENABLED, "false");
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_LOGGING_JSON, "false");
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_OWNER_TARGET_LABELS, LABELS_TO_TRANSFER);
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_PROMETHEUS_ENABLED, "true");
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_PROMETHEUS_POD_MONITOR, "false");
// health check annotations
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_ENABLED, "true");
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_PROBE_ENABLED, "true");
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_PROBE_ENABLED, "true");
CamelOperandConfiguration.Health health = configuration.health();
if (health != null) {
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_SUCCESS_THRESHOLD, health.readinessSuccessThreshold());
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_FAILURE_THRESHOLD, health.readinessFailureThreshold());
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_PERIOD, health.readinessPeriodSeconds());
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_TIMEOUT, health.readinessTimeoutSeconds());
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_SUCCESS_THRESHOLD, health.livenessSuccessThreshold());
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_FAILURE_THRESHOLD, health.livenessFailureThreshold());
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_PERIOD, health.livenessPeriodSeconds());
annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_TIMEOUT, health.livenessTimeoutSeconds());
}
if (configuration.connectors() != null) {
if (configuration.connectors().traits() != null) {
annotations.putAll(configuration.connectors().traits());
}
if (configuration.connectors().types() != null) {
final String typeId = connector.getSpec().getDeployment().getConnectorTypeId();
final CamelOperandConfiguration.ConnectorConfiguration typeConfig = configuration.connectors().types().get(typeId);
if (typeConfig != null && typeConfig.traits() != null) {
annotations.putAll(typeConfig.traits());
}
}
}
return List.of(secret, binding);
}
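The hasSchemaRegistry(...) guard called in both switch branches is not part of this snippet. A plausible sketch, assuming it only checks that the deployment declares a schema registry URL (the real implementation may differ):

// Assumed helper: true when the connector deployment carries a schema registry URL.
private static boolean hasSchemaRegistry(ManagedConnector connector) {
    return connector.getSpec().getDeployment().getSchemaRegistry() != null
        && connector.getSpec().getDeployment().getSchemaRegistry().getUrl() != null;
}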
Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class DebeziumOperandControllerTest, method computeStatus.
@ParameterizedTest
@MethodSource
void computeStatus(
        String connectorState,
        List<Condition> connectorConditions,
        List<Condition> connectConditions,
        String expectedManagedConnectorState,
        String expectedReason) {
ConnectorStatusSpec status = new ConnectorStatusSpec();
DebeziumOperandSupport.computeStatus(
    status,
    new KafkaConnectBuilder()
        .withStatus(new KafkaConnectStatusBuilder()
            .addAllToConditions(connectConditions)
            .build())
        .build(),
    new KafkaConnectorBuilder()
        .withStatus(new KafkaConnectorStatusBuilder()
            .addAllToConditions(connectorConditions)
            .addToConnectorStatus("connector",
                new org.bf2.cos.fleetshard.operator.debezium.model.KafkaConnectorStatusBuilder()
                    .withState(connectorState)
                    .build())
            .build())
        .build());
assertThat(status.getPhase()).isEqualTo(expectedManagedConnectorState);
assertThat(status.getConditions()).anySatisfy(condition -> assertThat(condition)
    .hasFieldOrPropertyWithValue("type", "Ready")
    .hasFieldOrPropertyWithValue("status", null == expectedReason ? "True" : "False")
    .hasFieldOrPropertyWithValue("reason", expectedReason));
}
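Since @MethodSource names no provider, JUnit resolves a static factory method with the same name as the test. A minimal sketch of such a provider; the states, conditions, phases, and reasons below are illustrative assumptions, not the project's actual test data:

import java.util.List;
import java.util.stream.Stream;
import org.junit.jupiter.params.provider.Arguments;

static Stream<Arguments> computeStatus() {
    return Stream.of(
        // connectorState, connectorConditions, connectConditions, expectedPhase, expectedReason
        Arguments.of("RUNNING", List.of(), List.of(), "monitor", null),
        Arguments.of("FAILED", List.of(), List.of(), "failed", "ConnectorFailed"));
}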
Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in project kafka-admin-api by bf2fc6cc711aee1a0c2a.
The class ConsumerGroupOperations, method resetGroupOffset.
@SuppressWarnings({ "checkstyle:JavaNCSS", "checkstyle:MethodLength" })
public static CompletionStage<PagedResponse<TopicPartitionResetResult>> resetGroupOffset(
        KafkaAdminClient ac, Types.ConsumerGroupOffsetResetParameters parameters) {
Promise<PagedResponse<TopicPartitionResetResult>> prom = Promise.promise();
switch(parameters.getOffset()) {
case EARLIEST:
case LATEST:
break;
default:
if (parameters.getValue() == null) {
throw new InvalidRequestException("Value has to be set when " + parameters.getOffset().getValue() + " offset is used.");
}
}
Set<TopicPartition> topicPartitionsToReset = new HashSet<>();
// CompositeFuture#join requires raw type
@SuppressWarnings("rawtypes")
List<Future> promises = new ArrayList<>();
if (parameters.getTopics() == null || parameters.getTopics().isEmpty()) {
// reset everything
Promise<Void> promise = Promise.promise();
promises.add(promise.future());
ac.listConsumerGroupOffsets(parameters.getGroupId()).onSuccess(consumerGroupOffsets -> {
    topicPartitionsToReset.addAll(consumerGroupOffsets.keySet());
    promise.complete();
}).onFailure(promise::fail);
} else {
parameters.getTopics().forEach(paramPartition -> {
Promise<Void> promise = Promise.promise();
promises.add(promise.future());
if (paramPartition.getPartitions() == null || paramPartition.getPartitions().isEmpty()) {
ac.describeTopics(Collections.singletonList(paramPartition.getTopic())).compose(topicsDesc -> {
    topicsDesc.forEach((topicName, description) ->
        description.getPartitions().forEach(partition ->
            topicPartitionsToReset.add(new TopicPartition(topicName, partition.getPartition()))));
    promise.complete();
    return Future.succeededFuture(topicPartitionsToReset);
}).onFailure(promise::fail);
} else {
paramPartition.getPartitions().forEach(numPartition -> {
topicPartitionsToReset.add(new TopicPartition(paramPartition.getTopic(), numPartition));
});
promise.complete();
}
});
}
// get the set of partitions we want to reset
CompositeFuture.join(promises).compose(i -> {
if (i.failed()) {
return Future.failedFuture(i.cause());
} else {
return Future.succeededFuture();
}
}).compose(nothing -> {
return validatePartitionsResettable(ac, parameters.getGroupId(), topicPartitionsToReset);
}).compose(nothing -> {
Map<TopicPartition, OffsetSpec> partitionsToFetchOffset = new HashMap<>();
topicPartitionsToReset.forEach(topicPartition -> {
OffsetSpec offsetSpec;
// for ABSOLUTE we still fetch LATEST, only to warn if the requested offset is beyond it
switch(parameters.getOffset()) {
case LATEST:
offsetSpec = OffsetSpec.LATEST;
break;
case EARLIEST:
offsetSpec = OffsetSpec.EARLIEST;
break;
case TIMESTAMP:
try {
offsetSpec = OffsetSpec.TIMESTAMP(
    ZonedDateTime.parse(parameters.getValue(), DATE_TIME_FORMATTER).toInstant().toEpochMilli());
} catch (DateTimeParseException e) {
throw new InvalidRequestException("Timestamp must be in format 'yyyy-MM-dd'T'HH:mm:ssz'" + e.getMessage());
}
break;
case ABSOLUTE:
// fetch the latest offset so the requested absolute offset can be checked against it below
offsetSpec = OffsetSpec.LATEST;
break;
default:
throw new InvalidRequestException("Offset can be 'absolute', 'latest', 'earliest' or 'timestamp' only");
}
partitionsToFetchOffset.put(topicPartition, offsetSpec);
});
return Future.succeededFuture(partitionsToFetchOffset);
}).compose(partitionsToFetchOffset -> {
Promise<Map<TopicPartition, ListOffsetsResultInfo>> promise = Promise.promise();
ac.listOffsets(partitionsToFetchOffset, partitionsOffsets -> {
if (partitionsOffsets.failed()) {
promise.fail(partitionsOffsets.cause());
return;
}
if (parameters.getOffset() == OffsetType.ABSOLUTE) {
// an absolute numeric offset was provided; warn when it is beyond the latest available offset
promise.complete(partitionsOffsets.result().entrySet().stream()
    .collect(Collectors.toMap(Map.Entry::getKey, entry -> {
        long requested = Long.parseLong(parameters.getValue());
        if (entry.getValue().getOffset() < requested) {
            log.warnf("Selected offset %s is larger than latest %d", parameters.getValue(), entry.getValue().getOffset());
        }
        return new ListOffsetsResultInfo(requested, entry.getValue().getTimestamp(), entry.getValue().getLeaderEpoch());
    })));
} else {
Map<TopicPartition, ListOffsetsResultInfo> resolvedOffsets = partitionsOffsets.result().entrySet().stream()
    .collect(Collectors.toMap(Map.Entry::getKey, entry -> new ListOffsetsResultInfo(
        entry.getValue().getOffset(), entry.getValue().getTimestamp(), entry.getValue().getLeaderEpoch())));
promise.complete(resolvedOffsets);
}
});
return promise.future();
}).compose(newOffsets -> {
// assemble new offsets object
Promise<Map<TopicPartition, OffsetAndMetadata>> promise = Promise.promise();
ac.listConsumerGroupOffsets(parameters.getGroupId(), list -> {
if (list.failed()) {
promise.fail(list.cause());
return;
}
if (list.result().isEmpty()) {
promise.fail(new InvalidRequestException("Consumer Group " + parameters.getGroupId() + " does not consume any topics/partitions"));
return;
}
promise.complete(newOffsets.entrySet().stream()
    .collect(Collectors.toMap(Map.Entry::getKey, entry -> new OffsetAndMetadata(
        entry.getValue().getOffset(),
        list.result().get(entry.getKey()) == null ? null : list.result().get(entry.getKey()).getMetadata()))));
});
return promise.future();
}).compose(newOffsets -> {
Promise<Void> promise = Promise.promise();
ac.alterConsumerGroupOffsets(parameters.getGroupId(), newOffsets, res -> {
if (res.failed()) {
promise.fail(res.cause());
return;
}
log.info("consumer group offsets reset");
promise.complete();
});
return promise.future();
}).compose(i -> {
Promise<Types.PagedResponse<Types.TopicPartitionResetResult>> promise = Promise.promise();
ac.listConsumerGroupOffsets(parameters.getGroupId(), res -> {
if (res.failed()) {
promise.fail(res.cause());
return;
}
var result = res.result().entrySet().stream().map(entry -> {
Types.TopicPartitionResetResult reset = new Types.TopicPartitionResetResult();
reset.setTopic(entry.getKey().getTopic());
reset.setPartition(entry.getKey().getPartition());
reset.setOffset(entry.getValue().getOffset());
return reset;
}).collect(Collectors.toList());
Types.PagedResponse.forItems(result).onSuccess(promise::complete).onFailure(promise::fail);
});
return promise.future();
}).onComplete(res -> {
if (res.succeeded()) {
prom.complete(res.result());
} else {
prom.fail(res.cause());
}
ac.close();
});
return prom.future().toCompletionStage();
}
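A minimal usage sketch for resetGroupOffset; the setter names and the PagedResponse accessor below are assumptions inferred from the getters used in the method body:

// Illustrative only: reset a consumer group to the earliest offsets and print the results.
Types.ConsumerGroupOffsetResetParameters params = new Types.ConsumerGroupOffsetResetParameters();
params.setGroupId("my-consumer-group"); // hypothetical setter matching getGroupId()
params.setOffset(OffsetType.EARLIEST); // hypothetical setter matching getOffset()
ConsumerGroupOperations.resetGroupOffset(adminClient, params)
    .thenAccept(page -> page.getItems().forEach(r ->
        System.out.printf("%s-%d reset to offset %d%n", r.getTopic(), r.getPartition(), r.getOffset())));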
Use of org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class SecuritySecretManager, method buildSecretFrom.
private static Secret buildSecretFrom(String name, String type, ManagedKafka managedKafka, Secret current, Map<String, String> dataSource) {
SecretBuilder builder = current != null ? new SecretBuilder(current) : new SecretBuilder();
Map<String, String> data = dataSource.entrySet().stream()
    .collect(Collectors.toMap(Map.Entry::getKey, entry -> encode(entry.getValue())));
Secret secret = builder
    .editOrNewMetadata()
        .withNamespace(kafkaClusterNamespace(managedKafka))
        .withName(name)
        .withLabels(OperandUtils.getDefaultLabels())
    .endMetadata()
    .withType(type)
    .withData(data)
    .build();
// setting the ManagedKafka as owner of the Secret resource is needed
// by the operator sdk to handle events on the Secret resource properly
OperandUtils.setAsOwner(managedKafka, secret);
return secret;
}
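The encode(...) helper applied to each secret value is not shown; a minimal sketch, assuming standard Base64 encoding as required for Kubernetes Secret data:

import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Assumed helper: Kubernetes Secret `data` values must be Base64-encoded.
private static String encode(String value) {
    return Base64.getEncoder().encodeToString(value.getBytes(StandardCharsets.UTF_8));
}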