Use of org.bf2.cos.fleetshard.api.Operator in project cos-fleetshard (by bf2fc6cc711aee1a0c2a): class CamelOperandController, method doReify.
@SuppressFBWarnings("HARD_CODE_PASSWORD")
@Override
protected List<HasMetadata> doReify(ManagedConnector connector, CamelShardMetadata shardMetadata, ConnectorConfiguration<ObjectNode, ObjectNode> connectorConfiguration, ServiceAccountSpec serviceAccountSpec) {
    // Secret payload (connector + service-account derived properties) and the error handler spec.
    final Map<String, String> properties = createSecretsData(connector, connectorConfiguration, serviceAccountSpec, configuration);
    final ObjectNode errorHandler = createErrorHandler(shardMetadata, connector, connectorConfiguration.getErrorHandlerSpec());

    final List<KameletEndpoint> stepDefinitions;
    final KameletEndpoint source;
    final KameletEndpoint sink;

    // Wire the binding endpoints depending on the connector direction. The endpoint
    // construction is shared between the two branches (see the helpers below); only the
    // direction and the consumer group differ.
    switch (shardMetadata.getConnectorType()) {
        case CONNECTOR_TYPE_SOURCE:
            // source connector: adapter kamelet -> kafka kamelet
            source = createAdapterEndpoint(connector, shardMetadata, connectorConfiguration, "-source");
            sink = createKafkaEndpoint(connector, shardMetadata, connectorConfiguration, "-sink", null);
            stepDefinitions = createSteps(connector, connectorConfiguration, shardMetadata, sink);
            break;
        case CONNECTOR_TYPE_SINK:
            // sink connector: kafka kamelet -> adapter kamelet; the deployment id doubles as
            // the kafka consumer group
            source = createKafkaEndpoint(connector, shardMetadata, connectorConfiguration, "-source", connector.getSpec().getDeploymentId());
            sink = createAdapterEndpoint(connector, shardMetadata, connectorConfiguration, "-sink");
            stepDefinitions = createSteps(connector, connectorConfiguration, shardMetadata, source);
            break;
        default:
            throw new IllegalArgumentException("Unknown connector type: " + shardMetadata.getConnectorType());
    }

    // Secret carrying the application properties; referenced by the integration spec below.
    final Secret secret = new Secret();
    secret.setMetadata(new ObjectMeta());
    secret.getMetadata().setName(connector.getMetadata().getName() + Resources.CONNECTOR_SECRET_SUFFIX);
    secret.setData(Map.of(APPLICATION_PROPERTIES, asBytesBase64(properties)));

    final ObjectNode integration = createIntegrationSpec(secret.getMetadata().getName(), configuration, Map.of("CONNECTOR_SECRET_NAME", secret.getMetadata().getName(), "CONNECTOR_SECRET_CHECKSUM", Secrets.computeChecksum(secret), "CONNECTOR_ID", connector.getSpec().getConnectorId(), "CONNECTOR_DEPLOYMENT_ID", connector.getSpec().getDeploymentId()));

    final KameletBinding binding = new KameletBinding();
    binding.setMetadata(new ObjectMeta());
    binding.getMetadata().setName(connector.getMetadata().getName());
    // TreeMap keeps annotation keys sorted, which gives a stable serialized form.
    binding.getMetadata().setAnnotations(new TreeMap<>());
    binding.setSpec(new KameletBindingSpec());
    binding.getSpec().setSource(source);
    binding.getSpec().setSink(sink);
    binding.getSpec().setErrorHandler(errorHandler);
    binding.getSpec().setSteps(stepDefinitions);
    binding.getSpec().setIntegration(integration);

    Map<String, String> annotations = binding.getMetadata().getAnnotations();
    if (shardMetadata.getAnnotations() != null) {
        annotations.putAll(shardMetadata.getAnnotations());
    }
    if (configuration.labelSelection().enabled()) {
        Operator assigned = connector.getStatus().getConnectorStatus().getAssignedOperator();
        if (assigned != null && assigned.getId() != null) {
            annotations.putIfAbsent(KAMEL_OPERATOR_ID, assigned.getId());
        }
    }

    // Trait defaults: putIfAbsent so values coming from the shard metadata win.
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_CONTAINER_IMAGE, shardMetadata.getConnectorImage());
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_KAMELETS_ENABLED, "false");
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_JVM_ENABLED, "false");
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_LOGGING_JSON, "false");
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_OWNER_TARGET_LABELS, LABELS_TO_TRANSFER);
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_PROMETHEUS_ENABLED, "true");
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_PROMETHEUS_POD_MONITOR, "false");

    // health check annotations
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_ENABLED, "true");
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_PROBE_ENABLED, "true");
    annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_PROBE_ENABLED, "true");
    CamelOperandConfiguration.Health health = configuration.health();
    if (health != null) {
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_SUCCESS_THRESHOLD, health.readinessSuccessThreshold());
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_FAILURE_THRESHOLD, health.readinessFailureThreshold());
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_PERIOD, health.readinessPeriodSeconds());
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_READINESS_TIMEOUT, health.readinessTimeoutSeconds());
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_SUCCESS_THRESHOLD, health.livenessSuccessThreshold());
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_FAILURE_THRESHOLD, health.livenessFailureThreshold());
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_PERIOD, health.livenessPeriodSeconds());
        annotations.putIfAbsent(TRAIT_CAMEL_APACHE_ORG_HEALTH_LIVENESS_TIMEOUT, health.livenessTimeoutSeconds());
    }

    // Operator-wide and per-connector-type trait overrides always win (putAll).
    if (configuration.connectors() != null) {
        if (configuration.connectors().traits() != null) {
            annotations.putAll(configuration.connectors().traits());
        }
        if (configuration.connectors().types() != null) {
            final String typeId = connector.getSpec().getDeployment().getConnectorTypeId();
            final CamelOperandConfiguration.ConnectorConfiguration typeConfig = configuration.connectors().types().get(typeId);
            if (typeConfig != null && typeConfig.traits() != null) {
                annotations.putAll(typeConfig.traits());
            }
        }
    }

    return List.of(secret, binding);
}

/**
 * Builds the adapter-side Kamelet endpoint: endpoint id = deploymentId + idSuffix, plus the
 * kamelet properties derived from the connector spec.
 */
private KameletEndpoint createAdapterEndpoint(ManagedConnector connector, CamelShardMetadata shardMetadata, ConnectorConfiguration<ObjectNode, ObjectNode> connectorConfiguration, String idSuffix) {
    final KameletEndpoint endpoint = KameletEndpoint.kamelet(shardMetadata.getKamelets().getAdapter().getName());
    endpoint.getProperties().put("id", connector.getSpec().getDeploymentId() + idSuffix);
    configureKameletProperties(endpoint.getProperties(), connectorConfiguration.getConnectorSpec(), shardMetadata.getKamelets().getAdapter());
    return endpoint;
}

/**
 * Builds the kafka-side Kamelet endpoint: endpoint id, optional consumer group (sink
 * connectors only), bootstrap servers, credential placeholders and — when a schema registry
 * is configured on the deployment — the registry URL.
 *
 * @param consumerGroup kafka consumer group, or {@code null} when the endpoint is a producer
 */
@SuppressFBWarnings("HARD_CODE_PASSWORD")
private KameletEndpoint createKafkaEndpoint(ManagedConnector connector, CamelShardMetadata shardMetadata, ConnectorConfiguration<ObjectNode, ObjectNode> connectorConfiguration, String idSuffix, String consumerGroup) {
    final KameletEndpoint endpoint = KameletEndpoint.kamelet(shardMetadata.getKamelets().getKafka().getName());
    endpoint.getProperties().put("id", connector.getSpec().getDeploymentId() + idSuffix);
    if (consumerGroup != null) {
        endpoint.getProperties().put("consumerGroup", consumerGroup);
    }
    endpoint.getProperties().put("bootstrapServers", connector.getSpec().getDeployment().getKafka().getUrl());
    // Placeholders, resolved elsewhere to the service account credentials — hence the
    // HARD_CODE_PASSWORD suppression.
    endpoint.getProperties().put("user", SA_CLIENT_ID_PLACEHOLDER);
    endpoint.getProperties().put("password", SA_CLIENT_SECRET_PLACEHOLDER);
    configureKameletProperties(endpoint.getProperties(), connectorConfiguration.getConnectorSpec(), shardMetadata.getKamelets().getKafka());
    if (hasSchemaRegistry(connector)) {
        endpoint.getProperties().put("registryUrl", connector.getSpec().getDeployment().getSchemaRegistry().getUrl());
    }
    return endpoint;
}
Use of org.bf2.cos.fleetshard.api.Operator in project cos-fleetshard (by bf2fc6cc711aee1a0c2a): class ConnectorController, method handleMonitor.
/**
 * Monitor-phase reconciliation: refreshes the connector's operand status and, when a newly
 * installed operator matches the connector's operator selector, records it as the "available"
 * operator on the connector status (signalling that an upgrade is possible). Always returns
 * an UpdateControl that persists the (possibly unchanged) status.
 */
private UpdateControl<ManagedConnector> handleMonitor(ManagedConnector connector) {
operandController.status(connector);
//
// Search for newly installed ManagedOperators
//
final List<Operator> operators = fleetShard.lookupOperators();
final Operator assignedOperator = connector.getStatus().getConnectorStatus().getAssignedOperator();
final Operator availableOperator = connector.getStatus().getConnectorStatus().getAvailableOperator();
final Optional<Operator> selected = available(connector.getSpec().getOperatorSelector(), operators);
if (selected.isPresent()) {
Operator selectedInstance = selected.get();
// only act when the selected operator differs from both the previously advertised
// "available" operator and the currently assigned one
if (!Objects.equals(selectedInstance, availableOperator) && !Objects.equals(selectedInstance, assignedOperator)) {
LOGGER.info("deployment (upd): {} -> from:{}, to: {}", connector.getSpec().getDeployment(), assignedOperator, selectedInstance);
// then we can signal that an upgrade is possible
connector.getStatus().getConnectorStatus().setAvailableOperator(selectedInstance);
}
} else {
// no matching operator installed: clear the available operator with an empty instance
// (NOTE(review): an empty Operator rather than null — presumably so the field serializes;
// confirm against the status consumer)
connector.getStatus().getConnectorStatus().setAvailableOperator(new Operator());
}
return UpdateControl.updateStatus(connector);
}
Use of org.bf2.cos.fleetshard.api.Operator in project cos-fleetshard (by bf2fc6cc711aee1a0c2a): class OperatorSelectorUtil, method available.
/**
 * Picks, among the given operators, the best match for the selector: same type, version
 * inside the selector's version range, highest version wins.
 *
 * @param selector  the type/version-range constraints to match against
 * @param operators the candidate operators; may be {@code null} or empty
 * @return the highest-versioned matching operator, or empty when none matches
 */
public static Optional<Operator> available(OperatorSelector selector, Collection<Operator> operators) {
    if (operators == null || operators.isEmpty()) {
        return Optional.empty();
    }
    final VersionRange acceptedRange = new VersionRange(selector.getVersion());
    final Comparator<Operator> byVersion = Comparator.comparing(candidate -> new Version(candidate.getVersion()));
    return operators.stream()
        .filter(candidate -> Objects.equals(candidate.getType(), selector.getType()))
        .filter(candidate -> versionInRange(acceptedRange, candidate))
        .max(byVersion);
}
Use of org.bf2.cos.fleetshard.api.Operator in project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class KafkaCluster, method kafkaFrom.
/* test */
/**
 * Builds the desired Strimzi Kafka custom resource for the given ManagedKafka, starting from
 * the current cluster state (when present) so that unmanaged fields are preserved.
 */
@Override
public Kafka kafkaFrom(ManagedKafka managedKafka, Kafka current) {
    // Seed the builder from the existing resource when one exists.
    KafkaBuilder base = current != null ? new KafkaBuilder(current) : new KafkaBuilder();

    // Actual replicas come from the current cluster; desired replicas from the spec alone.
    int currentBrokerReplicas = getBrokerReplicas(managedKafka, current);
    int targetBrokerReplicas = getBrokerReplicas(managedKafka, null);

    KafkaBuilder specBuilder = base
        .editOrNewMetadata()
            .withName(kafkaClusterName(managedKafka))
            .withNamespace(kafkaClusterNamespace(managedKafka))
            .withLabels(buildKafkaLabels(managedKafka))
            .withAnnotations(buildKafkaAnnotations(managedKafka, current))
            .addToAnnotations(REPLICAS, String.valueOf(targetBrokerReplicas))
        .endMetadata()
        .editOrNewSpec()
            .editOrNewKafka()
                .withVersion(this.kafkaManager.currentKafkaVersion(managedKafka))
                .withConfig(buildKafkaConfig(managedKafka, current))
                .withReplicas(currentBrokerReplicas)
                .withResources(buildKafkaResources(managedKafka))
                .withJvmOptions(buildKafkaJvmOptions(managedKafka))
                .withStorage(buildKafkaStorage(managedKafka, current))
                .withListeners(buildListeners(managedKafka, currentBrokerReplicas))
                .withRack(buildKafkaRack(managedKafka))
                .withTemplate(buildKafkaTemplate(managedKafka))
                .withMetricsConfig(buildKafkaMetricsConfig(managedKafka))
                .withAuthorization(buildKafkaAuthorization(managedKafka))
                .withImage(kafkaImage.orElse(null))
                .withExternalLogging(buildKafkaExternalLogging(managedKafka))
            .endKafka()
            .editOrNewZookeeper()
                .withReplicas(this.config.getZookeeper().getReplicas())
                .withStorage((SingleVolumeStorage) buildZooKeeperStorage(current))
                .withResources(buildZooKeeperResources(managedKafka))
                .withJvmOptions(buildZooKeeperJvmOptions(managedKafka))
                .withTemplate(buildZookeeperTemplate(managedKafka))
                .withMetricsConfig(buildZooKeeperMetricsConfig(managedKafka))
                .withImage(zookeeperImage.orElse(null))
                .withExternalLogging(buildZookeeperExternalLogging(managedKafka))
            .endZookeeper()
            .withKafkaExporter(buildKafkaExporter(managedKafka))
        .endSpec();

    Kafka result = this.upgrade(managedKafka, specBuilder);

    // setting the ManagedKafka as owner of the Kafka resource is needed
    // by the operator sdk to handle events on the Kafka resource properly
    OperandUtils.setAsOwner(managedKafka, result);
    return result;
}
Use of org.bf2.cos.fleetshard.api.Operator in project kas-fleetshard (by bf2fc6cc711aee1a0c2a): class SecuritySecretManager, method buildSecretFrom.
/**
 * Builds (or updates) a Kubernetes Secret of the given name/type for the ManagedKafka's
 * namespace, encoding every value of {@code dataSource} via {@code encode} and applying the
 * default operand labels.
 */
private static Secret buildSecretFrom(String name, String type, ManagedKafka managedKafka, Secret current, Map<String, String> dataSource) {
    // Start from the existing Secret when present so unmanaged fields survive.
    SecretBuilder builder = current != null ? new SecretBuilder(current) : new SecretBuilder();

    // Encode each value (presumably base64, as Secret data requires — see encode()).
    Map<String, String> encodedData = dataSource.entrySet().stream()
        .collect(Collectors.toMap(Map.Entry::getKey, entry -> encode(entry.getValue())));

    Secret secret = builder
        .editOrNewMetadata()
            .withNamespace(kafkaClusterNamespace(managedKafka))
            .withName(name)
            .withLabels(OperandUtils.getDefaultLabels())
        .endMetadata()
        .withType(type)
        .withData(encodedData)
        .build();

    // setting the ManagedKafka as owner of the Secret resource is needed
    // by the operator sdk to handle events on the Secret resource properly
    OperandUtils.setAsOwner(managedKafka, secret);
    return secret;
}
Aggregations