Use of org.bf2.cos.fleetshard.support.resources.Resources in project srs-fleet-manager by bf2fc6cc711aee1a0c2a.
The class FileQuotaPlansService, method reconcile:
private void reconcile() {
    log.info("Performing quota plan reconciliation");
    var allRegistries = storage.getAllRegistries();
    var updatedCount = 0;
    for (RegistryData registry : allRegistries) {
        var tid = registry.getId();
        var tmc = Utils.createTenantManagerConfig(registry.getRegistryDeployment());
        try {
            var tenant = tmClient.getTenantById(tmc, tid).orElseThrow();
            Map<String, Long> tenantLimits = new HashMap<>();
            for (TenantLimit resource : tenant.getResources()) {
                tenantLimits.put(resource.getType(), resource.getLimit());
            }
            var targetPlan = determineQuotaPlan(registry.getOrgId());
            var requiresUpdate = false;
            // Compare limits
            for (TenantLimit targetLimit : targetPlan.getResources()) {
                var v = tenantLimits.get(targetLimit.getType());
                if (v == null || !v.equals(targetLimit.getLimit())) {
                    requiresUpdate = true;
                    break;
                }
            }
            if (requiresUpdate) {
                UpdateTenantRequest utr = UpdateTenantRequest.builder()
                    .id(tid)
                    .status(tenant.getStatus())
                    .resources(targetPlan.getResources())
                    .build();
                tmClient.updateTenant(tmc, utr);
                updatedCount++;
            }
        } catch (TenantManagerServiceException | NoSuchElementException | TenantNotFoundServiceException e) {
            log.warn("Could not get or update tenant " + tid + " during quota plan reconciliation", e);
        }
    }
    log.info("Quota plan reconciliation successful. Updated {} out of {} tenants", updatedCount, allRegistries.size());
}
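An update is issued only when the comparison loop finds a target limit that is missing from, or differs from, the tenant's current limits. The following is a minimal, self-contained sketch of that comparison; the TenantLimit record and the requiresUpdate helper are stand-ins introduced here for illustration, and the limit type string is an arbitrary example value.

// Illustrative sketch: the limit comparison performed by reconcile(), isolated.
// TenantLimit here is a minimal stand-in for the project's class.
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class QuotaPlanDiff {

    record TenantLimit(String type, Long limit) { }

    // True when any target limit is missing from, or differs from, the tenant's current limits.
    static boolean requiresUpdate(List<TenantLimit> current, List<TenantLimit> target) {
        Map<String, Long> currentByType = new HashMap<>();
        for (TenantLimit limit : current) {
            currentByType.put(limit.type(), limit.limit());
        }
        for (TenantLimit targetLimit : target) {
            Long value = currentByType.get(targetLimit.type());
            if (value == null || !value.equals(targetLimit.limit())) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        var current = List.of(new TenantLimit("MAX_TOTAL_SCHEMAS_COUNT", 1000L)); // example limit type
        var target = List.of(new TenantLimit("MAX_TOTAL_SCHEMAS_COUNT", 2000L));
        System.out.println(requiresUpdate(current, target)); // true -> an update would be issued
    }
}

Running the sketch prints true, which is the case where the reconciler above would build and send an UpdateTenantRequest.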
Use of org.bf2.cos.fleetshard.support.resources.Resources in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class DebeziumOperandControllerTest, method reify:
@Test
void reify() {
    KubernetesClient kubernetesClient = Mockito.mock(KubernetesClient.class);
    DebeziumOperandController controller = new DebeziumOperandController(kubernetesClient, CONFIGURATION);
    final String kcsB64 = Base64.getEncoder().encodeToString("kcs".getBytes(StandardCharsets.UTF_8));
    final String pwdB64 = Base64.getEncoder().encodeToString("orderpw".getBytes(StandardCharsets.UTF_8));
    var spec = Serialization.jsonMapper().createObjectNode()
        .put("database.hostname", "orderdb")
        .put("database.port", "5432")
        .put("database.user", "orderuser")
        .put("database.dbname", "orderdb")
        .put("database.server.name", "dbserver1")
        .put("schema.include.list", "purchaseorder")
        .put("table.include.list", "purchaseorder.outboxevent")
        .put("tombstones.on.delete", "false")
        .put("key.converter", "org.apache.kafka.connect.storage.StringConverter")
        .put("value.converter", "org.apache.kafka.connect.storage.StringConverter")
        .put("transforms", "saga")
        .put("transforms.saga.type", "io.debezium.transforms.outbox.EventRouter")
        .put("transforms.saga.route.topic.replacement", "${routedByValue}.request")
        .put("poll.interval.ms", "100")
        .put("consumer.interceptor.classes", "io.opentracing.contrib.kafka.TracingConsumerInterceptor")
        .put("producer.interceptor.classes", "io.opentracing.contrib.kafka.TracingProducerInterceptor");
    spec.with("data_shape").put("key", "JSON").put("value", "JSON");
    spec.with("database.password").put("kind", "base64").put("value", pwdB64);
    var resources = controller.doReify(
        new ManagedConnectorBuilder()
            .withMetadata(new ObjectMetaBuilder()
                .withName(DEFAULT_MANAGED_CONNECTOR_ID)
                .build())
            .withSpec(new ManagedConnectorSpecBuilder()
                .withConnectorId(DEFAULT_MANAGED_CONNECTOR_ID)
                .withDeploymentId(DEFAULT_DEPLOYMENT_ID)
                .withDeployment(new DeploymentSpecBuilder()
                    .withConnectorTypeId(DEFAULT_CONNECTOR_TYPE_ID)
                    .withSecret("secret")
                    .withKafka(new KafkaSpecBuilder().withUrl(DEFAULT_KAFKA_SERVER).build())
                    .withConnectorResourceVersion(DEFAULT_CONNECTOR_REVISION)
                    .withDeploymentResourceVersion(DEFAULT_DEPLOYMENT_REVISION)
                    .withDesiredState(DESIRED_STATE_READY)
                    .build())
                .build())
            .build(),
        new org.bf2.cos.fleetshard.operator.debezium.DebeziumShardMetadataBuilder()
            .withContainerImage(DEFAULT_CONNECTOR_IMAGE)
            .withConnectorClass(PG_CLASS)
            .build(),
        new ConnectorConfiguration<>(spec, ObjectNode.class),
        new ServiceAccountSpecBuilder()
            .withClientId(DEFAULT_KAFKA_CLIENT_ID)
            .withClientSecret(kcsB64)
            .build());
    assertThat(resources)
        .anyMatch(DebeziumOperandSupport::isKafkaConnect)
        .anyMatch(DebeziumOperandSupport::isKafkaConnector)
        .anyMatch(DebeziumOperandSupport::isSecret);
    assertThat(resources)
        .filteredOn(DebeziumOperandSupport::isKafkaConnect)
        .hasSize(1)
        .first()
        .isInstanceOfSatisfying(KafkaConnect.class, kc -> {
            assertThat(kc.getSpec().getImage()).isEqualTo(DEFAULT_CONNECTOR_IMAGE);
        });
    assertThat(resources)
        .filteredOn(DebeziumOperandSupport::isKafkaConnector)
        .hasSize(1)
        .first()
        .isInstanceOfSatisfying(KafkaConnector.class, kc -> {
            assertThat(kc.getSpec().getConfig()).containsEntry(
                "database.password",
                "${file:/opt/kafka/external-configuration/" + DebeziumConstants.EXTERNAL_CONFIG_DIRECTORY
                    + "/" + EXTERNAL_CONFIG_FILE + ":database.password}");
        });
}
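The final assertion checks that the reified KafkaConnector references the database password through a ${file:...} placeholder instead of embedding the secret value; Kafka Connect's FileConfigProvider resolves such placeholders at runtime from a mounted properties file. Below is a small sketch of how a placeholder of that shape is assembled; the mount path and file name are assumptions for the example, not the project's DebeziumConstants values.

// Illustrative sketch: assembling a FileConfigProvider placeholder of the form "${file:<path>:<key>}".
// The mounted file path is an example value, not the project's constant.
class ExternalConfigPlaceholder {

    static String fileProperty(String mountedFile, String key) {
        // Kafka Connect resolves this at runtime, so the secret never appears in the connector config.
        return "${file:" + mountedFile + ":" + key + "}";
    }

    public static void main(String[] args) {
        String placeholder = fileProperty(
            "/opt/kafka/external-configuration/connector-configuration/connector.properties", // assumed path
            "database.password");
        System.out.println(placeholder);
    }
}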
Use of org.bf2.cos.fleetshard.support.resources.Resources in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class ConnectorController, method handleAugmentation:
private UpdateControl<ManagedConnector> handleAugmentation(ManagedConnector connector) {
    if (connector.getSpec().getDeployment().getSecret() == null) {
        LOGGER.info("Secret for deployment not defined");
        return UpdateControl.noUpdate();
    }
    Secret secret = kubernetesClient.secrets()
        .inNamespace(connector.getMetadata().getNamespace())
        .withName(connector.getSpec().getDeployment().getSecret())
        .get();
    if (secret == null) {
        boolean retry = hasCondition(connector, ManagedConnectorConditions.Type.Augmentation,
            ManagedConnectorConditions.Status.False, "SecretNotFound");
        if (!retry) {
            LOGGER.debug("Unable to find secret with name: {}", connector.getSpec().getDeployment().getSecret());
            setCondition(connector, ManagedConnectorConditions.Type.Augmentation, ManagedConnectorConditions.Status.False,
                "SecretNotFound", "Unable to find secret with name: " + connector.getSpec().getDeployment().getSecret());
            setCondition(connector, ManagedConnectorConditions.Type.Ready, ManagedConnectorConditions.Status.False,
                "AugmentationError", "AugmentationError");
            return UpdateControl.updateStatus(connector);
        } else {
            return UpdateControl.<ManagedConnector>noUpdate().rescheduleAfter(1500, TimeUnit.MILLISECONDS);
        }
    } else {
        final String connectorUow = connector.getSpec().getDeployment().getUnitOfWork();
        final String secretUow = secret.getMetadata().getLabels().get(Resources.LABEL_UOW);
        if (!Objects.equals(connectorUow, secretUow)) {
            boolean retry = hasCondition(connector, ManagedConnectorConditions.Type.Augmentation,
                ManagedConnectorConditions.Status.False, "SecretUoWMismatch");
            if (!retry) {
                LOGGER.debug("Secret and Connector UoW mismatch (connector: {}, secret: {})", connectorUow, secretUow);
                setCondition(connector, ManagedConnectorConditions.Type.Augmentation, ManagedConnectorConditions.Status.False,
                    "SecretUoWMismatch", "Secret and Connector UoW mismatch (connector: " + connectorUow + ", secret: " + secretUow + ")");
                setCondition(connector, ManagedConnectorConditions.Type.Ready, ManagedConnectorConditions.Status.False,
                    "AugmentationError", "AugmentationError");
                return UpdateControl.updateStatus(connector);
            } else {
                return UpdateControl.<ManagedConnector>noUpdate().rescheduleAfter(1500, TimeUnit.MILLISECONDS);
            }
        }
    }
    List<HasMetadata> resources;
    try {
        resources = operandController.reify(connector, secret);
    } catch (Exception e) {
        LOGGER.warn("Error reifying deployment {}", connector.getSpec().getDeploymentId(), e);
        setCondition(connector, ManagedConnectorConditions.Type.Augmentation, ManagedConnectorConditions.Status.False,
            "ReifyFailed", e instanceof WrappedRuntimeException ? e.getCause().getMessage() : e.getMessage());
        setCondition(connector, ManagedConnectorConditions.Type.Stopping, ManagedConnectorConditions.Status.True,
            "Stopping", "Stopping");
        connector.getStatus().setDeployment(connector.getSpec().getDeployment());
        connector.getStatus().setPhase(ManagedConnectorStatus.PhaseType.Stopping);
        connector.getStatus().getConnectorStatus().setPhase(STATE_FAILED);
        connector.getStatus().getConnectorStatus().setConditions(Collections.emptyList());
        return UpdateControl.updateStatus(connector);
    }
    for (var resource : resources) {
        if (resource.getMetadata().getLabels() == null) {
            resource.getMetadata().setLabels(new HashMap<>());
        }
        if (resource.getMetadata().getAnnotations() == null) {
            resource.getMetadata().setAnnotations(new HashMap<>());
        }
        ManagedConnectorSpec spec = connector.getSpec();
        final String rv = String.valueOf(spec.getDeployment().getDeploymentResourceVersion());
        final Map<String, String> labels = KubernetesResourceUtil.getOrCreateLabels(resource);
        labels.put(LABEL_CONNECTOR_OPERATOR, connector.getStatus().getConnectorStatus().getAssignedOperator().getId());
        labels.put(LABEL_CONNECTOR_ID, spec.getConnectorId());
        labels.put(LABEL_CONNECTOR_TYPE_ID, spec.getDeployment().getConnectorTypeId());
        labels.put(LABEL_DEPLOYMENT_ID, spec.getDeploymentId());
        labels.put(LABEL_CLUSTER_ID, spec.getClusterId());
        labels.put(LABEL_OPERATOR_TYPE, managedConnectorOperator.getSpec().getType());
        labels.put(LABEL_OPERATOR_OWNER, managedConnectorOperator.getMetadata().getName());
        labels.put(LABEL_DEPLOYMENT_RESOURCE_VERSION, rv);
        // Kubernetes recommended labels
        labels.put(LABEL_KUBERNETES_NAME, spec.getConnectorId());
        labels.put(LABEL_KUBERNETES_INSTANCE, spec.getDeploymentId());
        labels.put(LABEL_KUBERNETES_VERSION, rv);
        labels.put(LABEL_KUBERNETES_COMPONENT, Resources.COMPONENT_CONNECTOR);
        labels.put(LABEL_KUBERNETES_PART_OF, spec.getClusterId());
        labels.put(LABEL_KUBERNETES_MANAGED_BY, managedConnectorOperator.getMetadata().getName());
        labels.put(LABEL_KUBERNETES_CREATED_BY, managedConnectorOperator.getMetadata().getName());
        config.connectors().targetLabels().ifPresent(items -> {
            for (String item : items) {
                copyLabel(item, connector, resource);
            }
        });
        config.connectors().targetAnnotations().ifPresent(items -> {
            for (String item : items) {
                copyAnnotation(item, connector, resource);
            }
        });
        resource.getMetadata().setOwnerReferences(List.of(new OwnerReferenceBuilder()
            .withApiVersion(connector.getApiVersion())
            .withKind(connector.getKind())
            .withName(connector.getMetadata().getName())
            .withUid(connector.getMetadata().getUid())
            .withAdditionalProperties(Map.of("namespace", connector.getMetadata().getNamespace()))
            .withBlockOwnerDeletion(true)
            .build()));
        var result = kubernetesClient.resource(resource)
            .inNamespace(connector.getMetadata().getNamespace())
            .createOrReplace();
        LOGGER.debug("Resource {}:{}:{}@{} updated/created",
            result.getApiVersion(), result.getKind(), result.getMetadata().getName(), result.getMetadata().getNamespace());
    }
    connector.getStatus().setDeployment(connector.getSpec().getDeployment());
    connector.getStatus().setPhase(ManagedConnectorStatus.PhaseType.Monitor);
    connector.getStatus().getConnectorStatus().setConditions(Collections.emptyList());
    setCondition(connector, ManagedConnectorConditions.Type.Resync, false);
    setCondition(connector, ManagedConnectorConditions.Type.Monitor, true);
    setCondition(connector, ManagedConnectorConditions.Type.Ready, true);
    setCondition(connector, ManagedConnectorConditions.Type.Augmentation, true);
    return UpdateControl.updateStatus(connector);
}
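Before each reified resource is created or replaced, the controller stamps it with operator bookkeeping labels plus the Kubernetes recommended labels (app.kubernetes.io/*). The following is a reduced sketch of that step, assuming the fabric8 kubernetes-model types are on the classpath; the managed-by value is an example, not the operator's actual name.

// Illustrative sketch: applying the Kubernetes recommended labels to a reified resource.
// Assumes io.fabric8:kubernetes-model is available; the managed-by value is an example.
import io.fabric8.kubernetes.api.model.HasMetadata;

import java.util.HashMap;
import java.util.Map;

class ConnectorLabels {

    static void apply(HasMetadata resource, String connectorId, String deploymentId, String resourceVersion) {
        Map<String, String> labels = resource.getMetadata().getLabels();
        if (labels == null) {
            labels = new HashMap<>();
            resource.getMetadata().setLabels(labels);
        }
        labels.put("app.kubernetes.io/name", connectorId);
        labels.put("app.kubernetes.io/instance", deploymentId);
        labels.put("app.kubernetes.io/version", resourceVersion);
        labels.put("app.kubernetes.io/managed-by", "cos-fleetshard-operator"); // example value
    }
}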
Use of org.bf2.cos.fleetshard.support.resources.Resources in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class ConnectorProvisionerTest, method updateResources:
@Test
void updateResources() {
    //
    // Given that the resources associated with the provided deployment exist
    //
    final ConnectorDeployment oldDeployment = createDeployment(0);
    final List<ManagedConnector> connectors = List.of(new ManagedConnectorBuilder()
        .withMetadata(new ObjectMetaBuilder()
            .withName(Connectors.generateConnectorId(oldDeployment.getId()))
            .addToLabels(LABEL_CLUSTER_ID, CLUSTER_ID)
            .addToLabels(LABEL_CONNECTOR_ID, oldDeployment.getSpec().getConnectorId())
            .addToLabels(LABEL_DEPLOYMENT_ID, oldDeployment.getId())
            .build())
        .build());
    final List<Secret> secrets = List.of(new SecretBuilder()
        .withMetadata(new ObjectMetaBuilder()
            .withName(Secrets.generateConnectorSecretId(oldDeployment.getId()))
            .addToLabels(LABEL_CLUSTER_ID, CLUSTER_ID)
            .addToLabels(LABEL_CONNECTOR_ID, oldDeployment.getSpec().getConnectorId())
            .addToLabels(LABEL_DEPLOYMENT_ID, oldDeployment.getId())
            .addToLabels(LABEL_DEPLOYMENT_RESOURCE_VERSION, "" + oldDeployment.getMetadata().getResourceVersion())
            .build())
        .build());
    final FleetShardClient fleetShard = ConnectorTestSupport.fleetShard(CLUSTER_ID, connectors, secrets);
    final FleetManagerClient fleetManager = ConnectorTestSupport.fleetManagerClient();
    final FleetShardSyncConfig config = ConnectorTestSupport.config();
    final MeterRegistry registry = Mockito.mock(MeterRegistry.class);
    final ConnectorDeploymentProvisioner provisioner = new ConnectorDeploymentProvisioner(config, fleetShard, fleetManager, registry);
    final ArgumentCaptor<Secret> sc = ArgumentCaptor.forClass(Secret.class);
    final ArgumentCaptor<ManagedConnector> mcc = ArgumentCaptor.forClass(ManagedConnector.class);
    //
    // When the deployment is updated
    //
    final ConnectorDeployment newDeployment = createDeployment(0, d -> {
        d.getSpec().getKafka().setUrl("my-kafka.acme.com:218");
        ((ObjectNode) d.getSpec().getConnectorSpec()).with("connector").put("foo", "connector-baz");
        ((ObjectNode) d.getSpec().getShardMetadata()).put("connector_image", "quay.io/mcs_dev/aws-s3-sink:0.1.0");
    });
    provisioner.provision(newDeployment);
    verify(fleetShard).createSecret(sc.capture());
    verify(fleetShard).createConnector(mcc.capture());
    //
    // Then the existing resources must be updated to reflect the changes made to the
    // deployment. This scenario could happen when a resource on the connector cluster
    // is amended outside the control of the fleet manager (e.g. with kubectl); in such
    // a case, the expected behavior is that the resource is reset to the configuration
    // from the fleet manager.
    //
    assertThat(sc.getValue()).satisfies(val -> {
        assertThat(val.getMetadata().getName()).isEqualTo(Secrets.generateConnectorSecretId(oldDeployment.getId()));
        assertThat(val.getMetadata().getLabels())
            .containsEntry(LABEL_CLUSTER_ID, CLUSTER_ID)
            .containsEntry(LABEL_CONNECTOR_ID, newDeployment.getSpec().getConnectorId())
            .containsEntry(LABEL_DEPLOYMENT_ID, newDeployment.getId())
            .containsEntry(LABEL_DEPLOYMENT_RESOURCE_VERSION, "" + newDeployment.getMetadata().getResourceVersion())
            .containsKey(LABEL_UOW);
        assertThat(val.getData())
            .containsKey(Secrets.SECRET_ENTRY_SERVICE_ACCOUNT)
            .containsKey(Secrets.SECRET_ENTRY_CONNECTOR);
        var serviceAccountNode = Secrets.extract(val, Secrets.SECRET_ENTRY_SERVICE_ACCOUNT, ServiceAccount.class);
        assertThat(serviceAccountNode.getClientSecret()).isEqualTo(newDeployment.getSpec().getServiceAccount().getClientSecret());
        assertThat(serviceAccountNode.getClientId()).isEqualTo(newDeployment.getSpec().getServiceAccount().getClientId());
        var connectorNode = Secrets.extract(val, Secrets.SECRET_ENTRY_CONNECTOR);
        assertThatJson(connectorNode).inPath("connector.foo").isEqualTo("connector-baz");
        assertThatJson(connectorNode).inPath("kafka.topic").isEqualTo("kafka-foo");
        var metaNode = Secrets.extract(val, Secrets.SECRET_ENTRY_META);
        assertThatJson(metaNode).isObject()
            .containsKey("connector_type")
            .containsKey("connector_image")
            .containsKey("kamelets")
            .containsKey("operators");
    });
    assertThat(mcc.getValue()).satisfies(val -> {
        assertThat(val.getMetadata().getName()).isEqualTo(Connectors.generateConnectorId(oldDeployment.getId()));
        assertThat(val.getMetadata().getLabels())
            .containsEntry(LABEL_CLUSTER_ID, CLUSTER_ID)
            .containsEntry(LABEL_CONNECTOR_ID, oldDeployment.getSpec().getConnectorId())
            .containsEntry(LABEL_DEPLOYMENT_ID, oldDeployment.getId())
            .containsKey(LABEL_UOW);
        assertThat(val.getSpec().getDeployment()).satisfies(d -> {
            assertThat(d.getDeploymentResourceVersion()).isEqualTo(oldDeployment.getMetadata().getResourceVersion());
            assertThat(d.getDeploymentResourceVersion()).isEqualTo(newDeployment.getMetadata().getResourceVersion());
            assertThat(d.getSecret()).isEqualTo(sc.getValue().getMetadata().getName());
            assertThat(d.getUnitOfWork()).isNotEmpty().isEqualTo(sc.getValue().getMetadata().getLabels().get(LABEL_UOW));
            assertThat(d.getKafka().getUrl()).isNotEmpty().isEqualTo(newDeployment.getSpec().getKafka().getUrl());
        });
    });
}
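The test verifies the provisioner's effect by capturing the Secret and ManagedConnector that are handed to the fleet-shard client with Mockito ArgumentCaptor, then asserting on the captured values. Here is a minimal sketch of that capture-then-assert pattern, shown on a made-up client interface rather than the project's FleetShardClient.

// Illustrative sketch: Mockito's capture-then-assert pattern, shown on a hypothetical client.
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import org.mockito.ArgumentCaptor;

class CaptorPatternSketch {

    // Hypothetical stand-in for the real client interface.
    interface Client {
        void createSecret(String name);
    }

    public static void main(String[] args) {
        Client client = mock(Client.class);
        client.createSecret("my-connector-secret");

        ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
        verify(client).createSecret(captor.capture()); // fails if createSecret was never invoked
        System.out.println(captor.getValue());         // "my-connector-secret", ready for assertions
    }
}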
Use of org.bf2.cos.fleetshard.support.resources.Resources in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class ConnectorProvisionerTest, method updateAndCreateResources:
@Test
void updateAndCreateResources() {
    //
    // Given that the resources associated with the provided deployment exist
    //
    final ConnectorDeployment oldDeployment = createDeployment(0);
    final List<ManagedConnector> connectors = List.of(new ManagedConnectorBuilder()
        .withMetadata(new ObjectMetaBuilder()
            .withName(Connectors.generateConnectorId(oldDeployment.getId()))
            .addToLabels(LABEL_CLUSTER_ID, CLUSTER_ID)
            .addToLabels(LABEL_CONNECTOR_ID, oldDeployment.getSpec().getConnectorId())
            .addToLabels(LABEL_DEPLOYMENT_ID, oldDeployment.getId())
            .build())
        .build());
    final List<Secret> secrets = List.of(new SecretBuilder()
        .withMetadata(new ObjectMetaBuilder()
            .withName(Secrets.generateConnectorSecretId(oldDeployment.getId()))
            .addToLabels(LABEL_CLUSTER_ID, CLUSTER_ID)
            .addToLabels(LABEL_CONNECTOR_ID, oldDeployment.getSpec().getConnectorId())
            .addToLabels(LABEL_DEPLOYMENT_ID, oldDeployment.getId())
            .addToLabels(LABEL_DEPLOYMENT_RESOURCE_VERSION, "" + oldDeployment.getMetadata().getResourceVersion())
            .build())
        .build());
    final FleetShardClient fleetShard = ConnectorTestSupport.fleetShard(CLUSTER_ID, connectors, secrets);
    final FleetManagerClient fleetManager = ConnectorTestSupport.fleetManagerClient();
    final FleetShardSyncConfig config = ConnectorTestSupport.config();
    final MeterRegistry registry = Mockito.mock(MeterRegistry.class);
    final ConnectorDeploymentProvisioner provisioner = new ConnectorDeploymentProvisioner(config, fleetShard, fleetManager, registry);
    final ArgumentCaptor<Secret> sc = ArgumentCaptor.forClass(Secret.class);
    final ArgumentCaptor<ManagedConnector> mcc = ArgumentCaptor.forClass(ManagedConnector.class);
    //
    // When a change to the deployment happens that results in a new resource version
    //
    final ConnectorDeployment newDeployment = createDeployment(1, d -> {
        d.getMetadata().setResourceVersion(1L);
        d.getSpec().getKafka().setUrl("my-kafka.acme.com:218");
        ((ObjectNode) d.getSpec().getConnectorSpec()).with("connector").put("foo", "connector-baz");
        ((ObjectNode) d.getSpec().getShardMetadata()).put("connector_image", "quay.io/mcs_dev/aws-s3-sink:0.1.0");
    });
    provisioner.provision(newDeployment);
    verify(fleetShard).createSecret(sc.capture());
    verify(fleetShard).createConnector(mcc.capture());
    //
    // Then the managed connector resource is expected to be updated to reflect the
    // changes made to the deployment
    //
    assertThat(sc.getValue()).satisfies(val -> {
        assertThat(val.getMetadata().getName()).isEqualTo(Secrets.generateConnectorSecretId(oldDeployment.getId()));
        assertThat(val.getMetadata().getLabels())
            .containsEntry(LABEL_CLUSTER_ID, CLUSTER_ID)
            .containsEntry(LABEL_CONNECTOR_ID, newDeployment.getSpec().getConnectorId())
            .containsEntry(LABEL_DEPLOYMENT_ID, newDeployment.getId())
            .containsEntry(LABEL_DEPLOYMENT_RESOURCE_VERSION, "" + newDeployment.getMetadata().getResourceVersion())
            .containsKey(LABEL_UOW);
        assertThat(val.getData())
            .containsKey(Secrets.SECRET_ENTRY_SERVICE_ACCOUNT)
            .containsKey(Secrets.SECRET_ENTRY_CONNECTOR);
        var serviceAccountNode = Secrets.extract(val, Secrets.SECRET_ENTRY_SERVICE_ACCOUNT, ServiceAccount.class);
        assertThat(serviceAccountNode.getClientSecret()).isEqualTo(newDeployment.getSpec().getServiceAccount().getClientSecret());
        assertThat(serviceAccountNode.getClientId()).isEqualTo(newDeployment.getSpec().getServiceAccount().getClientId());
        var connectorNode = Secrets.extract(val, Secrets.SECRET_ENTRY_CONNECTOR);
        assertThatJson(connectorNode).inPath("connector.foo").isEqualTo("connector-baz");
        assertThatJson(connectorNode).inPath("kafka.topic").isEqualTo("kafka-foo");
        var metaNode = Secrets.extract(val, Secrets.SECRET_ENTRY_META);
        assertThatJson(metaNode).isObject()
            .containsKey("connector_type")
            .containsKey("connector_image")
            .containsKey("kamelets")
            .containsKey("operators");
    });
    assertThat(mcc.getValue()).satisfies(val -> {
        assertThat(val.getMetadata().getName()).isEqualTo(Connectors.generateConnectorId(oldDeployment.getId()));
        assertThat(val.getMetadata().getLabels())
            .containsEntry(LABEL_CLUSTER_ID, CLUSTER_ID)
            .containsEntry(LABEL_CONNECTOR_ID, oldDeployment.getSpec().getConnectorId())
            .containsEntry(LABEL_DEPLOYMENT_ID, oldDeployment.getId())
            .containsKey(LABEL_UOW);
        assertThat(val.getSpec().getDeployment()).satisfies(d -> {
            assertThat(d.getDeploymentResourceVersion()).isEqualTo(newDeployment.getMetadata().getResourceVersion());
            assertThat(d.getDeploymentResourceVersion()).isNotEqualTo(oldDeployment.getMetadata().getResourceVersion());
            assertThat(d.getSecret()).isEqualTo(sc.getValue().getMetadata().getName());
            assertThat(d.getUnitOfWork()).isNotEmpty().isEqualTo(sc.getValue().getMetadata().getLabels().get(LABEL_UOW));
            assertThat(d.getKafka().getUrl()).isNotEmpty().isEqualTo(newDeployment.getSpec().getKafka().getUrl());
        });
    });
}
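Both provisioner tests also assert that the ManagedConnector's deployment spec records the same unit-of-work value that was written to the secret's UoW label, which is what ties a connector to the exact secret revision it must be reified from; handleAugmentation performs the same check via Resources.LABEL_UOW. A reduced sketch of that consistency check follows; the label key used here is an example value, not the project's constant.

// Illustrative sketch: the unit-of-work consistency check, reduced to a plain comparison.
// The label key is an example value; the project reads it from Resources.LABEL_UOW.
import java.util.Map;
import java.util.Objects;

class UnitOfWorkCheck {

    static final String LABEL_UOW = "cos.bf2.org/uow"; // example key

    static boolean isInSync(String connectorUnitOfWork, Map<String, String> secretLabels) {
        return Objects.equals(connectorUnitOfWork, secretLabels.get(LABEL_UOW));
    }

    public static void main(String[] args) {
        System.out.println(isInSync("uow-123", Map.of(LABEL_UOW, "uow-123"))); // true  -> in sync
        System.out.println(isInSync("uow-123", Map.of(LABEL_UOW, "uow-456"))); // false -> out of sync, retry
    }
}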