use of org.bf2.cos.fleetshard.api.ManagedConnectorBuilder in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
the class ConnectorSteps method a_connector.
@Given("^a Connector with:$")
public void a_connector(Map<String, String> entry) {
final Long drv = Long.parseLong(entry.getOrDefault(ConnectorContext.COS_DEPLOYMENT_RESOURCE_VERSION, "1"));
final Long crv = Long.parseLong(entry.getOrDefault(ConnectorContext.COS_CONNECTOR_RESOURCE_VERSION, "1"));
final String connectorId = entry.getOrDefault(ConnectorContext.COS_CONNECTOR_ID, uid());
final String deploymentId = entry.getOrDefault(ConnectorContext.COS_DEPLOYMENT_ID, uid());
final String clusterId = ctx.clusterId();
var connector = new ManagedConnectorBuilder()
    .withMetadata(new ObjectMetaBuilder()
        .addToLabels(Resources.LABEL_CLUSTER_ID, clusterId)
        .addToLabels(Resources.LABEL_CONNECTOR_ID, connectorId)
        .addToLabels(Resources.LABEL_DEPLOYMENT_ID, deploymentId)
        .addToLabels(Resources.LABEL_OPERATOR_TYPE, entry.get(ConnectorContext.OPERATOR_TYPE))
        .withName(Connectors.generateConnectorId(deploymentId))
        .build())
    .withSpec(new ManagedConnectorSpecBuilder()
        .withClusterId(clusterId)
        .withConnectorId(connectorId)
        .withDeploymentId(deploymentId)
        .withDeployment(new DeploymentSpecBuilder()
            .withConnectorResourceVersion(crv)
            .withConnectorTypeId(entry.get(ConnectorContext.CONNECTOR_TYPE_ID))
            .withDeploymentResourceVersion(drv)
            .withKafka(new KafkaSpecBuilder()
                .withUrl(entry.getOrDefault("kafka.bootstrap", "kafka.acme.com:443"))
                .build())
            .withDesiredState(entry.get(ConnectorContext.DESIRED_STATE))
            .withSecret(Connectors.generateConnectorId(deploymentId) + "-" + drv)
            .build())
        .withOperatorSelector(new OperatorSelectorBuilder()
            .withId(entry.get(ConnectorContext.OPERATOR_ID))
            .withType(entry.get(ConnectorContext.OPERATOR_TYPE))
            .withVersion(entry.get(ConnectorContext.OPERATOR_VERSION))
            .build())
        .build())
    .build();
var secret = new SecretBuilder()
    .withMetadata(new ObjectMetaBuilder()
        .addToLabels(Resources.LABEL_OPERATOR_TYPE, entry.get(ConnectorContext.OPERATOR_TYPE))
        .withName(connector.getMetadata().getName() + "-"
            + connector.getSpec().getDeployment().getDeploymentResourceVersion())
        .build())
    .withData(new HashMap<>())
    .addToData(
        Secrets.SECRET_ENTRY_SERVICE_ACCOUNT,
        Secrets.toBase64(Serialization.asJson(
            Serialization.jsonMapper().createObjectNode()
                .put("client_id", entry.getOrDefault("kafka.client.id", uid()))
                .put("client_secret", entry.getOrDefault("kafka.client.secret", Secrets.toBase64(uid()))))))
    .build();
ctx.connector(connector);
ctx.secret(secret);
}
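The step reads its parameters from the Cucumber data table as a flat Map<String, String> and falls back to defaults for anything not supplied. Below is a minimal sketch of driving it programmatically, assuming an initialized ConnectorSteps instance (steps) whose ConnectorContext is ready; the values are purely illustrative.
// Illustrative only: the keys are the ConnectorContext constants the step actually reads,
// the values are made up for this sketch.
Map<String, String> entry = new HashMap<>();
entry.put(ConnectorContext.COS_DEPLOYMENT_RESOURCE_VERSION, "1");
entry.put(ConnectorContext.COS_CONNECTOR_RESOURCE_VERSION, "1");
entry.put(ConnectorContext.CONNECTOR_TYPE_ID, "aws-s3-sink");
entry.put(ConnectorContext.DESIRED_STATE, "ready");
entry.put(ConnectorContext.OPERATOR_ID, "operator-1");
entry.put(ConnectorContext.OPERATOR_TYPE, "camel-connector-operator");
entry.put(ConnectorContext.OPERATOR_VERSION, "1.0.0");
entry.put("kafka.bootstrap", "kafka.acme.com:443");
steps.a_connector(entry);
In a feature file the same map arrives as the two-column data table attached to the "a Connector with:" step.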
use of org.bf2.cos.fleetshard.api.ManagedConnectorBuilder in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
the class DebeziumOperandControllerTest method reify.
@Test
void reify() {
KubernetesClient kubernetesClient = Mockito.mock(KubernetesClient.class);
DebeziumOperandController controller = new DebeziumOperandController(kubernetesClient, CONFIGURATION);
final String kcsB64 = Base64.getEncoder().encodeToString("kcs".getBytes(StandardCharsets.UTF_8));
final String pwdB64 = Base64.getEncoder().encodeToString("orderpw".getBytes(StandardCharsets.UTF_8));
var spec = Serialization.jsonMapper().createObjectNode()
    .put("database.hostname", "orderdb")
    .put("database.port", "5432")
    .put("database.user", "orderuser")
    .put("database.dbname", "orderdb")
    .put("database.server.name", "dbserver1")
    .put("schema.include.list", "purchaseorder")
    .put("table.include.list", "purchaseorder.outboxevent")
    .put("tombstones.on.delete", "false")
    .put("key.converter", "org.apache.kafka.connect.storage.StringConverter")
    .put("value.converter", "org.apache.kafka.connect.storage.StringConverter")
    .put("transforms", "saga")
    .put("transforms.saga.type", "io.debezium.transforms.outbox.EventRouter")
    .put("transforms.saga.route.topic.replacement", "${routedByValue}.request")
    .put("poll.interval.ms", "100")
    .put("consumer.interceptor.classes", "io.opentracing.contrib.kafka.TracingConsumerInterceptor")
    .put("producer.interceptor.classes", "io.opentracing.contrib.kafka.TracingProducerInterceptor");
spec.with("data_shape").put("key", "JSON").put("value", "JSON");
spec.with("database.password").put("kind", "base64").put("value", pwdB64);
var resources = controller.doReify(
    new ManagedConnectorBuilder()
        .withMetadata(new ObjectMetaBuilder()
            .withName(DEFAULT_MANAGED_CONNECTOR_ID)
            .build())
        .withSpec(new ManagedConnectorSpecBuilder()
            .withConnectorId(DEFAULT_MANAGED_CONNECTOR_ID)
            .withDeploymentId(DEFAULT_DEPLOYMENT_ID)
            .withDeployment(new DeploymentSpecBuilder()
                .withConnectorTypeId(DEFAULT_CONNECTOR_TYPE_ID)
                .withSecret("secret")
                .withKafka(new KafkaSpecBuilder()
                    .withUrl(DEFAULT_KAFKA_SERVER)
                    .build())
                .withConnectorResourceVersion(DEFAULT_CONNECTOR_REVISION)
                .withDeploymentResourceVersion(DEFAULT_DEPLOYMENT_REVISION)
                .withDesiredState(DESIRED_STATE_READY)
                .build())
            .build())
        .build(),
    new org.bf2.cos.fleetshard.operator.debezium.DebeziumShardMetadataBuilder()
        .withContainerImage(DEFAULT_CONNECTOR_IMAGE)
        .withConnectorClass(PG_CLASS)
        .build(),
    new ConnectorConfiguration<>(spec, ObjectNode.class),
    new ServiceAccountSpecBuilder()
        .withClientId(DEFAULT_KAFKA_CLIENT_ID)
        .withClientSecret(kcsB64)
        .build());
assertThat(resources)
    .anyMatch(DebeziumOperandSupport::isKafkaConnect)
    .anyMatch(DebeziumOperandSupport::isKafkaConnector)
    .anyMatch(DebeziumOperandSupport::isSecret);
assertThat(resources).filteredOn(DebeziumOperandSupport::isKafkaConnect).hasSize(1).first().isInstanceOfSatisfying(KafkaConnect.class, kc -> {
assertThat(kc.getSpec().getImage()).isEqualTo(DEFAULT_CONNECTOR_IMAGE);
});
assertThat(resources).filteredOn(DebeziumOperandSupport::isKafkaConnector).hasSize(1).first().isInstanceOfSatisfying(KafkaConnector.class, kc -> {
assertThat(kc.getSpec().getConfig()).containsEntry(
    "database.password",
    "${file:/opt/kafka/external-configuration/" + DebeziumConstants.EXTERNAL_CONFIG_DIRECTORY
        + "/" + EXTERNAL_CONFIG_FILE + ":database.password}");
});
}
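The last assertion pins down the indirection the controller sets up for sensitive values: the KafkaConnector config does not carry the password itself, only a ${file:<path>:<key>} placeholder that Kafka Connect resolves at runtime through its FileConfigProvider against the mounted external-configuration Secret. The sketch below only mimics that lookup for clarity; the path is a placeholder standing in for the value built from DebeziumConstants.EXTERNAL_CONFIG_DIRECTORY and EXTERNAL_CONFIG_FILE, not the controller's actual wiring.
// Sketch only: mimics what FileConfigProvider does for "${file:<path>:database.password}".
static String resolvePlaceholder(String path, String key) throws java.io.IOException {
    java.util.Properties props = new java.util.Properties();
    try (var in = java.nio.file.Files.newInputStream(java.nio.file.Path.of(path))) {
        props.load(in);
    }
    return props.getProperty(key);
}
// e.g. resolvePlaceholder("/opt/kafka/external-configuration/<config-dir>/<config-file>", "database.password")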
use of org.bf2.cos.fleetshard.api.ManagedConnectorBuilder in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
the class ConnectorProvisionerTest method updateResources.
@Test
void updateResources() {
//
// Given that the resources associated to the provided deployment exist
//
final ConnectorDeployment oldDeployment = createDeployment(0);
final List<ManagedConnector> connectors = List.of(new ManagedConnectorBuilder()
    .withMetadata(new ObjectMetaBuilder()
        .withName(Connectors.generateConnectorId(oldDeployment.getId()))
        .addToLabels(LABEL_CLUSTER_ID, CLUSTER_ID)
        .addToLabels(LABEL_CONNECTOR_ID, oldDeployment.getSpec().getConnectorId())
        .addToLabels(LABEL_DEPLOYMENT_ID, oldDeployment.getId())
        .build())
    .build());
final List<Secret> secrets = List.of(new SecretBuilder()
    .withMetadata(new ObjectMetaBuilder()
        .withName(Secrets.generateConnectorSecretId(oldDeployment.getId()))
        .addToLabels(LABEL_CLUSTER_ID, CLUSTER_ID)
        .addToLabels(LABEL_CONNECTOR_ID, oldDeployment.getSpec().getConnectorId())
        .addToLabels(LABEL_DEPLOYMENT_ID, oldDeployment.getId())
        .addToLabels(LABEL_DEPLOYMENT_RESOURCE_VERSION, "" + oldDeployment.getMetadata().getResourceVersion())
        .build())
    .build());
final FleetShardClient fleetShard = ConnectorTestSupport.fleetShard(CLUSTER_ID, connectors, secrets);
final ConnectorDeploymentProvisioner provisioner = new ConnectorDeploymentProvisioner(fleetShard);
final ArgumentCaptor<Secret> sc = ArgumentCaptor.forClass(Secret.class);
final ArgumentCaptor<ManagedConnector> mcc = ArgumentCaptor.forClass(ManagedConnector.class);
//
// When the deployment is updated
//
final ConnectorDeployment newDeployment = createDeployment(0, d -> {
d.getSpec().getKafka().setUrl("my-kafka.acme.com:218");
((ObjectNode) d.getSpec().getConnectorSpec()).with("connector").put("foo", "connector-baz");
((ObjectNode) d.getSpec().getShardMetadata()).put("connector_image", "quay.io/mcs_dev/aws-s3-sink:0.1.0");
});
provisioner.provision(newDeployment);
verify(fleetShard).createSecret(sc.capture());
verify(fleetShard).createConnector(mcc.capture());
//
// Then the existing resources must be updated to reflect the changes made to the
// deployment. This scenario can happen when a resource on the connector cluster
// is amended outside the control of the fleet manager (e.g. with kubectl); in that
// case, the expected behavior is that the resource is reset to the configuration
// coming from the fleet manager.
//
assertThat(sc.getValue()).satisfies(val -> {
assertThat(val.getMetadata().getName()).isEqualTo(Secrets.generateConnectorSecretId(oldDeployment.getId()));
assertThat(val.getMetadata().getLabels())
    .containsEntry(LABEL_CLUSTER_ID, CLUSTER_ID)
    .containsEntry(LABEL_CONNECTOR_ID, newDeployment.getSpec().getConnectorId())
    .containsEntry(LABEL_DEPLOYMENT_ID, newDeployment.getId())
    .containsEntry(LABEL_DEPLOYMENT_RESOURCE_VERSION, "" + newDeployment.getMetadata().getResourceVersion())
    .containsKey(LABEL_UOW);
assertThat(val.getData()).containsKey(Secrets.SECRET_ENTRY_SERVICE_ACCOUNT).containsKey(Secrets.SECRET_ENTRY_CONNECTOR);
var serviceAccountNode = Secrets.extract(val, Secrets.SECRET_ENTRY_SERVICE_ACCOUNT, ServiceAccount.class);
assertThat(serviceAccountNode.getClientSecret()).isEqualTo(newDeployment.getSpec().getServiceAccount().getClientSecret());
assertThat(serviceAccountNode.getClientId()).isEqualTo(newDeployment.getSpec().getServiceAccount().getClientId());
var connectorNode = Secrets.extract(val, Secrets.SECRET_ENTRY_CONNECTOR);
assertThatJson(connectorNode).inPath("connector.foo").isEqualTo("connector-baz");
assertThatJson(connectorNode).inPath("kafka.topic").isEqualTo("kafka-foo");
var metaNode = Secrets.extract(val, Secrets.SECRET_ENTRY_META);
assertThatJson(metaNode).isObject()
    .containsKey("connector_type")
    .containsKey("connector_image")
    .containsKey("kamelets")
    .containsKey("operators");
});
assertThat(mcc.getValue()).satisfies(val -> {
assertThat(val.getMetadata().getName()).isEqualTo(Connectors.generateConnectorId(oldDeployment.getId()));
assertThat(val.getMetadata().getLabels())
    .containsEntry(LABEL_CLUSTER_ID, CLUSTER_ID)
    .containsEntry(LABEL_CONNECTOR_ID, oldDeployment.getSpec().getConnectorId())
    .containsEntry(LABEL_DEPLOYMENT_ID, oldDeployment.getId())
    .containsKey(LABEL_UOW);
assertThat(val.getSpec().getDeployment()).satisfies(d -> {
assertThat(d.getDeploymentResourceVersion()).isEqualTo(oldDeployment.getMetadata().getResourceVersion());
assertThat(d.getDeploymentResourceVersion()).isEqualTo(newDeployment.getMetadata().getResourceVersion());
assertThat(d.getSecret()).isEqualTo(sc.getValue().getMetadata().getName());
assertThat(d.getUnitOfWork()).isNotEmpty().isEqualTo(sc.getValue().getMetadata().getLabels().get(LABEL_UOW));
assertThat(d.getKafka().getUrl()).isNotEmpty().isEqualTo(newDeployment.getSpec().getKafka().getUrl());
});
});
}
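This test and the next one rely on the same Mockito pattern: the FleetShardClient is a mock, the provisioner is exercised, and ArgumentCaptor instances recover the Secret and ManagedConnector handed to createSecret/createConnector so the assertions can inspect them. Here is a self-contained sketch of that capture-and-assert pattern with a hypothetical service and repository (names invented for illustration, not taken from the project):
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;

class CaptorPatternSketch {
    // Hypothetical collaborators, only to illustrate the pattern.
    interface Repository { void save(String document); }
    static class Service {
        private final Repository repository;
        Service(Repository repository) { this.repository = repository; }
        void provision(String id) { repository.save("connector-" + id); }
    }

    @Test
    void captureAndAssert() {
        Repository repository = mock(Repository.class);
        new Service(repository).provision("my-deployment");

        // Capture the argument actually passed to the mock, then assert on it.
        ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
        verify(repository).save(captor.capture());
        assertThat(captor.getValue()).isEqualTo("connector-my-deployment");
    }
}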
use of org.bf2.cos.fleetshard.api.ManagedConnectorBuilder in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
the class ConnectorProvisionerTest method updateAndCreateResources.
@Test
void updateAndCreateResources() {
//
// Given that the resources associated to the provided deployment exist
//
final ConnectorDeployment oldDeployment = createDeployment(0);
final List<ManagedConnector> connectors = List.of(new ManagedConnectorBuilder()
    .withMetadata(new ObjectMetaBuilder()
        .withName(Connectors.generateConnectorId(oldDeployment.getId()))
        .addToLabels(LABEL_CLUSTER_ID, CLUSTER_ID)
        .addToLabels(LABEL_CONNECTOR_ID, oldDeployment.getSpec().getConnectorId())
        .addToLabels(LABEL_DEPLOYMENT_ID, oldDeployment.getId())
        .build())
    .build());
final List<Secret> secrets = List.of(new SecretBuilder()
    .withMetadata(new ObjectMetaBuilder()
        .withName(Secrets.generateConnectorSecretId(oldDeployment.getId()))
        .addToLabels(LABEL_CLUSTER_ID, CLUSTER_ID)
        .addToLabels(LABEL_CONNECTOR_ID, oldDeployment.getSpec().getConnectorId())
        .addToLabels(LABEL_DEPLOYMENT_ID, oldDeployment.getId())
        .addToLabels(LABEL_DEPLOYMENT_RESOURCE_VERSION, "" + oldDeployment.getMetadata().getResourceVersion())
        .build())
    .build());
final FleetShardClient fleetShard = ConnectorTestSupport.fleetShard(CLUSTER_ID, connectors, secrets);
final ConnectorDeploymentProvisioner provisioner = new ConnectorDeploymentProvisioner(fleetShard);
final ArgumentCaptor<Secret> sc = ArgumentCaptor.forClass(Secret.class);
final ArgumentCaptor<ManagedConnector> mcc = ArgumentCaptor.forClass(ManagedConnector.class);
//
// When a change to the deployment happens that results in a new resource version
//
final ConnectorDeployment newDeployment = createDeployment(1, d -> {
d.getMetadata().setResourceVersion(1L);
d.getSpec().getKafka().setUrl("my-kafka.acme.com:218");
((ObjectNode) d.getSpec().getConnectorSpec()).with("connector").put("foo", "connector-baz");
((ObjectNode) d.getSpec().getShardMetadata()).put("connector_image", "quay.io/mcs_dev/aws-s3-sink:0.1.0");
});
provisioner.provision(newDeployment);
verify(fleetShard).createSecret(sc.capture());
verify(fleetShard).createConnector(mcc.capture());
//
// Then the managed connector resource is expected to be updated to reflect the
// changes made to the deployment
//
assertThat(sc.getValue()).satisfies(val -> {
assertThat(val.getMetadata().getName()).isEqualTo(Secrets.generateConnectorSecretId(oldDeployment.getId()));
assertThat(val.getMetadata().getLabels())
    .containsEntry(LABEL_CLUSTER_ID, CLUSTER_ID)
    .containsEntry(LABEL_CONNECTOR_ID, newDeployment.getSpec().getConnectorId())
    .containsEntry(LABEL_DEPLOYMENT_ID, newDeployment.getId())
    .containsEntry(LABEL_DEPLOYMENT_RESOURCE_VERSION, "" + newDeployment.getMetadata().getResourceVersion())
    .containsKey(LABEL_UOW);
assertThat(val.getData()).containsKey(Secrets.SECRET_ENTRY_SERVICE_ACCOUNT).containsKey(Secrets.SECRET_ENTRY_CONNECTOR);
var serviceAccountNode = Secrets.extract(val, Secrets.SECRET_ENTRY_SERVICE_ACCOUNT, ServiceAccount.class);
assertThat(serviceAccountNode.getClientSecret()).isEqualTo(newDeployment.getSpec().getServiceAccount().getClientSecret());
assertThat(serviceAccountNode.getClientId()).isEqualTo(newDeployment.getSpec().getServiceAccount().getClientId());
var connectorNode = Secrets.extract(val, Secrets.SECRET_ENTRY_CONNECTOR);
assertThatJson(connectorNode).inPath("connector.foo").isEqualTo("connector-baz");
assertThatJson(connectorNode).inPath("kafka.topic").isEqualTo("kafka-foo");
var metaNode = Secrets.extract(val, Secrets.SECRET_ENTRY_META);
assertThatJson(metaNode).isObject()
    .containsKey("connector_type")
    .containsKey("connector_image")
    .containsKey("kamelets")
    .containsKey("operators");
});
assertThat(mcc.getValue()).satisfies(val -> {
assertThat(val.getMetadata().getName()).isEqualTo(Connectors.generateConnectorId(oldDeployment.getId()));
assertThat(val.getMetadata().getLabels())
    .containsEntry(LABEL_CLUSTER_ID, CLUSTER_ID)
    .containsEntry(LABEL_CONNECTOR_ID, oldDeployment.getSpec().getConnectorId())
    .containsEntry(LABEL_DEPLOYMENT_ID, oldDeployment.getId())
    .containsKey(LABEL_UOW);
assertThat(val.getSpec().getDeployment()).satisfies(d -> {
assertThat(d.getDeploymentResourceVersion()).isEqualTo(newDeployment.getMetadata().getResourceVersion());
assertThat(d.getDeploymentResourceVersion()).isNotEqualTo(oldDeployment.getMetadata().getResourceVersion());
assertThat(d.getSecret()).isEqualTo(sc.getValue().getMetadata().getName());
assertThat(d.getUnitOfWork()).isNotEmpty().isEqualTo(sc.getValue().getMetadata().getLabels().get(LABEL_UOW));
assertThat(d.getKafka().getUrl()).isNotEmpty().isEqualTo(newDeployment.getSpec().getKafka().getUrl());
});
});
}
use of org.bf2.cos.fleetshard.api.ManagedConnectorBuilder in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
the class ConnectorStatusExtractorTest method extractFromConnectorStatus.
/*
 * Test that, if the status sub-resource is provided and the phase is
 * "Monitor", the status extractor computes the phase according to
 * the reported deployment status.
 */
@ParameterizedTest
@MethodSource
void extractFromConnectorStatus(String statusDesiredState, String connectorPhase, String expectedState, List<Condition> conditions) {
var status = ConnectorStatusExtractor.extract(new ManagedConnectorBuilder()
    .withSpec(new ManagedConnectorSpecBuilder()
        .withOperatorSelector(new OperatorSelectorBuilder()
            .withId("1")
            .build())
        .build())
    .withStatus(new ManagedConnectorStatusBuilder()
        .withPhase(ManagedConnectorStatus.PhaseType.Monitor)
        .withDeployment(new DeploymentSpecBuilder()
            .withDeploymentResourceVersion(1L)
            .withDesiredState(statusDesiredState)
            .build())
        .withConnectorStatus(new ConnectorStatusSpecBuilder()
            .withPhase(connectorPhase)
            .withConditions(conditions)
            .build())
        .build())
    .build());
var v1Conditions = conditions.stream().map(ConnectorStatusExtractor::toMetaV1Condition).collect(Collectors.toList());
assertThat(status.getPhase()).isEqualTo(expectedState);
assertThat(status.getConditions()).hasSameSizeAs(conditions).hasSameElementsAs(v1Conditions);
assertThat(status.getResourceVersion()).isEqualTo(1L);
assertThat(status)
    .extracting(ConnectorDeploymentStatus::getOperators)
    .extracting(ConnectorDeploymentStatusOperators::getAssigned)
    .hasAllNullFieldsOrProperties();
assertThat(status)
    .extracting(ConnectorDeploymentStatus::getOperators)
    .extracting(ConnectorDeploymentStatusOperators::getAvailable)
    .hasAllNullFieldsOrProperties();
}
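The parameter sets for this test come from a JUnit 5 @MethodSource factory with the same name as the test method; that factory is not part of this listing. A hypothetical shape for it is sketched below (imports: java.util.List, java.util.stream.Stream, org.junit.jupiter.params.provider.Arguments); the state and phase strings are illustrative placeholders and the condition lists are left empty rather than guessing at the Condition API.
// Hypothetical provider; the real one lives in ConnectorStatusExtractorTest.
static Stream<Arguments> extractFromConnectorStatus() {
    return Stream.of(
        // statusDesiredState, connectorPhase, expectedState, conditions
        Arguments.of("ready", "ready", "ready", List.of()),
        Arguments.of("stopped", "stopped", "stopped", List.of()));
}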