Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class NamespaceProvisionerTest, method nameIsSanitized.
@Test
void nameIsSanitized() {
    //
    // Given that no resources associated to the provided deployment exist
    //
    final ConnectorNamespace namespace = new ConnectorNamespace();
    namespace.id(uid());
    namespace.name("--eval");

    ConnectorNamespaceTenant tenant = new ConnectorNamespaceTenant()
        .id(uid())
        .kind(ConnectorNamespaceTenantKind.ORGANISATION);

    namespace.setStatus(new ConnectorNamespaceStatus1()
        .state(ConnectorNamespaceState.READY)
        .connectorsDeployed(0));
    namespace.setTenant(tenant);
    namespace.setExpiration(new Date().toString());

    final List<ManagedConnector> connectors = List.of();
    final List<Secret> secrets = List.of();

    final FleetShardClient fleetShard = ConnectorTestSupport.fleetShard(CLUSTER_ID, connectors, secrets);
    final FleetManagerClient fleetManager = ConnectorTestSupport.fleetManagerClient();
    final FleetShardSyncConfig config = ConnectorTestSupport.config();
    final MeterRegistry registry = Mockito.mock(MeterRegistry.class);

    final ConnectorNamespaceProvisioner provisioner = new ConnectorNamespaceProvisioner(
        config, fleetShard, fleetManager, registry);

    final ArgumentCaptor<Namespace> nc = ArgumentCaptor.forClass(Namespace.class);

    //
    // When deployment is applied
    //
    provisioner.provision(namespace);

    verify(fleetShard).createNamespace(nc.capture());

    //
    // Then resources must be created according to the deployment
    //
    assertThat(nc.getValue()).satisfies(val -> {
        assertThat(val.getMetadata().getLabels()).containsEntry(LABEL_KUBERNETES_NAME, "a--eval");
    });
}
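The assertion above pins down the rule being tested: "--eval" starts with a character that is not valid at the beginning of a Kubernetes label value, so the provisioner is expected to prefix it, producing "a--eval". A minimal sketch of a sanitizer with that behavior (the method name and the exact prefix rule are assumptions, not the project's actual implementation):

// Hypothetical sketch only: Kubernetes label values must begin with an
// alphanumeric character, so a non-conforming name gets a safe prefix.
public static String sanitizeLabelValue(String name) {
    if (name == null || name.isEmpty()) {
        return name;
    }
    return Character.isLetterOrDigit(name.charAt(0))
        ? name
        : "a" + name; // "--eval" -> "a--eval", matching the assertion above
}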
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class NamespaceProvisionerWithNoQuotaTestBase, method namespaceIsProvisioned.
@Test
void namespaceIsProvisioned() {
    final Config cfg = ConfigProvider.getConfig();
    final String nsId1 = cfg.getValue("test.ns.id.1", String.class);
    final NamespacedName pullSecret = new NamespacedName(
        client.generateNamespaceId(nsId1),
        config.imagePullSecretsName());

    RestAssured.given()
        .contentType(MediaType.TEXT_PLAIN)
        .body(0L)
        .post("/test/provisioner/namespaces");

    Namespace ns = until(
        () -> fleetShardClient.getNamespace(nsId1),
        Objects::nonNull);

    assertThat(ns).satisfies(item -> {
        assertThat(item.getMetadata().getName())
            .isEqualTo(client.generateNamespaceId(nsId1));
        assertThat(item.getMetadata().getLabels())
            .containsEntry(Resources.LABEL_CLUSTER_ID, fleetShardClient.getClusterId())
            .containsEntry(Resources.LABEL_NAMESPACE_ID, nsId1)
            .containsEntry(Resources.LABEL_KUBERNETES_MANAGED_BY, fleetShardClient.getClusterId())
            .containsEntry(Resources.LABEL_KUBERNETES_CREATED_BY, fleetShardClient.getClusterId())
            .containsEntry(Resources.LABEL_KUBERNETES_PART_OF, fleetShardClient.getClusterId())
            .containsEntry(Resources.LABEL_KUBERNETES_COMPONENT, Resources.COMPONENT_NAMESPACE)
            .containsEntry(Resources.LABEL_KUBERNETES_INSTANCE, nsId1)
            .containsKey(Resources.LABEL_UOW);
        assertThat(item.getMetadata().getAnnotations())
            .containsEntry(Resources.ANNOTATION_NAMESPACE_QUOTA, "false");
    });

    until(
        () -> fleetShardClient.getSecret(pullSecret)
            .filter(ps -> Objects.equals(
                ps.getMetadata().getLabels().get(Resources.LABEL_UOW),
                ns.getMetadata().getLabels().get(Resources.LABEL_UOW))),
        Objects::nonNull);

    untilAsserted(() -> {
        ResourceQuota answer = fleetShardClient.getKubernetesClient()
            .resourceQuotas()
            .inNamespace(ns.getMetadata().getName())
            .withName(ns.getMetadata().getName() + "-quota")
            .get();
        assertThat(answer).isNull();
    });

    untilAsserted(() -> {
        LimitRange answer = fleetShardClient.getKubernetesClient()
            .limitRanges()
            .inNamespace(ns.getMetadata().getName())
            .withName(ns.getMetadata().getName() + "-limits")
            .get();
        assertThat(answer).isNull();
    });
}
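The until(...) and untilAsserted(...) calls above come from the test's base class; a minimal sketch of what such a polling helper might look like (the 30-second timeout and 250 ms interval are illustrative assumptions, not the real helper's values):

import java.util.function.Predicate;
import java.util.function.Supplier;

// Poll the supplier until the predicate accepts its value or the deadline
// passes; a sketch of the assumed semantics, not the project's helper.
static <T> T until(Supplier<T> supplier, Predicate<T> condition) {
    long deadline = System.currentTimeMillis() + 30_000L;
    while (System.currentTimeMillis() < deadline) {
        T value = supplier.get();
        if (condition.test(value)) {
            return value;
        }
        try {
            Thread.sleep(250L);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IllegalStateException("interrupted while polling", e);
        }
    }
    throw new AssertionError("condition not met within timeout");
}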
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class DebeziumOperandController, method doReify.
@Override
protected List<HasMetadata> doReify(
        ManagedConnector connector,
        DebeziumShardMetadata shardMetadata,
        ConnectorConfiguration<ObjectNode, DebeziumDataShape> connectorConfiguration,
        ServiceAccountSpec serviceAccountSpec) {

    final Map<String, String> secretsData = createSecretsData(connectorConfiguration.getConnectorSpec());

    final Secret secret = new SecretBuilder()
        .withMetadata(new ObjectMetaBuilder()
            .withName(connector.getMetadata().getName() + Resources.CONNECTOR_SECRET_SUFFIX)
            .build())
        .addToData(EXTERNAL_CONFIG_FILE, asBytesBase64(secretsData))
        .addToData(KAFKA_CLIENT_SECRET_KEY, serviceAccountSpec.getClientSecret())
        .build();

    ConfigMap kafkaConnectMetricsConfigMap = new ConfigMapBuilder()
        .withNewMetadata()
            .withName(connector.getMetadata().getName() + KAFKA_CONNECT_METRICS_CONFIGMAP_NAME_SUFFIX)
        .endMetadata()
        .addToData(METRICS_CONFIG_FILENAME, METRICS_CONFIG)
        .build();

    final KafkaConnectSpecBuilder kcsb = new KafkaConnectSpecBuilder()
        .withReplicas(1)
        .withBootstrapServers(connector.getSpec().getDeployment().getKafka().getUrl())
        .withKafkaClientAuthenticationPlain(new KafkaClientAuthenticationPlainBuilder()
            .withUsername(serviceAccountSpec.getClientId())
            .withPasswordSecret(new PasswordSecretSourceBuilder()
                .withSecretName(secret.getMetadata().getName())
                .withPassword(KAFKA_CLIENT_SECRET_KEY)
                .build())
            .build())
        .addToConfig(DebeziumConstants.DEFAULT_CONFIG_OPTIONS)
        .addToConfig(new TreeMap<>(configuration.kafkaConnect().config()))
        .addToConfig("group.id", connector.getMetadata().getName())
        .addToConfig(KeyAndValueConverters.getConfig(connectorConfiguration.getDataShapeSpec(), connector, serviceAccountSpec))
        .addToConfig("offset.storage.topic", connector.getMetadata().getName() + "-offset")
        .addToConfig("config.storage.topic", connector.getMetadata().getName() + "-config")
        .addToConfig("status.storage.topic", connector.getMetadata().getName() + "-status")
        .addToConfig("topic.creation.enable", "true")
        .addToConfig("connector.secret.name", secret.getMetadata().getName())
        .addToConfig("connector.secret.checksum", Secrets.computeChecksum(secret))
        .withTls(new ClientTlsBuilder()
            .withTrustedCertificates(Collections.emptyList())
            .build())
        .withTemplate(new KafkaConnectTemplateBuilder()
            .withPod(new PodTemplateBuilder()
                .withImagePullSecrets(configuration.imagePullSecretsName())
                .build())
            .build())
        .withJmxPrometheusExporterMetricsConfig(new JmxPrometheusExporterMetricsBuilder()
            .withValueFrom(new ExternalConfigurationReferenceBuilder()
                .withNewConfigMapKeyRef(METRICS_CONFIG_FILENAME, kafkaConnectMetricsConfigMap.getMetadata().getName(), false)
                .build())
            .build())
        .withExternalConfiguration(new ExternalConfigurationBuilder()
            .addToVolumes(new ExternalConfigurationVolumeSourceBuilder()
                .withName(EXTERNAL_CONFIG_DIRECTORY)
                .withSecret(new SecretVolumeSourceBuilder()
                    .withSecretName(secret.getMetadata().getName())
                    .build())
                .build())
            .build())
        .withResources(new ResourceRequirementsBuilder()
            .addToRequests("cpu", new Quantity("10m"))
            .addToRequests("memory", new Quantity("256Mi"))
            .addToLimits("cpu", new Quantity("500m"))
            .addToLimits("memory", new Quantity("1Gi"))
            .build());

    kcsb.withImage(shardMetadata.getContainerImage());

    final KafkaConnect kc = new KafkaConnectBuilder()
        .withApiVersion(Constants.RESOURCE_GROUP_NAME + "/" + KafkaConnect.CONSUMED_VERSION)
        .withMetadata(new ObjectMetaBuilder()
            .withName(connector.getMetadata().getName())
            .addToAnnotations(STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .build())
        .withSpec(kcsb.build())
        .build();
    Map<String, Object> connectorConfig = createConfig(configuration, connectorConfiguration.getConnectorSpec());

    // handle connector config defaults
    switch (shardMetadata.getConnectorClass()) {
        case CLASS_NAME_POSTGRES_CONNECTOR:
            if (!connectorConfig.containsKey(CONFIG_OPTION_POSTGRES_PLUGIN_NAME)) {
                connectorConfig.put(CONFIG_OPTION_POSTGRES_PLUGIN_NAME, PLUGIN_NAME_PGOUTPUT);
            }
            break;
        default:
            break;
    }
    if (isDatabaseHistorySupported(shardMetadata)) {
        final Map<String, Object> databaseHistoryConfigs = new LinkedHashMap<>();
        databaseHistoryConfigs.put("database.history.kafka.bootstrap.servers",
            connector.getSpec().getDeployment().getKafka().getUrl());
        databaseHistoryConfigs.put("database.history.kafka.topic",
            connector.getMetadata().getName() + "-database-history");
        databaseHistoryConfigs.put("database.history.producer.security.protocol", "SASL_SSL");
        databaseHistoryConfigs.put("database.history.consumer.security.protocol", "SASL_SSL");
        databaseHistoryConfigs.put("database.history.producer.sasl.mechanism", "PLAIN");
        databaseHistoryConfigs.put("database.history.consumer.sasl.mechanism", "PLAIN");
        databaseHistoryConfigs.put("database.history.producer.sasl.jaas.config",
            "org.apache.kafka.common.security.plain.PlainLoginModule required username=\""
                + serviceAccountSpec.getClientId() + "\" password=\""
                + "${dir:/opt/kafka/external-configuration/" + EXTERNAL_CONFIG_DIRECTORY + ":" + KAFKA_CLIENT_SECRET_KEY + "}\";");
        databaseHistoryConfigs.put("database.history.consumer.sasl.jaas.config",
            "org.apache.kafka.common.security.plain.PlainLoginModule required username=\""
                + serviceAccountSpec.getClientId() + "\" password=\""
                + "${dir:/opt/kafka/external-configuration/" + EXTERNAL_CONFIG_DIRECTORY + ":" + KAFKA_CLIENT_SECRET_KEY + "}\";");
        connectorConfig.putAll(databaseHistoryConfigs);
    }
    final KafkaConnector kctr = new KafkaConnectorBuilder()
        .withApiVersion(Constants.RESOURCE_GROUP_NAME + "/" + KafkaConnector.CONSUMED_VERSION)
        .withMetadata(new ObjectMetaBuilder()
            .withName(connector.getMetadata().getName())
            .addToLabels(STRIMZI_DOMAIN + "cluster", connector.getMetadata().getName())
            .build())
        .withSpec(new KafkaConnectorSpecBuilder()
            .withClassName(shardMetadata.getConnectorClass())
            .withTasksMax(1)
            .withPause(false)
            .withConfig(connectorConfig)
            .addToConfig("topic.creation.default.replication.factor", -1)
            .addToConfig("topic.creation.default.partitions", -1)
            .addToConfig("topic.creation.default.cleanup.policy", "compact")
            .addToConfig("topic.creation.default.compression.type", "lz4")
            .addToConfig("topic.creation.default.delete.retention.ms", 2_678_400_000L)
            .build())
        .build();

    return List.of(secret, kafkaConnectMetricsConfigMap, kc, kctr);
}
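The asBytesBase64(secretsData) call near the top of the method packs the connector's secret properties into a single Secret data entry. A plausible sketch of such a helper, assuming a properties-style serialization (the serialization format is an assumption; only the base64 requirement for Kubernetes Secret data values is a given):

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.Map;
import java.util.stream.Collectors;

// Hedged sketch, not the project's implementation: render the map as a
// properties-style document and base64-encode it, since Kubernetes Secret
// data values must be base64-encoded.
static String asBytesBase64(Map<String, String> data) {
    String payload = data.entrySet().stream()
        .map(e -> e.getKey() + "=" + e.getValue())
        .collect(Collectors.joining("\n"));
    return Base64.getEncoder().encodeToString(payload.getBytes(StandardCharsets.UTF_8));
}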
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project kafka-admin-api by bf2fc6cc711aee1a0c2a.
The class TopicOperations, method getTopicList.
public CompletionStage<Types.TopicList> getTopicList(
        KafkaAdminClient ac,
        Pattern pattern,
        Types.DeprecatedPageRequest pageRequest,
        Types.TopicSortParams orderByInput) {

    Promise<Set<String>> describeTopicsNamesPromise = Promise.promise();
    Promise<Map<String, io.vertx.kafka.admin.TopicDescription>> describeTopicsPromise = Promise.promise();
    Promise<Map<ConfigResource, Config>> describeTopicConfigPromise = Promise.promise();
    Promise<Types.TopicList> prom = Promise.promise();

    List<ConfigResource> configResourceList = new ArrayList<>();
    List<Types.Topic> fullDescription = new ArrayList<>();

    ac.listTopics(describeTopicsNamesPromise);

    describeTopicsNamesPromise.future().compose(topics -> {
        List<String> filteredList = topics.stream()
            .filter(topicName -> CommonHandler.byName(pattern, prom).test(topicName))
            .collect(Collectors.toList());
        ac.describeTopics(filteredList, describeTopicsPromise);
        return describeTopicsPromise.future();
    }).compose(topics -> {
        topics.entrySet().forEach(topicWithDescription -> {
            Types.Topic desc = getTopicDesc(topicWithDescription.getValue());
            fullDescription.add(desc);
            ConfigResource resource = new ConfigResource(
                org.apache.kafka.common.config.ConfigResource.Type.TOPIC, desc.getName());
            configResourceList.add(resource);
        });
        ac.describeConfigs(configResourceList, describeTopicConfigPromise);
        return describeTopicConfigPromise.future();
    }).compose(topicsConfigurations -> {
        List<Types.Topic> fullTopicDescriptions = new ArrayList<>();
        fullDescription.forEach(topicWithDescription -> {
            ConfigResource resource = new ConfigResource(
                org.apache.kafka.common.config.ConfigResource.Type.TOPIC, topicWithDescription.getName());
            Config cfg = topicsConfigurations.get(resource);
            topicWithDescription.setConfig(getTopicConf(cfg));
            fullTopicDescriptions.add(topicWithDescription);
        });

        if (Types.SortDirectionEnum.DESC.equals(orderByInput.getOrder())) {
            fullTopicDescriptions.sort(new CommonHandler.TopicComparator(orderByInput.getField()).reversed());
        } else {
            fullTopicDescriptions.sort(new CommonHandler.TopicComparator(orderByInput.getField()));
        }

        Types.TopicList topicList = new Types.TopicList();
        List<Types.Topic> croppedList;

        if (pageRequest.isDeprecatedFormat()) {
            // deprecated offset/limit pagination
            if (pageRequest.getOffset() > fullTopicDescriptions.size()) {
                return Future.failedFuture(new InvalidRequestException(
                    "Offset (" + pageRequest.getOffset() + ") cannot be greater than topic list size ("
                        + fullTopicDescriptions.size() + ")"));
            }
            int tmpLimit = pageRequest.getLimit();
            if (tmpLimit == 0) {
                tmpLimit = fullTopicDescriptions.size();
            }
            croppedList = fullTopicDescriptions.subList(
                pageRequest.getOffset(),
                Math.min(pageRequest.getOffset() + tmpLimit, fullTopicDescriptions.size()));
            topicList.setOffset(pageRequest.getOffset());
            topicList.setLimit(pageRequest.getLimit());
            topicList.setCount(croppedList.size());
        } else {
            if (!fullTopicDescriptions.isEmpty()
                    && (pageRequest.getPage() - 1) * pageRequest.getSize() >= fullTopicDescriptions.size()) {
                return Future.failedFuture(new InvalidRequestException(
                    "Requested pagination incorrect. Beginning of list greater than full list size ("
                        + fullTopicDescriptions.size() + ")"));
            }
            croppedList = fullTopicDescriptions.subList(
                (pageRequest.getPage() - 1) * pageRequest.getSize(),
                Math.min(pageRequest.getPage() * pageRequest.getSize(), fullTopicDescriptions.size()));
            topicList.setPage(pageRequest.getPage());
            topicList.setSize(pageRequest.getSize());
            topicList.setTotal(fullTopicDescriptions.size());
        }

        topicList.setItems(croppedList);
        return Future.succeededFuture(topicList);
    }).onComplete(finalRes -> {
        if (finalRes.failed()) {
            prom.fail(finalRes.cause());
        } else {
            prom.complete(finalRes.result());
        }
        ac.close();
    });

    return prom.future().toCompletionStage();
}
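Both pagination branches above reduce to the same windowing arithmetic; a standalone sketch of the page-based variant (pages are 1-indexed; the method name is illustrative):

import java.util.List;

// Page p with size s covers indices [(p - 1) * s, min(p * s, total)); a
// window starting at or past the end of a non-empty list is rejected,
// mirroring the InvalidRequestException check above.
static <T> List<T> page(List<T> items, int pageNumber, int pageSize) {
    int from = (pageNumber - 1) * pageSize;
    if (!items.isEmpty() && from >= items.size()) {
        throw new IllegalArgumentException("requested page starts beyond the end of the list");
    }
    int safeFrom = Math.min(from, items.size()); // empty list: always [0, 0)
    int to = Math.min(pageNumber * pageSize, items.size());
    return items.subList(safeFrom, to);
}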
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project kafka-admin-api by bf2fc6cc711aee1a0c2a.
The class TopicOperations, method getTopicDescAndConf.
private static Promise<Types.Topic> getTopicDescAndConf(KafkaAdminClient ac, String topicToDescribe) {
    Promise<Types.Topic> result = Promise.promise();
    Types.Topic tmp = new Types.Topic();
    ConfigResource resource = new ConfigResource(
        org.apache.kafka.common.config.ConfigResource.Type.TOPIC, topicToDescribe);

    ac.describeTopics(Collections.singletonList(topicToDescribe)).compose(topics -> {
        io.vertx.kafka.admin.TopicDescription topicDesc = topics.get(topicToDescribe);
        return Future.succeededFuture(getTopicDesc(topicDesc));
    }).compose(topic -> {
        tmp.setName(topic.getName());
        tmp.setIsInternal(topic.getIsInternal());
        tmp.setPartitions(topic.getPartitions());
        return Future.succeededFuture();
    }).compose(ignored -> ac.describeConfigs(Collections.singletonList(resource)).compose(topics -> {
        Config cfg = topics.get(resource);
        tmp.setConfig(getTopicConf(cfg));
        return Future.succeededFuture(tmp);
    })).onComplete(f -> {
        if (f.succeeded()) {
            result.complete(f.result());
        } else {
            result.fail(f.cause());
        }
    });

    return result;
}
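A hypothetical caller (adminClient stands in for a configured KafkaAdminClient instance): the returned Promise converts to a Vert.x Future that carries either the fully populated Topic or the failure cause.

// Illustrative usage sketch; "adminClient" and the topic name are placeholders.
getTopicDescAndConf(adminClient, "my-topic").future().onComplete(ar -> {
    if (ar.succeeded()) {
        System.out.println("described topic: " + ar.result().getName());
    } else {
        System.err.println("describe failed: " + ar.cause().getMessage());
    }
});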