Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class InstanceProfiler, method deployIfNeeded:
private void deployIfNeeded(String name) throws Exception {
    ManagedKafka mk = null;
    Resource<ManagedKafka> mkResource = kafkaCluster.kubeClient()
            .client()
            .resources(ManagedKafka.class)
            .inNamespace(Constants.KAFKA_NAMESPACE)
            .withName(name);
    try {
        mk = mkResource.get();
    } catch (KubernetesClientException e) {
        // the lookup may fail (e.g. the CRD is not installed yet); treat the instance as not deployed
    }
    ManagedKafkaDeployment kd = null;
    if (mk == null) {
        if (!installedProvisioner) {
            // TODO: come up with a better resume logic here - it currently has to recreate everything
            installedProvisioner = true;
            kafkaProvisioner.install();
        }
        kafkaProvisioner.removeClusters(true);
        kd = kafkaProvisioner.deployCluster(name, profilingResult.capacity, profilingResult.config);
    } else {
        // TODO validate config / capacity
        kd = new ManagedKafkaDeployment(mk, kafkaCluster);
        kd.start();
    }
    instanceBootstrap = kd.waitUntilReady();
}
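The method above probes for an existing ManagedKafka and only provisions a new cluster when the lookup comes back empty. The same get-or-create shape, reduced to a self-contained sketch against a plain ConfigMap (the resource type, namespace, and names below are placeholders, assuming a fabric8 6.x client, not the project's actual code):

import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
import io.fabric8.kubernetes.client.KubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClientBuilder;
import io.fabric8.kubernetes.client.KubernetesClientException;

public class GetOrCreateExample {
    public static ConfigMap getOrCreate(KubernetesClient client, String namespace, String name) {
        ConfigMap existing = null;
        try {
            // get() returns null when the resource does not exist
            existing = client.configMaps().inNamespace(namespace).withName(name).get();
        } catch (KubernetesClientException e) {
            // the API server rejected the lookup; treat the resource as absent
        }
        if (existing != null) {
            return existing; // resume with what is already deployed
        }
        ConfigMap fresh = new ConfigMapBuilder()
                .withNewMetadata().withName(name).endMetadata()
                .addToData("created-by", "get-or-create-example")
                .build();
        return client.configMaps().inNamespace(namespace).resource(fresh).create();
    }

    public static void main(String[] args) {
        try (KubernetesClient client = new KubernetesClientBuilder().build()) {
            ConfigMap cm = getOrCreate(client, "default", "profiler-state");
            System.out.println("Using ConfigMap " + cm.getMetadata().getName());
        }
    }
}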
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project srs-fleet-manager by bf2fc6cc711aee1a0c2a.
The class RegistryDeploymentServiceImpl, method init:
@Override
public void init() throws IOException, RegistryDeploymentStorageConflictException, RegistryDeploymentNotFoundException {
    if (deploymentsConfigFile.isEmpty()) {
        return;
    }
    log.info("Loading registry deployments config file from {}", deploymentsConfigFile.get().getAbsolutePath());

    YAMLMapper mapper = new YAMLMapper();
    RegistryDeploymentsConfigList deploymentsConfigList = mapper.readValue(deploymentsConfigFile.get(), RegistryDeploymentsConfigList.class);
    List<RegistryDeploymentCreate> staticDeployments = deploymentsConfigList.getDeployments();

    // validate every static deployment and collect any names that appear more than once
    Set<String> names = new HashSet<>();
    List<String> duplicatedNames = staticDeployments.stream()
            .map(d -> {
                Set<ConstraintViolation<RegistryDeploymentCreate>> errors = validator.validate(d);
                if (!errors.isEmpty()) {
                    throw new ConstraintViolationException(errors);
                }
                return d;
            })
            .filter(d -> !names.add(d.getName()))
            .map(d -> d.getName())
            .collect(Collectors.toList());
    if (!duplicatedNames.isEmpty()) {
        throw new IllegalArgumentException("Error in static deployments config, duplicated deployments name: " + duplicatedNames.toString());
    }

    // index the deployments already known to storage by name
    Map<String, RegistryDeploymentData> currentDeployments = storage.getAllRegistryDeployments().stream()
            .collect(Collectors.toMap(d -> d.getName(), d -> d));
    for (RegistryDeploymentCreate dep : staticDeployments) {
        RegistryDeploymentData deploymentData = currentDeployments.get(dep.getName());
        if (deploymentData == null) {
            // deployment is new
            deploymentData = convertRegistryDeployment.convert(dep);
        } else {
            if (deploymentData.getRegistryDeploymentUrl().equals(dep.getRegistryDeploymentUrl())
                    && deploymentData.getTenantManagerUrl().equals(dep.getTenantManagerUrl())) {
                // no changes in the deployment
                continue;
            }
            deploymentData.setRegistryDeploymentUrl(dep.getRegistryDeploymentUrl());
            deploymentData.setTenantManagerUrl(dep.getTenantManagerUrl());
        }
        createOrUpdateRegistryDeployment(deploymentData);
    }
}
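The duplicate check above relies on Set.add returning false for a value that is already present, so the filter lets through only the second and later occurrences of a name. A stand-alone sketch of the same idiom (the names and data below are made up for illustration):

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class DuplicateNamesExample {
    public static void main(String[] args) {
        List<String> deploymentNames = List.of("eu-west", "us-east", "eu-west", "ap-south", "us-east");

        Set<String> seen = new HashSet<>();
        // Set.add returns false when the element is already in the set,
        // so the filter keeps only the second and later occurrences.
        List<String> duplicated = deploymentNames.stream()
                .filter(name -> !seen.add(name))
                .collect(Collectors.toList());

        // prints [eu-west, us-east]
        System.out.println(duplicated);
    }
}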
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class NamespaceProvisionerWithQuotaAndCustomLimitsTest, method namespaceIsProvisioned:
@Test
void namespaceIsProvisioned() {
    final Config cfg = ConfigProvider.getConfig();
    final String nsId1 = cfg.getValue("test.ns.id.1", String.class);
    final NamespacedName pullSecret = new NamespacedName(client.generateNamespaceId(nsId1), config.imagePullSecretsName());

    RestAssured.given().contentType(MediaType.TEXT_PLAIN).body(0L).post("/test/provisioner/namespaces");

    Namespace ns = until(() -> fleetShardClient.getNamespace(nsId1), Objects::nonNull);

    assertThat(ns).satisfies(item -> {
        assertThat(item.getMetadata().getName()).isEqualTo(client.generateNamespaceId(nsId1));
        assertThat(item.getMetadata().getLabels())
                .containsEntry(Resources.LABEL_CLUSTER_ID, fleetShardClient.getClusterId())
                .containsEntry(Resources.LABEL_NAMESPACE_ID, nsId1)
                .containsEntry(Resources.LABEL_KUBERNETES_MANAGED_BY, fleetShardClient.getClusterId())
                .containsEntry(Resources.LABEL_KUBERNETES_CREATED_BY, fleetShardClient.getClusterId())
                .containsEntry(Resources.LABEL_KUBERNETES_PART_OF, fleetShardClient.getClusterId())
                .containsEntry(Resources.LABEL_KUBERNETES_COMPONENT, Resources.COMPONENT_NAMESPACE)
                .containsEntry(Resources.LABEL_KUBERNETES_INSTANCE, nsId1)
                .containsKey(Resources.LABEL_UOW);
        assertThat(item.getMetadata().getAnnotations()).containsEntry(Resources.ANNOTATION_NAMESPACE_QUOTA, "true");
    });

    until(() -> fleetShardClient.getSecret(pullSecret)
            .filter(ps -> Objects.equals(ps.getMetadata().getLabels().get(Resources.LABEL_UOW), ns.getMetadata().getLabels().get(Resources.LABEL_UOW))),
            Objects::nonNull);

    untilAsserted(() -> {
        return Optional.ofNullable(fleetShardClient.getKubernetesClient().limitRanges()
                .inNamespace(ns.getMetadata().getName()).withName(ns.getMetadata().getName() + "-limits").get());
    }, lr -> {
        assertThat(lr).satisfies(item -> {
            assertThat(item.getMetadata().getLabels()).containsEntry(Resources.LABEL_UOW, ns.getMetadata().getLabels().get(Resources.LABEL_UOW));
            assertThat(item.getSpec().getLimits()).hasSize(1);
            assertThat(item.getSpec().getLimits().get(0).getDefault()).describedAs("LimitRanges (limits)")
                    .containsEntry(ConnectorNamespaceProvisioner.LIMITS_CPU, cfg.getValue("cos.quota.default-limits.cpu", Quantity.class))
                    .containsEntry(ConnectorNamespaceProvisioner.LIMITS_MEMORY, cfg.getValue("cos.quota.default-limits.memory", Quantity.class));
            assertThat(item.getSpec().getLimits().get(0).getDefaultRequest()).describedAs("LimitRanges (request)")
                    .containsEntry(ConnectorNamespaceProvisioner.LIMITS_CPU, cfg.getValue("cos.quota.default-request.cpu", Quantity.class))
                    .containsEntry(ConnectorNamespaceProvisioner.LIMITS_MEMORY, cfg.getValue("cos.quota.default-request.memory", Quantity.class));
        });
    });

    ResourceQuota rq = until(() -> {
        ResourceQuota answer = fleetShardClient.getKubernetesClient().resourceQuotas()
                .inNamespace(ns.getMetadata().getName()).withName(ns.getMetadata().getName() + "-quota").get();
        return Optional.ofNullable(answer);
    }, Objects::nonNull);

    assertThat(rq).satisfies(item -> {
        assertThat(item.getMetadata().getLabels()).containsEntry(Resources.LABEL_UOW, ns.getMetadata().getLabels().get(Resources.LABEL_UOW));
        assertThat(item.getSpec().getHard())
                .containsEntry(ConnectorNamespaceProvisioner.RESOURCE_QUOTA_LIMITS_CPU, new Quantity(cfg.getValue("test.ns.id.1.limits.cpu", String.class)))
                .containsEntry(ConnectorNamespaceProvisioner.RESOURCE_QUOTA_REQUESTS_CPU, new Quantity(cfg.getValue("test.ns.id.1.requests.cpu", String.class)))
                .containsEntry(ConnectorNamespaceProvisioner.RESOURCE_QUOTA_LIMITS_MEMORY, new Quantity(cfg.getValue("test.ns.id.1.limits.memory", String.class)))
                .containsEntry(ConnectorNamespaceProvisioner.RESOURCE_QUOTA_REQUESTS_MEMORY, new Quantity(cfg.getValue("test.ns.id.1.requests.memory", String.class)));
    });
}
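The LimitRange asserted above carries per-container default limits and default requests. For reference, a resource of that shape can be built with the fabric8 builders; this is an illustrative sketch (the method name, key literals, and the "Container" type are assumptions for the example, not the provisioner's actual code):

import io.fabric8.kubernetes.api.model.LimitRange;
import io.fabric8.kubernetes.api.model.LimitRangeBuilder;
import io.fabric8.kubernetes.api.model.Quantity;

public class LimitRangeSketch {
    // Builds a LimitRange like the one the test looks up: per-container
    // default limits and default requests for cpu and memory.
    public static LimitRange defaultLimits(String namespace, Quantity cpuLimit, Quantity memLimit,
                                           Quantity cpuRequest, Quantity memRequest) {
        return new LimitRangeBuilder()
                .withNewMetadata()
                    .withName(namespace + "-limits")
                    .withNamespace(namespace)
                .endMetadata()
                .withNewSpec()
                    .addNewLimit()
                        .withType("Container")
                        .addToDefault("cpu", cpuLimit)
                        .addToDefault("memory", memLimit)
                        .addToDefaultRequest("cpu", cpuRequest)
                        .addToDefaultRequest("memory", memRequest)
                    .endLimit()
                .endSpec()
                .build();
    }
}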
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class NamespaceProvisionerWithQuotaTest, method namespaceIsProvisioned:
@Test
void namespaceIsProvisioned() {
    final Config cfg = ConfigProvider.getConfig();
    final String nsId1 = cfg.getValue("test.ns.id.1", String.class);
    final NamespacedName pullSecret = new NamespacedName(client.generateNamespaceId(nsId1), config.imagePullSecretsName());

    RestAssured.given().contentType(MediaType.TEXT_PLAIN).body(0L).post("/test/provisioner/namespaces");

    Namespace ns = until(() -> fleetShardClient.getNamespace(nsId1), Objects::nonNull);

    assertThat(ns).satisfies(item -> {
        assertThat(item.getMetadata().getName()).isEqualTo(client.generateNamespaceId(nsId1));
        assertThat(item.getMetadata().getLabels())
                .containsEntry(Resources.LABEL_CLUSTER_ID, fleetShardClient.getClusterId())
                .containsEntry(Resources.LABEL_NAMESPACE_ID, nsId1)
                .containsEntry(Resources.LABEL_KUBERNETES_MANAGED_BY, fleetShardClient.getClusterId())
                .containsEntry(Resources.LABEL_KUBERNETES_CREATED_BY, fleetShardClient.getClusterId())
                .containsEntry(Resources.LABEL_KUBERNETES_PART_OF, fleetShardClient.getClusterId())
                .containsEntry(Resources.LABEL_KUBERNETES_COMPONENT, Resources.COMPONENT_NAMESPACE)
                .containsEntry(Resources.LABEL_KUBERNETES_INSTANCE, nsId1)
                .containsKey(Resources.LABEL_UOW);
        assertThat(item.getMetadata().getAnnotations()).containsEntry(Resources.ANNOTATION_NAMESPACE_QUOTA, "true");
    });

    until(() -> fleetShardClient.getSecret(pullSecret)
            .filter(ps -> Objects.equals(ps.getMetadata().getLabels().get(Resources.LABEL_UOW), ns.getMetadata().getLabels().get(Resources.LABEL_UOW))),
            Objects::nonNull);

    untilAsserted(() -> {
        return Optional.ofNullable(fleetShardClient.getKubernetesClient().limitRanges()
                .inNamespace(ns.getMetadata().getName()).withName(ns.getMetadata().getName() + "-limits").get());
    }, lr -> {
        assertThat(lr).satisfies(item -> {
            assertThat(item.getMetadata().getLabels()).containsEntry(Resources.LABEL_UOW, ns.getMetadata().getLabels().get(Resources.LABEL_UOW));
            assertThat(item.getSpec().getLimits()).hasSize(1);
            assertThat(item.getSpec().getLimits().get(0).getDefault()).describedAs("LimitRanges (limits)")
                    .containsEntry(ConnectorNamespaceProvisioner.LIMITS_CPU, new Quantity("0.5"))
                    .containsEntry(ConnectorNamespaceProvisioner.LIMITS_MEMORY, new Quantity("0.5G"));
            assertThat(item.getSpec().getLimits().get(0).getDefaultRequest()).describedAs("LimitRanges (request)")
                    .containsEntry(ConnectorNamespaceProvisioner.LIMITS_CPU, new Quantity("200m"))
                    .containsEntry(ConnectorNamespaceProvisioner.LIMITS_MEMORY, new Quantity("128m"));
        });
    });

    ResourceQuota rq = until(() -> {
        ResourceQuota answer = fleetShardClient.getKubernetesClient().resourceQuotas()
                .inNamespace(ns.getMetadata().getName()).withName(ns.getMetadata().getName() + "-quota").get();
        return Optional.ofNullable(answer);
    }, Objects::nonNull);

    assertThat(rq).satisfies(item -> {
        assertThat(item.getMetadata().getLabels()).containsEntry(Resources.LABEL_UOW, ns.getMetadata().getLabels().get(Resources.LABEL_UOW));
        assertThat(item.getSpec().getHard())
                .containsEntry(ConnectorNamespaceProvisioner.RESOURCE_QUOTA_LIMITS_CPU, cfg.getValue("test.ns.id.1.limits.cpu", Quantity.class))
                .containsEntry(ConnectorNamespaceProvisioner.RESOURCE_QUOTA_REQUESTS_CPU, cfg.getValue("test.ns.id.1.requests.cpu", Quantity.class))
                .containsEntry(ConnectorNamespaceProvisioner.RESOURCE_QUOTA_LIMITS_MEMORY, cfg.getValue("test.ns.id.1.limits.memory", Quantity.class))
                .containsEntry(ConnectorNamespaceProvisioner.RESOURCE_QUOTA_REQUESTS_MEMORY, cfg.getValue("test.ns.id.1.requests.memory", Quantity.class));
    });
}
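Both tests poll with until and untilAsserted instead of asserting immediately, because the provisioner reconciles the namespace asynchronously. A generic polling helper in the same spirit could be written with Awaitility; this is an illustrative sketch with assumed names and timeouts, not the project's own helper:

import java.time.Duration;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.function.Predicate;

import org.awaitility.Awaitility;

public class PollingSupport {
    // Polls the supplier until the predicate accepts a present value, then returns that value.
    public static <T> T until(Callable<Optional<T>> supplier, Predicate<T> predicate) {
        Awaitility.await()
                .atMost(Duration.ofSeconds(30))
                .pollInterval(Duration.ofMillis(250))
                .until(() -> supplier.call().filter(predicate).isPresent());
        try {
            // re-read once the condition has been observed
            return supplier.call().filter(predicate).orElseThrow();
        } catch (Exception e) {
            throw new IllegalStateException("condition met but value could not be re-read", e);
        }
    }
}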
Use of org.bf2.admin.kafka.systemtest.Environment.CONFIG in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class AbstractApicurioConverter, method getAdditionalConfig:
@Override
public Map<String, String> getAdditionalConfig(ManagedConnector config, ServiceAccountSpec serviceAccountSpec) {
    Map<String, String> additionalConfig = new HashMap<>();
    additionalConfig.put("apicurio.auth.service.url", APICURIO_AUTH_SERVICE_URL);
    additionalConfig.put("apicurio.auth.realm", "rhoas");

    SchemaRegistrySpec schemaRegistrySpec = config.getSpec().getDeployment().getSchemaRegistry();
    if (null == schemaRegistrySpec || null == schemaRegistrySpec.getUrl() || schemaRegistrySpec.getUrl().isBlank()) {
        throw new RuntimeException("Can't create a schema-based connector without providing a valid 'schema_registry'");
    }
    String schemaRegistryURL = schemaRegistrySpec.getUrl();
    additionalConfig.put("apicurio.registry.url", schemaRegistryURL);

    additionalConfig.put("apicurio.auth.client.id", serviceAccountSpec.getClientId());
    // the client secret is resolved at runtime by a directory config provider
    // from the mounted external-configuration volume
    additionalConfig.put("apicurio.auth.client.secret",
            "${dir:/opt/kafka/external-configuration/" + DebeziumConstants.EXTERNAL_CONFIG_DIRECTORY + ":" + DebeziumConstants.KAFKA_CLIENT_SECRET_KEY + "}");
    additionalConfig.put("apicurio.registry.auto-register", "true");
    additionalConfig.put("apicurio.registry.find-latest", "true");
    return additionalConfig;
}
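A caller typically layers the returned map over the connector's base configuration, with the converter-supplied entries winning on key collisions. A minimal sketch of that composition (the class name, helper, and base keys below are placeholders for illustration, not taken from the project):

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class ConnectorConfigComposer {
    // Merges converter-provided entries over the base connector configuration;
    // later maps win on key collisions.
    @SafeVarargs
    public static Map<String, String> compose(Map<String, String> base, Map<String, String>... overlays) {
        Map<String, String> merged = new LinkedHashMap<>(base);
        for (Map<String, String> overlay : overlays) {
            merged.putAll(overlay);
        }
        return merged;
    }

    public static void main(String[] args) {
        Map<String, String> base = new HashMap<>();
        base.put("connector.class", "org.example.SomeSourceConnector"); // placeholder
        base.put("tasks.max", "1");

        Map<String, String> apicurio = new HashMap<>();
        apicurio.put("apicurio.registry.url", "https://registry.example.com/apis/registry/v2"); // placeholder URL
        apicurio.put("apicurio.registry.auto-register", "true");

        System.out.println(compose(base, apicurio));
    }
}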