Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class DebeziumOperandController, method doReify:
@Override
protected List<HasMetadata> doReify(
        ManagedConnector connector,
        DebeziumShardMetadata shardMetadata,
        ConnectorConfiguration<ObjectNode, DebeziumDataShape> connectorConfiguration,
        ServiceAccountSpec serviceAccountSpec) {

    final Map<String, String> secretsData = createSecretsData(connectorConfiguration.getConnectorSpec());

    final Secret secret = new SecretBuilder()
        .withMetadata(new ObjectMetaBuilder()
            .withName(connector.getMetadata().getName() + Resources.CONNECTOR_SECRET_SUFFIX)
            .build())
        .addToData(EXTERNAL_CONFIG_FILE, asBytesBase64(secretsData))
        .addToData(KAFKA_CLIENT_SECRET_KEY, serviceAccountSpec.getClientSecret())
        .build();

    ConfigMap kafkaConnectMetricsConfigMap = new ConfigMapBuilder()
        .withNewMetadata()
            .withName(connector.getMetadata().getName() + KAFKA_CONNECT_METRICS_CONFIGMAP_NAME_SUFFIX)
        .endMetadata()
        .addToData(METRICS_CONFIG_FILENAME, METRICS_CONFIG)
        .build();

    final KafkaConnectSpecBuilder kcsb = new KafkaConnectSpecBuilder()
        .withReplicas(1)
        .withBootstrapServers(connector.getSpec().getDeployment().getKafka().getUrl())
        .withKafkaClientAuthenticationPlain(new KafkaClientAuthenticationPlainBuilder()
            .withUsername(serviceAccountSpec.getClientId())
            .withPasswordSecret(new PasswordSecretSourceBuilder()
                .withSecretName(secret.getMetadata().getName())
                .withPassword(KAFKA_CLIENT_SECRET_KEY)
                .build())
            .build())
        .addToConfig(DebeziumConstants.DEFAULT_CONFIG_OPTIONS)
        .addToConfig(new TreeMap<>(configuration.kafkaConnect().config()))
        .addToConfig("group.id", connector.getMetadata().getName())
        .addToConfig(KeyAndValueConverters.getConfig(connectorConfiguration.getDataShapeSpec(), connector, serviceAccountSpec))
        .addToConfig("offset.storage.topic", connector.getMetadata().getName() + "-offset")
        .addToConfig("config.storage.topic", connector.getMetadata().getName() + "-config")
        .addToConfig("status.storage.topic", connector.getMetadata().getName() + "-status")
        .addToConfig("topic.creation.enable", "true")
        .addToConfig("connector.secret.name", secret.getMetadata().getName())
        .addToConfig("connector.secret.checksum", Secrets.computeChecksum(secret))
        .withTls(new ClientTlsBuilder()
            .withTrustedCertificates(Collections.emptyList())
            .build())
        .withTemplate(new KafkaConnectTemplateBuilder()
            .withPod(new PodTemplateBuilder()
                .withImagePullSecrets(configuration.imagePullSecretsName())
                .build())
            .build())
        .withJmxPrometheusExporterMetricsConfig(new JmxPrometheusExporterMetricsBuilder()
            .withValueFrom(new ExternalConfigurationReferenceBuilder()
                .withNewConfigMapKeyRef(METRICS_CONFIG_FILENAME, kafkaConnectMetricsConfigMap.getMetadata().getName(), false)
                .build())
            .build())
        .withExternalConfiguration(new ExternalConfigurationBuilder()
            .addToVolumes(new ExternalConfigurationVolumeSourceBuilder()
                .withName(EXTERNAL_CONFIG_DIRECTORY)
                .withSecret(new SecretVolumeSourceBuilder()
                    .withSecretName(secret.getMetadata().getName())
                    .build())
                .build())
            .build())
        .withResources(new ResourceRequirementsBuilder()
            .addToRequests("cpu", new Quantity("10m"))
            .addToRequests("memory", new Quantity("256Mi"))
            .addToLimits("cpu", new Quantity("500m"))
            .addToLimits("memory", new Quantity("1Gi"))
            .build());

    kcsb.withImage(shardMetadata.getContainerImage());

    final KafkaConnect kc = new KafkaConnectBuilder()
        .withApiVersion(Constants.RESOURCE_GROUP_NAME + "/" + KafkaConnect.CONSUMED_VERSION)
        .withMetadata(new ObjectMetaBuilder()
            .withName(connector.getMetadata().getName())
            .addToAnnotations(STRIMZI_IO_USE_CONNECTOR_RESOURCES, "true")
            .build())
        .withSpec(kcsb.build())
        .build();
    Map<String, Object> connectorConfig = createConfig(configuration, connectorConfiguration.getConnectorSpec());

    // handle connector config defaults
    switch (shardMetadata.getConnectorClass()) {
        case CLASS_NAME_POSTGRES_CONNECTOR:
            if (!connectorConfig.containsKey(CONFIG_OPTION_POSTGRES_PLUGIN_NAME)) {
                connectorConfig.put(CONFIG_OPTION_POSTGRES_PLUGIN_NAME, PLUGIN_NAME_PGOUTPUT);
            }
            break;
        default:
            break;
    }
    if (isDatabaseHistorySupported(shardMetadata)) {
        final Map<String, Object> databaseHistoryConfigs = new LinkedHashMap<>();
        databaseHistoryConfigs.put("database.history.kafka.bootstrap.servers", connector.getSpec().getDeployment().getKafka().getUrl());
        databaseHistoryConfigs.put("database.history.kafka.topic", connector.getMetadata().getName() + "-database-history");
        databaseHistoryConfigs.put("database.history.producer.security.protocol", "SASL_SSL");
        databaseHistoryConfigs.put("database.history.consumer.security.protocol", "SASL_SSL");
        databaseHistoryConfigs.put("database.history.producer.sasl.mechanism", "PLAIN");
        databaseHistoryConfigs.put("database.history.consumer.sasl.mechanism", "PLAIN");
        databaseHistoryConfigs.put("database.history.producer.sasl.jaas.config",
            "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"" + serviceAccountSpec.getClientId()
                + "\" password=\"" + "${dir:/opt/kafka/external-configuration/" + EXTERNAL_CONFIG_DIRECTORY + ":" + KAFKA_CLIENT_SECRET_KEY + "}\";");
        databaseHistoryConfigs.put("database.history.consumer.sasl.jaas.config",
            "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"" + serviceAccountSpec.getClientId()
                + "\" password=\"" + "${dir:/opt/kafka/external-configuration/" + EXTERNAL_CONFIG_DIRECTORY + ":" + KAFKA_CLIENT_SECRET_KEY + "}\";");
        connectorConfig.putAll(databaseHistoryConfigs);
    }
    final KafkaConnector kctr = new KafkaConnectorBuilder()
        .withApiVersion(Constants.RESOURCE_GROUP_NAME + "/" + KafkaConnector.CONSUMED_VERSION)
        .withMetadata(new ObjectMetaBuilder()
            .withName(connector.getMetadata().getName())
            .addToLabels(STRIMZI_DOMAIN + "cluster", connector.getMetadata().getName())
            .build())
        .withSpec(new KafkaConnectorSpecBuilder()
            .withClassName(shardMetadata.getConnectorClass())
            .withTasksMax(1)
            .withPause(false)
            .withConfig(connectorConfig)
            .addToConfig("topic.creation.default.replication.factor", -1)
            .addToConfig("topic.creation.default.partitions", -1)
            .addToConfig("topic.creation.default.cleanup.policy", "compact")
            .addToConfig("topic.creation.default.compression.type", "lz4")
            .addToConfig("topic.creation.default.delete.retention.ms", 2_678_400_000L)
            .build())
        .build();

    return List.of(secret, kafkaConnectMetricsConfigMap, kc, kctr);
}
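For context, a minimal sketch of how a caller might apply the reified resources with a fabric8 KubernetesClient; the client construction, variable names, and namespace are hypothetical and not part of the operator's actual reconcile logic:

// Hypothetical sketch: apply the resources returned by doReify.
// "reifiedResources" stands for the List<HasMetadata> returned above.
KubernetesClient client = new KubernetesClientBuilder().build();
for (HasMetadata resource : reifiedResources) {
    client.resource(resource)
        .inNamespace("connectors")   // hypothetical target namespace
        .createOrReplace();
}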
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class DebeziumOperandControllerTest, method reify:
void reify(String connectorClass, ObjectNode connectorConfig, Consumer<KafkaConnect> kafkaConnectChecks) {
    KubernetesClient kubernetesClient = Mockito.mock(KubernetesClient.class);
    DebeziumOperandController controller = new DebeziumOperandController(kubernetesClient, CONFIGURATION);

    var resources = controller.doReify(
        new ManagedConnectorBuilder()
            .withMetadata(new ObjectMetaBuilder()
                .withName(DEFAULT_MANAGED_CONNECTOR_ID)
                .withUid(MANAGED_CONNECTOR_UID)
                .build())
            .withSpec(new ManagedConnectorSpecBuilder()
                .withConnectorId(DEFAULT_MANAGED_CONNECTOR_ID)
                .withDeploymentId(DEFAULT_DEPLOYMENT_ID)
                .withDeployment(new DeploymentSpecBuilder()
                    .withConnectorTypeId(DEFAULT_CONNECTOR_TYPE_ID)
                    .withSecret("secret")
                    .withKafka(new KafkaSpecBuilder()
                        .withUrl(DEFAULT_KAFKA_SERVER)
                        .build())
                    .withNewSchemaRegistry(SCHEMA_REGISTRY_ID, SCHEMA_REGISTRY_URL)
                    .withConnectorResourceVersion(DEFAULT_CONNECTOR_REVISION)
                    .withDeploymentResourceVersion(DEFAULT_DEPLOYMENT_REVISION)
                    .withDesiredState(DESIRED_STATE_READY)
                    .build())
                .build())
            .build(),
        new org.bf2.cos.fleetshard.operator.debezium.DebeziumShardMetadataBuilder()
            .withContainerImage(DEFAULT_CONNECTOR_IMAGE)
            .withConnectorClass(connectorClass)
            .build(),
        new ConnectorConfiguration<>(connectorConfig, ObjectNode.class, DebeziumDataShape.class),
        new ServiceAccountSpecBuilder()
            .withClientId(CLIENT_ID)
            .withClientSecret(CLIENT_SECRET)
            .build());
    assertThat(resources)
        .anyMatch(DebeziumOperandSupport::isKafkaConnect)
        .anyMatch(DebeziumOperandSupport::isKafkaConnector)
        .anyMatch(DebeziumOperandSupport::isSecret)
        .anyMatch(DebeziumOperandSupport::isConfigMap);

    assertThat(resources)
        .filteredOn(DebeziumOperandSupport::isKafkaConnect)
        .hasSize(1)
        .first()
        .isInstanceOfSatisfying(KafkaConnect.class, kc -> {
            assertThat(kc.getSpec().getImage()).isEqualTo(DEFAULT_CONNECTOR_IMAGE);
            assertThat(kc.getSpec().getTemplate().getPod().getImagePullSecrets()).contains(CONFIGURATION.imagePullSecretsName());
            assertThat(kc.getSpec().getMetricsConfig().getType()).isEqualTo("jmxPrometheusExporter");
            assertThat(kc.getSpec().getMetricsConfig()).isInstanceOfSatisfying(JmxPrometheusExporterMetrics.class, jmxMetricsConfig -> {
                assertThat(jmxMetricsConfig.getValueFrom().getConfigMapKeyRef().getKey())
                    .isEqualTo(DebeziumOperandController.METRICS_CONFIG_FILENAME);
                assertThat(jmxMetricsConfig.getValueFrom().getConfigMapKeyRef().getName())
                    .isEqualTo(DEFAULT_MANAGED_CONNECTOR_ID + DebeziumOperandController.KAFKA_CONNECT_METRICS_CONFIGMAP_NAME_SUFFIX);
            });
        });

    assertThat(resources)
        .filteredOn(DebeziumOperandSupport::isConfigMap)
        .hasSize(1)
        .first()
        .isInstanceOfSatisfying(ConfigMap.class, configMap -> {
            assertThat(configMap.getData()).containsKey(DebeziumOperandController.METRICS_CONFIG_FILENAME);
            assertThat(configMap.getData().get(DebeziumOperandController.METRICS_CONFIG_FILENAME))
                .isEqualTo(DebeziumOperandController.METRICS_CONFIG);
        });

    assertThat(resources)
        .filteredOn(DebeziumOperandSupport::isKafkaConnector)
        .hasSize(1)
        .first()
        .isInstanceOfSatisfying(KafkaConnector.class, kctr -> {
            assertThat(kctr.getSpec().getConfig()).containsEntry("database.password",
                "${file:/opt/kafka/external-configuration/" + DebeziumConstants.EXTERNAL_CONFIG_DIRECTORY + "/" + EXTERNAL_CONFIG_FILE + ":database.password}");
            if (PG_CLASS.equals(connectorClass)) {
                // Specifically test the plugin name for PostgreSQL
                assertThat(kctr.getSpec().getConfig().get(DebeziumOperandController.CONFIG_OPTION_POSTGRES_PLUGIN_NAME))
                    .isEqualTo(DebeziumOperandController.PLUGIN_NAME_PGOUTPUT);
            }
            if (MYSQL_CLASS.equals(connectorClass)) {
                // Specifically test database history does not pass secrets directly
                assertThat(kctr.getSpec().getConfig().get("database.history.consumer.sasl.jaas.config"))
                    .isEqualTo("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"" + CLIENT_ID
                        + "\" password=\"${dir:/opt/kafka/external-configuration/" + DebeziumConstants.EXTERNAL_CONFIG_DIRECTORY + ":" + KAFKA_CLIENT_SECRET_KEY + "}\";");
                assertThat(kctr.getSpec().getConfig().get("database.history.producer.sasl.jaas.config"))
                    .isEqualTo("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"" + CLIENT_ID
                        + "\" password=\"${dir:/opt/kafka/external-configuration/" + DebeziumConstants.EXTERNAL_CONFIG_DIRECTORY + ":" + KAFKA_CLIENT_SECRET_KEY + "}\";");
            }
        });

    assertThat(resources)
        .filteredOn(DebeziumOperandSupport::isKafkaConnect)
        .hasSize(1)
        .first()
        .isInstanceOfSatisfying(KafkaConnect.class, kafkaConnectChecks);
}
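Since reify takes the connector class, the raw connector spec, and a Consumer of extra KafkaConnect checks, a hypothetical invocation might look like the following; the spec fields and the assertion are made up for illustration, and Jackson's ObjectMapper is assumed to be on the test classpath:

// Hypothetical call; connector spec values are illustrative only.
ObjectNode spec = new ObjectMapper().createObjectNode()
    .put("database.hostname", "example-db")
    .put("database.password", "example-password");
reify(PG_CLASS, spec, kc -> assertThat(kc.getSpec().getReplicas()).isEqualTo(1));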
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kafka-admin-api by bf2fc6cc711aee1a0c2a.
The class RecordOperations, method produceRecord:
public CompletionStage<Types.Record> produceRecord(String topicName, Types.Record input) {
    String key = input.getKey();
    List<Header> headers = input.getHeaders() != null
        ? input.getHeaders().entrySet().stream().map(h -> new Header() {
            @Override
            public String key() {
                return h.getKey();
            }

            @Override
            public byte[] value() {
                return h.getValue().getBytes();
            }
        }).collect(Collectors.toList())
        : Collections.emptyList();

    CompletableFuture<Types.Record> promise = new CompletableFuture<>();
    Producer<String, String> producer = clientFactory.createProducer();
    ProducerRecord<String, String> request = new ProducerRecord<>(topicName, input.getPartition(),
        stringToTimestamp(input.getTimestamp()), key, input.getValue(), headers);
    producer.send(request, (meta, exception) -> {
        if (exception != null) {
            promise.completeExceptionally(exception);
        } else {
            Types.Record result = new Types.Record();
            result.setPartition(meta.partition());
            if (meta.hasOffset()) {
                result.setOffset(meta.offset());
            }
            if (meta.hasTimestamp()) {
                result.setTimestamp(timestampToString(meta.timestamp()));
            }
            result.setKey(input.getKey());
            result.setValue(input.getValue());
            result.setHeaders(input.getHeaders());
            promise.complete(result);
        }
    });
    return promise.whenComplete((result, exception) -> {
        try {
            producer.close(Duration.ZERO);
        } catch (Exception e) {
            log.warnf(e, "Exception closing Kafka Producer");
        }
    });
}
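A short usage sketch of produceRecord; the topic, payload, and variable names are hypothetical, and the CompletionStage resolves once the broker acknowledges the send:

// Hypothetical usage; values are illustrative only.
Types.Record record = new Types.Record();
record.setKey("order-1");
record.setValue("{\"total\":42}");
recordOperations.produceRecord("orders", record)
    .thenAccept(r -> log.infof("Produced to partition %d at offset %d", r.getPartition(), r.getOffset()))
    .exceptionally(error -> {
        log.error("Failed to produce record", error);
        return null;
    });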
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kafka-admin-api by bf2fc6cc711aee1a0c2a.
The class DeploymentManager, method deployKafka:
private KafkaContainer<?> deployKafka() {
    LOGGER.info("Deploying Kafka container");

    Map<String, String> env = new HashMap<>();
    try (InputStream stream = getClass().getResourceAsStream("/kafka-oauth/env.properties")) {
        Properties envProps = new Properties();
        envProps.load(stream);
        envProps.keySet().stream().map(Object::toString).forEach(key -> env.put(key, envProps.getProperty(key)));
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }

    String imageTag = System.getProperty("strimzi-kafka.tag");

    var container = new KeycloakSecuredKafkaContainer(imageTag)
        .withLabels(Collections.singletonMap("test-ident", Environment.TEST_CONTAINER_LABEL))
        .withLogConsumer(new Slf4jLogConsumer(LoggerFactory.getLogger("systemtests.oauth-kafka"), true))
        .withCreateContainerCmdModifier(cmd -> cmd.withName(name("oauth-kafka")))
        .withEnv(env)
        .withNetwork(testNetwork)
        .withClasspathResourceMapping("/certs/cluster.keystore.p12", "/opt/kafka/certs/cluster.keystore.p12", BindMode.READ_ONLY)
        .withClasspathResourceMapping("/certs/cluster.truststore.p12", "/opt/kafka/certs/cluster.truststore.p12", BindMode.READ_ONLY)
        .withClasspathResourceMapping("/kafka-oauth/config/", "/opt/kafka/config/strimzi/", BindMode.READ_ONLY)
        .withCopyFileToContainer(MountableFile.forClasspathResource("/kafka-oauth/scripts/functions.sh"), "/opt/kafka/functions.sh")
        .withCopyFileToContainer(MountableFile.forClasspathResource("/kafka-oauth/scripts/simple_kafka_config.sh", 0755), "/opt/kafka/simple_kafka_config.sh")
        .withCopyFileToContainer(MountableFile.forClasspathResource("/kafka-oauth/scripts/start.sh", 0755), "/opt/kafka/start.sh")
        .withCommand("/opt/kafka/start.sh");

    container.start();
    return container;
}
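A sketch of how a test might consume deployKafka in its lifecycle; the try/finally cleanup is a hypothetical pattern, with stop() inherited from Testcontainers' GenericContainer:

// Hypothetical test-lifecycle usage of deployKafka().
KafkaContainer<?> kafka = deployKafka();
try {
    // ... run system tests against the OAuth-secured broker ...
} finally {
    kafka.stop();   // GenericContainer#stop() from Testcontainers
}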
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class Canary, method buildEnvVar:
private List<EnvVar> buildEnvVar(ManagedKafka managedKafka, Deployment current) {
    List<EnvVar> envVars = new ArrayList<>(10);

    String bootstrap = getBootstrapURL(managedKafka);
    envVars.add(new EnvVarBuilder().withName("KAFKA_BOOTSTRAP_SERVERS").withValue(bootstrap).build());
    envVars.add(new EnvVarBuilder().withName("RECONCILE_INTERVAL_MS").withValue("5000").build());
    envVars.add(new EnvVarBuilder().withName("EXPECTED_CLUSTER_SIZE").withValue(String.valueOf(kafkaCluster.getReplicas(managedKafka))).build());

    String kafkaVersion = managedKafka.getSpec().getVersions().getKafka();
    // Keep the current Kafka version if the canary Deployment already exists: during a Kafka
    // upgrade the canary, like any other client, does not have to change versions.
    if (current != null) {
        Optional<EnvVar> kafkaVersionEnvVar = current.getSpec().getTemplate().getSpec().getContainers().stream()
            .filter(container -> "canary".equals(container.getName()))
            .findFirst()
            .get()
            .getEnv().stream()
            .filter(ev -> "KAFKA_VERSION".equals(ev.getName()))
            .findFirst();
        if (kafkaVersionEnvVar.isPresent()) {
            kafkaVersion = kafkaVersionEnvVar.get().getValue();
        }
    }
envVars.add(new EnvVarBuilder().withName("KAFKA_VERSION").withValue(kafkaVersion).build());
envVars.add(new EnvVarBuilder().withName("TZ").withValue("UTC").build());
envVars.add(new EnvVarBuilder().withName("TLS_ENABLED").withValue("true").build());
envVars.add(new EnvVarBuilder().withName("TLS_CA_CERT").withValue("/tmp/tls-ca-cert/ca.crt").build());
// Deprecated
EnvVarSource saramaLogEnabled = new EnvVarSourceBuilder().editOrNewConfigMapKeyRef().withName(CANARY_CONFIG_CONFIGMAP_NAME).withKey("sarama.log.enabled").withOptional(Boolean.TRUE).endConfigMapKeyRef().build();
EnvVarSource verbosityLogLevel = new EnvVarSourceBuilder().editOrNewConfigMapKeyRef().withName(CANARY_CONFIG_CONFIGMAP_NAME).withKey("verbosity.log.level").withOptional(Boolean.TRUE).endConfigMapKeyRef().build();
EnvVarSource goDebug = new EnvVarSourceBuilder().editOrNewConfigMapKeyRef().withName(CANARY_CONFIG_CONFIGMAP_NAME).withKey("go.debug").withOptional(Boolean.TRUE).endConfigMapKeyRef().build();
envVars.add(new EnvVarBuilder().withName("SARAMA_LOG_ENABLED").withValueFrom(saramaLogEnabled).build());
envVars.add(new EnvVarBuilder().withName("VERBOSITY_LOG_LEVEL").withValueFrom(verbosityLogLevel).build());
envVars.add(new EnvVarBuilder().withName("GODEBUG").withValueFrom(goDebug).build());
envVars.add(new EnvVarBuilder().withName("TOPIC").withValue(config.getCanary().getTopic()).build());
envVars.add(new EnvVarBuilder().withName("TOPIC_CONFIG").withValue("retention.ms=600000;segment.bytes=16384").build());
envVars.add(new EnvVarBuilder().withName("CLIENT_ID").withValue(config.getCanary().getClientId()).build());
envVars.add(new EnvVarBuilder().withName("CONSUMER_GROUP_ID").withValue(config.getCanary().getConsumerGroupId()).build());
envVars.add(new EnvVarBuilder().withName("PRODUCER_LATENCY_BUCKETS").withValue(producerLatencyBuckets).build());
envVars.add(new EnvVarBuilder().withName("ENDTOEND_LATENCY_BUCKETS").withValue(endToEndLatencyBuckets).build());
envVars.add(new EnvVarBuilder().withName("CONNECTION_CHECK_LATENCY_BUCKETS").withValue(connectionCheckLatencyBuckets).build());
envVars.add(new EnvVarBuilder().withName("DYNAMIC_CONFIG_FILE").withValue(CANARY_DYNAMIC_CONFIG_JSON.toString()).build());
if (SecuritySecretManager.isCanaryServiceAccountPresent(managedKafka)) {
envVars.add(new EnvVarBuilder().withName("SASL_MECHANISM").withValue("PLAIN").build());
addEnvVarFromSecret(envVars, "SASL_USER", SecuritySecretManager.canarySaslSecretName(managedKafka), SecuritySecretManager.SASL_PRINCIPAL);
addEnvVarFromSecret(envVars, "SASL_PASSWORD", SecuritySecretManager.canarySaslSecretName(managedKafka), SecuritySecretManager.SASL_PASSWORD);
}
envVars.add(new EnvVarBuilder().withName("STATUS_TIME_WINDOW_MS").withValue(String.valueOf(statusTimeWindowMs)).build());
return envVars;
}
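The addEnvVarFromSecret helper referenced above is not shown in this snippet; a plausible sketch using the same fabric8 builders, with the signature inferred from the call sites (so treat it as an assumption, not the project's actual code):

// Hypothetical implementation inferred from the call sites above.
private void addEnvVarFromSecret(List<EnvVar> envVars, String envName, String secretName, String secretKey) {
    envVars.add(new EnvVarBuilder()
        .withName(envName)
        .withValueFrom(new EnvVarSourceBuilder()
            .withNewSecretKeyRef(secretKey, secretName, Boolean.FALSE)
            .build())
        .build());
}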