Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class KafkaClusterTest, method testManagedKafkaToKafkaWithSizeChanges:
@Test
void testManagedKafkaToKafkaWithSizeChanges() throws IOException {
    KafkaInstanceConfiguration config = kafkaCluster.getKafkaConfiguration();
    try {
        ObjectMapper objectMapper = new ObjectMapper();
        KafkaInstanceConfiguration clone = objectMapper.readValue(
                objectMapper.writeValueAsString(config), KafkaInstanceConfiguration.class);
        clone.getKafka().setOneInstancePerNode(false);
        clone.getKafka().setColocateWithZookeeper(false);
        clone.getExporter().setColocateWithZookeeper(false);
        kafkaCluster.setKafkaConfiguration(clone);

        Kafka kafka = kafkaCluster.kafkaFrom(exampleManagedKafka("60Gi"), null);

        Kafka reduced = kafkaCluster.kafkaFrom(exampleManagedKafka("40Gi"), kafka);
        // should not change to a smaller size
        diffToExpected(reduced, "/expected/strimzi.yml");

        Kafka larger = kafkaCluster.kafkaFrom(exampleManagedKafka("80Gi"), kafka);
        // should change to a larger size
        diffToExpected(larger, "/expected/strimzi.yml",
                "[{\"op\":\"replace\",\"path\":\"/spec/kafka/config/client.quota.callback.static.storage.soft\",\"value\":\"28633115306\"},"
                + "{\"op\":\"replace\",\"path\":\"/spec/kafka/config/client.quota.callback.static.storage.hard\",\"value\":\"28675058306\"},"
                + "{\"op\":\"replace\",\"path\":\"/spec/kafka/storage/volumes/0/size\",\"value\":\"39412476546\"}]");
    } finally {
        kafkaCluster.setKafkaConfiguration(config);
    }
}
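The no-shrink rule exercised by this test can be shown in isolation. Below is a minimal, self-contained sketch of the comparison, assuming sizes are reduced to plain byte counts; the StorageSizing class and effectiveSize helper are hypothetical names for illustration, not part of the operator:

import java.math.BigDecimal;

public class StorageSizing {

    // Returns the size to apply: the requested size when it grows the volume,
    // otherwise the current size, so an existing volume is never shrunk.
    static BigDecimal effectiveSize(BigDecimal current, BigDecimal requested) {
        return requested.compareTo(current) > 0 ? requested : current;
    }

    public static void main(String[] args) {
        BigDecimal current = new BigDecimal("60");
        System.out.println(effectiveSize(current, new BigDecimal("40"))); // 60: no shrink
        System.out.println(effectiveSize(current, new BigDecimal("80"))); // 80: grows
    }
}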
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project kas-fleetshard by bf2fc6cc711aee1a0c2a.
The class LogCollector, method saveClusterState:
private static void saveClusterState(Path logpath) throws IOException {
    KubeClient kube = KubeClient.getInstance();
    Files.writeString(logpath.resolve("describe-cluster-nodes.log"),
            kube.cmdClient().exec(false, false, "describe", "nodes").out());
    Files.writeString(logpath.resolve("all-events.log"),
            kube.cmdClient().exec(false, false, "get", "events", "--all-namespaces").out());
    Files.writeString(logpath.resolve("pvs.log"),
            kube.cmdClient().exec(false, false, "describe", "pv").out());
    Files.writeString(logpath.resolve("operator-routes.yml"),
            kube.cmdClient().exec(false, false, "get", "routes", "-n", FleetShardOperatorManager.OPERATOR_NS, "-o", "yaml").out());
    Files.writeString(logpath.resolve("operator-services.yml"),
            kube.cmdClient().exec(false, false, "get", "service", "-n", FleetShardOperatorManager.OPERATOR_NS, "-o", "yaml").out());
    Files.writeString(logpath.resolve("kas-fleetshard-operator-pods.yml"),
            kube.cmdClient().exec(false, false, "get", "pod", "-l", "app=" + FleetShardOperatorManager.OPERATOR_NAME, "--all-namespaces", "-o", "yaml").out());
    Files.writeString(logpath.resolve("strimzi-kafka-pods.yml"),
            kube.cmdClient().exec(false, false, "get", "pod", "-l", "app.kubernetes.io/managed-by=strimzi-cluster-operator", "--all-namespaces", "-o", "yaml").out());
    Files.writeString(logpath.resolve("managedkafkas.yml"),
            kube.cmdClient().exec(false, false, "get", "managedkafka", "--all-namespaces", "-o", "yaml").out());
    Files.writeString(logpath.resolve("kafkas.yml"),
            kube.cmdClient().exec(false, false, "get", "kafka", "-l", "app.kubernetes.io/managed-by=" + FleetShardOperatorManager.OPERATOR_NAME, "--all-namespaces", "-o", "yaml").out());
    Files.writeString(logpath.resolve("pods-managed-by-operator.yml"),
            kube.cmdClient().exec(false, false, "get", "pods", "-l", "app.kubernetes.io/managed-by=" + FleetShardOperatorManager.OPERATOR_NAME, "--all-namespaces", "-o", "yaml").out());
    Files.writeString(logpath.resolve("operator-namespace-events.yml"),
            kube.cmdClient().exec(false, false, "get", "events", "-n", FleetShardOperatorManager.OPERATOR_NS).out());
    Files.writeString(logpath.resolve("operator.log"),
            kube.cmdClient().exec(false, false, "logs", "deployment/" + FleetShardOperatorManager.OPERATOR_NAME, "-n", FleetShardOperatorManager.OPERATOR_NS).out());
    Files.writeString(logpath.resolve("sync.log"),
            kube.cmdClient().exec(false, false, "logs", "deployment/" + FleetShardOperatorManager.SYNC_NAME, "-n", FleetShardOperatorManager.OPERATOR_NS).out());
    StrimziOperatorManager.getStrimziOperatorPods().forEach(pod -> {
        try {
            Files.writeString(logpath.resolve(pod.getMetadata().getName() + ".log"),
                    kube.cmdClient().exec(false, false, "logs", pod.getMetadata().getName(), "--tail", "-1", "-n", pod.getMetadata().getNamespace()).out());
        } catch (Exception e) {
            LOGGER.warn("Cannot get logs from pod {} in namespace {}",
                    pod.getMetadata().getName(), pod.getMetadata().getNamespace());
        }
    });
}
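Every line above repeats the same write-command-output-to-file pattern. A small helper makes that pattern reusable; the sketch below is self-contained and shells out to kubectl directly (the ClusterStateDumper class and dump helper are hypothetical, and it assumes kubectl is on the PATH, unlike the KubeClient abstraction used in the real method):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class ClusterStateDumper {

    // Runs a kubectl command and writes its combined stdout/stderr to the given file.
    static void dump(Path logPath, String fileName, String... kubectlArgs)
            throws IOException, InterruptedException {
        String[] cmd = new String[kubectlArgs.length + 1];
        cmd[0] = "kubectl";
        System.arraycopy(kubectlArgs, 0, cmd, 1, kubectlArgs.length);
        Process process = new ProcessBuilder(cmd).redirectErrorStream(true).start();
        String out = new String(process.getInputStream().readAllBytes());
        process.waitFor();
        Files.writeString(logPath.resolve(fileName), out);
    }

    public static void main(String[] args) throws Exception {
        Path logPath = Files.createTempDirectory("cluster-state");
        dump(logPath, "all-events.log", "get", "events", "--all-namespaces");
        dump(logPath, "pvs.log", "describe", "pv");
        System.out.println("Cluster state written to " + logPath);
    }
}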
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class DebeziumOperandControllerTest, method reify:
@Test
void reify() {
    KubernetesClient kubernetesClient = Mockito.mock(KubernetesClient.class);
    DebeziumOperandController controller = new DebeziumOperandController(kubernetesClient, CONFIGURATION);
    final String kcsB64 = Base64.getEncoder().encodeToString("kcs".getBytes(StandardCharsets.UTF_8));
    final String pwdB64 = Base64.getEncoder().encodeToString("orderpw".getBytes(StandardCharsets.UTF_8));

    var spec = Serialization.jsonMapper().createObjectNode()
            .put("database.hostname", "orderdb")
            .put("database.port", "5432")
            .put("database.user", "orderuser")
            .put("database.dbname", "orderdb")
            .put("database.server.name", "dbserver1")
            .put("schema.include.list", "purchaseorder")
            .put("table.include.list", "purchaseorder.outboxevent")
            .put("tombstones.on.delete", "false")
            .put("key.converter", "org.apache.kafka.connect.storage.StringConverter")
            .put("value.converter", "org.apache.kafka.connect.storage.StringConverter")
            .put("transforms", "saga")
            .put("transforms.saga.type", "io.debezium.transforms.outbox.EventRouter")
            .put("transforms.saga.route.topic.replacement", "${routedByValue}.request")
            .put("poll.interval.ms", "100")
            .put("consumer.interceptor.classes", "io.opentracing.contrib.kafka.TracingConsumerInterceptor")
            .put("producer.interceptor.classes", "io.opentracing.contrib.kafka.TracingProducerInterceptor");
    spec.with("data_shape")
            .put("key", "JSON")
            .put("value", "JSON");
    spec.with("database.password")
            .put("kind", "base64")
            .put("value", pwdB64);

    var resources = controller.doReify(
            new ManagedConnectorBuilder()
                    .withMetadata(new ObjectMetaBuilder()
                            .withName(DEFAULT_MANAGED_CONNECTOR_ID)
                            .build())
                    .withSpec(new ManagedConnectorSpecBuilder()
                            .withConnectorId(DEFAULT_MANAGED_CONNECTOR_ID)
                            .withDeploymentId(DEFAULT_DEPLOYMENT_ID)
                            .withDeployment(new DeploymentSpecBuilder()
                                    .withConnectorTypeId(DEFAULT_CONNECTOR_TYPE_ID)
                                    .withSecret("secret")
                                    .withKafka(new KafkaSpecBuilder()
                                            .withUrl(DEFAULT_KAFKA_SERVER)
                                            .build())
                                    .withConnectorResourceVersion(DEFAULT_CONNECTOR_REVISION)
                                    .withDeploymentResourceVersion(DEFAULT_DEPLOYMENT_REVISION)
                                    .withDesiredState(DESIRED_STATE_READY)
                                    .build())
                            .build())
                    .build(),
            new org.bf2.cos.fleetshard.operator.debezium.DebeziumShardMetadataBuilder()
                    .withContainerImage(DEFAULT_CONNECTOR_IMAGE)
                    .withConnectorClass(PG_CLASS)
                    .build(),
            new ConnectorConfiguration<>(spec, ObjectNode.class),
            new ServiceAccountSpecBuilder()
                    .withClientId(DEFAULT_KAFKA_CLIENT_ID)
                    .withClientSecret(kcsB64)
                    .build());

    assertThat(resources)
            .anyMatch(DebeziumOperandSupport::isKafkaConnect)
            .anyMatch(DebeziumOperandSupport::isKafkaConnector)
            .anyMatch(DebeziumOperandSupport::isSecret);
    assertThat(resources)
            .filteredOn(DebeziumOperandSupport::isKafkaConnect)
            .hasSize(1)
            .first()
            .isInstanceOfSatisfying(KafkaConnect.class,
                    kc -> assertThat(kc.getSpec().getImage()).isEqualTo(DEFAULT_CONNECTOR_IMAGE));
    assertThat(resources)
            .filteredOn(DebeziumOperandSupport::isKafkaConnector)
            .hasSize(1)
            .first()
            .isInstanceOfSatisfying(KafkaConnector.class,
                    kc -> assertThat(kc.getSpec().getConfig()).containsEntry(
                            "database.password",
                            "${file:/opt/kafka/external-configuration/" + DebeziumConstants.EXTERNAL_CONFIG_DIRECTORY
                                    + "/" + EXTERNAL_CONFIG_FILE + ":database.password}"));
}
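The ${file:...} value asserted in the last block is not a literal password: Kafka Connect resolves it at runtime through org.apache.kafka.common.config.provider.FileConfigProvider, reading the key from the mounted external configuration file. A minimal sketch of that resolution, using a temporary properties file in place of the operator-mounted one:

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.config.ConfigData;
import org.apache.kafka.common.config.provider.FileConfigProvider;

public class FileProviderDemo {
    public static void main(String[] args) throws Exception {
        // Stand-in for /opt/kafka/external-configuration/<dir>/<file>.
        Path propsFile = Files.createTempFile("connector", ".properties");
        Files.writeString(propsFile, "database.password=orderpw\n");

        try (FileConfigProvider provider = new FileConfigProvider()) {
            provider.configure(Map.of());
            // ${file:<path>:database.password} resolves through get(path, keys).
            ConfigData data = provider.get(propsFile.toString(), Set.of("database.password"));
            System.out.println(data.data().get("database.password")); // orderpw
        }
    }
}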
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class CamelOperandSupport, method createSteps:
public static List<ProcessorKamelet> createSteps(
        ManagedConnector connector,
        ConnectorConfiguration<ObjectNode> connectorConfiguration,
        CamelShardMetadata shardMetadata,
        Map<String, String> props) {

    String consumes = Optional.ofNullable(connectorConfiguration.getDataShapeSpec())
            .map(spec -> spec.at("/consumes/format"))
            .filter(node -> !node.isMissingNode())
            .map(JsonNode::asText)
            .orElse(shardMetadata.getConsumes());
    String produces = Optional.ofNullable(connectorConfiguration.getDataShapeSpec())
            .map(spec -> spec.at("/produces/format"))
            .filter(node -> !node.isMissingNode())
            .map(JsonNode::asText)
            .orElse(shardMetadata.getProduces());

    final ArrayNode steps = connectorConfiguration.getProcessorsSpec();
    final List<ProcessorKamelet> stepDefinitions = new ArrayList<>(steps.size() + 2);

    int i = 0;

    if (consumes != null) {
        switch (consumes) {
            case "application/json": {
                String stepName = stepName(i, "cos-decoder-json-action");
                stepDefinitions.add(new ProcessorKamelet("cos-decoder-json-action", stepName));
                if (shardMetadata.getConsumesClass() != null) {
                    props.put(kameletProperty("cos-decoder-json-action", stepName, "contentClass"),
                            shardMetadata.getConsumesClass());
                }
                i++;
                break;
            }
            case "avro/binary": {
                String stepName = stepName(i, "cos-decoder-avro-action");
                stepDefinitions.add(new ProcessorKamelet("cos-decoder-avro-action", stepName));
                if (shardMetadata.getConsumesClass() != null) {
                    props.put(kameletProperty("cos-decoder-avro-action", stepName, "contentClass"),
                            shardMetadata.getConsumesClass());
                }
                i++;
                break;
            }
            case "application/x-java-object": {
                String stepName = stepName(i, "cos-decoder-pojo-action");
                stepDefinitions.add(new ProcessorKamelet("cos-decoder-pojo-action", stepName));
                if (produces != null) {
                    props.put(kameletProperty("cos-decoder-pojo-action", stepName, "mimeType"), produces);
                }
                i++;
                break;
            }
            case "text/plain":
            case "application/octet-stream":
                break;
            default:
                throw new IllegalArgumentException("Unsupported value format " + consumes);
        }
    }

    for (JsonNode step : steps) {
        var element = step.fields().next();
        String templateId = shardMetadata.getKamelets().getProcessors().get(element.getKey());
        if (templateId == null) {
            throw new IllegalArgumentException("Unknown processor: " + element.getKey());
        }
        stepDefinitions.add(new ProcessorKamelet(templateId, stepName(i, templateId)));
        configureStep(props, (ObjectNode) element.getValue(), i, templateId);
        i++;
    }

    if (produces != null) {
        switch (produces) {
            case "application/json": {
                String stepName = stepName(i, "cos-encoder-json-action");
                stepDefinitions.add(new ProcessorKamelet("cos-encoder-json-action", stepName));
                if (shardMetadata.getProducesClass() != null) {
                    props.put(kameletProperty("cos-encoder-json-action", stepName, "contentClass"),
                            shardMetadata.getProducesClass());
                }
                break;
            }
            case "avro/binary": {
                String stepName = stepName(i, "cos-encoder-avro-action");
                stepDefinitions.add(new ProcessorKamelet("cos-encoder-avro-action", stepName));
                if (shardMetadata.getProducesClass() != null) {
                    props.put(kameletProperty("cos-encoder-avro-action", stepName, "contentClass"),
                            shardMetadata.getProducesClass());
                }
                break;
            }
            case "text/plain":
                stepDefinitions.add(new ProcessorKamelet("cos-encoder-string-action", stepName(i, "cos-encoder-string-action")));
                break;
            case "application/octet-stream":
                stepDefinitions.add(new ProcessorKamelet("cos-encoder-bytearray-action", stepName(i, "cos-encoder-bytearray-action")));
                break;
            default:
                throw new IllegalArgumentException("Unsupported value format " + produces);
        }
    }

    // If it is a sink, then it consumes from kafka
    if (isSink(shardMetadata)) {
        props.put(String.format("camel.kamelet.%s.valueDeserializer", shardMetadata.getKamelets().getKafka().getName()),
                "org.bf2.cos.connector.camel.serdes.bytes.ByteArrayDeserializer");
        if ("application/json".equals(consumes) && hasSchemaRegistry(connector)) {
            props.put(String.format("camel.kamelet.%s.valueDeserializer", shardMetadata.getKamelets().getKafka().getName()),
                    "org.bf2.cos.connector.camel.serdes.json.JsonDeserializer");
        }
        if ("avro/binary".equals(produces) && hasSchemaRegistry(connector)) {
            props.put(String.format("camel.kamelet.%s.valueDeserializer", shardMetadata.getKamelets().getKafka().getName()),
                    "org.bf2.cos.connector.camel.serdes.avro.AvroDeserializer");
        }
    }

    // If it is a source, then it produces to kafka
    if (isSource(shardMetadata)) {
        props.put(String.format("camel.kamelet.%s.valueSerializer", shardMetadata.getKamelets().getKafka().getName()),
                "org.bf2.cos.connector.camel.serdes.bytes.ByteArraySerializer");
        if ("application/json".equals(produces) && hasSchemaRegistry(connector)) {
            props.put(String.format("camel.kamelet.%s.valueSerializer", shardMetadata.getKamelets().getKafka().getName()),
                    "org.bf2.cos.connector.camel.serdes.json.JsonSerializer");
        }
        if ("avro/binary".equals(produces) && hasSchemaRegistry(connector)) {
            props.put(String.format("camel.kamelet.%s.valueSerializer", shardMetadata.getKamelets().getKafka().getName()),
                    "org.bf2.cos.connector.camel.serdes.avro.AvroSerializer");
        }
    }

    return stepDefinitions;
}
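The two switch statements above are a plain MIME-type-to-kamelet mapping with pass-through for text/plain and application/octet-stream. A self-contained sketch of just the consumes-side selection, using the kamelet names from the method (the DataShapeKamelets class and decoderFor helper are hypothetical):

import java.util.Map;
import java.util.Optional;

public class DataShapeKamelets {

    private static final Map<String, String> DECODERS = Map.of(
            "application/json", "cos-decoder-json-action",
            "avro/binary", "cos-decoder-avro-action",
            "application/x-java-object", "cos-decoder-pojo-action");

    // Empty for null or pass-through formats, which need no decoder step;
    // throws for unknown formats, mirroring createSteps.
    static Optional<String> decoderFor(String consumes) {
        if (consumes == null
                || "text/plain".equals(consumes)
                || "application/octet-stream".equals(consumes)) {
            return Optional.empty();
        }
        String kamelet = DECODERS.get(consumes);
        if (kamelet == null) {
            throw new IllegalArgumentException("Unsupported value format " + consumes);
        }
        return Optional.of(kamelet);
    }

    public static void main(String[] args) {
        System.out.println(decoderFor("application/json")); // Optional[cos-decoder-json-action]
        System.out.println(decoderFor("text/plain"));       // Optional.empty
    }
}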
Use of org.bf2.operator.operands.KafkaInstanceConfiguration.Kafka in project cos-fleetshard by bf2fc6cc711aee1a0c2a.
The class AbstractApicurioConverter, method getAdditionalConfig:
@Override
public Map<String, String> getAdditionalConfig(ManagedConnector config, ServiceAccountSpec serviceAccountSpec) {
    Map<String, String> additionalConfig = new HashMap<>();
    additionalConfig.put("apicurio.auth.service.url", APICURIO_AUTH_SERVICE_URL);
    additionalConfig.put("apicurio.auth.realm", "rhoas");

    SchemaRegistrySpec schemaRegistrySpec = config.getSpec().getDeployment().getSchemaRegistry();
    if (null == schemaRegistrySpec || null == schemaRegistrySpec.getUrl() || schemaRegistrySpec.getUrl().isBlank()) {
        throw new RuntimeException("Can't create a schema-based connector without providing a valid 'schema_registry'");
    }
    String schemaRegistryURL = schemaRegistrySpec.getUrl();

    additionalConfig.put("apicurio.registry.url", schemaRegistryURL);
    additionalConfig.put("apicurio.auth.client.id", serviceAccountSpec.getClientId());
    additionalConfig.put("apicurio.auth.client.secret",
            "${dir:/opt/kafka/external-configuration/" + DebeziumConstants.EXTERNAL_CONFIG_DIRECTORY
                    + ":" + DebeziumConstants.KAFKA_CLIENT_SECRET_KEY + "}");
    additionalConfig.put("apicurio.registry.auto-register", "true");
    additionalConfig.put("apicurio.registry.find-latest", "true");
    return additionalConfig;
}
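As with the Debezium database password above, the client secret is not inlined: the ${dir:...} placeholder is resolved at runtime by Kafka Connect's org.apache.kafka.common.config.provider.DirectoryConfigProvider, which reads one file per key from the mounted directory. A minimal sketch of that lookup, using a temporary directory in place of the operator mount (the directory and key names here are illustrative, not the actual DebeziumConstants values):

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.config.ConfigData;
import org.apache.kafka.common.config.provider.DirectoryConfigProvider;

public class DirectoryProviderDemo {
    public static void main(String[] args) throws Exception {
        // Stand-in for /opt/kafka/external-configuration/<EXTERNAL_CONFIG_DIRECTORY>:
        // each file name is a key, each file body the value.
        Path dir = Files.createTempDirectory("external-configuration");
        Files.writeString(dir.resolve("kafka-client-secret"), "s3cr3t");

        try (DirectoryConfigProvider provider = new DirectoryConfigProvider()) {
            provider.configure(Map.of());
            // ${dir:<path>:kafka-client-secret} resolves through get(path, keys).
            ConfigData data = provider.get(dir.toString(), Set.of("kafka-client-secret"));
            System.out.println(data.data().get("kafka-client-secret")); // s3cr3t
        }
    }
}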