use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi-kafka-operator by strimzi.
the class K8sImpl method updateResource.
@Override
public Future<KafkaTopic> updateResource(KafkaTopic topicResource) {
    Promise<KafkaTopic> handler = Promise.promise();
    vertx.executeBlocking(future -> {
        try {
            KafkaTopic kafkaTopic = operation().inNamespace(namespace)
                    .withName(topicResource.getMetadata().getName())
                    .patch(topicResource);
            LOGGER.debug("KafkaTopic {} updated with version {}->{}",
                    kafkaTopic != null && kafkaTopic.getMetadata() != null ? kafkaTopic.getMetadata().getName() : null,
                    topicResource.getMetadata() != null ? topicResource.getMetadata().getResourceVersion() : null,
                    kafkaTopic != null && kafkaTopic.getMetadata() != null ? kafkaTopic.getMetadata().getResourceVersion() : null);
            future.complete(kafkaTopic);
        } catch (Exception e) {
            future.fail(e);
        }
    }, handler);
    return handler.future();
}
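A minimal caller sketch, not part of the Strimzi source: it assumes the K8s interface that K8sImpl implements, and shows how the returned Future<KafkaTopic> might be consumed with the standard Vert.x onComplete callback.

import io.strimzi.api.kafka.model.KafkaTopic;
import io.vertx.core.Future;

// Hypothetical caller: patch a KafkaTopic and report the outcome once the
// blocking patch has completed on a worker thread.
static void patchAndLog(K8s k8s, KafkaTopic topicResource) {
    Future<KafkaTopic> patched = k8s.updateResource(topicResource);
    patched.onComplete(ar -> {
        if (ar.succeeded()) {
            System.out.println("Patched KafkaTopic " + ar.result().getMetadata().getName());
        } else {
            System.err.println("Patch failed: " + ar.cause().getMessage());
        }
    });
}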
use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi-kafka-operator by strimzi.
the class K8sTopicWatcher method eventReceived.
@Override
public void eventReceived(Action action, KafkaTopic kafkaTopic) {
    ObjectMeta metadata = kafkaTopic.getMetadata();
    Map<String, String> labels = metadata.getLabels();
    if (kafkaTopic.getSpec() != null) {
        LogContext logContext = LogContext.kubeWatch(action, kafkaTopic).withKubeTopic(kafkaTopic);
        String name = metadata.getName();
        String kind = kafkaTopic.getKind();
        if (!initReconcileFuture.isComplete()) {
            LOGGER.debugCr(logContext.toReconciliation(), "Ignoring initial event for {} {} during initial reconcile", kind, name);
            return;
        }
        if (action.equals(Action.ERROR)) {
            LOGGER.errorCr(logContext.toReconciliation(), "Watch received action=ERROR for {} {} {}", kind, name, kafkaTopic);
        } else {
            PauseAnnotationChanges pauseAnnotationChanges = pausedAnnotationChanged(kafkaTopic);
            if (action.equals(Action.DELETED) || shouldReconcile(kafkaTopic, metadata, pauseAnnotationChanges.isChanged())) {
                if (pauseAnnotationChanges.isResourcePausedByAnno()) {
                    topicOperator.pausedTopicCounter.getAndIncrement();
                } else if (pauseAnnotationChanges.isResourceUnpausedByAnno()) {
                    topicOperator.pausedTopicCounter.getAndDecrement();
                }
                LOGGER.infoCr(logContext.toReconciliation(), "event {} on resource {} generation={}, labels={}", action, name, metadata.getGeneration(), labels);
                Handler<AsyncResult<Void>> resultHandler = ar -> {
                    if (ar.succeeded()) {
                        LOGGER.infoCr(logContext.toReconciliation(), "Success processing event {} on resource {} with labels {}", action, name, labels);
                    } else {
                        String message;
                        if (ar.cause() instanceof InvalidTopicException) {
                            message = kind + " " + name + " has an invalid spec section: " + ar.cause().getMessage();
                            LOGGER.errorCr(logContext.toReconciliation(), message);
                        } else {
                            message = "Failure processing " + kind + " watch event " + action + " on resource " + name + " with labels " + labels + ": " + ar.cause().getMessage();
                            LOGGER.errorCr(logContext.toReconciliation(), message, ar.cause());
                        }
                        topicOperator.enqueue(logContext, topicOperator.new Event(logContext, kafkaTopic, message, TopicOperator.EventType.WARNING, errorResult -> {
                        }));
                    }
                };
                topicOperator.onResourceEvent(logContext, kafkaTopic, action).onComplete(resultHandler);
            } else {
                LOGGER.debugCr(logContext.toReconciliation(), "Ignoring {} to {} {} because metadata.generation==status.observedGeneration", action, kind, name);
            }
        }
    }
}
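The pause handling above is driven by the strimzi.io/pause-reconciliation annotation, which is what pausedAnnotationChanged() inspects. A small sketch (topic name and spec values are made up) of a KafkaTopic carrying that annotation, built with the same fluent builder used elsewhere on this page:

import io.strimzi.api.kafka.model.KafkaTopic;
import io.strimzi.api.kafka.model.KafkaTopicBuilder;

// Illustrative resource only: a KafkaTopic annotated as paused, so the watcher
// above would increment pausedTopicCounter when the annotation is added.
static KafkaTopic pausedTopicExample() {
    return new KafkaTopicBuilder()
            .withNewMetadata()
                .withName("my-topic")
                .addToAnnotations("strimzi.io/pause-reconciliation", "true")
            .endMetadata()
            .withNewSpec()
                .withPartitions(3)
                .withReplicas(3)
            .endSpec()
            .build();
}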
use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
the class TopicSerialization method toTopicResource.
/**
* Create a resource to reflect the given Topic.
*/
public static KafkaTopic toTopicResource(Topic topic, Labels labels) {
    ResourceName resourceName = topic.getOrAsKubeName();
    ObjectMeta om = topic.getMetadata();
    Map<String, String> lbls = new HashMap<>(labels.labels());
    if (om != null) {
        om.setName(resourceName.toString());
        if (topic.getMetadata().getLabels() != null) {
            lbls.putAll(topic.getMetadata().getLabels());
        }
        om.setLabels(lbls);
        om.setOwnerReferences(topic.getMetadata().getOwnerReferences());
        om.setAnnotations(topic.getMetadata().getAnnotations());
    } else {
        om = new ObjectMetaBuilder().withName(resourceName.toString()).withLabels(lbls).build();
    }
    KafkaTopic kt = new KafkaTopicBuilder()
            .withMetadata(om)
            .withNewSpec()
                .withTopicName(topic.getTopicName().toString())
                .withPartitions(topic.getNumPartitions())
                .withReplicas((int) topic.getNumReplicas())
                .withConfig(new LinkedHashMap<>(topic.getConfig()))
            .endSpec()
            .build();
    // The builder leaves annotations as an empty map; carry over the original
    // annotations from the source metadata (which may legitimately be null).
    if (topic.getMetadata() != null) {
        kt.getMetadata().setAnnotations(topic.getMetadata().getAnnotations());
    }
    return kt;
}
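A short sketch, assuming Topic and Labels instances from the operator's model as in the method above, of reading the serialized fields back off the KafkaTopic that toTopicResource produces through the generated spec accessors:

import java.util.Map;
import io.strimzi.api.kafka.model.KafkaTopic;

// Sketch only: the resulting KafkaTopic exposes the serialized fields via its spec.
static void inspect(Topic topic, Labels labels) {
    KafkaTopic kt = TopicSerialization.toTopicResource(topic, labels);
    String topicName = kt.getSpec().getTopicName();        // original Kafka topic name
    int partitions = kt.getSpec().getPartitions();          // topic.getNumPartitions()
    int replicas = kt.getSpec().getReplicas();               // (int) topic.getNumReplicas()
    Map<String, Object> config = kt.getSpec().getConfig();   // copied topic config
}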
use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
the class TopicST method testMoreReplicasThanAvailableBrokers.
@ParallelTest
void testMoreReplicasThanAvailableBrokers(ExtensionContext extensionContext) {
    final String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    int topicReplicationFactor = 5;
    int topicPartitions = 5;
    KafkaTopic kafkaTopic = KafkaTopicTemplates.topic(TOPIC_CLUSTER_NAME, topicName, topicPartitions, topicReplicationFactor, 1, namespace).build();
    resourceManager.createResource(extensionContext, false, kafkaTopic);
    assertThat("Topic exists in Kafka CR (Kubernetes)", hasTopicInCRK8s(kafkaTopic, topicName));
    assertThat("Topic doesn't exist in Kafka itself", !hasTopicInKafka(topicName, TOPIC_CLUSTER_NAME));
    String errorMessage = "org.apache.kafka.common.errors.InvalidReplicationFactorException: Replication factor: 5 larger than available brokers: 3";
    KafkaTopicUtils.waitForKafkaTopicNotReady(namespace, topicName);
    KafkaTopicStatus kafkaTopicStatus = KafkaTopicResource.kafkaTopicClient().inNamespace(namespace).withName(topicName).get().getStatus();
    assertThat(kafkaTopicStatus.getConditions().get(0).getMessage(), containsString(errorMessage));
    assertThat(kafkaTopicStatus.getConditions().get(0).getReason(), containsString("CompletionException"));
    LOGGER.info("Delete topic {}", topicName);
    cmdKubeClient(namespace).deleteByName("kafkatopic", topicName);
    KafkaTopicUtils.waitForKafkaTopicDeletion(namespace, topicName);
    topicReplicationFactor = 3;
    final String newTopicName = "topic-example-new";
    kafkaTopic = KafkaTopicTemplates.topic(TOPIC_CLUSTER_NAME, newTopicName, topicPartitions, topicReplicationFactor)
            .editMetadata()
                .withNamespace(namespace)
            .endMetadata()
            .build();
    resourceManager.createResource(extensionContext, kafkaTopic);
    assertThat("Topic exists in Kafka itself", hasTopicInKafka(newTopicName, TOPIC_CLUSTER_NAME));
    assertThat("Topic exists in Kafka CR (Kubernetes)", hasTopicInCRK8s(kafkaTopic, newTopicName));
}
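A possible follow-up check, not part of the original test: it assumes a waitForKafkaTopicReady helper analogous to the waitForKafkaTopicNotReady one used above, and would sit inside the same test method with the same imports.

// Hypothetical extra assertion: wait for the recreated topic to become Ready
// and inspect its first status condition, mirroring the NotReady check above.
KafkaTopicUtils.waitForKafkaTopicReady(namespace, newTopicName);
KafkaTopicStatus readyStatus = KafkaTopicResource.kafkaTopicClient()
        .inNamespace(namespace).withName(newTopicName).get().getStatus();
assertThat(readyStatus.getConditions().get(0).getType(), containsString("Ready"));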
use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
the class TopicST method testCreateTopicViaKafka.
@ParallelTest
void testCreateTopicViaKafka(ExtensionContext extensionContext) {
    String topicName = mapWithTestTopics.get(extensionContext.getDisplayName());
    int topicPartitions = 3;
    LOGGER.debug("Creating topic {} with {} replicas and {} partitions", topicName, 3, topicPartitions);
    KafkaCmdClient.createTopicUsingPodCli(namespace, TOPIC_CLUSTER_NAME, 0, topicName, 3, topicPartitions);
    KafkaTopic kafkaTopic = KafkaTopicResource.kafkaTopicClient().inNamespace(namespace).withName(topicName).get();
    verifyTopicViaKafkaTopicCRK8s(kafkaTopic, topicName, topicPartitions, TOPIC_CLUSTER_NAME);
    topicPartitions = 5;
    LOGGER.info("Editing topic via Kafka, setting partitions to {}", topicPartitions);
    KafkaCmdClient.updateTopicPartitionsCountUsingPodCli(namespace, TOPIC_CLUSTER_NAME, 0, topicName, topicPartitions);
    LOGGER.debug("Topic {} updated from {} to {} partitions", topicName, 3, topicPartitions);
    KafkaTopicUtils.waitForKafkaTopicPartitionChange(namespace, topicName, topicPartitions);
    verifyTopicViaKafka(namespace, topicName, topicPartitions, TOPIC_CLUSTER_NAME);
}
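A small complementary check, not in the original test, using only the client and accessors already shown above; it would sit at the end of the same test method once the partition change has propagated.

// Sketch: after waitForKafkaTopicPartitionChange, the KafkaTopic CR's spec
// should report the new partition count.
KafkaTopic updated = KafkaTopicResource.kafkaTopicClient()
        .inNamespace(namespace).withName(topicName).get();
assertThat("CR reflects the new partition count", updated.getSpec().getPartitions() == topicPartitions);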