Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
The class K8sTopicWatcher, method eventReceived.
@Override
public void eventReceived(Action action, KafkaTopic kafkaTopic) {
    ObjectMeta metadata = kafkaTopic.getMetadata();
    Map<String, String> labels = metadata.getLabels();
    if (kafkaTopic.getSpec() != null) {
        LogContext logContext = LogContext.kubeWatch(action, kafkaTopic).withKubeTopic(kafkaTopic);
        String name = metadata.getName();
        String kind = kafkaTopic.getKind();
        if (!initReconcileFuture.isComplete()) {
            LOGGER.debugCr(logContext.toReconciliation(), "Ignoring initial event for {} {} during initial reconcile", kind, name);
            return;
        }
        if (action.equals(Action.ERROR)) {
            LOGGER.errorCr(logContext.toReconciliation(), "Watch received action=ERROR for {} {} {}", kind, name, kafkaTopic);
        } else {
            PauseAnnotationChanges pauseAnnotationChanges = pausedAnnotationChanged(kafkaTopic);
            if (action.equals(Action.DELETED) || shouldReconcile(kafkaTopic, metadata, pauseAnnotationChanges.isChanged())) {
                if (pauseAnnotationChanges.isResourcePausedByAnno()) {
                    topicOperator.pausedTopicCounter.getAndIncrement();
                } else if (pauseAnnotationChanges.isResourceUnpausedByAnno()) {
                    topicOperator.pausedTopicCounter.getAndDecrement();
                }
                LOGGER.infoCr(logContext.toReconciliation(), "event {} on resource {} generation={}, labels={}", action, name, metadata.getGeneration(), labels);
                Handler<AsyncResult<Void>> resultHandler = ar -> {
                    if (ar.succeeded()) {
                        LOGGER.infoCr(logContext.toReconciliation(), "Success processing event {} on resource {} with labels {}", action, name, labels);
                    } else {
                        String message;
                        if (ar.cause() instanceof InvalidTopicException) {
                            message = kind + " " + name + " has an invalid spec section: " + ar.cause().getMessage();
                            LOGGER.errorCr(logContext.toReconciliation(), message);
                        } else {
                            message = "Failure processing " + kind + " watch event " + action + " on resource " + name + " with labels " + labels + ": " + ar.cause().getMessage();
                            LOGGER.errorCr(logContext.toReconciliation(), message, ar.cause());
                        }
                        topicOperator.enqueue(logContext, topicOperator.new Event(logContext, kafkaTopic, message, TopicOperator.EventType.WARNING, errorResult -> {
                        }));
                    }
                };
                topicOperator.onResourceEvent(logContext, kafkaTopic, action).onComplete(resultHandler);
            } else {
                LOGGER.debugCr(logContext.toReconciliation(), "Ignoring {} to {} {} because metadata.generation==status.observedGeneration", action, kind, name);
            }
        }
    }
}
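For context, a minimal sketch of the kind of KafkaTopic resource this watcher receives, built with the generated KafkaTopicBuilder. The pause-reconciliation annotation key is an assumption about what pausedAnnotationChanged(kafkaTopic) looks for and may not match the operator exactly:

import io.strimzi.api.kafka.model.KafkaTopic;
import io.strimzi.api.kafka.model.KafkaTopicBuilder;

class KafkaTopicExample {
    static KafkaTopic pausedTopic() {
        return new KafkaTopicBuilder()
            .withNewMetadata()
                .withName("my-topic")
                .withNamespace("kafka")
                .addToLabels("strimzi.io/cluster", "my-cluster")
                // Annotation key assumed for illustration; the watcher treats a change here as a pause/unpause.
                .addToAnnotations("strimzi.io/pause-reconciliation", "true")
            .endMetadata()
            .withNewSpec()
                .withPartitions(3)
                .withReplicas(3)
            .endSpec()
            .build();
    }
}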
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
The class CruiseControlUtils, method verifyThatCruiseControlSamplesTopicsArePresent.
public static void verifyThatCruiseControlSamplesTopicsArePresent(String namespaceName, long timeout) {
    final int numberOfPartitionsSamplesTopic = 32;
    final int numberOfReplicasSamplesTopic = 2;
    TestUtils.waitFor("Verify that kafka contains cruise control topics with related configuration.", Constants.GLOBAL_POLL_INTERVAL, timeout, () -> {
        KafkaTopic modelTrainingSamples = KafkaTopicResource.kafkaTopicClient().inNamespace(namespaceName).withName(CRUISE_CONTROL_MODEL_TRAINING_SAMPLES_TOPIC).get();
        KafkaTopic partitionsMetricsSamples = KafkaTopicResource.kafkaTopicClient().inNamespace(namespaceName).withName(CRUISE_CONTROL_PARTITION_METRICS_SAMPLES_TOPIC).get();
        if (modelTrainingSamples != null && partitionsMetricsSamples != null) {
            boolean hasTopicCorrectPartitionsCount = modelTrainingSamples.getSpec().getPartitions() == numberOfPartitionsSamplesTopic && partitionsMetricsSamples.getSpec().getPartitions() == numberOfPartitionsSamplesTopic;
            boolean hasTopicCorrectReplicasCount = modelTrainingSamples.getSpec().getReplicas() == numberOfReplicasSamplesTopic && partitionsMetricsSamples.getSpec().getReplicas() == numberOfReplicasSamplesTopic;
            return hasTopicCorrectPartitionsCount && hasTopicCorrectReplicasCount;
        }
        LOGGER.debug("One of the samples {}, {} topics are not present", CRUISE_CONTROL_MODEL_TRAINING_SAMPLES_TOPIC, CRUISE_CONTROL_PARTITION_METRICS_SAMPLES_TOPIC);
        return false;
    });
}
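A minimal sketch of how this helper might be invoked from a system test; the namespace name and five-minute timeout are illustrative assumptions, not values from the Strimzi test suite:

import java.time.Duration;

class CruiseControlSamplesCheck {
    void awaitSamplesTopics() {
        String namespaceName = "my-namespace";               // assumed test namespace
        long timeoutMs = Duration.ofMinutes(5).toMillis();   // assumed timeout
        // Polls until both Cruise Control samples topics exist with 32 partitions and 2 replicas.
        CruiseControlUtils.verifyThatCruiseControlSamplesTopicsArePresent(namespaceName, timeoutMs);
    }
}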
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
The class CruiseControlUtils, method verifyThatKafkaCruiseControlMetricReporterTopicIsPresent.
public static void verifyThatKafkaCruiseControlMetricReporterTopicIsPresent(String namespaceName, long timeout) {
    final int numberOfPartitionsMetricTopic = 1;
    final int numberOfReplicasMetricTopic = 3;
    TestUtils.waitFor("Verify that kafka contains cruise control topics with related configuration.", Constants.GLOBAL_POLL_INTERVAL, timeout, () -> {
        KafkaTopic metrics = KafkaTopicResource.kafkaTopicClient().inNamespace(namespaceName).withName(CRUISE_CONTROL_METRICS_TOPIC).get();
        boolean hasTopicCorrectPartitionsCount = metrics.getSpec().getPartitions() == numberOfPartitionsMetricTopic;
        boolean hasTopicCorrectReplicasCount = metrics.getSpec().getReplicas() == numberOfReplicasMetricTopic;
        return hasTopicCorrectPartitionsCount && hasTopicCorrectReplicasCount;
    });
}
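Note that, unlike the samples check above, this helper dereferences the returned KafkaTopic without a null check, so it relies on the metrics reporter topic already existing when polling starts. A corresponding call, again with an assumed namespace and timeout:

import java.time.Duration;

class CruiseControlMetricsCheck {
    void awaitMetricsReporterTopic() {
        // Waits until the Cruise Control metrics reporter topic has 1 partition and 3 replicas.
        CruiseControlUtils.verifyThatKafkaCruiseControlMetricReporterTopicIsPresent("my-namespace", Duration.ofMinutes(5).toMillis());
    }
}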
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
The class TopicOperator, method onTopicCreated.
/**
 * Called when a topic znode is created in ZK
 */
Future<Void> onTopicCreated(LogContext logContext, TopicName topicName) {
    // XXX currently runs on the ZK thread, requiring a synchronized inFlight
    // is it better to put this check in the topic deleted event?
    Reconciliation action = new Reconciliation(logContext, "onTopicCreated", true) {
        @Override
        public Future<Void> execute() {
            Reconciliation self = this;
            Promise<Void> promise = Promise.promise();
            TopicMetadataHandler handler = new TopicMetadataHandler(vertx, kafka, topicName, topicMetadataBackOff()) {
                @Override
                public void handle(AsyncResult<TopicMetadata> metadataResult) {
                    if (metadataResult.succeeded()) {
                        if (metadataResult.result() == null) {
                            // In this case it is most likely that we've been notified by ZK
                            // before Kafka has finished creating the topic, so we retry
                            // with exponential backoff.
                            retry(logContext.toReconciliation());
                        } else {
                            // We now have the metadata we need to create the
                            // resource...
                            Topic kafkaTopic = TopicSerialization.fromTopicMetadata(metadataResult.result());
                            reconcileOnTopicChange(logContext, topicName, kafkaTopic, self).onComplete(promise);
                        }
                    } else {
                        promise.fail(metadataResult.cause());
                    }
                }

                @Override
                public void onMaxAttemptsExceeded(MaxAttemptsExceededException e) {
                    promise.fail(e);
                }
            };
            return awaitExistential(logContext, topicName, true).compose(exists -> {
                kafka.topicMetadata(logContext.toReconciliation(), topicName).onComplete(handler);
                return promise.future();
            });
        }
    };
    return executeWithTopicLockHeld(logContext, topicName, action);
}
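The retry() call above comes from TopicMetadataHandler, which re-requests the topic metadata with exponential backoff while Kafka is still creating the topic. Below is a minimal, self-contained sketch of that backoff pattern on the Vert.x event loop; it is an illustration only, and the real handler and its backoff helper are implemented differently:

import io.vertx.core.Vertx;
import java.util.function.BooleanSupplier;

class ExponentialRetrySketch {
    // Re-runs 'check' with doubling delays until it succeeds or the attempts run out.
    // The real TopicMetadataHandler invokes onMaxAttemptsExceeded() when giving up.
    static void retryWithBackoff(Vertx vertx, BooleanSupplier check, long delayMs, int attemptsLeft) {
        if (check.getAsBoolean()) {
            return; // metadata is finally available
        }
        if (attemptsLeft <= 0) {
            return; // give up; the real handler fails its promise here
        }
        vertx.setTimer(delayMs, timerId -> retryWithBackoff(vertx, check, delayMs * 2, attemptsLeft - 1));
    }
}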
Use of io.strimzi.api.kafka.model.KafkaTopic in project strimzi by strimzi.
The class TopicOperator, method onTopicPartitionsChanged.
/**
 * Called when ZK watch notifies of a change to the topic's partitions
 */
Future<Void> onTopicPartitionsChanged(LogContext logContext, TopicName topicName) {
    Reconciliation action = new Reconciliation(logContext, "onTopicPartitionsChanged", true) {
        @Override
        public Future<Void> execute() {
            Reconciliation self = this;
            Promise<Void> promise = Promise.promise();
            // getting topic information from the private store
            topicStore.read(topicName).onComplete(topicResult -> {
                TopicMetadataHandler handler = new TopicMetadataHandler(vertx, kafka, topicName, topicMetadataBackOff()) {
                    @Override
                    public void handle(AsyncResult<TopicMetadata> metadataResult) {
                        try {
                            if (metadataResult.succeeded()) {
                                // getting topic metadata from Kafka
                                Topic kafkaTopic = TopicSerialization.fromTopicMetadata(metadataResult.result());
                                // if partitions aren't changed on Kafka yet, we retry with exponential backoff
                                if (topicResult.result().getNumPartitions() == kafkaTopic.getNumPartitions()) {
                                    retry(logContext.toReconciliation());
                                } else {
                                    LOGGER.infoCr(logContext.toReconciliation(), "Topic {} partitions changed to {}", topicName, kafkaTopic.getNumPartitions());
                                    reconcileOnTopicChange(logContext, topicName, kafkaTopic, self).onComplete(promise);
                                }
                            } else {
                                promise.fail(metadataResult.cause());
                            }
                        } catch (Throwable t) {
                            promise.fail(t);
                        }
                    }

                    @Override
                    public void onMaxAttemptsExceeded(MaxAttemptsExceededException e) {
                        // it's possible that the watched partitions znode changed due to a reassignment
                        // and we never observe a partition count change within the backoff,
                        // so there is no need to fail the future in this case
                        promise.complete();
                    }
                };
                kafka.topicMetadata(logContext.toReconciliation(), topicName).onComplete(handler);
            });
            return promise.future();
        }
    };
    return executeWithTopicLockHeld(logContext, topicName, action);
}
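Both onTopicCreated and onTopicPartitionsChanged hand their Reconciliation to executeWithTopicLockHeld, so work for a given topic is serialised. As an illustration only, one way to get that per-topic serialisation with a named Vert.x shared-data lock; the actual TopicOperator locking code may work differently:

import io.vertx.core.Future;
import io.vertx.core.Promise;
import io.vertx.core.Vertx;
import java.util.function.Supplier;

class TopicLockSketch {
    // Acquires a lock named after the topic, runs the action, and releases the lock when the action completes.
    static Future<Void> withTopicLockHeld(Vertx vertx, String topicName, Supplier<Future<Void>> action) {
        Promise<Void> promise = Promise.promise();
        vertx.sharedData().getLockWithTimeout("topic-lock:" + topicName, 30_000, lockResult -> {
            if (lockResult.succeeded()) {
                action.get().onComplete(ar -> {
                    lockResult.result().release();   // always release, whether the action succeeded or failed
                    promise.handle(ar);
                });
            } else {
                promise.fail(lockResult.cause());
            }
        });
        return promise.future();
    }
}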