Use of io.strimzi.operator.common.MaxAttemptsExceededException in project strimzi-kafka-operator by strimzi.
The class TopicOperator, method onTopicCreated.
/**
* Called when a topic znode is created in ZK
*/
Future<Void> onTopicCreated(LogContext logContext, TopicName topicName) {
    // XXX currently runs on the ZK thread, requiring a synchronized inFlight
    // is it better to put this check in the topic deleted event?
    Reconciliation action = new Reconciliation(logContext, "onTopicCreated", true) {
        @Override
        public Future<Void> execute() {
            Reconciliation self = this;
            Promise<Void> promise = Promise.promise();
            TopicMetadataHandler handler = new TopicMetadataHandler(vertx, kafka, topicName, topicMetadataBackOff()) {
                @Override
                public void handle(AsyncResult<TopicMetadata> metadataResult) {
                    if (metadataResult.succeeded()) {
                        if (metadataResult.result() == null) {
                            // In this case it is most likely that we've been notified by ZK
                            // before Kafka has finished creating the topic, so we retry
                            // with exponential backoff.
                            retry(logContext.toReconciliation());
                        } else {
                            // We now have the metadata we need to create the
                            // resource...
                            Topic kafkaTopic = TopicSerialization.fromTopicMetadata(metadataResult.result());
                            reconcileOnTopicChange(logContext, topicName, kafkaTopic, self).onComplete(promise);
                        }
                    } else {
                        promise.fail(metadataResult.cause());
                    }
                }

                @Override
                public void onMaxAttemptsExceeded(MaxAttemptsExceededException e) {
                    promise.fail(e);
                }
            };
            return awaitExistential(logContext, topicName, true).compose(exists -> {
                kafka.topicMetadata(logContext.toReconciliation(), topicName).onComplete(handler);
                return promise.future();
            });
        }
    };
    return executeWithTopicLockHeld(logContext, topicName, action);
}
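The retry(...) call in handle(...) is inherited from TopicMetadataHandler, which schedules another metadata fetch with exponential backoff and invokes onMaxAttemptsExceeded(MaxAttemptsExceededException) once the attempt budget is spent. Below is a minimal, self-contained sketch of that retry contract; all names in it are hypothetical illustrations of the pattern, not Strimzi's actual TopicMetadataHandler or backoff implementation, which is built on Vert.x timers.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch of the retry contract assumed above: each retry()
// schedules the next attempt after a doubling delay, and once the attempt
// budget is exhausted onMaxAttemptsExceeded(...) fires instead of another
// attempt. Strimzi signals that case with MaxAttemptsExceededException.
abstract class BackoffRetrier {
    private final ScheduledExecutorService scheduler =
            Executors.newSingleThreadScheduledExecutor();
    private final int maxAttempts;
    private final long firstDelayMs;
    private int attempts = 0;

    BackoffRetrier(int maxAttempts, long firstDelayMs) {
        this.maxAttempts = maxAttempts;
        this.firstDelayMs = firstDelayMs;
    }

    /** The operation to (re)attempt, e.g. a topic metadata fetch. */
    protected abstract void attempt();

    /** Called instead of a further attempt once the budget is exhausted. */
    protected abstract void onMaxAttemptsExceeded(RuntimeException e);

    /** Schedules the next attempt, doubling the delay each time. */
    protected void retry() {
        attempts++;
        if (attempts >= maxAttempts) {
            scheduler.shutdown();
            onMaxAttemptsExceeded(new RuntimeException(
                    "Max attempts (" + maxAttempts + ") exceeded"));
        } else {
            long delayMs = firstDelayMs << (attempts - 1); // 100, 200, 400, ...
            scheduler.schedule(this::attempt, delayMs, TimeUnit.MILLISECONDS);
        }
    }
}

Under this contract, onTopicCreated treats exhaustion as a hard failure (promise.fail(e)), since metadata for a topic whose znode just appeared really should become visible shortly.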
Use of io.strimzi.operator.common.MaxAttemptsExceededException in project strimzi-kafka-operator by strimzi.
The class TopicOperator, method onTopicPartitionsChanged.
/**
* Called when ZK watch notifies of a change to the topic's partitions
*/
Future<Void> onTopicPartitionsChanged(LogContext logContext, TopicName topicName) {
    Reconciliation action = new Reconciliation(logContext, "onTopicPartitionsChanged", true) {
        @Override
        public Future<Void> execute() {
            Reconciliation self = this;
            Promise<Void> promise = Promise.promise();
            // getting topic information from the private store
            topicStore.read(topicName).onComplete(topicResult -> {
                TopicMetadataHandler handler = new TopicMetadataHandler(vertx, kafka, topicName, topicMetadataBackOff()) {
                    @Override
                    public void handle(AsyncResult<TopicMetadata> metadataResult) {
                        try {
                            if (metadataResult.succeeded()) {
                                // getting topic metadata from Kafka
                                Topic kafkaTopic = TopicSerialization.fromTopicMetadata(metadataResult.result());
                                // if the partition count hasn't changed on Kafka yet, retry with exponential backoff
                                if (topicResult.result().getNumPartitions() == kafkaTopic.getNumPartitions()) {
                                    retry(logContext.toReconciliation());
                                } else {
                                    LOGGER.infoCr(logContext.toReconciliation(), "Topic {} partitions changed to {}", topicName, kafkaTopic.getNumPartitions());
                                    reconcileOnTopicChange(logContext, topicName, kafkaTopic, self).onComplete(promise);
                                }
                            } else {
                                promise.fail(metadataResult.cause());
                            }
                        } catch (Throwable t) {
                            promise.fail(t);
                        }
                    }

                    @Override
                    public void onMaxAttemptsExceeded(MaxAttemptsExceededException e) {
                        // The watched znode may have changed because of a partition
                        // reassignment rather than a partition count change, so if we
                        // don't observe a new partition count within the backoff there
                        // is no need to fail the future.
                        promise.complete();
                    }
                };
                kafka.topicMetadata(logContext.toReconciliation(), topicName).onComplete(handler);
            });
            return promise.future();
        }
    };
    return executeWithTopicLockHeld(logContext, topicName, action);
}
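Note the asymmetry between the two overrides of onMaxAttemptsExceeded: onTopicCreated fails its promise, because metadata for a freshly created topic is expected to appear, whereas onTopicPartitionsChanged completes its promise, because the znode event may have been caused by a reassignment that never changes the partition count. A hedged sketch of how a caller would observe that difference follows; the operator variable and topic name construction are illustrative assumptions, while onFailure/onSuccess are standard Vert.x 4 Future methods.

// Assumes a configured TopicOperator instance ("operator") and a LogContext.
operator.onTopicCreated(logContext, new TopicName("my-topic"))
        .onFailure(t -> {
            // Reached if metadata never appeared within the backoff budget:
            // the handler failed the promise with MaxAttemptsExceededException.
            System.err.println("Gave up waiting for topic metadata: " + t);
        });

operator.onTopicPartitionsChanged(logContext, new TopicName("my-topic"))
        .onSuccess(v -> {
            // Also reached when no partition count change was observed within
            // the backoff: that handler completes the promise rather than
            // failing it, since the znode change may have been a reassignment.
        });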