use of org.apache.kafka.common.errors.ClusterAuthorizationException in project apache-kafka-on-k8s by banzaicloud.
the class SenderTest method testClusterAuthorizationExceptionInInitProducerIdRequest.
@Test
public void testClusterAuthorizationExceptionInInitProducerIdRequest() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    client.setNode(new Node(1, "localhost", 33343));
    prepareAndReceiveInitProducerId(producerId, Errors.CLUSTER_AUTHORIZATION_FAILED);
    assertFalse(transactionManager.hasProducerId());
    assertTrue(transactionManager.hasError());
    assertTrue(transactionManager.lastError() instanceof ClusterAuthorizationException);
    // cluster authorization is a fatal error for the producer
    assertSendFailure(ClusterAuthorizationException.class);
}
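For orientation, here is a minimal, hypothetical sketch (not part of SenderTest) of how an application would observe the same fatal error: with enable.idempotence=true the producer issues an InitProducerId request on first use, and if the broker answers with CLUSTER_AUTHORIZATION_FAILED every subsequent send fails with ClusterAuthorizationException. The class name, topic name, and broker address below are illustrative, not taken from the test.

import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.ClusterAuthorizationException;
import org.apache.kafka.common.serialization.StringSerializer;

public class IdempotentSendExample {
    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Idempotence requires the InitProducerId request that the test above exercises.
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("my-topic", "key", "value")).get();
        } catch (ExecutionException e) {
            if (e.getCause() instanceof ClusterAuthorizationException) {
                // Fatal for this producer instance: without the required cluster-level permission,
                // every later send fails the same way, so report the error and stop rather than retry.
            }
        }
    }
}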
use of org.apache.kafka.common.errors.ClusterAuthorizationException in project apache-kafka-on-k8s by banzaicloud.
the class TopicAdmin method createTopics.
/**
 * Attempt to create the topics described by the given definitions, returning all of the names of those topics that
 * were created by this request. Any existing topics with the same name are unchanged, and the names of such topics
 * are excluded from the result.
 * <p>
 * If multiple topic definitions have the same topic name, the last one with that name will be used.
 * <p>
 * Apache Kafka added support for creating topics in 0.10.1.0, so this method works as expected with that and later versions.
 * With brokers older than 0.10.1.0, this method is unable to create topics and always returns an empty set.
 *
 * @param topics the specifications of the topics
 * @return the names of the topics that were created by this operation; never null but possibly empty
 * @throws ConnectException if an error occurs, the operation takes too long, or the thread is interrupted while
 *                          attempting to perform this operation
 */
public Set<String> createTopics(NewTopic... topics) {
    Map<String, NewTopic> topicsByName = new HashMap<>();
    if (topics != null) {
        for (NewTopic topic : topics) {
            if (topic != null)
                topicsByName.put(topic.name(), topic);
        }
    }
    if (topicsByName.isEmpty())
        return Collections.emptySet();
    String bootstrapServers = bootstrapServers();
    String topicNameList = Utils.join(topicsByName.keySet(), "', '");
    // Attempt to create any missing topics
    CreateTopicsOptions args = new CreateTopicsOptions().validateOnly(false);
    Map<String, KafkaFuture<Void>> newResults = admin.createTopics(topicsByName.values(), args).values();
    // Iterate over each future so that we can handle individual failures like when some topics already exist
    Set<String> newlyCreatedTopicNames = new HashSet<>();
    for (Map.Entry<String, KafkaFuture<Void>> entry : newResults.entrySet()) {
        String topic = entry.getKey();
        try {
            entry.getValue().get();
            log.info("Created topic {} on brokers at {}", topicsByName.get(topic), bootstrapServers);
            newlyCreatedTopicNames.add(topic);
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof TopicExistsException) {
                log.debug("Found existing topic '{}' on the brokers at {}", topic, bootstrapServers);
                continue;
            }
            if (cause instanceof UnsupportedVersionException) {
                log.debug("Unable to create topic(s) '{}' since the brokers at {} do not support the CreateTopics API." +
                        " Falling back to assume topic(s) exist or will be auto-created by the broker.", topicNameList, bootstrapServers);
                return Collections.emptySet();
            }
            if (cause instanceof ClusterAuthorizationException) {
                log.debug("Not authorized to create topic(s) '{}'." +
                        " Falling back to assume topic(s) exist or will be auto-created by the broker.", topicNameList, bootstrapServers);
                return Collections.emptySet();
            }
            if (cause instanceof TimeoutException) {
                // Timed out waiting for the operation to complete
                throw new ConnectException("Timed out while checking for or creating topic(s) '" + topicNameList + "'." +
                        " This could indicate a connectivity issue, unavailable topic partitions, or if" +
                        " this is your first use of the topic it may have taken too long to create.", cause);
            }
            throw new ConnectException("Error while attempting to create/find topic(s) '" + topicNameList + "'", e);
        } catch (InterruptedException e) {
            Thread.interrupted();
            throw new ConnectException("Interrupted while attempting to create/find topic(s) '" + topicNameList + "'", e);
        }
    }
    return newlyCreatedTopicNames;
}
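A short, hypothetical usage sketch of this method follows. It assumes TopicAdmin can be constructed from plain admin-client configs and used in try-with-resources, which is how Connect wires it internally; the topic names and broker address are illustrative.

import java.util.Collections;
import java.util.Map;
import java.util.Set;

import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.connect.util.TopicAdmin;

public class CreateConnectTopics {
    public static void main(String[] args) {
        // Assumption: TopicAdmin accepts ordinary admin-client configuration properties.
        Map<String, Object> adminConfig =
                Collections.singletonMap(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (TopicAdmin admin = new TopicAdmin(adminConfig)) {
            // Topics that already exist are skipped silently; only names actually created come back.
            Set<String> created = admin.createTopics(
                    new NewTopic("connect-offsets", 25, (short) 3),
                    new NewTopic("connect-configs", 1, (short) 3));
            System.out.println("Newly created: " + created);
        }
    }
}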
use of org.apache.kafka.common.errors.ClusterAuthorizationException in project strimzi by strimzi.
the class ControllerTest method testOnConfigMapAdded_ClusterAuthorizationException.
/**
 * 1. controller is notified that a ConfigMap is created
 * 2. error when creating topic in kafka
 */
@Test
public void testOnConfigMapAdded_ClusterAuthorizationException(TestContext context) {
    Exception createException = new ClusterAuthorizationException("");
    Controller op = configMapAdded(context, createException, null);
    // TODO check a k8s event got created
    // TODO what happens when we subsequently reconcile?
}
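The configMapAdded(...) harness used above is not shown here. As a generic illustration only, and not Strimzi's actual test wiring, the sketch below shows one way such a creation failure could be simulated: mock the Kafka AdminClient so that createTopics() yields a future that has already failed with ClusterAuthorizationException. The class and method names are invented, and the use of Mockito and KafkaFutureImpl is an assumption.

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.errors.ClusterAuthorizationException;
import org.apache.kafka.common.internals.KafkaFutureImpl;

public class FailingCreateTopicsStub {

    /** Builds a mocked AdminClient whose createTopics() always reports ClusterAuthorizationException. */
    public static AdminClient adminClientDenyingTopicCreation(String topicName) {
        // A per-topic future already completed with the authorization error.
        KafkaFutureImpl<Void> failed = new KafkaFutureImpl<>();
        failed.completeExceptionally(new ClusterAuthorizationException("not authorized to create topics"));
        Map<String, KafkaFuture<Void>> perTopic = Collections.singletonMap(topicName, failed);

        CreateTopicsResult result = mock(CreateTopicsResult.class);
        when(result.values()).thenReturn(perTopic);

        AdminClient adminClient = mock(AdminClient.class);
        when(adminClient.createTopics(any())).thenReturn(result);
        return adminClient;
    }
}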
use of org.apache.kafka.common.errors.ClusterAuthorizationException in project apache-kafka-on-k8s by banzaicloud.
the class Sender method completeBatch.
/**
 * Complete or retry the given batch of records.
 *
 * @param batch The record batch
 * @param response The produce response
 * @param correlationId The correlation id for the request
 * @param now The current POSIX timestamp in milliseconds
 */
private void completeBatch(ProducerBatch batch, ProduceResponse.PartitionResponse response, long correlationId, long now) {
    Errors error = response.error;
    if (error == Errors.MESSAGE_TOO_LARGE && batch.recordCount > 1 && (batch.magic() >= RecordBatch.MAGIC_VALUE_V2 || batch.isCompressed())) {
        // If the batch is too large, we split the batch and send the split batches again. We do not decrement
        // the retry attempts in this case.
        log.warn("Got error produce response in correlation id {} on topic-partition {}, splitting and retrying ({} attempts left). Error: {}",
                correlationId, batch.topicPartition, this.retries - batch.attempts(), error);
        if (transactionManager != null)
            transactionManager.removeInFlightBatch(batch);
        this.accumulator.splitAndReenqueue(batch);
        this.accumulator.deallocate(batch);
        this.sensors.recordBatchSplit();
    } else if (error != Errors.NONE) {
        if (canRetry(batch, response)) {
            log.warn("Got error produce response with correlation id {} on topic-partition {}, retrying ({} attempts left). Error: {}",
                    correlationId, batch.topicPartition, this.retries - batch.attempts() - 1, error);
            if (transactionManager == null) {
                reenqueueBatch(batch, now);
            } else if (transactionManager.hasProducerIdAndEpoch(batch.producerId(), batch.producerEpoch())) {
                // If idempotence is enabled only retry the request if the current producer id is the same as
                // the producer id of the batch.
                log.debug("Retrying batch to topic-partition {}. ProducerId: {}; Sequence number : {}",
                        batch.topicPartition, batch.producerId(), batch.baseSequence());
                reenqueueBatch(batch, now);
            } else {
                failBatch(batch, response, new OutOfOrderSequenceException("Attempted to retry sending a " +
                        "batch but the producer id changed from " + batch.producerId() + " to " +
                        transactionManager.producerIdAndEpoch().producerId + " in the mean time. This batch will be dropped."), false);
            }
        } else if (error == Errors.DUPLICATE_SEQUENCE_NUMBER) {
            // If we have received a duplicate sequence error, it means that the sequence number has advanced beyond
            // the sequence of the current batch, and we haven't retained batch metadata on the broker to return
            // the correct offset and timestamp.
            //
            // The only thing we can do is to return success to the user and not return a valid offset and timestamp.
            completeBatch(batch, response);
        } else {
            final RuntimeException exception;
            if (error == Errors.TOPIC_AUTHORIZATION_FAILED)
                exception = new TopicAuthorizationException(batch.topicPartition.topic());
            else if (error == Errors.CLUSTER_AUTHORIZATION_FAILED)
                exception = new ClusterAuthorizationException("The producer is not authorized to do idempotent sends");
            else
                exception = error.exception();
            // tell the user the result of their request. We only adjust sequence numbers if the batch didn't exhaust
            // its retries -- if it did, we don't know whether the sequence number was accepted or not, and
            // thus it is not safe to reassign the sequence.
            failBatch(batch, response, exception, batch.attempts() < this.retries);
        }
        if (error.exception() instanceof InvalidMetadataException) {
            if (error.exception() instanceof UnknownTopicOrPartitionException)
                log.warn("Received unknown topic or partition error in produce request on partition {}. The " +
                        "topic/partition may not exist or the user may not have Describe access to it", batch.topicPartition);
            metadata.requestUpdate();
        }
    } else {
        completeBatch(batch, response);
    }
    // Unmute the completed partition.
    if (guaranteeMessageOrder)
        this.accumulator.unmutePartition(batch.topicPartition);
}
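To show where the exceptions chosen above end up, here is a hypothetical send callback on the application side that separates the fatal authorization case from retriable errors. The class and method names are illustrative, not part of Sender.

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.ClusterAuthorizationException;
import org.apache.kafka.common.errors.RetriableException;

public class SendErrorClassifier {

    /** Sends with a callback that classifies the exceptions completeBatch() can surface. */
    public static void sendClassified(KafkaProducer<String, String> producer,
                                      ProducerRecord<String, String> record) {
        Callback callback = (metadata, exception) -> {
            if (exception == null) {
                // Success: metadata.offset() and metadata.partition() are valid.
            } else if (exception instanceof ClusterAuthorizationException) {
                // Fatal: the idempotent send was not authorized; stop using this producer instance.
            } else if (exception instanceof RetriableException) {
                // The producer already retried internally; decide whether to resend at the application level.
            } else {
                // Other fatal errors, e.g. TopicAuthorizationException chosen in the same branch above.
            }
        };
        producer.send(record, callback);
    }
}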