Use of org.apache.kafka.common.errors.UnknownServerException in project kafka by apache.
From the class AclControlManager, method createAcls.
ControllerResult<List<AclCreateResult>> createAcls(List<AclBinding> acls) {
    List<AclCreateResult> results = new ArrayList<>(acls.size());
    List<ApiMessageAndVersion> records = new ArrayList<>(acls.size());
    for (AclBinding acl : acls) {
        try {
            validateNewAcl(acl);
        } catch (Throwable t) {
            // Pass ApiExceptions through; wrap anything unexpected in UnknownServerException
            // so every failed AclCreateResult carries an ApiException.
            ApiException e = (t instanceof ApiException) ? (ApiException) t :
                new UnknownServerException("Unknown error while trying to create ACL", t);
            results.add(new AclCreateResult(e));
            continue;
        }
        StandardAcl standardAcl = StandardAcl.fromAclBinding(acl);
        if (existingAcls.add(standardAcl)) {
            // Only genuinely new ACLs produce a record; duplicates still report success below.
            StandardAclWithId standardAclWithId = new StandardAclWithId(newAclId(), standardAcl);
            idToAcl.put(standardAclWithId.id(), standardAcl);
            records.add(new ApiMessageAndVersion(standardAclWithId.toRecord(), (short) 0));
        }
        results.add(AclCreateResult.SUCCESS);
    }
    return new ControllerResult<>(records, results, true);
}
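The catch block above guarantees that each AclCreateResult carries an ApiException even when validateNewAcl throws something unexpected. A minimal, self-contained sketch of that same wrapping pattern, using only the public Kafka error classes (the class name AclErrorWrappingSketch and the helper asApiException are illustrative, not part of Kafka):

import org.apache.kafka.common.errors.ApiException;
import org.apache.kafka.common.errors.UnknownServerException;

public class AclErrorWrappingSketch {
    // Pass ApiExceptions through unchanged; wrap anything else in UnknownServerException.
    static ApiException asApiException(Throwable t) {
        return (t instanceof ApiException)
            ? (ApiException) t
            : new UnknownServerException("Unknown error while trying to create ACL", t);
    }

    public static void main(String[] args) {
        // An unexpected RuntimeException is reported to the caller as UnknownServerException.
        ApiException wrapped = asApiException(new IllegalStateException("validation blew up"));
        System.out.println(wrapped.getClass().getSimpleName() + ": " + wrapped.getMessage());
    }
}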
Use of org.apache.kafka.common.errors.UnknownServerException in project kafka by apache.
From the class ProducerIdControlManager, method generateNextProducerId.
ControllerResult<ProducerIdsBlock> generateNextProducerId(int brokerId, long brokerEpoch) {
    clusterControlManager.checkBrokerEpoch(brokerId, brokerEpoch);
    long firstProducerIdInBlock = nextProducerId.get();
    if (firstProducerIdInBlock > Long.MAX_VALUE - ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE) {
        // Handing out another block would push its last producer id past Long.MAX_VALUE.
        throw new UnknownServerException("Exhausted all producerIds as the next block's end producerId " +
            "has exceeded the int64 type limit");
    }
    ProducerIdsBlock block = new ProducerIdsBlock(brokerId, firstProducerIdInBlock, ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE);
    long newNextProducerId = block.nextBlockFirstId();
    ProducerIdsRecord record = new ProducerIdsRecord()
        .setNextProducerId(newNextProducerId)
        .setBrokerId(brokerId)
        .setBrokerEpoch(brokerEpoch);
    return ControllerResult.of(Collections.singletonList(new ApiMessageAndVersion(record, (short) 0)), block);
}
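The guard above refuses to allocate a producer-id block whose last id would overflow a signed 64-bit value. A self-contained sketch of the same arithmetic, assuming the standard block size of 1000 (the class name ProducerIdOverflowSketch, the BLOCK_SIZE constant, and the helper nextBlockFirstId are illustrative, not Kafka APIs):

import org.apache.kafka.common.errors.UnknownServerException;

public class ProducerIdOverflowSketch {
    static final int BLOCK_SIZE = 1000; // assumption: mirrors ProducerIdsBlock.PRODUCER_ID_BLOCK_SIZE

    // Returns the first id of the block after the one starting at firstProducerIdInBlock,
    // or throws if that block would run past Long.MAX_VALUE.
    static long nextBlockFirstId(long firstProducerIdInBlock) {
        if (firstProducerIdInBlock > Long.MAX_VALUE - BLOCK_SIZE) {
            throw new UnknownServerException("Exhausted all producerIds as the next block's end producerId " +
                "has exceeded the int64 type limit");
        }
        return firstProducerIdInBlock + BLOCK_SIZE;
    }

    public static void main(String[] args) {
        System.out.println(nextBlockFirstId(0L));        // prints 1000
        try {
            nextBlockFirstId(Long.MAX_VALUE - 10L);      // trips the overflow guard
        } catch (UnknownServerException e) {
            System.out.println("caught: " + e.getMessage());
        }
    }
}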
Use of org.apache.kafka.common.errors.UnknownServerException in project kafka by apache.
From the class KafkaAdminClient, method alterPartitionReassignments.
@Override
public AlterPartitionReassignmentsResult alterPartitionReassignments(Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments, AlterPartitionReassignmentsOptions options) {
    final Map<TopicPartition, KafkaFutureImpl<Void>> futures = new HashMap<>();
    final Map<String, Map<Integer, Optional<NewPartitionReassignment>>> topicsToReassignments = new TreeMap<>();
    for (Map.Entry<TopicPartition, Optional<NewPartitionReassignment>> entry : reassignments.entrySet()) {
        String topic = entry.getKey().topic();
        int partition = entry.getKey().partition();
        TopicPartition topicPartition = new TopicPartition(topic, partition);
        Optional<NewPartitionReassignment> reassignment = entry.getValue();
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        futures.put(topicPartition, future);
        if (topicNameIsUnrepresentable(topic)) {
            future.completeExceptionally(new InvalidTopicException("The given topic name '" + topic + "' cannot be represented in a request."));
        } else if (topicPartition.partition() < 0) {
            future.completeExceptionally(new InvalidTopicException("The given partition index " + topicPartition.partition() + " is not valid."));
        } else {
            Map<Integer, Optional<NewPartitionReassignment>> partitionReassignments = topicsToReassignments.get(topicPartition.topic());
            if (partitionReassignments == null) {
                partitionReassignments = new TreeMap<>();
                topicsToReassignments.put(topic, partitionReassignments);
            }
            partitionReassignments.put(partition, reassignment);
        }
    }
    final long now = time.milliseconds();
    Call call = new Call("alterPartitionReassignments", calcDeadlineMs(now, options.timeoutMs()), new ControllerNodeProvider()) {

        @Override
        public AlterPartitionReassignmentsRequest.Builder createRequest(int timeoutMs) {
            AlterPartitionReassignmentsRequestData data = new AlterPartitionReassignmentsRequestData();
            for (Map.Entry<String, Map<Integer, Optional<NewPartitionReassignment>>> entry : topicsToReassignments.entrySet()) {
                String topicName = entry.getKey();
                Map<Integer, Optional<NewPartitionReassignment>> partitionsToReassignments = entry.getValue();
                List<ReassignablePartition> reassignablePartitions = new ArrayList<>();
                for (Map.Entry<Integer, Optional<NewPartitionReassignment>> partitionEntry : partitionsToReassignments.entrySet()) {
                    int partitionIndex = partitionEntry.getKey();
                    Optional<NewPartitionReassignment> reassignment = partitionEntry.getValue();
                    // An empty Optional maps to null replicas, which the broker treats as cancelling the reassignment.
                    ReassignablePartition reassignablePartition = new ReassignablePartition()
                        .setPartitionIndex(partitionIndex)
                        .setReplicas(reassignment.map(NewPartitionReassignment::targetReplicas).orElse(null));
                    reassignablePartitions.add(reassignablePartition);
                }
                ReassignableTopic reassignableTopic = new ReassignableTopic().setName(topicName).setPartitions(reassignablePartitions);
                data.topics().add(reassignableTopic);
            }
            data.setTimeoutMs(timeoutMs);
            return new AlterPartitionReassignmentsRequest.Builder(data);
        }

        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            AlterPartitionReassignmentsResponse response = (AlterPartitionReassignmentsResponse) abstractResponse;
            Map<TopicPartition, ApiException> errors = new HashMap<>();
            int receivedResponsesCount = 0;
            Errors topLevelError = Errors.forCode(response.data().errorCode());
            switch (topLevelError) {
                case NONE:
                    receivedResponsesCount += validateTopicResponses(response.data().responses(), errors);
                    break;
                case NOT_CONTROLLER:
                    handleNotControllerError(topLevelError);
                    break;
                default:
                    // A top-level error applies to every partition in the response.
                    for (ReassignableTopicResponse topicResponse : response.data().responses()) {
                        String topicName = topicResponse.name();
                        for (ReassignablePartitionResponse partition : topicResponse.partitions()) {
                            errors.put(new TopicPartition(topicName, partition.partitionIndex()), new ApiError(topLevelError, response.data().errorMessage()).exception());
                            receivedResponsesCount += 1;
                        }
                    }
                    break;
            }
            assertResponseCountMatch(errors, receivedResponsesCount);
            for (Map.Entry<TopicPartition, ApiException> entry : errors.entrySet()) {
                ApiException exception = entry.getValue();
                if (exception == null)
                    futures.get(entry.getKey()).complete(null);
                else
                    futures.get(entry.getKey()).completeExceptionally(exception);
            }
        }

        private void assertResponseCountMatch(Map<TopicPartition, ApiException> errors, int receivedResponsesCount) {
            int expectedResponsesCount = topicsToReassignments.values().stream().mapToInt(Map::size).sum();
            if (errors.values().stream().noneMatch(Objects::nonNull) && receivedResponsesCount != expectedResponsesCount) {
                // No per-partition errors were reported, yet the broker answered for a different number of partitions than were requested.
                String quantifier = receivedResponsesCount > expectedResponsesCount ? "many" : "few";
                throw new UnknownServerException("The server returned too " + quantifier + " results. Expected " + expectedResponsesCount + " but received " + receivedResponsesCount);
            }
        }

        private int validateTopicResponses(List<ReassignableTopicResponse> topicResponses, Map<TopicPartition, ApiException> errors) {
            int receivedResponsesCount = 0;
            for (ReassignableTopicResponse topicResponse : topicResponses) {
                String topicName = topicResponse.name();
                for (ReassignablePartitionResponse partResponse : topicResponse.partitions()) {
                    Errors partitionError = Errors.forCode(partResponse.errorCode());
                    TopicPartition tp = new TopicPartition(topicName, partResponse.partitionIndex());
                    if (partitionError == Errors.NONE) {
                        errors.put(tp, null);
                    } else {
                        errors.put(tp, new ApiError(partitionError, partResponse.errorMessage()).exception());
                    }
                    receivedResponsesCount += 1;
                }
            }
            return receivedResponsesCount;
        }

        @Override
        void handleFailure(Throwable throwable) {
            for (KafkaFutureImpl<Void> future : futures.values()) {
                future.completeExceptionally(throwable);
            }
        }
    };
    if (!topicsToReassignments.isEmpty()) {
        runnable.call(call, now);
    }
    return new AlterPartitionReassignmentsResult(new HashMap<>(futures));
}
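From the caller's side this method is reached through the public Admin interface, and a server-side failure such as the UnknownServerException raised in assertResponseCountMatch surfaces when the returned futures are resolved. A minimal usage sketch; the bootstrap address, topic name, and broker ids are placeholders:

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewPartitionReassignment;
import org.apache.kafka.common.TopicPartition;

public class AlterReassignmentsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker address

        try (Admin admin = Admin.create(props)) {
            Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments = new HashMap<>();
            // Move partition 0 of "demo-topic" onto brokers 1 and 2; Optional.empty() would cancel an in-flight reassignment instead.
            reassignments.put(new TopicPartition("demo-topic", 0),
                Optional.of(new NewPartitionReassignment(Arrays.asList(1, 2))));
            // all() completes once every per-partition future has completed; server-side errors,
            // including UnknownServerException, are rethrown here wrapped in an ExecutionException.
            admin.alterPartitionReassignments(reassignments).all().get();
        }
    }
}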