Use of org.apache.kafka.common.errors.ApiException in project kafka by apache.
Example from the class ReplicationControlManager, method createTopics:
ControllerResult<CreateTopicsResponseData> createTopics(CreateTopicsRequestData request) {
    Map<String, ApiError> topicErrors = new HashMap<>();
    List<ApiMessageAndVersion> records = new ArrayList<>();
    // Check the topic names.
    validateNewTopicNames(topicErrors, request.topics());
    // Identify topics that already exist and mark them with the appropriate error.
    request.topics().stream()
        .filter(creatableTopic -> topicsByName.containsKey(creatableTopic.name()))
        .forEach(t -> topicErrors.put(t.name(),
            new ApiError(Errors.TOPIC_ALREADY_EXISTS, "Topic '" + t.name() + "' already exists.")));
    // Verify that the configurations for the new topics are OK, and figure out what
    // ConfigRecords should be created.
    Map<ConfigResource, Map<String, Entry<OpType, String>>> configChanges =
        computeConfigChanges(topicErrors, request.topics());
    ControllerResult<Map<ConfigResource, ApiError>> configResult =
        configurationControl.incrementalAlterConfigs(configChanges, NO_OP_EXISTENCE_CHECKER);
    for (Entry<ConfigResource, ApiError> entry : configResult.response().entrySet()) {
        if (entry.getValue().isFailure()) {
            topicErrors.put(entry.getKey().name(), entry.getValue());
        }
    }
    records.addAll(configResult.records());
    // Try to create whatever topics are needed.
    Map<String, CreatableTopicResult> successes = new HashMap<>();
    for (CreatableTopic topic : request.topics()) {
        if (topicErrors.containsKey(topic.name()))
            continue;
        ApiError error;
        try {
            error = createTopic(topic, records, successes);
        } catch (ApiException e) {
            error = ApiError.fromThrowable(e);
        }
        if (error.isFailure()) {
            topicErrors.put(topic.name(), error);
        }
    }
    // Create responses for all topics.
    CreateTopicsResponseData data = new CreateTopicsResponseData();
    StringBuilder resultsBuilder = new StringBuilder();
    String resultsPrefix = "";
    for (CreatableTopic topic : request.topics()) {
        ApiError error = topicErrors.get(topic.name());
        if (error != null) {
            data.topics().add(new CreatableTopicResult().
                setName(topic.name()).
                setErrorCode(error.error().code()).
                setErrorMessage(error.message()));
            resultsBuilder.append(resultsPrefix).append(topic).append(": ").
                append(error.error()).append(" (").append(error.message()).append(")");
            resultsPrefix = ", ";
            continue;
        }
        CreatableTopicResult result = successes.get(topic.name());
        data.topics().add(result);
        resultsBuilder.append(resultsPrefix).append(topic).append(": ").append("SUCCESS");
        resultsPrefix = ", ";
    }
    if (request.validateOnly()) {
        log.info("Validate-only CreateTopics result(s): {}", resultsBuilder.toString());
        return ControllerResult.atomicOf(Collections.emptyList(), data);
    } else {
        log.info("CreateTopics result(s): {}", resultsBuilder.toString());
        return ControllerResult.atomicOf(records, data);
    }
}
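In this method, any ApiException thrown by createTopic is converted into an ApiError via ApiError.fromThrowable, so per-topic failures become error codes in the response instead of propagating exceptions. A minimal, hypothetical sketch of that conversion (the class name and topic name below are illustrative, not from the Kafka source):

import org.apache.kafka.common.errors.TopicExistsException;
import org.apache.kafka.common.requests.ApiError;

public class ApiErrorFromThrowableExample {
    public static void main(String[] args) {
        // TopicExistsException is an ApiException; fromThrowable resolves the
        // error code from the exception class and keeps the exception message.
        ApiError error = ApiError.fromThrowable(
            new TopicExistsException("Topic 'payments' already exists."));
        // Expected to print: TOPIC_ALREADY_EXISTS: Topic 'payments' already exists.
        System.out.println(error.error() + ": " + error.message());
    }
}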
Use of org.apache.kafka.common.errors.ApiException in project kafka by apache.
Example from the class AclControlManager, method createAcls:
ControllerResult<List<AclCreateResult>> createAcls(List<AclBinding> acls) {
    List<AclCreateResult> results = new ArrayList<>(acls.size());
    List<ApiMessageAndVersion> records = new ArrayList<>(acls.size());
    for (AclBinding acl : acls) {
        try {
            validateNewAcl(acl);
        } catch (Throwable t) {
            ApiException e = (t instanceof ApiException) ? (ApiException) t :
                new UnknownServerException("Unknown error while trying to create ACL", t);
            results.add(new AclCreateResult(e));
            continue;
        }
        StandardAcl standardAcl = StandardAcl.fromAclBinding(acl);
        if (existingAcls.add(standardAcl)) {
            StandardAclWithId standardAclWithId = new StandardAclWithId(newAclId(), standardAcl);
            idToAcl.put(standardAclWithId.id(), standardAcl);
            records.add(new ApiMessageAndVersion(standardAclWithId.toRecord(), (short) 0));
        }
        results.add(AclCreateResult.SUCCESS);
    }
    return new ControllerResult<>(records, results, true);
}
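validateNewAcl may throw any Throwable; the catch block above passes ApiExceptions through unchanged and wraps everything else in UnknownServerException, so every failed AclCreateResult carries an ApiException. A small, hypothetical sketch of that wrapping pattern (the class name and exception messages are illustrative):

import org.apache.kafka.common.errors.ApiException;
import org.apache.kafka.common.errors.InvalidRequestException;
import org.apache.kafka.common.errors.UnknownServerException;
import org.apache.kafka.server.authorizer.AclCreateResult;

public class AclErrorWrappingExample {
    // Mirrors the catch block: pass ApiExceptions through, wrap anything else.
    static AclCreateResult toResult(Throwable t) {
        ApiException e = (t instanceof ApiException) ? (ApiException) t :
            new UnknownServerException("Unknown error while trying to create ACL", t);
        return new AclCreateResult(e);
    }

    public static void main(String[] args) {
        // Expected: InvalidRequestException
        System.out.println(toResult(new InvalidRequestException("empty principal"))
            .exception().get().getClass().getSimpleName());
        // Expected: UnknownServerException
        System.out.println(toResult(new IllegalStateException("boom"))
            .exception().get().getClass().getSimpleName());
    }
}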
Use of org.apache.kafka.common.errors.ApiException in project kafka by apache.
Example from the class KafkaAdminClient, method alterPartitionReassignments:
@Override
public AlterPartitionReassignmentsResult alterPartitionReassignments(
        Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments,
        AlterPartitionReassignmentsOptions options) {
    final Map<TopicPartition, KafkaFutureImpl<Void>> futures = new HashMap<>();
    final Map<String, Map<Integer, Optional<NewPartitionReassignment>>> topicsToReassignments = new TreeMap<>();
    for (Map.Entry<TopicPartition, Optional<NewPartitionReassignment>> entry : reassignments.entrySet()) {
        String topic = entry.getKey().topic();
        int partition = entry.getKey().partition();
        TopicPartition topicPartition = new TopicPartition(topic, partition);
        Optional<NewPartitionReassignment> reassignment = entry.getValue();
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        futures.put(topicPartition, future);
        if (topicNameIsUnrepresentable(topic)) {
            future.completeExceptionally(new InvalidTopicException("The given topic name '" +
                topic + "' cannot be represented in a request."));
        } else if (topicPartition.partition() < 0) {
            future.completeExceptionally(new InvalidTopicException("The given partition index " +
                topicPartition.partition() + " is not valid."));
        } else {
            Map<Integer, Optional<NewPartitionReassignment>> partitionReassignments =
                topicsToReassignments.get(topicPartition.topic());
            if (partitionReassignments == null) {
                partitionReassignments = new TreeMap<>();
                topicsToReassignments.put(topic, partitionReassignments);
            }
            partitionReassignments.put(partition, reassignment);
        }
    }
    final long now = time.milliseconds();
    Call call = new Call("alterPartitionReassignments", calcDeadlineMs(now, options.timeoutMs()),
        new ControllerNodeProvider()) {

        @Override
        public AlterPartitionReassignmentsRequest.Builder createRequest(int timeoutMs) {
            AlterPartitionReassignmentsRequestData data = new AlterPartitionReassignmentsRequestData();
            for (Map.Entry<String, Map<Integer, Optional<NewPartitionReassignment>>> entry :
                    topicsToReassignments.entrySet()) {
                String topicName = entry.getKey();
                Map<Integer, Optional<NewPartitionReassignment>> partitionsToReassignments = entry.getValue();
                List<ReassignablePartition> reassignablePartitions = new ArrayList<>();
                for (Map.Entry<Integer, Optional<NewPartitionReassignment>> partitionEntry :
                        partitionsToReassignments.entrySet()) {
                    int partitionIndex = partitionEntry.getKey();
                    Optional<NewPartitionReassignment> reassignment = partitionEntry.getValue();
                    ReassignablePartition reassignablePartition = new ReassignablePartition().
                        setPartitionIndex(partitionIndex).
                        setReplicas(reassignment.map(NewPartitionReassignment::targetReplicas).orElse(null));
                    reassignablePartitions.add(reassignablePartition);
                }
                ReassignableTopic reassignableTopic = new ReassignableTopic().
                    setName(topicName).
                    setPartitions(reassignablePartitions);
                data.topics().add(reassignableTopic);
            }
            data.setTimeoutMs(timeoutMs);
            return new AlterPartitionReassignmentsRequest.Builder(data);
        }

        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            AlterPartitionReassignmentsResponse response = (AlterPartitionReassignmentsResponse) abstractResponse;
            Map<TopicPartition, ApiException> errors = new HashMap<>();
            int receivedResponsesCount = 0;
            Errors topLevelError = Errors.forCode(response.data().errorCode());
            switch (topLevelError) {
                case NONE:
                    receivedResponsesCount += validateTopicResponses(response.data().responses(), errors);
                    break;
                case NOT_CONTROLLER:
                    handleNotControllerError(topLevelError);
                    break;
                default:
                    for (ReassignableTopicResponse topicResponse : response.data().responses()) {
                        String topicName = topicResponse.name();
                        for (ReassignablePartitionResponse partition : topicResponse.partitions()) {
                            errors.put(new TopicPartition(topicName, partition.partitionIndex()),
                                new ApiError(topLevelError, response.data().errorMessage()).exception());
                            receivedResponsesCount += 1;
                        }
                    }
                    break;
            }
            assertResponseCountMatch(errors, receivedResponsesCount);
            for (Map.Entry<TopicPartition, ApiException> entry : errors.entrySet()) {
                ApiException exception = entry.getValue();
                if (exception == null)
                    futures.get(entry.getKey()).complete(null);
                else
                    futures.get(entry.getKey()).completeExceptionally(exception);
            }
        }

        private void assertResponseCountMatch(Map<TopicPartition, ApiException> errors, int receivedResponsesCount) {
            int expectedResponsesCount = topicsToReassignments.values().stream().mapToInt(Map::size).sum();
            if (errors.values().stream().noneMatch(Objects::nonNull) && receivedResponsesCount != expectedResponsesCount) {
                String quantifier = receivedResponsesCount > expectedResponsesCount ? "many" : "less";
                throw new UnknownServerException("The server returned too " + quantifier + " results. " +
                    "Expected " + expectedResponsesCount + " but received " + receivedResponsesCount);
            }
        }

        private int validateTopicResponses(List<ReassignableTopicResponse> topicResponses,
                                           Map<TopicPartition, ApiException> errors) {
            int receivedResponsesCount = 0;
            for (ReassignableTopicResponse topicResponse : topicResponses) {
                String topicName = topicResponse.name();
                for (ReassignablePartitionResponse partResponse : topicResponse.partitions()) {
                    Errors partitionError = Errors.forCode(partResponse.errorCode());
                    TopicPartition tp = new TopicPartition(topicName, partResponse.partitionIndex());
                    if (partitionError == Errors.NONE) {
                        errors.put(tp, null);
                    } else {
                        errors.put(tp, new ApiError(partitionError, partResponse.errorMessage()).exception());
                    }
                    receivedResponsesCount += 1;
                }
            }
            return receivedResponsesCount;
        }

        @Override
        void handleFailure(Throwable throwable) {
            for (KafkaFutureImpl<Void> future : futures.values()) {
                future.completeExceptionally(throwable);
            }
        }
    };
    if (!topicsToReassignments.isEmpty()) {
        runnable.call(call, now);
    }
    return new AlterPartitionReassignmentsResult(new HashMap<>(futures));
}
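From the client side, the per-partition ApiExceptions produced by this handler surface on the futures of the returned AlterPartitionReassignmentsResult. A hypothetical usage sketch (broker address, topic, and target replicas are illustrative):

import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewPartitionReassignment;
import org.apache.kafka.common.TopicPartition;

public class AlterReassignmentsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            // Move partition payments-0 to replicas 1, 2, 3; passing
            // Optional.empty() would instead cancel an in-progress reassignment.
            Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments = Map.of(
                new TopicPartition("payments", 0),
                Optional.of(new NewPartitionReassignment(List.of(1, 2, 3))));
            // all() completes exceptionally with the first ApiException, if any.
            admin.alterPartitionReassignments(reassignments).all().get();
        }
    }
}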
Use of org.apache.kafka.common.errors.ApiException in project kafka by apache.
Example from the class KafkaAdminClient, method incrementalAlterConfigs:
private Map<ConfigResource, KafkaFutureImpl<Void>> incrementalAlterConfigs(
        Map<ConfigResource, Collection<AlterConfigOp>> configs,
        final AlterConfigsOptions options,
        Collection<ConfigResource> resources,
        NodeProvider nodeProvider) {
    final Map<ConfigResource, KafkaFutureImpl<Void>> futures = new HashMap<>();
    for (ConfigResource resource : resources)
        futures.put(resource, new KafkaFutureImpl<>());
    final long now = time.milliseconds();
    runnable.call(new Call("incrementalAlterConfigs", calcDeadlineMs(now, options.timeoutMs()), nodeProvider) {

        @Override
        public IncrementalAlterConfigsRequest.Builder createRequest(int timeoutMs) {
            return new IncrementalAlterConfigsRequest.Builder(resources, configs, options.shouldValidateOnly());
        }

        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            IncrementalAlterConfigsResponse response = (IncrementalAlterConfigsResponse) abstractResponse;
            Map<ConfigResource, ApiError> errors = IncrementalAlterConfigsResponse.fromResponseData(response.data());
            for (Map.Entry<ConfigResource, KafkaFutureImpl<Void>> entry : futures.entrySet()) {
                KafkaFutureImpl<Void> future = entry.getValue();
                ApiException exception = errors.get(entry.getKey()).exception();
                if (exception != null) {
                    future.completeExceptionally(exception);
                } else {
                    future.complete(null);
                }
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    }, now);
    return futures;
}
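This private helper backs the public Admin#incrementalAlterConfigs call in KafkaAdminClient: each per-resource ApiError in the response is turned into an ApiException that completes the corresponding future exceptionally. A hypothetical usage sketch (bootstrap address, topic name, and config value are illustrative):

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class IncrementalAlterConfigsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "payments");
            // Set retention.ms to seven days on the topic resource.
            Collection<AlterConfigOp> ops = List.of(
                new AlterConfigOp(new ConfigEntry("retention.ms", "604800000"), AlterConfigOp.OpType.SET));
            // values() exposes one KafkaFuture per resource; an invalid config
            // completes that future exceptionally with an ApiException.
            admin.incrementalAlterConfigs(Map.of(topic, ops)).values().get(topic).get();
        }
    }
}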