Use of org.apache.kafka.common.requests.AbstractResponse in project kafka by apache.
Class SaslClientAuthenticator, method handleKafkaResponse:
private void handleKafkaResponse(RequestHeader requestHeader, byte[] responseBytes) {
    AbstractResponse response;
    ApiKeys apiKey;
    try {
        response = NetworkClient.parseResponse(ByteBuffer.wrap(responseBytes), requestHeader);
        apiKey = ApiKeys.forId(requestHeader.apiKey());
    } catch (SchemaException | IllegalArgumentException e) {
        LOG.debug("Invalid SASL mechanism response, server may be expecting only GSSAPI tokens");
        throw new AuthenticationException("Invalid SASL mechanism response", e);
    }
    switch (apiKey) {
        case SASL_HANDSHAKE:
            handleSaslHandshakeResponse((SaslHandshakeResponse) response);
            break;
        default:
            throw new IllegalStateException("Unexpected API key during handshake: " + apiKey);
    }
}
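The handler above only ever expects a SaslHandshakeResponse during authentication; any other API key is a protocol violation. For context, this code path runs whenever a client connects with SASL enabled. A minimal client-configuration sketch that drives the handshake — the broker address, mechanism, topic name, and credentials below are placeholders, not taken from the snippet:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.serialization.StringSerializer;

public class SaslClientSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "broker:9093"); // placeholder address
        // SASL_PLAINTEXT or SASL_SSL selects a channel that runs SaslClientAuthenticator.
        props.put("security.protocol", "SASL_SSL");
        props.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); // must match a mechanism enabled on the broker
        props.put(SaslConfigs.SASL_JAAS_CONFIG,
                "org.apache.kafka.common.security.plain.PlainLoginModule required "
                        + "username=\"alice\" password=\"alice-secret\";"); // placeholder credentials
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // Forcing a metadata fetch opens a connection, which triggers the SASL
            // handshake handled by the method above; a mechanism mismatch is
            // reported to the application as an AuthenticationException.
            producer.partitionsFor("test-topic"); // placeholder topic
        }
    }
}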
Use of org.apache.kafka.common.requests.AbstractResponse in project apache-kafka-on-k8s by banzaicloud.
Class SaslClientAuthenticator, method receiveKafkaResponse:
private AbstractResponse receiveKafkaResponse() throws IOException {
    try {
        byte[] responseBytes = receiveResponseOrToken();
        if (responseBytes == null)
            return null;
        else {
            AbstractResponse response = NetworkClient.parseResponse(ByteBuffer.wrap(responseBytes), currentRequestHeader);
            currentRequestHeader = null;
            return response;
        }
    } catch (SchemaException | IllegalArgumentException e) {
        LOG.debug("Invalid SASL mechanism response, server may be expecting only GSSAPI tokens");
        setSaslState(SaslState.FAILED);
        throw new IllegalSaslStateException("Invalid SASL mechanism response, server may be expecting a different protocol", e);
    }
}
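From the application's point of view, the IllegalSaslStateException raised here is a subclass of AuthenticationException and surfaces through the client's futures. A minimal sketch of catching it — the properties are assumed to carry SASL settings as in the previous sketch:

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.common.errors.AuthenticationException;

public class SaslFailureSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties(); // assume bootstrap + SASL settings as above
        try (AdminClient admin = AdminClient.create(props)) {
            admin.describeCluster().nodes().get();
        } catch (ExecutionException e) {
            if (e.getCause() instanceof AuthenticationException) {
                // The broker rejected or could not parse our SASL exchange, e.g. the
                // mechanism mismatch detected in receiveKafkaResponse above.
                System.err.println("SASL authentication failed: " + e.getCause().getMessage());
            }
        }
    }
}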
Use of org.apache.kafka.common.requests.AbstractResponse in project apache-kafka-on-k8s by banzaicloud.
Class KafkaAdminClient, method deleteRecords:
public DeleteRecordsResult deleteRecords(final Map<TopicPartition, RecordsToDelete> recordsToDelete, final DeleteRecordsOptions options) {
    // Requests must be sent to the leader node of each partition, so the provided
    // map is regrouped below into one map of topic-partitions per leader.
    final Map<TopicPartition, KafkaFutureImpl<DeletedRecords>> futures = new HashMap<>(recordsToDelete.size());
    for (TopicPartition topicPartition : recordsToDelete.keySet()) {
        futures.put(topicPartition, new KafkaFutureImpl<DeletedRecords>());
    }
    // Collect the topic names whose metadata we need in order to find the leaders.
    final Set<String> topics = new HashSet<>();
    for (TopicPartition topicPartition : recordsToDelete.keySet()) {
        topics.add(topicPartition.topic());
    }
    final long nowMetadata = time.milliseconds();
    final long deadline = calcDeadlineMs(nowMetadata, options.timeoutMs());
    // Fetch topic metadata to discover the partition leaders.
    runnable.call(new Call("topicsMetadata", deadline, new LeastLoadedNodeProvider()) {

        @Override
        AbstractRequest.Builder createRequest(int timeoutMs) {
            return new MetadataRequest.Builder(new ArrayList<>(topics), false);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            MetadataResponse response = (MetadataResponse) abstractResponse;
            Map<String, Errors> errors = response.errors();
            Cluster cluster = response.cluster();
            // Complete the futures of topics that came back with errors.
            for (Map.Entry<String, Errors> topicError : errors.entrySet()) {
                for (Map.Entry<TopicPartition, KafkaFutureImpl<DeletedRecords>> future : futures.entrySet()) {
                    if (future.getKey().topic().equals(topicError.getKey())) {
                        future.getValue().completeExceptionally(topicError.getValue().exception());
                    }
                }
            }
            // Group the topic-partitions by leader node.
            Map<Node, Map<TopicPartition, Long>> leaders = new HashMap<>();
            for (Map.Entry<TopicPartition, RecordsToDelete> entry : recordsToDelete.entrySet()) {
                // Skip deletion requests for topics that reported errors.
                if (!errors.containsKey(entry.getKey().topic())) {
                    Node node = cluster.leaderFor(entry.getKey());
                    if (node != null) {
                        if (!leaders.containsKey(node))
                            leaders.put(node, new HashMap<TopicPartition, Long>());
                        leaders.get(node).put(entry.getKey(), entry.getValue().beforeOffset());
                    } else {
                        KafkaFutureImpl<DeletedRecords> future = futures.get(entry.getKey());
                        future.completeExceptionally(Errors.LEADER_NOT_AVAILABLE.exception());
                    }
                }
            }
            // Send one DeleteRecords request per leader.
            for (final Map.Entry<Node, Map<TopicPartition, Long>> entry : leaders.entrySet()) {
                final long nowDelete = time.milliseconds();
                final int brokerId = entry.getKey().id();
                runnable.call(new Call("deleteRecords", deadline, new ConstantNodeIdProvider(brokerId)) {

                    @Override
                    AbstractRequest.Builder createRequest(int timeoutMs) {
                        return new DeleteRecordsRequest.Builder(timeoutMs, entry.getValue());
                    }

                    @Override
                    void handleResponse(AbstractResponse abstractResponse) {
                        DeleteRecordsResponse response = (DeleteRecordsResponse) abstractResponse;
                        for (Map.Entry<TopicPartition, DeleteRecordsResponse.PartitionResponse> result : response.responses().entrySet()) {
                            KafkaFutureImpl<DeletedRecords> future = futures.get(result.getKey());
                            if (result.getValue().error == Errors.NONE) {
                                future.complete(new DeletedRecords(result.getValue().lowWatermark));
                            } else {
                                future.completeExceptionally(result.getValue().error.exception());
                            }
                        }
                    }

                    @Override
                    void handleFailure(Throwable throwable) {
                        completeAllExceptionally(futures.values(), throwable);
                    }
                }, nowDelete);
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    }, nowMetadata);
    return new DeleteRecordsResult(new HashMap<TopicPartition, KafkaFuture<DeletedRecords>>(futures));
}
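The public entry point for this flow is AdminClient.deleteRecords. A usage sketch — the bootstrap address, topic name, partition, and offset are placeholders:

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DeleteRecordsOptions;
import org.apache.kafka.clients.admin.DeleteRecordsResult;
import org.apache.kafka.clients.admin.DeletedRecords;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;

public class DeleteRecordsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (AdminClient admin = AdminClient.create(props)) {
            // Truncate "my-topic" partition 0 up to (but not including) offset 100.
            Map<TopicPartition, RecordsToDelete> toDelete = Collections.singletonMap(
                    new TopicPartition("my-topic", 0), RecordsToDelete.beforeOffset(100L));
            DeleteRecordsResult result = admin.deleteRecords(toDelete, new DeleteRecordsOptions());
            // Each partition's future completes with the new low watermark.
            for (Map.Entry<TopicPartition, KafkaFuture<DeletedRecords>> e : result.lowWatermarks().entrySet()) {
                System.out.println(e.getKey() + " low watermark: " + e.getValue().get().lowWatermark());
            }
        }
    }
}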
Use of org.apache.kafka.common.requests.AbstractResponse in project apache-kafka-on-k8s by banzaicloud.
Class KafkaAdminClient, method createAcls:
@Override
public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) {
    final long now = time.milliseconds();
    final Map<AclBinding, KafkaFutureImpl<Void>> futures = new HashMap<>();
    final List<AclCreation> aclCreations = new ArrayList<>();
    for (AclBinding acl : acls) {
        if (futures.get(acl) == null) {
            KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
            futures.put(acl, future);
            String indefinite = acl.toFilter().findIndefiniteField();
            if (indefinite == null) {
                aclCreations.add(new AclCreation(acl));
            } else {
                future.completeExceptionally(new InvalidRequestException("Invalid ACL creation: " + indefinite));
            }
        }
    }
    runnable.call(new Call("createAcls", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

        @Override
        AbstractRequest.Builder createRequest(int timeoutMs) {
            return new CreateAclsRequest.Builder(aclCreations);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            CreateAclsResponse response = (CreateAclsResponse) abstractResponse;
            List<AclCreationResponse> responses = response.aclCreationResponses();
            Iterator<AclCreationResponse> iter = responses.iterator();
            for (AclCreation aclCreation : aclCreations) {
                KafkaFutureImpl<Void> future = futures.get(aclCreation.acl());
                if (!iter.hasNext()) {
                    future.completeExceptionally(new UnknownServerException("The broker reported no creation result for the given ACL."));
                } else {
                    AclCreationResponse creation = iter.next();
                    if (creation.error().isFailure()) {
                        future.completeExceptionally(creation.error().exception());
                    } else {
                        future.complete(null);
                    }
                }
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    }, now);
    return new CreateAclsResult(new HashMap<AclBinding, KafkaFuture<Void>>(futures));
}
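A usage sketch of the public API above, assuming the 1.x-era signatures this fork uses, where an AclBinding pairs a Resource with an AccessControlEntry (later clients use ResourcePattern instead); the principal, host, and topic name are placeholders:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.CreateAclsOptions;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.Resource;
import org.apache.kafka.common.resource.ResourceType;

public class CreateAclsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (AdminClient admin = AdminClient.create(props)) {
            // Allow principal User:alice to read "my-topic" from any host.
            AclBinding binding = new AclBinding(
                    new Resource(ResourceType.TOPIC, "my-topic"),
                    new AccessControlEntry("User:alice", "*", AclOperation.READ, AclPermissionType.ALLOW));
            // all() fails if any individual creation failed, mirroring the
            // per-binding futures populated in handleResponse above.
            admin.createAcls(Collections.singleton(binding), new CreateAclsOptions()).all().get();
        }
    }
}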
Use of org.apache.kafka.common.requests.AbstractResponse in project apache-kafka-on-k8s by banzaicloud.
Class KafkaAdminClient, method describeConfigs:
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) {
    final Map<ConfigResource, KafkaFutureImpl<Config>> unifiedRequestFutures = new HashMap<>();
    final Map<ConfigResource, KafkaFutureImpl<Config>> brokerFutures = new HashMap<>(configResources.size());
    // The BROKER resources which we want to describe. We must make a separate DescribeConfigs
    // request for every BROKER resource we want to describe.
    final Collection<Resource> brokerResources = new ArrayList<>();
    // The non-BROKER resources which we want to describe. These resources can be described by a
    // single, unified DescribeConfigs request.
    final Collection<Resource> unifiedRequestResources = new ArrayList<>(configResources.size());
    for (ConfigResource resource : configResources) {
        if (resource.type() == ConfigResource.Type.BROKER && !resource.isDefault()) {
            brokerFutures.put(resource, new KafkaFutureImpl<Config>());
            brokerResources.add(configResourceToResource(resource));
        } else {
            unifiedRequestFutures.put(resource, new KafkaFutureImpl<Config>());
            unifiedRequestResources.add(configResourceToResource(resource));
        }
    }
    final long now = time.milliseconds();
    if (!unifiedRequestResources.isEmpty()) {
        runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

            @Override
            AbstractRequest.Builder createRequest(int timeoutMs) {
                return new DescribeConfigsRequest.Builder(unifiedRequestResources).includeSynonyms(options.includeSynonyms());
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                for (Map.Entry<ConfigResource, KafkaFutureImpl<Config>> entry : unifiedRequestFutures.entrySet()) {
                    ConfigResource configResource = entry.getKey();
                    KafkaFutureImpl<Config> future = entry.getValue();
                    DescribeConfigsResponse.Config config = response.config(configResourceToResource(configResource));
                    if (config == null) {
                        future.completeExceptionally(new UnknownServerException("Malformed broker response: missing config for " + configResource));
                        continue;
                    }
                    if (config.error().isFailure()) {
                        future.completeExceptionally(config.error().exception());
                        continue;
                    }
                    List<ConfigEntry> configEntries = new ArrayList<>();
                    for (DescribeConfigsResponse.ConfigEntry configEntry : config.entries()) {
                        configEntries.add(new ConfigEntry(configEntry.name(), configEntry.value(), configSource(configEntry.source()), configEntry.isSensitive(), configEntry.isReadOnly(), configSynonyms(configEntry)));
                    }
                    future.complete(new Config(configEntries));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(unifiedRequestFutures.values(), throwable);
            }
        }, now);
    }
    for (Map.Entry<ConfigResource, KafkaFutureImpl<Config>> entry : brokerFutures.entrySet()) {
        final KafkaFutureImpl<Config> brokerFuture = entry.getValue();
        final Resource resource = configResourceToResource(entry.getKey());
        final int nodeId = Integer.parseInt(resource.name());
        runnable.call(new Call("describeBrokerConfigs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(nodeId)) {

            @Override
            AbstractRequest.Builder createRequest(int timeoutMs) {
                return new DescribeConfigsRequest.Builder(Collections.singleton(resource)).includeSynonyms(options.includeSynonyms());
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                DescribeConfigsResponse.Config config = response.configs().get(resource);
                if (config == null) {
                    brokerFuture.completeExceptionally(new UnknownServerException("Malformed broker response: missing config for " + resource));
                    return;
                }
                if (config.error().isFailure())
                    brokerFuture.completeExceptionally(config.error().exception());
                else {
                    List<ConfigEntry> configEntries = new ArrayList<>();
                    for (DescribeConfigsResponse.ConfigEntry configEntry : config.entries()) {
                        configEntries.add(new ConfigEntry(configEntry.name(), configEntry.value(), configSource(configEntry.source()), configEntry.isSensitive(), configEntry.isReadOnly(), configSynonyms(configEntry)));
                    }
                    brokerFuture.complete(new Config(configEntries));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                brokerFuture.completeExceptionally(throwable);
            }
        }, now);
    }
    final Map<ConfigResource, KafkaFuture<Config>> allFutures = new HashMap<>();
    allFutures.putAll(brokerFutures);
    allFutures.putAll(unifiedRequestFutures);
    return new DescribeConfigsResult(allFutures);
}
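A usage sketch of the public API above, mixing a topic resource (served by the unified request) with a broker resource (served by a per-broker request, as the method splits them); the topic name and broker id are placeholders:

import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.DescribeConfigsOptions;
import org.apache.kafka.common.config.ConfigResource;

public class DescribeConfigsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (AdminClient admin = AdminClient.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic");
            ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "1");
            Map<ConfigResource, Config> configs = admin
                    .describeConfigs(Arrays.asList(topic, broker), new DescribeConfigsOptions())
                    .all().get();
            // Print the effective topic configuration.
            for (ConfigEntry entry : configs.get(topic).entries())
                System.out.println(entry.name() + " = " + entry.value());
        }
    }
}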