Use of org.apache.kafka.common.requests.DescribeConfigsResponse in project apache-kafka-on-k8s by banzaicloud.
The KafkaAdminClient class, method describeConfigs:
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) {
    final Map<ConfigResource, KafkaFutureImpl<Config>> unifiedRequestFutures = new HashMap<>();
    final Map<ConfigResource, KafkaFutureImpl<Config>> brokerFutures = new HashMap<>(configResources.size());
    // The BROKER resources which we want to describe. We must make a separate DescribeConfigs
    // request for every BROKER resource we want to describe.
    final Collection<Resource> brokerResources = new ArrayList<>();
    // The non-BROKER resources which we want to describe. These resources can be described by a
    // single, unified DescribeConfigs request.
    final Collection<Resource> unifiedRequestResources = new ArrayList<>(configResources.size());
    for (ConfigResource resource : configResources) {
        if (resource.type() == ConfigResource.Type.BROKER && !resource.isDefault()) {
            brokerFutures.put(resource, new KafkaFutureImpl<Config>());
            brokerResources.add(configResourceToResource(resource));
        } else {
            unifiedRequestFutures.put(resource, new KafkaFutureImpl<Config>());
            unifiedRequestResources.add(configResourceToResource(resource));
        }
    }
    final long now = time.milliseconds();
    if (!unifiedRequestResources.isEmpty()) {
        runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

            @Override
            AbstractRequest.Builder createRequest(int timeoutMs) {
                return new DescribeConfigsRequest.Builder(unifiedRequestResources).includeSynonyms(options.includeSynonyms());
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                for (Map.Entry<ConfigResource, KafkaFutureImpl<Config>> entry : unifiedRequestFutures.entrySet()) {
                    ConfigResource configResource = entry.getKey();
                    KafkaFutureImpl<Config> future = entry.getValue();
                    DescribeConfigsResponse.Config config = response.config(configResourceToResource(configResource));
                    if (config == null) {
                        future.completeExceptionally(new UnknownServerException(
                            "Malformed broker response: missing config for " + configResource));
                        continue;
                    }
                    if (config.error().isFailure()) {
                        future.completeExceptionally(config.error().exception());
                        continue;
                    }
                    List<ConfigEntry> configEntries = new ArrayList<>();
                    for (DescribeConfigsResponse.ConfigEntry configEntry : config.entries()) {
                        configEntries.add(new ConfigEntry(configEntry.name(), configEntry.value(),
                            configSource(configEntry.source()), configEntry.isSensitive(),
                            configEntry.isReadOnly(), configSynonyms(configEntry)));
                    }
                    future.complete(new Config(configEntries));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(unifiedRequestFutures.values(), throwable);
            }
        }, now);
    }
    for (Map.Entry<ConfigResource, KafkaFutureImpl<Config>> entry : brokerFutures.entrySet()) {
        final KafkaFutureImpl<Config> brokerFuture = entry.getValue();
        final Resource resource = configResourceToResource(entry.getKey());
        final int nodeId = Integer.parseInt(resource.name());
        runnable.call(new Call("describeBrokerConfigs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(nodeId)) {

            @Override
            AbstractRequest.Builder createRequest(int timeoutMs) {
                return new DescribeConfigsRequest.Builder(Collections.singleton(resource)).includeSynonyms(options.includeSynonyms());
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                DescribeConfigsResponse.Config config = response.configs().get(resource);
                if (config == null) {
                    brokerFuture.completeExceptionally(new UnknownServerException(
                        "Malformed broker response: missing config for " + resource));
                    return;
                }
                if (config.error().isFailure()) {
                    brokerFuture.completeExceptionally(config.error().exception());
                } else {
                    List<ConfigEntry> configEntries = new ArrayList<>();
                    for (DescribeConfigsResponse.ConfigEntry configEntry : config.entries()) {
                        configEntries.add(new ConfigEntry(configEntry.name(), configEntry.value(),
                            configSource(configEntry.source()), configEntry.isSensitive(),
                            configEntry.isReadOnly(), configSynonyms(configEntry)));
                    }
                    brokerFuture.complete(new Config(configEntries));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                brokerFuture.completeExceptionally(throwable);
            }
        }, now);
    }
    final Map<ConfigResource, KafkaFuture<Config>> allFutures = new HashMap<>();
    allFutures.putAll(brokerFutures);
    allFutures.putAll(unifiedRequestFutures);
    return new DescribeConfigsResult(allFutures);
}
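For context, callers reach this method through the public AdminClient API rather than invoking it directly. The following is a minimal usage sketch, assuming a broker reachable at localhost:9092 and illustrative resource names (both are assumptions, not taken from the listing above):

import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.config.ConfigResource;

public class DescribeConfigsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Assumption: a broker is reachable at localhost:9092.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            // "my-topic" and broker id "0" are illustrative names.
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic");
            ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "0");
            // In this client version the topic resource travels in the unified request, while the
            // broker resource is sent in its own per-node DescribeConfigs call, as implemented above.
            Map<ConfigResource, Config> configs = admin.describeConfigs(Arrays.asList(topic, broker)).all().get();
            configs.forEach((resource, config) ->
                System.out.println(resource + " -> " + config.entries().size() + " entries"));
        }
    }
}

Note that all() collapses every per-resource future into one, so a failure for any single resource fails the combined future; values() keeps the futures separate, as shown after the next listing.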
Use of org.apache.kafka.common.requests.DescribeConfigsResponse in project kafka by apache.
The KafkaAdminClient class, method describeConfigs:
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) {
    // Partition the requested config resources based on which broker they must be sent to, with the
    // null broker being used for config resources which can be obtained from any broker.
    final Map<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> brokerFutures = new HashMap<>(configResources.size());
    for (ConfigResource resource : configResources) {
        Integer broker = nodeFor(resource);
        brokerFutures.compute(broker, (key, value) -> {
            if (value == null) {
                value = new HashMap<>();
            }
            value.put(resource, new KafkaFutureImpl<>());
            return value;
        });
    }
    final long now = time.milliseconds();
    for (Map.Entry<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> entry : brokerFutures.entrySet()) {
        Integer broker = entry.getKey();
        Map<ConfigResource, KafkaFutureImpl<Config>> unified = entry.getValue();
        runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()),
            broker != null ? new ConstantNodeIdProvider(broker) : new LeastLoadedNodeProvider()) {

            @Override
            DescribeConfigsRequest.Builder createRequest(int timeoutMs) {
                return new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData()
                    .setResources(unified.keySet().stream()
                        .map(config -> new DescribeConfigsRequestData.DescribeConfigsResource()
                            .setResourceName(config.name())
                            .setResourceType(config.type().id())
                            .setConfigurationKeys(null))
                        .collect(Collectors.toList()))
                    .setIncludeSynonyms(options.includeSynonyms())
                    .setIncludeDocumentation(options.includeDocumentation()));
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                for (Map.Entry<ConfigResource, DescribeConfigsResponseData.DescribeConfigsResult> entry : response.resultMap().entrySet()) {
                    ConfigResource configResource = entry.getKey();
                    DescribeConfigsResponseData.DescribeConfigsResult describeConfigsResult = entry.getValue();
                    KafkaFutureImpl<Config> future = unified.get(configResource);
                    if (future == null) {
                        if (broker != null) {
                            log.warn("The config {} in the response from broker {} is not in the request", configResource, broker);
                        } else {
                            log.warn("The config {} in the response from the least loaded broker is not in the request", configResource);
                        }
                    } else {
                        if (describeConfigsResult.errorCode() != Errors.NONE.code()) {
                            future.completeExceptionally(Errors.forCode(describeConfigsResult.errorCode())
                                .exception(describeConfigsResult.errorMessage()));
                        } else {
                            future.complete(describeConfigResult(describeConfigsResult));
                        }
                    }
                }
                completeUnrealizedFutures(unified.entrySet().stream(),
                    configResource -> "The broker response did not contain a result for config resource " + configResource);
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(unified.values(), throwable);
            }
        }, now);
    }
    return new DescribeConfigsResult(new HashMap<>(brokerFutures.entrySet().stream()
        .flatMap(x -> x.getValue().entrySet().stream())
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))));
}
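The per-broker routing comes from the nodeFor helper, which is not shown in this listing; presumably it returns the broker id for broker-scoped resources and null for everything else, so broker resources are pinned with ConstantNodeIdProvider while the rest go to the least loaded node. Below is a minimal sketch of consuming the result through values(), so that one failing resource does not mask the others; the Admin instance and resource names are assumptions, not part of the listing:

import java.util.Arrays;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.config.ConfigResource;

public class PerResourceConfigs {
    // Assumes `admin` is an already-configured Admin client; "my-topic" and broker id "1" are illustrative.
    static void printConfigsIndependently(Admin admin) throws InterruptedException {
        ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic");
        ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "1");
        Map<ConfigResource, KafkaFuture<Config>> futures =
            admin.describeConfigs(Arrays.asList(topic, broker)).values();
        for (Map.Entry<ConfigResource, KafkaFuture<Config>> entry : futures.entrySet()) {
            try {
                // Each resource has its own future, so an error on one broker leaves the others usable.
                Config config = entry.getValue().get();
                System.out.println(entry.getKey() + " -> " + config.entries().size() + " entries");
            } catch (ExecutionException e) {
                System.err.println("describeConfigs failed for " + entry.getKey() + ": " + e.getCause());
            }
        }
    }
}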
Use of org.apache.kafka.common.requests.DescribeConfigsResponse in project kafka by apache.
The KafkaAdminClientTest class, method testDescribeConfigsPartialResponse:
@Test
public void testDescribeConfigsPartialResponse() throws Exception {
    ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "topic");
    ConfigResource topic2 = new ConfigResource(ConfigResource.Type.TOPIC, "topic2");
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(new DescribeConfigsResponse(new DescribeConfigsResponseData().setResults(asList(
            new DescribeConfigsResponseData.DescribeConfigsResult()
                .setResourceName(topic.name())
                .setResourceType(topic.type().id())
                .setErrorCode(Errors.NONE.code())
                .setConfigs(emptyList())))));
        Map<ConfigResource, KafkaFuture<Config>> result = env.adminClient().describeConfigs(asList(topic, topic2)).values();
        assertEquals(new HashSet<>(asList(topic, topic2)), result.keySet());
        result.get(topic);
        TestUtils.assertFutureThrows(result.get(topic2), ApiException.class);
    }
}
Use of org.apache.kafka.common.requests.DescribeConfigsResponse in project apache-kafka-on-k8s by banzaicloud.
The KafkaAdminClientTest class, method testHandleTimeout:
/**
 * Test handling timeouts.
 */
// The test is flaky. It should be re-enabled when this JIRA is fixed: https://issues.apache.org/jira/browse/KAFKA-5792
@Ignore
@Test
public void testHandleTimeout() throws Exception {
    HashMap<Integer, Node> nodes = new HashMap<>();
    MockTime time = new MockTime();
    nodes.put(0, new Node(0, "localhost", 8121));
    Cluster cluster = new Cluster("mockClusterId", nodes.values(),
        Collections.<PartitionInfo>emptySet(), Collections.<String>emptySet(),
        Collections.<String>emptySet(), nodes.get(0));
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster,
        AdminClientConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG, "1",
        AdminClientConfig.RECONNECT_BACKOFF_MS_CONFIG, "1")) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().setNode(nodes.get(0));
        assertEquals(time, env.time());
        assertEquals(env.time(), ((KafkaAdminClient) env.adminClient()).time());
        // Make a request with an extremely short timeout.
        // Then wait for it to fail by not supplying any response.
        log.info("Starting AdminClient#listTopics...");
        final ListTopicsResult result = env.adminClient().listTopics(new ListTopicsOptions().timeoutMs(1000));
        TestUtils.waitForCondition(new TestCondition() {

            @Override
            public boolean conditionMet() {
                return env.kafkaClient().hasInFlightRequests();
            }
        }, "Timed out waiting for inFlightRequests");
        time.sleep(5000);
        TestUtils.waitForCondition(new TestCondition() {

            @Override
            public boolean conditionMet() {
                return result.listings().isDone();
            }
        }, "Timed out waiting for listTopics to complete");
        assertFutureError(result.listings(), TimeoutException.class);
        log.info("Verified the error result of AdminClient#listTopics");
        // The next request should succeed.
        time.sleep(5000);
        env.kafkaClient().prepareResponse(new DescribeConfigsResponse(0,
            Collections.singletonMap(new org.apache.kafka.common.requests.Resource(TOPIC, "foo"),
                new DescribeConfigsResponse.Config(ApiError.NONE, Collections.<DescribeConfigsResponse.ConfigEntry>emptySet()))));
        DescribeConfigsResult result2 = env.adminClient().describeConfigs(
            Collections.singleton(new ConfigResource(ConfigResource.Type.TOPIC, "foo")));
        time.sleep(5000);
        result2.values().get(new ConfigResource(ConfigResource.Type.TOPIC, "foo")).get();
    }
}
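On the application side, the timeout exercised by this test surfaces as the cause of an ExecutionException when the future is awaited. A hedged sketch of handling it; the AdminClient parameter and the 1000 ms timeout are assumptions, not part of the test:

import java.util.Set;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.ListTopicsOptions;
import org.apache.kafka.common.errors.TimeoutException;

public class ListTopicsWithTimeout {
    // Assumes `admin` is an already-configured AdminClient; the 1000 ms timeout is illustrative.
    static Set<String> listTopicNames(AdminClient admin) throws InterruptedException {
        try {
            return admin.listTopics(new ListTopicsOptions().timeoutMs(1000)).names().get();
        } catch (ExecutionException e) {
            if (e.getCause() instanceof TimeoutException) {
                // The request deadline elapsed before a broker replied; retry or report upstream.
                throw (TimeoutException) e.getCause();
            }
            throw new RuntimeException(e.getCause());
        }
    }
}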
Use of org.apache.kafka.common.requests.DescribeConfigsResponse in project apache-kafka-on-k8s by banzaicloud.
The KafkaAdminClientTest class, method testDescribeConfigs:
@Test
public void testDescribeConfigs() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().setNode(env.cluster().controller());
        env.kafkaClient().prepareResponse(new DescribeConfigsResponse(0,
            Collections.singletonMap(new org.apache.kafka.common.requests.Resource(BROKER, "0"),
                new DescribeConfigsResponse.Config(ApiError.NONE, Collections.<DescribeConfigsResponse.ConfigEntry>emptySet()))));
        DescribeConfigsResult result2 = env.adminClient().describeConfigs(
            Collections.singleton(new ConfigResource(ConfigResource.Type.BROKER, "0")));
        result2.all().get();
    }
}