Use of org.apache.kafka.common.config.ConfigResource in the Apache Kafka project.
From class KafkaAdminClientTest, method testIncrementalAlterConfigs:
@Test
public void testIncrementalAlterConfigs() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());

        // First request: the broker reports a per-resource error for both resources.
        IncrementalAlterConfigsResponseData errorData = new IncrementalAlterConfigsResponseData();
        errorData.responses().add(new AlterConfigsResourceResponse()
                .setResourceName("")
                .setResourceType(ConfigResource.Type.BROKER.id())
                .setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code())
                .setErrorMessage("authorization error"));
        errorData.responses().add(new AlterConfigsResourceResponse()
                .setResourceName("topic1")
                .setResourceType(ConfigResource.Type.TOPIC.id())
                .setErrorCode(Errors.INVALID_REQUEST.code())
                .setErrorMessage("Config value append is not allowed for config"));
        env.kafkaClient().prepareResponse(new IncrementalAlterConfigsResponse(errorData));

        ConfigResource brokerResource = new ConfigResource(ConfigResource.Type.BROKER, "");
        ConfigResource topicResource = new ConfigResource(ConfigResource.Type.TOPIC, "topic1");
        AlterConfigOp alterConfigOp1 = new AlterConfigOp(
                new ConfigEntry("log.segment.bytes", "1073741"), AlterConfigOp.OpType.SET);
        AlterConfigOp alterConfigOp2 = new AlterConfigOp(
                new ConfigEntry("compression.type", "gzip"), AlterConfigOp.OpType.APPEND);

        final Map<ConfigResource, Collection<AlterConfigOp>> configs = new HashMap<>();
        configs.put(brokerResource, singletonList(alterConfigOp1));
        configs.put(topicResource, singletonList(alterConfigOp2));

        // Each resource's future should surface its own broker-side error.
        AlterConfigsResult result = env.adminClient().incrementalAlterConfigs(configs);
        TestUtils.assertFutureError(result.values().get(brokerResource), ClusterAuthorizationException.class);
        TestUtils.assertFutureError(result.values().get(topicResource), InvalidRequestException.class);

        // Second request: a clean response completes the futures without error.
        IncrementalAlterConfigsResponseData okData = new IncrementalAlterConfigsResponseData();
        okData.responses().add(new AlterConfigsResourceResponse()
                .setResourceName("")
                .setResourceType(ConfigResource.Type.BROKER.id())
                .setErrorCode(Errors.NONE.code())
                .setErrorMessage(ApiError.NONE.message()));
        env.kafkaClient().prepareResponse(new IncrementalAlterConfigsResponse(okData));
        env.adminClient()
                .incrementalAlterConfigs(Collections.singletonMap(brokerResource, singletonList(alterConfigOp1)))
                .all()
                .get();
    }
}
Use of org.apache.kafka.common.config.ConfigResource in the Apache Kafka project.
From class KafkaAdminClientTest, method testDescribeConfigsPartialResponse:
@Test
public void testDescribeConfigsPartialResponse() throws Exception {
    ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "topic");
    ConfigResource topic2 = new ConfigResource(ConfigResource.Type.TOPIC, "topic2");
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());

        // The mocked broker answers for `topic` only; `topic2` is absent from the response.
        DescribeConfigsResponseData partialData = new DescribeConfigsResponseData()
                .setResults(asList(new DescribeConfigsResponseData.DescribeConfigsResult()
                        .setResourceName(topic.name())
                        .setResourceType(topic.type().id())
                        .setErrorCode(Errors.NONE.code())
                        .setConfigs(emptyList())));
        env.kafkaClient().prepareResponse(new DescribeConfigsResponse(partialData));

        Map<ConfigResource, KafkaFuture<Config>> result =
                env.adminClient().describeConfigs(asList(topic, topic2)).values();

        // Both requested resources get a future, but the one missing from the
        // response must complete exceptionally.
        assertEquals(new HashSet<>(asList(topic, topic2)), result.keySet());
        result.get(topic);
        TestUtils.assertFutureThrows(result.get(topic2), ApiException.class);
    }
}
Use of org.apache.kafka.common.config.ConfigResource in the Apache Kafka project.
From class KafkaAdminClient, method describeConfigs:
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) {
    // Partition the requested config resources by the broker they must be sent to.
    // A null key groups the resources that can be fetched from any broker (the
    // least-loaded one is chosen for that group).
    final Map<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> brokerFutures = new HashMap<>(configResources.size());
    for (ConfigResource resource : configResources) {
        Integer broker = nodeFor(resource);
        // computeIfAbsent is the idiomatic replacement for the original
        // compute(...) lambda that null-checked and created the inner map by hand.
        brokerFutures.computeIfAbsent(broker, key -> new HashMap<>())
                .put(resource, new KafkaFutureImpl<>());
    }
    final long now = time.milliseconds();
    // Issue one DescribeConfigs call per broker group.
    for (Map.Entry<Integer, Map<ConfigResource, KafkaFutureImpl<Config>>> entry : brokerFutures.entrySet()) {
        Integer broker = entry.getKey();
        Map<ConfigResource, KafkaFutureImpl<Config>> unified = entry.getValue();
        runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()),
                broker != null ? new ConstantNodeIdProvider(broker) : new LeastLoadedNodeProvider()) {
            @Override
            DescribeConfigsRequest.Builder createRequest(int timeoutMs) {
                // Request every resource in this group; a null key list means "all keys".
                return new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData()
                        .setResources(unified.keySet().stream()
                                .map(config -> new DescribeConfigsRequestData.DescribeConfigsResource()
                                        .setResourceName(config.name())
                                        .setResourceType(config.type().id())
                                        .setConfigurationKeys(null))
                                .collect(Collectors.toList()))
                        .setIncludeSynonyms(options.includeSynonyms())
                        .setIncludeDocumentation(options.includeDocumentation()));
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                for (Map.Entry<ConfigResource, DescribeConfigsResponseData.DescribeConfigsResult> entry : response.resultMap().entrySet()) {
                    ConfigResource configResource = entry.getKey();
                    DescribeConfigsResponseData.DescribeConfigsResult describeConfigsResult = entry.getValue();
                    KafkaFutureImpl<Config> future = unified.get(configResource);
                    if (future == null) {
                        // The broker answered for a resource we never asked about; log and drop it.
                        if (broker != null) {
                            log.warn("The config {} in the response from broker {} is not in the request", configResource, broker);
                        } else {
                            log.warn("The config {} in the response from the least loaded broker is not in the request", configResource);
                        }
                    } else {
                        if (describeConfigsResult.errorCode() != Errors.NONE.code()) {
                            future.completeExceptionally(Errors.forCode(describeConfigsResult.errorCode()).exception(describeConfigsResult.errorMessage()));
                        } else {
                            future.complete(describeConfigResult(describeConfigsResult));
                        }
                    }
                }
                // Any requested resource the broker omitted gets an explicit failure
                // instead of a future that never completes.
                completeUnrealizedFutures(unified.entrySet().stream(),
                        configResource -> "The broker response did not contain a result for config resource " + configResource);
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(unified.values(), throwable);
            }
        }, now);
    }
    // Flatten the per-broker future maps into a single result map. The HashMap copy
    // also widens the value type from KafkaFutureImpl to KafkaFuture for the result.
    return new DescribeConfigsResult(new HashMap<>(brokerFutures.entrySet().stream()
            .flatMap(x -> x.getValue().entrySet().stream())
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))));
}
Use of org.apache.kafka.common.config.ConfigResource in the Apache Kafka project.
From class KafkaAdminClient, method incrementalAlterConfigs:
@Override
public AlterConfigsResult incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>> configs, final AlterConfigsOptions options) {
    final Map<ConfigResource, KafkaFutureImpl<Void>> allFutures = new HashMap<>();
    // Every BROKER resource needs its own AlterConfigs request routed to that exact
    // broker. All remaining resources are batched into a single request that any
    // broker can serve.
    final Collection<ConfigResource> unifiedRequestResources = new ArrayList<>();
    for (ConfigResource resource : configs.keySet()) {
        Integer targetBroker = nodeFor(resource);
        if (targetBroker == null) {
            unifiedRequestResources.add(resource);
        } else {
            allFutures.putAll(incrementalAlterConfigs(configs, options,
                    Collections.singleton(resource), new ConstantNodeIdProvider(targetBroker)));
        }
    }
    if (!unifiedRequestResources.isEmpty()) {
        allFutures.putAll(incrementalAlterConfigs(configs, options,
                unifiedRequestResources, new LeastLoadedNodeProvider()));
    }
    return new AlterConfigsResult(new HashMap<>(allFutures));
}
Use of org.apache.kafka.common.config.ConfigResource in the Apache Kafka project.
From class KafkaAdminClient, method alterConfigs:
private Map<ConfigResource, KafkaFutureImpl<Void>> alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options, Collection<ConfigResource> resources, NodeProvider nodeProvider) {
    final Map<ConfigResource, KafkaFutureImpl<Void>> futures = new HashMap<>();
    final Map<ConfigResource, AlterConfigsRequest.Config> requestMap = new HashMap<>(resources.size());
    // Build the wire-level request payload and register one future per resource.
    for (ConfigResource resource : resources) {
        List<AlterConfigsRequest.ConfigEntry> entriesForResource = new ArrayList<>();
        for (ConfigEntry sourceEntry : configs.get(resource).entries()) {
            entriesForResource.add(new AlterConfigsRequest.ConfigEntry(sourceEntry.name(), sourceEntry.value()));
        }
        requestMap.put(resource, new AlterConfigsRequest.Config(entriesForResource));
        futures.put(resource, new KafkaFutureImpl<>());
    }
    final long now = time.milliseconds();
    runnable.call(new Call("alterConfigs", calcDeadlineMs(now, options.timeoutMs()), nodeProvider) {
        @Override
        public AlterConfigsRequest.Builder createRequest(int timeoutMs) {
            return new AlterConfigsRequest.Builder(requestMap, options.shouldValidateOnly());
        }

        @Override
        public void handleResponse(AbstractResponse abstractResponse) {
            AlterConfigsResponse response = (AlterConfigsResponse) abstractResponse;
            // Complete each registered future from the matching per-resource error.
            for (Map.Entry<ConfigResource, KafkaFutureImpl<Void>> futureEntry : futures.entrySet()) {
                ApiException exception = response.errors().get(futureEntry.getKey()).exception();
                KafkaFutureImpl<Void> future = futureEntry.getValue();
                if (exception == null) {
                    future.complete(null);
                } else {
                    future.completeExceptionally(exception);
                }
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            completeAllExceptionally(futures.values(), throwable);
        }
    }, now);
    return futures;
}
Aggregations