Use of org.apache.kafka.common.config.ConfigResource in project kafka by apache: the class KafkaAdminClientTest, method testDescribeConfigsUnrequested.
@Test
public void testDescribeConfigsUnrequested() throws Exception {
    ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "topic");
    ConfigResource unrequested = new ConfigResource(ConfigResource.Type.TOPIC, "unrequested");
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // The prepared broker response contains a result for "unrequested"
        // that the client never asked for.
        env.kafkaClient().prepareResponse(new DescribeConfigsResponse(
            new DescribeConfigsResponseData().setResults(asList(
                new DescribeConfigsResponseData.DescribeConfigsResult()
                    .setResourceName(topic.name())
                    .setResourceType(topic.type().id())
                    .setErrorCode(Errors.NONE.code())
                    .setConfigs(emptyList()),
                new DescribeConfigsResponseData.DescribeConfigsResult()
                    .setResourceName(unrequested.name())
                    .setResourceType(unrequested.type().id())
                    .setErrorCode(Errors.NONE.code())
                    .setConfigs(emptyList())))));
        Map<ConfigResource, KafkaFuture<Config>> result =
            env.adminClient().describeConfigs(asList(topic)).values();
        // Only the requested resource appears in the result map; the extra one is dropped.
        assertEquals(new HashSet<>(asList(topic)), result.keySet());
        assertNotNull(result.get(topic).get());
        assertNull(result.get(unrequested));
    }
}
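The test above pins down a contract of describeConfigs: the returned map is keyed only by the resources that were actually requested, even when the broker response carries extras. Against a live cluster the same API can be exercised as in the following minimal sketch; the bootstrap address and the topic name "my-topic" are placeholders, not part of the test above.

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.config.ConfigResource;

public class DescribeTopicConfigExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic");
            // all() collapses the per-resource futures into a single future over the whole map.
            Map<ConfigResource, Config> configs =
                admin.describeConfigs(Collections.singleton(topic)).all().get();
            configs.get(topic).entries().forEach(entry ->
                System.out.println(entry.name() + " = " + entry.value()));
        }
    }
}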
Use of org.apache.kafka.common.config.ConfigResource in project kafka by apache: the class KafkaAdminClientTest, method testDescribeBrokerAndLogConfigs.
@Test
public void testDescribeBrokerAndLogConfigs() throws Exception {
    ConfigResource brokerResource = new ConfigResource(ConfigResource.Type.BROKER, "0");
    ConfigResource brokerLoggerResource = new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "0");
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // BROKER and BROKER_LOGGER requests for broker 0 are routed to node 0 itself,
        // so the response is prepared on that specific node.
        env.kafkaClient().prepareResponseFrom(new DescribeConfigsResponse(
            new DescribeConfigsResponseData().setResults(asList(
                new DescribeConfigsResponseData.DescribeConfigsResult()
                    .setResourceName(brokerResource.name())
                    .setResourceType(brokerResource.type().id())
                    .setErrorCode(Errors.NONE.code())
                    .setConfigs(emptyList()),
                new DescribeConfigsResponseData.DescribeConfigsResult()
                    .setResourceName(brokerLoggerResource.name())
                    .setResourceType(brokerLoggerResource.type().id())
                    .setErrorCode(Errors.NONE.code())
                    .setConfigs(emptyList())))),
            env.cluster().nodeById(0));
        Map<ConfigResource, KafkaFuture<Config>> result =
            env.adminClient().describeConfigs(asList(brokerResource, brokerLoggerResource)).values();
        assertEquals(new HashSet<>(asList(brokerResource, brokerLoggerResource)), result.keySet());
        result.get(brokerResource).get();
        result.get(brokerLoggerResource).get();
    }
}
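Outside of the mock environment, the same pair of resource types can be described against a running broker. A minimal sketch, assuming a broker with id 0 is reachable at the placeholder address localhost:9092:

import java.util.Arrays;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.config.ConfigResource;

public class DescribeBrokerConfigsExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        try (Admin admin = Admin.create(props)) {
            // For BROKER and BROKER_LOGGER resources the name is the broker id, not a topic name.
            ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "0");
            ConfigResource brokerLogger = new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "0");
            Map<ConfigResource, Config> configs =
                admin.describeConfigs(Arrays.asList(broker, brokerLogger)).all().get();
            // BROKER_LOGGER entries map logger names to their current log levels.
            configs.get(brokerLogger).entries()
                .forEach(e -> System.out.println(e.name() + " -> " + e.value()));
        }
    }
}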
Use of org.apache.kafka.common.config.ConfigResource in project kafka by apache: the class MockAdminClient, method incrementalAlterConfigs.
@Override
public synchronized AlterConfigsResult incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>> configs, AlterConfigsOptions options) {
    Map<ConfigResource, KafkaFuture<Void>> futures = new HashMap<>();
    for (Map.Entry<ConfigResource, Collection<AlterConfigOp>> entry : configs.entrySet()) {
        ConfigResource resource = entry.getKey();
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        futures.put(resource, future);
        Throwable throwable = handleIncrementalResourceAlteration(resource, entry.getValue());
        if (throwable == null) {
            future.complete(null);
        } else {
            future.completeExceptionally(throwable);
        }
    }
    return new AlterConfigsResult(futures);
}
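On the caller side, the configs map passed into incrementalAlterConfigs is built from AlterConfigOp instances, each pairing a ConfigEntry with an operation type. A minimal sketch of such a caller; the helper name setTopicRetention and its parameters are illustrative, not part of MockAdminClient:

import java.util.Collections;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

static void setTopicRetention(Admin admin, String topicName, long retentionMs)
        throws ExecutionException, InterruptedException {
    ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
    AlterConfigOp op = new AlterConfigOp(
        new ConfigEntry("retention.ms", Long.toString(retentionMs)),
        AlterConfigOp.OpType.SET); // SET adds or updates; DELETE would revert to the default
    admin.incrementalAlterConfigs(Collections.singletonMap(topic, Collections.singleton(op)))
        .all()
        .get(); // block until the broker has applied (or rejected) the change
}

Unlike the legacy alterConfigs call, the incremental variant only touches the listed entries, which is why each operation carries an explicit OpType (SET, DELETE, APPEND, SUBTRACT).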
Use of org.apache.kafka.common.config.ConfigResource in project kafka by apache: the class MockAdminClient, method describeConfigs.
@Override
public synchronized DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
    // If a test has armed timeoutNextRequests, fail every requested resource with a TimeoutException.
    if (timeoutNextRequests > 0) {
        Map<ConfigResource, KafkaFuture<Config>> configs = new HashMap<>();
        for (ConfigResource requestedResource : resources) {
            KafkaFutureImpl<Config> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new TimeoutException());
            configs.put(requestedResource, future);
        }
        --timeoutNextRequests;
        return new DescribeConfigsResult(configs);
    }
    Map<ConfigResource, KafkaFuture<Config>> results = new HashMap<>();
    for (ConfigResource resource : resources) {
        KafkaFutureImpl<Config> future = new KafkaFutureImpl<>();
        results.put(resource, future);
        try {
            future.complete(getResourceDescription(resource));
        } catch (Throwable e) {
            future.completeExceptionally(e);
        }
    }
    return new DescribeConfigsResult(results);
}
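The timeoutNextRequests branch lets tests inject failures deterministically. A sketch of how a test might trigger it, assuming the MockAdminClient(List&lt;Node&gt;, Node) constructor and the timeoutNextRequest(int) method available in Kafka's test utilities:

import static org.junit.jupiter.api.Assertions.assertThrows;

import java.util.Collections;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.MockAdminClient;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.config.ConfigResource;
import org.junit.jupiter.api.Test;

public class MockAdminClientTimeoutSketch {
    @Test
    public void describeConfigsTimesOutWhenRequested() {
        Node broker = new Node(0, "localhost", 9092);
        MockAdminClient admin = new MockAdminClient(Collections.singletonList(broker), broker);
        // Arrange for the next request to fail before real results are served.
        admin.timeoutNextRequest(1);
        ConfigResource resource = new ConfigResource(ConfigResource.Type.BROKER, "0");
        // The future completes exceptionally, so get() wraps the TimeoutException
        // in an ExecutionException.
        assertThrows(ExecutionException.class,
            () -> admin.describeConfigs(Collections.singleton(resource)).all().get());
    }
}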
Use of org.apache.kafka.common.config.ConfigResource in project kafka by apache: the class InternalTopicManager, method validate.
/**
 * Validates the given internal topics.
 *
 * The validation checks whether the topics:
 * - are missing on the brokers
 * - have the expected number of partitions
 * - are configured with a clean-up policy that avoids data loss
 *
 * @param topicConfigs internal topics to validate
 *
 * @return validation results that contain
 *         - the set of internal topics missing on the brokers
 *         - descriptions of misconfigurations per topic
 */
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
    log.info("Starting to validate internal topics {}.", topicConfigs.keySet());
    final long now = time.milliseconds();
    final long deadline = now + retryTimeoutMs;
    final ValidationResult validationResult = new ValidationResult();
    final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
    final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
    while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
        Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
        if (!topicDescriptionsStillToValidate.isEmpty()) {
            final DescribeTopicsResult describeTopicsResult =
                adminClient.describeTopics(topicDescriptionsStillToValidate);
            descriptionsForTopic = describeTopicsResult.topicNameValues();
        }
        Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
        if (!topicConfigsStillToValidate.isEmpty()) {
            final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
                topicConfigsStillToValidate.stream()
                    .map(topic -> new ConfigResource(Type.TOPIC, topic))
                    .collect(Collectors.toSet()));
            // Re-key the result futures by topic name instead of ConfigResource.
            configsForTopic = describeConfigsResult.values().entrySet().stream()
                .collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
        }
        while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
            if (!descriptionsForTopic.isEmpty()) {
                doValidateTopic(
                    validationResult,
                    descriptionsForTopic,
                    topicConfigs,
                    topicDescriptionsStillToValidate,
                    (streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide));
            }
            if (!configsForTopic.isEmpty()) {
                doValidateTopic(
                    validationResult,
                    configsForTopic,
                    topicConfigs,
                    topicConfigsStillToValidate,
                    (streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide));
            }
            maybeThrowTimeoutException(
                Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
                deadline,
                String.format("Could not validate internal topics within %d milliseconds. " +
                    "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs));
            if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
                Utils.sleep(100);
            }
        }
        maybeSleep(Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate), deadline, "validated");
    }
    log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
    return validationResult;
}
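One detail worth isolating from the snippet above is the re-keying step: describeConfigs results are keyed by ConfigResource, while validate() tracks its remaining work by topic name, so the futures are converted back before the per-topic validation proceeds. A hypothetical helper showing just that round trip:

import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.config.ConfigResource.Type;

static Map<String, KafkaFuture<Config>> describeByTopicName(Admin adminClient, Set<String> topicNames) {
    return adminClient.describeConfigs(
            // Wrap each topic name in a TOPIC-typed ConfigResource for the request.
            topicNames.stream()
                .map(topic -> new ConfigResource(Type.TOPIC, topic))
                .collect(Collectors.toSet()))
        .values().entrySet().stream()
        // Unwrap again: key the futures by plain topic name for the caller.
        .collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
}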