Use of org.apache.kafka.clients.admin.DescribeConfigsResult in the ksql project by confluentinc.
From the class KafkaTopicClientImplTest, method topicConfigResponse.
/**
 * Builds an EasyMock {@link DescribeConfigsResult} whose {@code all()} future
 * fails with the supplied exception.
 *
 * @param cause the exception the returned result's future completes with
 * @return a replayed mock ready for use by the code under test
 */
private static DescribeConfigsResult topicConfigResponse(final Exception cause) {
    final DescribeConfigsResult result = mock(DescribeConfigsResult.class);
    expect(result.all()).andReturn(failedFuture(cause));
    replay(result);
    return result;
}
Use of org.apache.kafka.clients.admin.DescribeConfigsResult in the ksql project by confluentinc.
From the class KafkaTopicClientImplTest, method topicConfigResponse (overload taking config entries).
/**
 * Builds an EasyMock {@link DescribeConfigsResult} whose {@code all()} future
 * completes with the given config entries, keyed by the topic's resource.
 *
 * @param topicName the topic the config entries belong to
 * @param entries   the config entries to return for the topic
 * @return a replayed mock ready for use by the code under test
 */
private static DescribeConfigsResult topicConfigResponse(final String topicName, final ConfigEntry... entries) {
    final ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
    final Map<ConfigResource, Config> configs = ImmutableMap.of(resource, new Config(Arrays.asList(entries)));
    final DescribeConfigsResult result = mock(DescribeConfigsResult.class);
    expect(result.all()).andReturn(KafkaFuture.completedFuture(configs));
    replay(result);
    return result;
}
Use of org.apache.kafka.clients.admin.DescribeConfigsResult in the ksql project by confluentinc.
From the class KafkaTopicClientImplTest, method describeBrokerResult.
/**
 * Builds an EasyMock {@link DescribeConfigsResult} describing this test's broker
 * with a single {@code delete.topic.enable=true} config entry.
 *
 * @return a replayed mock ready for use by the code under test
 */
private DescribeConfigsResult describeBrokerResult() {
    final DescribeConfigsResult describeConfigsResult = mock(DescribeConfigsResult.class);
    // Same single-call list construction as the sibling topicConfigResponse helper,
    // instead of building a one-element ArrayList by hand.
    final ConfigEntry deleteTopicEnable = new ConfigEntry("delete.topic.enable", "true");
    final Map<ConfigResource, Config> config = ImmutableMap.of(
        new ConfigResource(ConfigResource.Type.BROKER, node.idString()),
        new Config(Arrays.asList(deleteTopicEnable)));
    expect(describeConfigsResult.all()).andReturn(KafkaFuture.completedFuture(config));
    replay(describeConfigsResult);
    return describeConfigsResult;
}
Use of org.apache.kafka.clients.admin.DescribeConfigsResult in the kafka project by apache.
From the class MirrorConnectorsIntegrationBaseTest, method getTopicConfig.
/*
* retrieve the config value based on the input cluster, topic and config name
*/
/**
 * Retrieves the value of a single topic config from the given cluster.
 *
 * @param cluster    the embedded cluster whose admin client is used
 * @param topic      the topic whose config is queried
 * @param configName the name of the config entry to read
 * @return the config entry's value
 * @throws Exception if the describe call fails or the future cannot complete
 */
protected static String getTopicConfig(EmbeddedKafkaCluster cluster, String topic, String configName) throws Exception {
    try (Admin client = cluster.createAdminClient()) {
        // Look the Config up by its ConfigResource key rather than relying on
        // map iteration order via values().toArray()[0] and an unchecked cast.
        final ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topic);
        final DescribeConfigsResult configsResult = client.describeConfigs(Collections.singleton(resource));
        final Config topicConfigs = configsResult.all().get().get(resource);
        return topicConfigs.get(configName).value();
    }
}
Use of org.apache.kafka.clients.admin.DescribeConfigsResult in the kafka project by apache.
From the class InternalTopicManager, method validate.
/**
 * Validates the internal topics passed.
 *
 * The validation of the internal topics verifies if the topics:
 * - are missing on the brokers
 * - have the expected number of partitions
 * - have configured a clean-up policy that avoids data loss
 *
 * @param topicConfigs internal topics to validate
 *
 * @return validation results that contains
 * - the set of missing internal topics on the brokers
 * - descriptions of misconfigurations per topic
 */
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
    log.info("Starting to validate internal topics {}.", topicConfigs.keySet());
    final long now = time.milliseconds();
    // Hard deadline for the whole validation; checked via maybeThrowTimeoutException below.
    final long deadline = now + retryTimeoutMs;
    final ValidationResult validationResult = new ValidationResult();
    // Two independent work lists: topics still needing a describe-topics response
    // and topics still needing a describe-configs response. Each outer iteration
    // re-issues admin requests only for the topics not yet validated.
    final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
    final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
    while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
        Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
        if (!topicDescriptionsStillToValidate.isEmpty()) {
            final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topicDescriptionsStillToValidate);
            descriptionsForTopic = describeTopicsResult.topicNameValues();
        }
        Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
        if (!topicConfigsStillToValidate.isEmpty()) {
            // describeConfigs is keyed by ConfigResource; re-key the result map by topic name.
            final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(topicConfigsStillToValidate.stream().map(topic -> new ConfigResource(Type.TOPIC, topic)).collect(Collectors.toSet()));
            configsForTopic = describeConfigsResult.values().entrySet().stream().collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
        }
        // Inner loop: poll the outstanding futures until both maps are drained.
        // NOTE(review): doValidateTopic appears to remove entries from the passed map
        // (and from the "still to validate" set) as their futures complete — confirm
        // against its implementation; otherwise this loop could not terminate.
        while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
            if (!descriptionsForTopic.isEmpty()) {
                doValidateTopic(validationResult, descriptionsForTopic, topicConfigs, topicDescriptionsStillToValidate, (streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide));
            }
            if (!configsForTopic.isEmpty()) {
                doValidateTopic(validationResult, configsForTopic, topicConfigs, topicConfigsStillToValidate, (streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide));
            }
            // Give up with a TimeoutException once the deadline has passed and work remains.
            maybeThrowTimeoutException(Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate), deadline, String.format("Could not validate internal topics within %d milliseconds. " + "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs));
            // Back off briefly between polls of still-incomplete futures.
            if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
                Utils.sleep(100);
            }
        }
        // Sleep before re-issuing admin requests for any topics that must be retried.
        maybeSleep(Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate), deadline, "validated");
    }
    log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
    return validationResult;
}
Aggregations