use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
the class KafkaAdminClientTest method testDescribeBrokerConfigs.
@Test
public void testDescribeBrokerConfigs() throws Exception {
    ConfigResource broker0Resource = new ConfigResource(ConfigResource.Type.BROKER, "0");
    ConfigResource broker1Resource = new ConfigResource(ConfigResource.Type.BROKER, "1");
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponseFrom(
            new DescribeConfigsResponse(new DescribeConfigsResponseData().setResults(asList(
                new DescribeConfigsResponseData.DescribeConfigsResult()
                    .setResourceName(broker0Resource.name())
                    .setResourceType(broker0Resource.type().id())
                    .setErrorCode(Errors.NONE.code())
                    .setConfigs(emptyList())))),
            env.cluster().nodeById(0));
        env.kafkaClient().prepareResponseFrom(
            new DescribeConfigsResponse(new DescribeConfigsResponseData().setResults(asList(
                new DescribeConfigsResponseData.DescribeConfigsResult()
                    .setResourceName(broker1Resource.name())
                    .setResourceType(broker1Resource.type().id())
                    .setErrorCode(Errors.NONE.code())
                    .setConfigs(emptyList())))),
            env.cluster().nodeById(1));
        Map<ConfigResource, KafkaFuture<Config>> result =
            env.adminClient().describeConfigs(asList(broker0Resource, broker1Resource)).values();
        assertEquals(new HashSet<>(asList(broker0Resource, broker1Resource)), result.keySet());
        result.get(broker0Resource).get();
        result.get(broker1Resource).get();
    }
}
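Outside the mock test environment, the same KafkaFuture-per-resource pattern applies when describing broker configs against a live cluster. A minimal sketch, assuming a reachable bootstrap address and broker id (both illustrative, not taken from the test above), run inside a method that declares throws Exception:
Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
try (Admin admin = Admin.create(props)) {
    ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "0"); // assumed broker id
    // describeConfigs returns one KafkaFuture per requested resource
    Map<ConfigResource, KafkaFuture<Config>> futures =
        admin.describeConfigs(Collections.singleton(broker)).values();
    Config config = futures.get(broker).get(); // blocks until the broker responds
    config.entries().forEach(entry -> System.out.println(entry.name() + " = " + entry.value()));
}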
use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
the class KafkaAdminClientTest method testDescribeReplicaLogDirs.
@Test
public void testDescribeReplicaLogDirs() throws ExecutionException, InterruptedException {
    TopicPartitionReplica tpr1 = new TopicPartitionReplica("topic", 12, 1);
    TopicPartitionReplica tpr2 = new TopicPartitionReplica("topic", 12, 2);
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        String broker1log0 = "/var/data/kafka0";
        String broker1log1 = "/var/data/kafka1";
        String broker2log0 = "/var/data/kafka2";
        int broker1Log0OffsetLag = 24;
        int broker1Log0PartitionSize = 987654321;
        int broker1Log1PartitionSize = 123456789;
        int broker1Log1OffsetLag = 4321;
        env.kafkaClient().prepareResponseFrom(
            new DescribeLogDirsResponse(new DescribeLogDirsResponseData().setResults(asList(
                prepareDescribeLogDirsResult(tpr1, broker1log0, broker1Log0PartitionSize, broker1Log0OffsetLag, false),
                prepareDescribeLogDirsResult(tpr1, broker1log1, broker1Log1PartitionSize, broker1Log1OffsetLag, true)))),
            env.cluster().nodeById(tpr1.brokerId()));
        env.kafkaClient().prepareResponseFrom(
            prepareDescribeLogDirsResponse(Errors.KAFKA_STORAGE_ERROR, broker2log0),
            env.cluster().nodeById(tpr2.brokerId()));
        DescribeReplicaLogDirsResult result = env.adminClient().describeReplicaLogDirs(asList(tpr1, tpr2));
        Map<TopicPartitionReplica, KafkaFuture<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> values = result.values();
        assertEquals(TestUtils.toSet(asList(tpr1, tpr2)), values.keySet());
        assertNotNull(values.get(tpr1));
        assertEquals(broker1log0, values.get(tpr1).get().getCurrentReplicaLogDir());
        assertEquals(broker1Log0OffsetLag, values.get(tpr1).get().getCurrentReplicaOffsetLag());
        assertEquals(broker1log1, values.get(tpr1).get().getFutureReplicaLogDir());
        assertEquals(broker1Log1OffsetLag, values.get(tpr1).get().getFutureReplicaOffsetLag());
        assertNotNull(values.get(tpr2));
        assertNull(values.get(tpr2).get().getCurrentReplicaLogDir());
        assertEquals(-1, values.get(tpr2).get().getCurrentReplicaOffsetLag());
        assertNull(values.get(tpr2).get().getFutureReplicaLogDir());
        assertEquals(-1, values.get(tpr2).get().getFutureReplicaOffsetLag());
    }
}
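The per-replica futures returned by describeReplicaLogDirs can also be consumed without blocking on get(). A minimal sketch, assuming an Admin instance named admin and the two replicas from the test above (the callback body is illustrative):
DescribeReplicaLogDirsResult result = admin.describeReplicaLogDirs(asList(tpr1, tpr2));
result.values().forEach((replica, future) -> future.whenComplete((info, error) -> {
    if (error != null) {
        System.err.println("Describing " + replica + " failed: " + error);
    } else {
        // a null current log dir indicates the broker reported a storage error for this replica
        System.out.println(replica + " -> " + info.getCurrentReplicaLogDir());
    }
}));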
use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
the class TopicAdmin method describeTopicConfigs.
/**
 * Attempt to fetch the topic configurations for the given topics.
 * Apache Kafka added support for describing topic configurations in 0.11.0.0, so this method
 * works as expected with that and later versions. With brokers older than 0.11.0.0, this method
 * is unable to get the topic configurations and always returns an empty map.
 *
 * <p>An entry with a null Config is placed into the resulting map for any topic that does
 * not exist on the brokers.
 *
 * @param topicNames the topics for which to obtain configurations
 * @return the map of topic configurations for each existing topic, or an empty map if none
 *         of the topics exist
 * @throws RetriableException if a retriable error occurs, the operation takes too long, or the
 *         thread is interrupted while attempting to perform this operation
 * @throws ConnectException if a non-retriable error occurs
 */
public Map<String, Config> describeTopicConfigs(String... topicNames) {
    if (topicNames == null) {
        return Collections.emptyMap();
    }
    Collection<String> topics = Arrays.stream(topicNames)
        .filter(Objects::nonNull)
        .map(String::trim)
        .filter(s -> !s.isEmpty())
        .collect(Collectors.toList());
    if (topics.isEmpty()) {
        return Collections.emptyMap();
    }
    String bootstrapServers = bootstrapServers();
    String topicNameList = String.join(", ", topics);
    Collection<ConfigResource> resources = topics.stream()
        .map(t -> new ConfigResource(ConfigResource.Type.TOPIC, t))
        .collect(Collectors.toList());
    Map<ConfigResource, KafkaFuture<Config>> newResults =
        admin.describeConfigs(resources, new DescribeConfigsOptions()).values();
    // Iterate over each future so that we can handle individual failures, such as when some topics don't exist
    Map<String, Config> result = new HashMap<>();
    newResults.forEach((resource, configs) -> {
        String topic = resource.name();
        try {
            result.put(topic, configs.get());
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof UnknownTopicOrPartitionException) {
                log.debug("Topic '{}' does not exist on the brokers at {}", topic, bootstrapServers);
                result.put(topic, null);
            } else if (cause instanceof ClusterAuthorizationException || cause instanceof TopicAuthorizationException) {
                log.debug("Not authorized to describe topic config for topic '{}' on brokers at {}", topic, bootstrapServers);
            } else if (cause instanceof UnsupportedVersionException) {
                log.debug("API to describe topic config for topic '{}' is unsupported on brokers at {}", topic, bootstrapServers);
            } else if (cause instanceof TimeoutException) {
                String msg = String.format("Timed out while waiting to describe topic config for topic '%s' on brokers at %s", topic, bootstrapServers);
                throw new RetriableException(msg, e);
            } else {
                String msg = String.format("Error while attempting to describe topic config for topic '%s' on brokers at %s", topic, bootstrapServers);
                throw new ConnectException(msg, e);
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt status before rethrowing
            String msg = String.format("Interrupted while attempting to describe topic configs '%s'", topicNameList);
            throw new RetriableException(msg, e);
        }
    });
    return result;
}
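A minimal usage sketch for the method above, assuming a configured TopicAdmin instance named topicAdmin (the instance name and topic names are illustrative):
Map<String, Config> configs = topicAdmin.describeTopicConfigs("connect-offsets", "connect-status");
configs.forEach((topic, config) -> {
    if (config == null) {
        // a null Config marks a topic that does not exist on the brokers
        System.out.println(topic + " does not exist");
    } else {
        ConfigEntry policy = config.get("cleanup.policy");
        System.out.println(topic + " cleanup.policy = " + (policy == null ? "<default>" : policy.value()));
    }
});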
use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
the class InternalTopicManager method makeReady.
/**
 * Prepares a set of given internal topics.
 *
 * If a topic does not exist, it is created.
 * If a topic with the correct number of partitions already exists, it is ignored.
 * If a topic exists with a different number of partitions, we fail and throw an exception,
 * requesting the user to reset the application before restarting it.
 * @return the set of topics which had to be newly created
 */
public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
    // we will do the validation / topic-creation in a loop, until we have confirmed that all topics
    // exist with the expected number of partitions, or some topic creation returns a fatal error.
    log.debug("Starting to validate internal topics {} in partition assignor.", topics);
    long currentWallClockMs = time.milliseconds();
    final long deadlineMs = currentWallClockMs + retryTimeoutMs;
    Set<String> topicsNotReady = new HashSet<>(topics.keySet());
    final Set<String> newlyCreatedTopics = new HashSet<>();
    while (!topicsNotReady.isEmpty()) {
        final Set<String> tempUnknownTopics = new HashSet<>();
        topicsNotReady = validateTopics(topicsNotReady, topics, tempUnknownTopics);
        newlyCreatedTopics.addAll(topicsNotReady);
        if (!topicsNotReady.isEmpty()) {
            final Set<NewTopic> newTopics = new HashSet<>();
            for (final String topicName : topicsNotReady) {
                if (tempUnknownTopics.contains(topicName)) {
                    // we'll check again later if remaining retries > 0
                    continue;
                }
                final InternalTopicConfig internalTopicConfig = Objects.requireNonNull(topics.get(topicName));
                final Map<String, String> topicConfig = internalTopicConfig.getProperties(defaultTopicConfigs, windowChangeLogAdditionalRetention);
                log.debug("Going to create topic {} with {} partitions and config {}.",
                    internalTopicConfig.name(), internalTopicConfig.numberOfPartitions(), topicConfig);
                newTopics.add(new NewTopic(internalTopicConfig.name(), internalTopicConfig.numberOfPartitions(), Optional.of(replicationFactor))
                    .configs(topicConfig));
            }
            // the set of new topics to create may be empty, in which case we can skip the creation step
            if (!newTopics.isEmpty()) {
                final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics);
                for (final Map.Entry<String, KafkaFuture<Void>> createTopicResult : createTopicsResult.values().entrySet()) {
                    final String topicName = createTopicResult.getKey();
                    try {
                        createTopicResult.getValue().get();
                        topicsNotReady.remove(topicName);
                    } catch (final InterruptedException fatalException) {
                        // this should not happen; if it ever happens, it indicates a bug
                        Thread.currentThread().interrupt();
                        log.error(INTERRUPTED_ERROR_MESSAGE, fatalException);
                        throw new IllegalStateException(INTERRUPTED_ERROR_MESSAGE, fatalException);
                    } catch (final ExecutionException executionException) {
                        final Throwable cause = executionException.getCause();
                        if (cause instanceof TopicExistsException) {
                            // This topic didn't exist earlier, or its leader was not known before; just retain it for the next round of validation.
                            log.info("Could not create topic {}. Topic is probably marked for deletion (number of partitions is unknown).\n" +
                                "Will retry to create this topic in {} ms (to let broker finish async delete operation first).\n" +
                                "Error message was: {}", topicName, retryBackOffMs, cause.toString());
                        } else {
                            log.error("Unexpected error during topic creation for {}.\n" +
                                "Error message was: {}", topicName, cause.toString());
                            if (cause instanceof UnsupportedVersionException) {
                                final String errorMessage = cause.getMessage();
                                if (errorMessage != null && errorMessage.startsWith("Creating topics with default partitions/replication factor are only supported in CreateTopicRequest version 4+")) {
                                    throw new StreamsException(String.format(
                                        "Could not create topic %s, because brokers don't support configuration replication.factor=-1." +
                                        " You can change the replication.factor config or upgrade your brokers to version 2.4 or newer to avoid this error.",
                                        topicName));
                                }
                            } else {
                                throw new StreamsException(String.format("Could not create topic %s.", topicName), cause);
                            }
                        }
                    } catch (final TimeoutException retriableException) {
                        log.error("Creating topic {} timed out.\n" +
                            "Error message was: {}", topicName, retriableException.toString());
                    }
                }
            }
        }
        if (!topicsNotReady.isEmpty()) {
            currentWallClockMs = time.milliseconds();
            if (currentWallClockMs >= deadlineMs) {
                final String timeoutError = String.format("Could not create topics within %d milliseconds. " +
                    "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs);
                log.error(timeoutError);
                throw new TimeoutException(timeoutError);
            }
            log.info("Topics {} could not be made ready. Will retry in {} milliseconds. Remaining time in milliseconds: {}",
                topicsNotReady, retryBackOffMs, deadlineMs - currentWallClockMs);
            Utils.sleep(retryBackOffMs);
        }
    }
    log.debug("Completed validating internal topics and created {}", newlyCreatedTopics);
    return newlyCreatedTopics;
}
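The core pattern above, iterating the per-topic KafkaFutures from createTopics and classifying the ExecutionException cause, is reusable on its own. A minimal sketch, assuming an Admin instance named admin (the topic name, partition count, and replication factor are illustrative):
NewTopic topic = new NewTopic("example-changelog", 4, (short) 3); // assumed settings
CreateTopicsResult created = admin.createTopics(Collections.singleton(topic));
for (Map.Entry<String, KafkaFuture<Void>> entry : created.values().entrySet()) {
    try {
        entry.getValue().get();
        System.out.println("Created " + entry.getKey());
    } catch (ExecutionException e) {
        if (e.getCause() instanceof TopicExistsException) {
            // an existing topic is acceptable here; callers may instead re-validate it
            System.out.println(entry.getKey() + " already exists");
        } else {
            throw new RuntimeException("Failed to create " + entry.getKey(), e.getCause());
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
}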
use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
the class InternalTopicManager method validate.
/**
 * Validates the given internal topics.
 *
 * The validation verifies whether the topics:
 * - are missing on the brokers
 * - have the expected number of partitions
 * - have a clean-up policy configured that avoids data loss
 *
 * @param topicConfigs internal topics to validate
 *
 * @return a validation result that contains
 *         - the set of internal topics missing on the brokers
 *         - descriptions of misconfigurations per topic
 */
public ValidationResult validate(final Map<String, InternalTopicConfig> topicConfigs) {
    log.info("Starting to validate internal topics {}.", topicConfigs.keySet());
    final long now = time.milliseconds();
    final long deadline = now + retryTimeoutMs;
    final ValidationResult validationResult = new ValidationResult();
    final Set<String> topicDescriptionsStillToValidate = new HashSet<>(topicConfigs.keySet());
    final Set<String> topicConfigsStillToValidate = new HashSet<>(topicConfigs.keySet());
    while (!topicDescriptionsStillToValidate.isEmpty() || !topicConfigsStillToValidate.isEmpty()) {
        Map<String, KafkaFuture<TopicDescription>> descriptionsForTopic = Collections.emptyMap();
        if (!topicDescriptionsStillToValidate.isEmpty()) {
            final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topicDescriptionsStillToValidate);
            descriptionsForTopic = describeTopicsResult.topicNameValues();
        }
        Map<String, KafkaFuture<Config>> configsForTopic = Collections.emptyMap();
        if (!topicConfigsStillToValidate.isEmpty()) {
            final DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(
                topicConfigsStillToValidate.stream()
                    .map(topic -> new ConfigResource(Type.TOPIC, topic))
                    .collect(Collectors.toSet()));
            configsForTopic = describeConfigsResult.values().entrySet().stream()
                .collect(Collectors.toMap(entry -> entry.getKey().name(), Map.Entry::getValue));
        }
        while (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
            if (!descriptionsForTopic.isEmpty()) {
                doValidateTopic(validationResult, descriptionsForTopic, topicConfigs, topicDescriptionsStillToValidate,
                    (streamsSide, brokerSide) -> validatePartitionCount(validationResult, streamsSide, brokerSide));
            }
            if (!configsForTopic.isEmpty()) {
                doValidateTopic(validationResult, configsForTopic, topicConfigs, topicConfigsStillToValidate,
                    (streamsSide, brokerSide) -> validateCleanupPolicy(validationResult, streamsSide, brokerSide));
            }
            maybeThrowTimeoutException(
                Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate),
                deadline,
                String.format("Could not validate internal topics within %d milliseconds. " +
                    "This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs));
            if (!descriptionsForTopic.isEmpty() || !configsForTopic.isEmpty()) {
                Utils.sleep(100);
            }
        }
        maybeSleep(Arrays.asList(topicDescriptionsStillToValidate, topicConfigsStillToValidate), deadline, "validated");
    }
    log.info("Completed validation of internal topics {}.", topicConfigs.keySet());
    return validationResult;
}
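The describe side of this loop follows a general pattern: issue describeTopics and describeConfigs together and join the per-topic KafkaFutures. A minimal standalone sketch, assuming an Admin instance named admin and run inside a method that declares throws Exception (the topic name is illustrative):
String topic = "example-topic"; // assumed topic
KafkaFuture<TopicDescription> descriptionFuture =
    admin.describeTopics(Collections.singleton(topic)).topicNameValues().get(topic);
ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topic);
KafkaFuture<Config> configFuture =
    admin.describeConfigs(Collections.singleton(resource)).values().get(resource);
// both requests are in flight concurrently; get() joins each result
TopicDescription description = descriptionFuture.get();
Config config = configFuture.get();
System.out.println(topic + " has " + description.partitions().size()
    + " partitions, cleanup.policy=" + config.get("cleanup.policy").value());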