use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
the class KafkaAdminClientTest method testCreatePartitions.
@Test
public void testCreatePartitions() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().setNode(env.cluster().controller());
        Map<String, ApiError> m = new HashMap<>();
        m.put("my_topic", ApiError.NONE);
        m.put("other_topic", ApiError.fromThrowable(new InvalidTopicException("some detailed reason")));
        // Test a call where one topic has an error.
        env.kafkaClient().prepareResponse(new CreatePartitionsResponse(0, m));
        Map<String, NewPartitions> counts = new HashMap<>();
        counts.put("my_topic", NewPartitions.increaseTo(3));
        counts.put("other_topic", NewPartitions.increaseTo(3, asList(asList(2), asList(3))));
        CreatePartitionsResult results = env.adminClient().createPartitions(counts);
        Map<String, KafkaFuture<Void>> values = results.values();
        KafkaFuture<Void> myTopicResult = values.get("my_topic");
        myTopicResult.get();
        KafkaFuture<Void> otherTopicResult = values.get("other_topic");
        try {
            otherTopicResult.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e0) {
            assertTrue(e0.getCause() instanceof InvalidTopicException);
            InvalidTopicException e = (InvalidTopicException) e0.getCause();
            assertEquals("some detailed reason", e.getMessage());
        }
    }
}
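Outside a mock test environment the per-topic pattern is the same: values() hands back one KafkaFuture&lt;Void&gt; per topic, and the broker-side error surfaces as the cause of an ExecutionException, exactly as the assertions above check. A minimal caller-side sketch follows; the class name, the growTopics helper, and the topic names are hypothetical, and an already-configured AdminClient is assumed.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.CreatePartitionsResult;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.common.KafkaFuture;

public class CreatePartitionsExample {

    // Hypothetical helper: grow two topics and report which ones failed.
    static void growTopics(AdminClient adminClient) throws InterruptedException {
        Map<String, NewPartitions> counts = new HashMap<>();
        counts.put("my_topic", NewPartitions.increaseTo(3));
        counts.put("other_topic", NewPartitions.increaseTo(3));

        CreatePartitionsResult result = adminClient.createPartitions(counts);

        // Per-topic handling: each KafkaFuture<Void> succeeds or fails independently.
        for (Map.Entry<String, KafkaFuture<Void>> entry : result.values().entrySet()) {
            try {
                entry.getValue().get();
                System.out.println("Grew " + entry.getKey());
            } catch (ExecutionException e) {
                // The broker-side error is carried as the cause, as the test above asserts.
                System.err.println("Failed to grow " + entry.getKey() + ": " + e.getCause());
            }
        }
    }
}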
use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
the class MockAdminClient method createTopics.
@Override
public CreateTopicsResult createTopics(Collection<NewTopic> newTopics, CreateTopicsOptions options) {
    Map<String, KafkaFuture<Void>> createTopicResult = new HashMap<>();
    if (timeoutNextRequests > 0) {
        for (final NewTopic newTopic : newTopics) {
            String topicName = newTopic.name();
            KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new TimeoutException());
            createTopicResult.put(topicName, future);
        }
        --timeoutNextRequests;
        return new CreateTopicsResult(createTopicResult);
    }
    for (final NewTopic newTopic : newTopics) {
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        String topicName = newTopic.name();
        if (allTopics.containsKey(topicName)) {
            future.completeExceptionally(new TopicExistsException(String.format("Topic %s exists already.", topicName)));
            createTopicResult.put(topicName, future);
            continue;
        }
        int replicationFactor = newTopic.replicationFactor();
        List<Node> replicas = new ArrayList<>(replicationFactor);
        for (int i = 0; i < replicationFactor; ++i) {
            replicas.add(brokers.get(i));
        }
        int numberOfPartitions = newTopic.numPartitions();
        List<TopicPartitionInfo> partitions = new ArrayList<>(numberOfPartitions);
        for (int p = 0; p < numberOfPartitions; ++p) {
            partitions.add(new TopicPartitionInfo(p, brokers.get(0), replicas, Collections.<Node>emptyList()));
        }
        allTopics.put(topicName, new TopicMetadata(false, partitions, newTopic.configs()));
        future.complete(null);
        createTopicResult.put(topicName, future);
    }
    return new CreateTopicsResult(createTopicResult);
}
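A caller consuming these futures typically treats TopicExistsException as a benign outcome. Below is a minimal sketch of that pattern against the generic AdminClient API; the class name, the ensureTopic helper, and the single-partition, replication-factor-1 topic are assumptions, not part of the mock above. Note that all() is simply the conjunction of the per-topic futures that createTopics populates.

import java.util.Collections;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.errors.TopicExistsException;

public class CreateTopicsExample {

    // Hypothetical helper: create a topic and treat "already exists" as success.
    static void ensureTopic(AdminClient adminClient, String topic) throws InterruptedException {
        NewTopic newTopic = new NewTopic(topic, 1, (short) 1);
        CreateTopicsResult result = adminClient.createTopics(Collections.singleton(newTopic));
        try {
            // all() fails if any of the per-topic futures failed.
            result.all().get();
        } catch (ExecutionException e) {
            if (!(e.getCause() instanceof TopicExistsException)) {
                throw new RuntimeException("Could not create topic " + topic, e.getCause());
            }
        }
    }
}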
use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
the class MockAdminClient method describeTopics.
@Override
public DescribeTopicsResult describeTopics(Collection<String> topicNames, DescribeTopicsOptions options) {
    Map<String, KafkaFuture<TopicDescription>> topicDescriptions = new HashMap<>();
    if (timeoutNextRequests > 0) {
        for (String requestedTopic : topicNames) {
            KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new TimeoutException());
            topicDescriptions.put(requestedTopic, future);
        }
        --timeoutNextRequests;
        return new DescribeTopicsResult(topicDescriptions);
    }
    for (String requestedTopic : topicNames) {
        for (Map.Entry<String, TopicMetadata> topicDescription : allTopics.entrySet()) {
            String topicName = topicDescription.getKey();
            if (topicName.equals(requestedTopic)) {
                TopicMetadata topicMetadata = topicDescription.getValue();
                KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
                future.complete(new TopicDescription(topicName, topicMetadata.isInternalTopic, topicMetadata.partitions));
                topicDescriptions.put(topicName, future);
                break;
            }
        }
        if (!topicDescriptions.containsKey(requestedTopic)) {
            KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new UnknownTopicOrPartitionException(String.format("Topic %s unknown.", requestedTopic)));
            topicDescriptions.put(requestedTopic, future);
        }
    }
    return new DescribeTopicsResult(topicDescriptions);
}
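The mock completes missing topics exceptionally with UnknownTopicOrPartitionException, so a caller that wants to tolerate absent topics has to unwrap the cause of the ExecutionException. A minimal caller-side sketch, assuming a generic AdminClient and a hypothetical describeExisting helper:

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;

public class DescribeTopicsExample {

    // Hypothetical helper: describe topics, silently skipping those that do not exist.
    static Map<String, TopicDescription> describeExisting(AdminClient adminClient,
                                                          Collection<String> topics) throws InterruptedException {
        Map<String, KafkaFuture<TopicDescription>> futures = adminClient.describeTopics(topics).values();
        Map<String, TopicDescription> existing = new HashMap<>();
        for (Map.Entry<String, KafkaFuture<TopicDescription>> entry : futures.entrySet()) {
            try {
                existing.put(entry.getKey(), entry.getValue().get());
            } catch (ExecutionException e) {
                // Rethrow anything other than "topic does not exist".
                if (!(e.getCause() instanceof UnknownTopicOrPartitionException)) {
                    throw new RuntimeException(e.getCause());
                }
            }
        }
        return existing;
    }
}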
use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
the class InternalTopicManager method getNumPartitions.
/**
 * Get the number of partitions for the given topics
 */
// visible for testing
protected Map<String, Integer> getNumPartitions(final Set<String> topics) {
    int remainingRetries = retries;
    boolean retry;
    do {
        retry = false;
        final DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(topics);
        final Map<String, KafkaFuture<TopicDescription>> futures = describeTopicsResult.values();
        final Map<String, Integer> existingNumberOfPartitionsPerTopic = new HashMap<>();
        for (final Map.Entry<String, KafkaFuture<TopicDescription>> topicFuture : futures.entrySet()) {
            try {
                final TopicDescription topicDescription = topicFuture.getValue().get();
                existingNumberOfPartitionsPerTopic.put(topicFuture.getKey(), topicDescription.partitions().size());
            } catch (final InterruptedException fatalException) {
                Thread.currentThread().interrupt();
                log.error(INTERRUPTED_ERROR_MESSAGE, fatalException);
                throw new IllegalStateException(INTERRUPTED_ERROR_MESSAGE, fatalException);
            } catch (final ExecutionException couldNotDescribeTopicException) {
                final Throwable cause = couldNotDescribeTopicException.getCause();
                if (cause instanceof TimeoutException) {
                    retry = true;
                    log.debug("Could not get number of partitions for topic {} due to timeout. " + "Will try again (remaining retries {}).", topicFuture.getKey(), remainingRetries - 1);
                } else {
                    final String error = "Could not get number of partitions for topic {}.";
                    log.debug(error, topicFuture.getKey(), cause.getMessage());
                }
            }
        }
        if (retry) {
            topics.removeAll(existingNumberOfPartitionsPerTopic.keySet());
            continue;
        }
        return existingNumberOfPartitionsPerTopic;
    } while (remainingRetries-- > 0);
    return Collections.emptyMap();
}
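getNumPartitions iterates over values() because it needs per-topic granularity: only the topics whose futures failed with a TimeoutException are retried. When all-or-nothing semantics are acceptable, DescribeTopicsResult.all() collapses the map of futures into a single one. A minimal sketch of that coarser variant (the class name and the totalPartitions helper are hypothetical):

import java.util.Collection;
import java.util.Map;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.TopicDescription;

public class PartitionCountExample {

    // Hypothetical all-or-nothing variant: fail the whole lookup if any topic cannot be described.
    static int totalPartitions(AdminClient adminClient, Collection<String> topics)
            throws InterruptedException, ExecutionException {
        Map<String, TopicDescription> descriptions = adminClient.describeTopics(topics).all().get();
        int total = 0;
        for (TopicDescription description : descriptions.values()) {
            total += description.partitions().size();
        }
        return total;
    }
}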
use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
the class StreamsResetter method doDelete.
// visible for testing
public void doDelete(final List<String> topicsToDelete, final AdminClient adminClient) {
    boolean hasDeleteErrors = false;
    final DeleteTopicsResult deleteTopicsResult = adminClient.deleteTopics(topicsToDelete);
    final Map<String, KafkaFuture<Void>> results = deleteTopicsResult.values();
    for (final Map.Entry<String, KafkaFuture<Void>> entry : results.entrySet()) {
        try {
            entry.getValue().get(30, TimeUnit.SECONDS);
        } catch (Exception e) {
            System.err.println("ERROR: deleting topic " + entry.getKey());
            e.printStackTrace(System.err);
            hasDeleteErrors = true;
        }
    }
    if (hasDeleteErrors) {
        throw new RuntimeException("Encountered an error deleting one or more topics");
    }
}
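doDelete waits on each future individually with a bounded get() so that every topic that fails to delete is reported before the method gives up. If failing fast on the first error were acceptable, the whole batch could instead be awaited through a single bounded get() on DeleteTopicsResult.all(), as in this hypothetical sketch (class and helper names are assumptions):

import java.util.List;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.clients.admin.AdminClient;

public class DeleteTopicsExample {

    // Hypothetical coarse variant: one bounded wait for the whole batch,
    // failing fast on the first topic that cannot be deleted.
    static void deleteAllOrFail(AdminClient adminClient, List<String> topics) throws Exception {
        adminClient.deleteTopics(topics).all().get(30, TimeUnit.SECONDS);
    }
}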