Use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
The class InternalTopicManager, method cleanUpCreatedTopics.
private void cleanUpCreatedTopics(final Set<String> topicsToCleanUp) {
    log.info("Starting to clean up internal topics {}.", topicsToCleanUp);
    final long now = time.milliseconds();
    final long deadline = now + retryTimeoutMs;
    final Set<String> topicsStillToCleanup = new HashSet<>(topicsToCleanUp);
    while (!topicsStillToCleanup.isEmpty()) {
        log.info("Going to cleanup internal topics: " + topicsStillToCleanup);
        final DeleteTopicsResult deleteTopicsResult = adminClient.deleteTopics(topicsStillToCleanup);
        final Map<String, KafkaFuture<Void>> deleteResultForTopic = deleteTopicsResult.topicNameValues();
        while (!deleteResultForTopic.isEmpty()) {
            for (final String topicName : new HashSet<>(topicsStillToCleanup)) {
                if (!deleteResultForTopic.containsKey(topicName)) {
                    throw new IllegalStateException("Delete topic results do not contain internal topic " + topicName + " to clean up. " + BUG_ERROR_MESSAGE);
                }
                final KafkaFuture<Void> deleteResult = deleteResultForTopic.get(topicName);
                if (deleteResult.isDone()) {
                    try {
                        deleteResult.get();
                        topicsStillToCleanup.remove(topicName);
                    } catch (final ExecutionException executionException) {
                        final Throwable cause = executionException.getCause();
                        if (cause instanceof UnknownTopicOrPartitionException) {
                            log.info("Internal topic {} to clean up is missing", topicName);
                        } else if (cause instanceof LeaderNotAvailableException) {
                            log.info("The leader of internal topic {} to clean up is not available.", topicName);
                        } else if (cause instanceof TimeoutException) {
                            log.info("Cleaning up internal topic {} timed out.", topicName);
                        } else {
                            log.error("Unexpected error during cleanup of internal topics: ", cause);
                            throw new StreamsException(String.format("Could not clean up internal topics %s, because during the cleanup " + "of topic %s the following error occurred: ", topicsStillToCleanup, topicName), cause);
                        }
                    } catch (final InterruptedException interruptedException) {
                        throw new InterruptException(interruptedException);
                    } finally {
                        deleteResultForTopic.remove(topicName);
                    }
                }
            }
            maybeThrowTimeoutException(Collections.singletonList(topicsStillToCleanup), deadline, String.format("Could not cleanup internal topics within %d milliseconds. This can happen if the " + "Kafka cluster is temporarily not available or the broker did not complete topic creation " + "before the cleanup. The following internal topics could not be cleaned up: %s", retryTimeoutMs, topicsStillToCleanup));
            if (!deleteResultForTopic.isEmpty()) {
                Utils.sleep(100);
            }
        }
        maybeSleep(Collections.singletonList(topicsStillToCleanup), deadline, "validated");
    }
    log.info("Completed cleanup of internal topics {}.", topicsToCleanUp);
}
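The method above polls isDone() so it can keep making progress across all topics and retry transient failures. For callers that do not need that retry loop, a minimal sketch of the simpler blocking pattern on the same per-topic KafkaFuture<Void> results is shown below; the adminClient and topics names and the 30-second bound are illustrative assumptions, not part of the Streams code.

// Sketch only (not the Streams code above): block on each topic's deletion future
// with a bounded get() instead of polling isDone() in a retry loop.
final Map<String, KafkaFuture<Void>> results = adminClient.deleteTopics(topics).topicNameValues();
for (final Map.Entry<String, KafkaFuture<Void>> entry : results.entrySet()) {
    try {
        entry.getValue().get(30, TimeUnit.SECONDS); // wait up to 30 seconds per topic (illustrative)
    } catch (final ExecutionException e) {
        if (!(e.getCause() instanceof UnknownTopicOrPartitionException)) {
            throw new RuntimeException("Failed to delete topic " + entry.getKey(), e.getCause());
        }
        // topic was already gone; treat as success
    } catch (final java.util.concurrent.TimeoutException e) {
        // deletion did not complete in time; retry or surface the error
    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
}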
Use of org.apache.kafka.common.KafkaFuture in project strimzi by strimzi.
The class BaseKafkaImpl, method topicMetadata.
/**
 * Get a topic's description and config via the Kafka AdminClient API, calling the given handler
 * (in a different thread) with the result.
 */
@Override
public void topicMetadata(TopicName topicName, Handler<AsyncResult<TopicMetadata>> handler) {
    LOGGER.debug("Getting metadata for topic {}", topicName);
    ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topicName.toString());
    KafkaFuture<TopicDescription> descriptionFuture = adminClient.describeTopics(Collections.singleton(topicName.toString())).values().get(topicName.toString());
    KafkaFuture<Config> configFuture = adminClient.describeConfigs(Collections.singleton(resource)).values().get(resource);
    queueWork(new MetadataWork(descriptionFuture, configFuture, result -> handler.handle(result)));
}
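Here MetadataWork polls both futures from a worker queue. As a point of comparison, a minimal sketch of combining the two futures directly with KafkaFuture.allOf and whenComplete is shown below; the TopicMetadata(description, config) constructor is an assumption based on the Strimzi types above, and results are delivered through the same Vert.x handler.

// Sketch only: combine the two futures without the MetadataWork queue.
// Assumes a TopicMetadata(TopicDescription, Config) constructor and the Vert.x handler above.
KafkaFuture.allOf(descriptionFuture, configFuture).whenComplete((ignored, error) -> {
    if (error != null) {
        handler.handle(Future.failedFuture(error));
        return;
    }
    try {
        TopicDescription description = descriptionFuture.get(); // already completed, does not block
        Config config = configFuture.get();
        handler.handle(Future.succeededFuture(new TopicMetadata(description, config)));
    } catch (Exception e) {
        handler.handle(Future.failedFuture(e));
    }
});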
Use of org.apache.kafka.common.KafkaFuture in project ksql by confluentinc.
The class KafkaTopicClientImplTest, method getDeleteInternalTopicsResult.
private DeleteTopicsResult getDeleteInternalTopicsResult() {
    DeleteTopicsResult deleteTopicsResult = mock(DeleteTopicsResult.class);
    Map<String, KafkaFuture<Void>> deletedTopics = new HashMap<>();
    deletedTopics.put(internalTopic1, KafkaFuture.allOf());
    deletedTopics.put(internalTopic2, KafkaFuture.allOf());
    expect(deleteTopicsResult.values()).andReturn(deletedTopics);
    replay(deleteTopicsResult);
    return deleteTopicsResult;
}
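KafkaFuture.allOf() with no arguments returns an already-completed KafkaFuture<Void>, which is why it can stand in for a successful deletion in this EasyMock stub. A short sketch of other ways to build pre-resolved futures for such mocks; variable names are illustrative, and KafkaFutureImpl is internal API (org.apache.kafka.common.internals) that is nonetheless common in tests.

// Sketch: pre-resolved futures for mocks (names are illustrative).
KafkaFuture<Void> succeeded = KafkaFuture.completedFuture(null);             // already successful
KafkaFutureImpl<Void> failed = new KafkaFutureImpl<>();                      // internal API, used in tests
failed.completeExceptionally(new UnknownTopicOrPartitionException("gone"));  // already failed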
Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
The class KafkaAdminClientTest, method testDeleteRecords.
@Test
public void testDeleteRecords() throws Exception {
    HashMap<Integer, Node> nodes = new HashMap<>();
    nodes.put(0, new Node(0, "localhost", 8121));
    List<PartitionInfo> partitionInfos = new ArrayList<>();
    partitionInfos.add(new PartitionInfo("my_topic", 0, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 1, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 2, null, new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 3, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 4, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    Cluster cluster = new Cluster("mockClusterId", nodes.values(), partitionInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), nodes.get(0));
    TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0);
    TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1);
    TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2);
    TopicPartition myTopicPartition3 = new TopicPartition("my_topic", 3);
    TopicPartition myTopicPartition4 = new TopicPartition("my_topic", 4);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().setNode(env.cluster().nodes().get(0));
        Map<TopicPartition, DeleteRecordsResponse.PartitionResponse> m = new HashMap<>();
        m.put(myTopicPartition0, new DeleteRecordsResponse.PartitionResponse(3, Errors.NONE));
        m.put(myTopicPartition1, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.OFFSET_OUT_OF_RANGE));
        m.put(myTopicPartition3, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.NOT_LEADER_FOR_PARTITION));
        m.put(myTopicPartition4, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.UNKNOWN_TOPIC_OR_PARTITION));
        List<MetadataResponse.TopicMetadata> t = new ArrayList<>();
        List<MetadataResponse.PartitionMetadata> p = new ArrayList<>();
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 0, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 1, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.LEADER_NOT_AVAILABLE, 2, null, Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 3, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 4, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        t.add(new MetadataResponse.TopicMetadata(Errors.NONE, "my_topic", false, p));
        env.kafkaClient().prepareResponse(new MetadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), cluster.controller().id(), t));
        env.kafkaClient().prepareResponse(new DeleteRecordsResponse(0, m));
        Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
        recordsToDelete.put(myTopicPartition0, RecordsToDelete.beforeOffset(3L));
        recordsToDelete.put(myTopicPartition1, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition2, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition3, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition4, RecordsToDelete.beforeOffset(10L));
        DeleteRecordsResult results = env.adminClient().deleteRecords(recordsToDelete);
        // success on records deletion for partition 0
        Map<TopicPartition, KafkaFuture<DeletedRecords>> values = results.lowWatermarks();
        KafkaFuture<DeletedRecords> myTopicPartition0Result = values.get(myTopicPartition0);
        long lowWatermark = myTopicPartition0Result.get().lowWatermark();
        assertEquals(lowWatermark, 3);
        // "offset out of range" failure on records deletion for partition 1
        KafkaFuture<DeletedRecords> myTopicPartition1Result = values.get(myTopicPartition1);
        try {
            myTopicPartition1Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e0) {
            assertTrue(e0.getCause() instanceof OffsetOutOfRangeException);
        }
        // "leader not available" failure on metadata request for partition 2
        KafkaFuture<DeletedRecords> myTopicPartition2Result = values.get(myTopicPartition2);
        try {
            myTopicPartition2Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof LeaderNotAvailableException);
        }
        // "not leader for partition" failure on records deletion for partition 3
        KafkaFuture<DeletedRecords> myTopicPartition3Result = values.get(myTopicPartition3);
        try {
            myTopicPartition3Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof NotLeaderForPartitionException);
        }
        // "unknown topic or partition" failure on records deletion for partition 4
        KafkaFuture<DeletedRecords> myTopicPartition4Result = values.get(myTopicPartition4);
        try {
            myTopicPartition4Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof UnknownTopicOrPartitionException);
        }
    }
}
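The four try/fail/catch blocks above repeat the same KafkaFuture assertion. A small hypothetical helper (not part of KafkaAdminClientTest) that expresses the pattern once could look like this:

// Hypothetical helper: assert that a KafkaFuture fails with the expected cause.
private static <T> void assertFutureFailsWith(KafkaFuture<T> future, Class<? extends Throwable> expectedCause) throws InterruptedException {
    try {
        future.get();
        fail("get() should throw ExecutionException");
    } catch (ExecutionException e) {
        assertTrue(expectedCause.isInstance(e.getCause()));
    }
}

With it, each failure case collapses to a single call, for example assertFutureFailsWith(myTopicPartition1Result, OffsetOutOfRangeException.class).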
Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
The class KafkaAdminClientTest, method testDeleteAcls.
@Test
public void testDeleteAcls() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().setNode(env.cluster().controller());
        // Test a call where one filter has an error.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(0, asList(new AclFilterResponse(asList(new AclDeletionResult(ACL1), new AclDeletionResult(ACL2))), new AclFilterResponse(new ApiError(Errors.SECURITY_DISABLED, "No security"), Collections.<AclDeletionResult>emptySet()))));
        DeleteAclsResult results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        Map<AclBindingFilter, KafkaFuture<FilterResults>> filterResults = results.values();
        FilterResults filter1Results = filterResults.get(FILTER1).get();
        assertEquals(null, filter1Results.values().get(0).exception());
        assertEquals(ACL1, filter1Results.values().get(0).binding());
        assertEquals(null, filter1Results.values().get(1).exception());
        assertEquals(ACL2, filter1Results.values().get(1).binding());
        assertFutureError(filterResults.get(FILTER2), SecurityDisabledException.class);
        assertFutureError(results.all(), SecurityDisabledException.class);
        // Test a call where one deletion result has an error.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(0, asList(new AclFilterResponse(asList(new AclDeletionResult(ACL1), new AclDeletionResult(new ApiError(Errors.SECURITY_DISABLED, "No security"), ACL2))), new AclFilterResponse(Collections.<AclDeletionResult>emptySet()))));
        results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        assertTrue(results.values().get(FILTER2).get().values().isEmpty());
        assertFutureError(results.all(), SecurityDisabledException.class);
        // Test a call where there are no errors.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(0, asList(new AclFilterResponse(asList(new AclDeletionResult(ACL1))), new AclFilterResponse(asList(new AclDeletionResult(ACL2))))));
        results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        Collection<AclBinding> deleted = results.all().get();
        assertCollectionIs(deleted, ACL1, ACL2);
    }
}
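Outside of a test, the same per-filter KafkaFutures from DeleteAclsResult.values() can be consumed directly. A minimal sketch, assuming an existing Admin client named admin and the FILTER1/FILTER2 bindings used above:

// Sketch: iterate the per-filter futures returned by deleteAcls (admin and filters are assumptions).
DeleteAclsResult result = admin.deleteAcls(asList(FILTER1, FILTER2));
for (Map.Entry<AclBindingFilter, KafkaFuture<DeleteAclsResult.FilterResults>> entry : result.values().entrySet()) {
    try {
        for (DeleteAclsResult.FilterResult fr : entry.getValue().get().values()) {
            if (fr.exception() != null) {
                System.err.println("Deletion failed for filter " + entry.getKey() + ": " + fr.exception());
            } else {
                System.out.println("Deleted ACL binding " + fr.binding());
            }
        }
    } catch (ExecutionException e) {
        System.err.println("deleteAcls failed for filter " + entry.getKey() + ": " + e.getCause());
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        break;
    }
}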