Use of org.apache.kafka.common.KafkaFuture in project ranger by apache.
The class KafkaRangerTopicCreationTest, method testCreateTopic.
@Test
public void testCreateTopic() throws Exception {
    final String topic = "test";
    Properties properties = new Properties();
    properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + port);
    properties.put("client.id", "test-consumer-id");
    properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
    AdminClient client = KafkaAdminClient.create(properties);
    CreateTopicsResult result = client.createTopics(Arrays.asList(new NewTopic(topic, 1, (short) 1)));
    // Block until the broker confirms (or rejects) the topic creation.
    result.values().get(topic).get();
    for (Map.Entry<String, KafkaFuture<Void>> entry : result.values().entrySet()) {
        System.out.println("Create Topic : " + entry.getKey()
                + " isCancelled : " + entry.getValue().isCancelled()
                + " isCompletedExceptionally : " + entry.getValue().isCompletedExceptionally()
                + " isDone : " + entry.getValue().isDone());
    }
    // Close the client so the test does not leak connections.
    client.close();
}
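The blocking get() style above is the simplest way to drive a KafkaFuture, but the class also supports callback chaining. A minimal non-blocking sketch, assuming a client version whose KafkaFuture exposes whenComplete; the bootstrap address and topic name are placeholders:

Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
try (AdminClient admin = AdminClient.create(props)) {
    KafkaFuture<Void> creation = admin
            .createTopics(Collections.singleton(new NewTopic("demo-topic", 1, (short) 1)))
            .values()
            .get("demo-topic");
    // React to completion instead of blocking the calling thread on get().
    creation.whenComplete((ignored, error) -> {
        if (error != null) {
            System.err.println("Topic creation failed: " + error);
        } else {
            System.out.println("Topic created");
        }
    });
    // close() waits for in-flight requests, so the callback runs before exit.
}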
Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
The class KafkaAdminClientTest, method testDeleteRecords.
@Test
public void testDeleteRecords() throws Exception {
    HashMap<Integer, Node> nodes = new HashMap<>();
    nodes.put(0, new Node(0, "localhost", 8121));
    List<PartitionInfo> partitionInfos = new ArrayList<>();
    partitionInfos.add(new PartitionInfo("my_topic", 0, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 1, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    // Partition 2 deliberately has no leader; its metadata lookup fails before any delete is attempted.
    partitionInfos.add(new PartitionInfo("my_topic", 2, null, new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 3, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 4, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    Cluster cluster = new Cluster("mockClusterId", nodes.values(), partitionInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), nodes.get(0));
    TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0);
    TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1);
    TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2);
    TopicPartition myTopicPartition3 = new TopicPartition("my_topic", 3);
    TopicPartition myTopicPartition4 = new TopicPartition("my_topic", 4);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().setNode(env.cluster().nodes().get(0));
        // Canned DeleteRecords response: partition 0 succeeds, the others fail in distinct ways.
        Map<TopicPartition, DeleteRecordsResponse.PartitionResponse> m = new HashMap<>();
        m.put(myTopicPartition0, new DeleteRecordsResponse.PartitionResponse(3, Errors.NONE));
        m.put(myTopicPartition1, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.OFFSET_OUT_OF_RANGE));
        m.put(myTopicPartition3, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.NOT_LEADER_FOR_PARTITION));
        m.put(myTopicPartition4, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.UNKNOWN_TOPIC_OR_PARTITION));
        // Canned metadata response; partition 2 reports LEADER_NOT_AVAILABLE.
        List<MetadataResponse.TopicMetadata> t = new ArrayList<>();
        List<MetadataResponse.PartitionMetadata> p = new ArrayList<>();
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 0, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 1, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.LEADER_NOT_AVAILABLE, 2, null, Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 3, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 4, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        t.add(new MetadataResponse.TopicMetadata(Errors.NONE, "my_topic", false, p));
        env.kafkaClient().prepareResponse(new MetadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), cluster.controller().id(), t));
        env.kafkaClient().prepareResponse(new DeleteRecordsResponse(0, m));
        Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
        recordsToDelete.put(myTopicPartition0, RecordsToDelete.beforeOffset(3L));
        recordsToDelete.put(myTopicPartition1, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition2, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition3, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition4, RecordsToDelete.beforeOffset(10L));
        DeleteRecordsResult results = env.adminClient().deleteRecords(recordsToDelete);
        // success on records deletion for partition 0
        Map<TopicPartition, KafkaFuture<DeletedRecords>> values = results.lowWatermarks();
        KafkaFuture<DeletedRecords> myTopicPartition0Result = values.get(myTopicPartition0);
        long lowWatermark = myTopicPartition0Result.get().lowWatermark();
        assertEquals(3, lowWatermark);
        // "offset out of range" failure on records deletion for partition 1
        KafkaFuture<DeletedRecords> myTopicPartition1Result = values.get(myTopicPartition1);
        try {
            myTopicPartition1Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e0) {
            assertTrue(e0.getCause() instanceof OffsetOutOfRangeException);
        }
        // "leader not available" failure on metadata request for partition 2
        KafkaFuture<DeletedRecords> myTopicPartition2Result = values.get(myTopicPartition2);
        try {
            myTopicPartition2Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof LeaderNotAvailableException);
        }
        // "not leader for partition" failure on records deletion for partition 3
        KafkaFuture<DeletedRecords> myTopicPartition3Result = values.get(myTopicPartition3);
        try {
            myTopicPartition3Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof NotLeaderForPartitionException);
        }
        // "unknown topic or partition" failure on records deletion for partition 4
        KafkaFuture<DeletedRecords> myTopicPartition4Result = values.get(myTopicPartition4);
        try {
            myTopicPartition4Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof UnknownTopicOrPartitionException);
        }
    }
}
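The four try/catch blocks above all follow one shape: call get(), demand an ExecutionException, and type-check its cause. Where that pattern recurs, a small helper collapses the repetition; a sketch using the hypothetical name expectCause, which is ours and not taken from the test itself:

// Hypothetical helper for the test class above: assert that a KafkaFuture
// fails and that the failure's cause is of the expected type.
private static <T> void expectCause(KafkaFuture<T> future, Class<? extends Throwable> expected) throws InterruptedException {
    try {
        future.get();
        fail("get() should throw ExecutionException");
    } catch (ExecutionException e) {
        assertTrue("unexpected cause: " + e.getCause(), expected.isInstance(e.getCause()));
    }
}
// Usage: expectCause(values.get(myTopicPartition1), OffsetOutOfRangeException.class);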
Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
The class KafkaAdminClientTest, method testDeleteAcls.
@Test
public void testDeleteAcls() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().setNode(env.cluster().controller());
        // Test a call where one filter has an error.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(0, asList(
                new AclFilterResponse(asList(new AclDeletionResult(ACL1), new AclDeletionResult(ACL2))),
                new AclFilterResponse(new ApiError(Errors.SECURITY_DISABLED, "No security"), Collections.<AclDeletionResult>emptySet()))));
        DeleteAclsResult results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        Map<AclBindingFilter, KafkaFuture<FilterResults>> filterResults = results.values();
        FilterResults filter1Results = filterResults.get(FILTER1).get();
        assertEquals(null, filter1Results.values().get(0).exception());
        assertEquals(ACL1, filter1Results.values().get(0).binding());
        assertEquals(null, filter1Results.values().get(1).exception());
        assertEquals(ACL2, filter1Results.values().get(1).binding());
        assertFutureError(filterResults.get(FILTER2), SecurityDisabledException.class);
        assertFutureError(results.all(), SecurityDisabledException.class);
        // Test a call where one deletion result has an error.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(0, asList(
                new AclFilterResponse(asList(new AclDeletionResult(ACL1), new AclDeletionResult(new ApiError(Errors.SECURITY_DISABLED, "No security"), ACL2))),
                new AclFilterResponse(Collections.<AclDeletionResult>emptySet()))));
        results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        assertTrue(results.values().get(FILTER2).get().values().isEmpty());
        assertFutureError(results.all(), SecurityDisabledException.class);
        // Test a call where there are no errors.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(0, asList(
                new AclFilterResponse(asList(new AclDeletionResult(ACL1))),
                new AclFilterResponse(asList(new AclDeletionResult(ACL2))))));
        results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        Collection<AclBinding> deleted = results.all().get();
        assertCollectionIs(deleted, ACL1, ACL2);
    }
}
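Outside a test, application code typically inspects each filter's FilterResults individually rather than letting all() throw on the first failure. A sketch under that assumption (adminClient and the filters are the caller's; DeleteAclsResult.FilterResult exposes binding() and exception()):

DeleteAclsResult result = adminClient.deleteAcls(asList(FILTER1, FILTER2));
for (Map.Entry<AclBindingFilter, KafkaFuture<DeleteAclsResult.FilterResults>> entry : result.values().entrySet()) {
    try {
        for (DeleteAclsResult.FilterResult fr : entry.getValue().get().values()) {
            if (fr.exception() != null) {
                System.err.println("Could not delete " + fr.binding() + ": " + fr.exception());
            } else {
                System.out.println("Deleted " + fr.binding());
            }
        }
    } catch (InterruptedException | ExecutionException e) {
        // The whole filter failed, e.g. the SecurityDisabledException case above.
        System.err.println("Filter " + entry.getKey() + " failed: " + e);
    }
}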
Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
The class KafkaAdminClientTest, method testCreatePartitions.
@Test
public void testCreatePartitions() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().setNode(env.cluster().controller());
        Map<String, ApiError> m = new HashMap<>();
        m.put("my_topic", ApiError.NONE);
        m.put("other_topic", ApiError.fromThrowable(new InvalidTopicException("some detailed reason")));
        // Test a call where one topic has an error.
        env.kafkaClient().prepareResponse(new CreatePartitionsResponse(0, m));
        Map<String, NewPartitions> counts = new HashMap<>();
        counts.put("my_topic", NewPartitions.increaseTo(3));
        counts.put("other_topic", NewPartitions.increaseTo(3, asList(asList(2), asList(3))));
        CreatePartitionsResult results = env.adminClient().createPartitions(counts);
        Map<String, KafkaFuture<Void>> values = results.values();
        KafkaFuture<Void> myTopicResult = values.get("my_topic");
        myTopicResult.get();
        KafkaFuture<Void> otherTopicResult = values.get("other_topic");
        try {
            otherTopicResult.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e0) {
            assertTrue(e0.getCause() instanceof InvalidTopicException);
            InvalidTopicException e = (InvalidTopicException) e0.getCause();
            assertEquals("some detailed reason", e.getMessage());
        }
    }
}
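Note the two increaseTo overloads exercised above: the single-argument form takes the desired total partition count (not a delta) and lets the broker place replicas, while the two-argument form pins each new partition's replica list, here brokers 2 and 3. A production-style sketch; topic names, counts, broker ids, and adminClient are placeholders supplied by the caller:

Map<String, NewPartitions> growth = new HashMap<>();
growth.put("my_topic", NewPartitions.increaseTo(6));
// Pin the replicas of each newly added partition to explicit broker ids.
growth.put("other_topic", NewPartitions.increaseTo(4, asList(asList(1, 2), asList(2, 3))));
CreatePartitionsResult result = adminClient.createPartitions(growth);
for (Map.Entry<String, KafkaFuture<Void>> entry : result.values().entrySet()) {
    try {
        entry.getValue().get();
    } catch (InterruptedException | ExecutionException e) {
        // e.g. InvalidPartitionsException when the "total" is not an increase.
        System.err.println("Could not grow " + entry.getKey() + ": " + e.getCause());
    }
}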
Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.
The class MockAdminClient, method createTopics.
@Override
public CreateTopicsResult createTopics(Collection<NewTopic> newTopics, CreateTopicsOptions options) {
    Map<String, KafkaFuture<Void>> createTopicResult = new HashMap<>();
    // Simulate broker-side timeouts when the test has requested them.
    if (timeoutNextRequests > 0) {
        for (final NewTopic newTopic : newTopics) {
            String topicName = newTopic.name();
            KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new TimeoutException());
            createTopicResult.put(topicName, future);
        }
        --timeoutNextRequests;
        return new CreateTopicsResult(createTopicResult);
    }
    for (final NewTopic newTopic : newTopics) {
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        String topicName = newTopic.name();
        // Reject duplicate topics, mirroring broker behavior.
        if (allTopics.containsKey(topicName)) {
            future.completeExceptionally(new TopicExistsException(String.format("Topic %s exists already.", topicName)));
            createTopicResult.put(topicName, future);
            continue;
        }
        // Assign the first replicationFactor brokers as replicas, with broker 0 as leader.
        int replicationFactor = newTopic.replicationFactor();
        List<Node> replicas = new ArrayList<>(replicationFactor);
        for (int i = 0; i < replicationFactor; ++i) {
            replicas.add(brokers.get(i));
        }
        int numberOfPartitions = newTopic.numPartitions();
        List<TopicPartitionInfo> partitions = new ArrayList<>(numberOfPartitions);
        for (int p = 0; p < numberOfPartitions; ++p) {
            partitions.add(new TopicPartitionInfo(p, brokers.get(0), replicas, Collections.<Node>emptyList()));
        }
        allTopics.put(topicName, new TopicMetadata(false, partitions, newTopic.configs()));
        future.complete(null);
        createTopicResult.put(topicName, future);
    }
    return new CreateTopicsResult(createTopicResult);
}
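A sketch of driving this mock from a unit test, assuming the (brokers, controller) constructor this code base provides; the topic name is a placeholder:

Node broker = new Node(0, "localhost", 9092);
try (MockAdminClient admin = new MockAdminClient(Collections.singletonList(broker), broker)) {
    NewTopic topic = new NewTopic("unit-test-topic", 1, (short) 1);
    // First creation succeeds: the future completes normally.
    admin.createTopics(Collections.singleton(topic)).all().get();
    try {
        // Second creation hits the duplicate check above and fails.
        admin.createTopics(Collections.singleton(topic)).all().get();
        fail("expected TopicExistsException");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof TopicExistsException);
    }
}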