Use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
From the class KafkaAdminClientTest, method testAlterUserScramCredentials.
@Test
public void testAlterUserScramCredentials() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        final String user0Name = "user0";
        ScramMechanism user0ScramMechanism0 = ScramMechanism.SCRAM_SHA_256;
        ScramMechanism user0ScramMechanism1 = ScramMechanism.SCRAM_SHA_512;
        final String user1Name = "user1";
        ScramMechanism user1ScramMechanism0 = ScramMechanism.SCRAM_SHA_256;
        final String user2Name = "user2";
        ScramMechanism user2ScramMechanism0 = ScramMechanism.SCRAM_SHA_512;
        AlterUserScramCredentialsResponseData responseData = new AlterUserScramCredentialsResponseData();
        responseData.setResults(Arrays.asList(user0Name, user1Name, user2Name).stream()
            .map(u -> new AlterUserScramCredentialsResponseData.AlterUserScramCredentialsResult()
                .setUser(u)
                .setErrorCode(Errors.NONE.code()))
            .collect(Collectors.toList()));
        env.kafkaClient().prepareResponse(new AlterUserScramCredentialsResponse(responseData));
        AlterUserScramCredentialsResult result = env.adminClient().alterUserScramCredentials(Arrays.asList(
            new UserScramCredentialDeletion(user0Name, user0ScramMechanism0),
            new UserScramCredentialUpsertion(user0Name, new ScramCredentialInfo(user0ScramMechanism1, 8192), "password"),
            new UserScramCredentialUpsertion(user1Name, new ScramCredentialInfo(user1ScramMechanism0, 8192), "password"),
            new UserScramCredentialDeletion(user2Name, user2ScramMechanism0)));
        Map<String, KafkaFuture<Void>> resultData = result.values();
        assertEquals(3, resultData.size());
        Arrays.asList(user0Name, user1Name, user2Name).stream().forEach(u -> {
            assertTrue(resultData.containsKey(u));
            assertFalse(resultData.get(u).isCompletedExceptionally());
        });
    }
}
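Outside the mock test environment, the same API is driven through a real Admin client from org.apache.kafka.clients.admin, and the returned KafkaFuture objects are simply awaited. A minimal sketch, assuming a broker at localhost:9092 and an illustrative user name and password (none of these values come from the test above):

// Sketch only: bootstrap address, user name and password are placeholders.
Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
try (Admin admin = Admin.create(props)) {
    AlterUserScramCredentialsResult result = admin.alterUserScramCredentials(Arrays.asList(
        new UserScramCredentialUpsertion("user0",
            new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 8192), "password")));
    // values() exposes one KafkaFuture<Void> per affected user; all() combines them.
    result.all().get();
}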
Use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
From the class KafkaAdminClientTest, method testDeleteAcls.
@Test
public void testDeleteAcls() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Test a call where one filter has an error.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(
            new DeleteAclsResponseData()
                .setThrottleTimeMs(0)
                .setFilterResults(asList(
                    new DeleteAclsResponseData.DeleteAclsFilterResult().setMatchingAcls(asList(
                        DeleteAclsResponse.matchingAcl(ACL1, ApiError.NONE),
                        DeleteAclsResponse.matchingAcl(ACL2, ApiError.NONE))),
                    new DeleteAclsResponseData.DeleteAclsFilterResult()
                        .setErrorCode(Errors.SECURITY_DISABLED.code())
                        .setErrorMessage("No security"))),
            ApiKeys.DELETE_ACLS.latestVersion()));
        DeleteAclsResult results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        Map<AclBindingFilter, KafkaFuture<FilterResults>> filterResults = results.values();
        FilterResults filter1Results = filterResults.get(FILTER1).get();
        assertNull(filter1Results.values().get(0).exception());
        assertEquals(ACL1, filter1Results.values().get(0).binding());
        assertNull(filter1Results.values().get(1).exception());
        assertEquals(ACL2, filter1Results.values().get(1).binding());
        TestUtils.assertFutureError(filterResults.get(FILTER2), SecurityDisabledException.class);
        TestUtils.assertFutureError(results.all(), SecurityDisabledException.class);
        // Test a call where one deletion result has an error.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(
            new DeleteAclsResponseData()
                .setThrottleTimeMs(0)
                .setFilterResults(asList(
                    new DeleteAclsResponseData.DeleteAclsFilterResult().setMatchingAcls(asList(
                        DeleteAclsResponse.matchingAcl(ACL1, ApiError.NONE),
                        new DeleteAclsResponseData.DeleteAclsMatchingAcl()
                            .setErrorCode(Errors.SECURITY_DISABLED.code())
                            .setErrorMessage("No security")
                            .setPermissionType(AclPermissionType.ALLOW.code())
                            .setOperation(AclOperation.ALTER.code())
                            .setResourceType(ResourceType.CLUSTER.code())
                            .setPatternType(FILTER2.patternFilter().patternType().code()))),
                    new DeleteAclsResponseData.DeleteAclsFilterResult())),
            ApiKeys.DELETE_ACLS.latestVersion()));
        results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        assertTrue(results.values().get(FILTER2).get().values().isEmpty());
        TestUtils.assertFutureError(results.all(), SecurityDisabledException.class);
        // Test a call where there are no errors.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(
            new DeleteAclsResponseData()
                .setThrottleTimeMs(0)
                .setFilterResults(asList(
                    new DeleteAclsResponseData.DeleteAclsFilterResult().setMatchingAcls(asList(
                        DeleteAclsResponse.matchingAcl(ACL1, ApiError.NONE))),
                    new DeleteAclsResponseData.DeleteAclsFilterResult().setMatchingAcls(asList(
                        DeleteAclsResponse.matchingAcl(ACL2, ApiError.NONE))))),
            ApiKeys.DELETE_ACLS.latestVersion()));
        results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        Collection<AclBinding> deleted = results.all().get();
        assertCollectionIs(deleted, ACL1, ACL2);
    }
}
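Against a real cluster, deleteAcls returns one KafkaFuture<FilterResults> per filter, and all() flattens them into the collection of deleted bindings. A minimal sketch, assuming a broker at localhost:9092; AclBindingFilter.ANY is used purely for illustration (it matches every binding) and, as the test above simulates, the futures complete exceptionally with SecurityDisabledException if the broker has no authorizer configured:

// Sketch only: the filter matches every ACL binding, which is drastic in practice.
Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
try (Admin admin = Admin.create(props)) {
    DeleteAclsResult result = admin.deleteAcls(Collections.singleton(AclBindingFilter.ANY));
    // all() completes once every per-filter KafkaFuture<FilterResults> has completed.
    for (AclBinding deleted : result.all().get()) {
        System.out.println("Deleted ACL: " + deleted);
    }
}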
Use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
From the class KafkaAdminClientTest, method testDeleteRecords.
@Test
public void testDeleteRecords() throws Exception {
    HashMap<Integer, Node> nodes = new HashMap<>();
    nodes.put(0, new Node(0, "localhost", 8121));
    List<PartitionInfo> partitionInfos = new ArrayList<>();
    partitionInfos.add(new PartitionInfo("my_topic", 0, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 1, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 2, null, new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 3, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 4, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    Cluster cluster = new Cluster("mockClusterId", nodes.values(), partitionInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), nodes.get(0));
    TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0);
    TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1);
    TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2);
    TopicPartition myTopicPartition3 = new TopicPartition("my_topic", 3);
    TopicPartition myTopicPartition4 = new TopicPartition("my_topic", 4);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        DeleteRecordsResponseData m = new DeleteRecordsResponseData();
        m.topics().add(new DeleteRecordsResponseData.DeleteRecordsTopicResult()
            .setName(myTopicPartition0.topic())
            .setPartitions(new DeleteRecordsResponseData.DeleteRecordsPartitionResultCollection(asList(
                new DeleteRecordsResponseData.DeleteRecordsPartitionResult()
                    .setPartitionIndex(myTopicPartition0.partition())
                    .setLowWatermark(3)
                    .setErrorCode(Errors.NONE.code()),
                new DeleteRecordsResponseData.DeleteRecordsPartitionResult()
                    .setPartitionIndex(myTopicPartition1.partition())
                    .setLowWatermark(DeleteRecordsResponse.INVALID_LOW_WATERMARK)
                    .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()),
                new DeleteRecordsResponseData.DeleteRecordsPartitionResult()
                    .setPartitionIndex(myTopicPartition3.partition())
                    .setLowWatermark(DeleteRecordsResponse.INVALID_LOW_WATERMARK)
                    .setErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code()),
                new DeleteRecordsResponseData.DeleteRecordsPartitionResult()
                    .setPartitionIndex(myTopicPartition4.partition())
                    .setLowWatermark(DeleteRecordsResponse.INVALID_LOW_WATERMARK)
                    .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code())).iterator())));
        List<MetadataResponse.TopicMetadata> t = new ArrayList<>();
        List<MetadataResponse.PartitionMetadata> p = new ArrayList<>();
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, myTopicPartition0, Optional.of(nodes.get(0).id()), Optional.of(5), singletonList(nodes.get(0).id()), singletonList(nodes.get(0).id()), Collections.emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, myTopicPartition1, Optional.of(nodes.get(0).id()), Optional.of(5), singletonList(nodes.get(0).id()), singletonList(nodes.get(0).id()), Collections.emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.LEADER_NOT_AVAILABLE, myTopicPartition2, Optional.empty(), Optional.empty(), singletonList(nodes.get(0).id()), singletonList(nodes.get(0).id()), Collections.emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, myTopicPartition3, Optional.of(nodes.get(0).id()), Optional.of(5), singletonList(nodes.get(0).id()), singletonList(nodes.get(0).id()), Collections.emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, myTopicPartition4, Optional.of(nodes.get(0).id()), Optional.of(5), singletonList(nodes.get(0).id()), singletonList(nodes.get(0).id()), Collections.emptyList()));
        t.add(new MetadataResponse.TopicMetadata(Errors.NONE, "my_topic", false, p));
        env.kafkaClient().prepareResponse(RequestTestUtils.metadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), cluster.controller().id(), t));
        env.kafkaClient().prepareResponse(new DeleteRecordsResponse(m));
        Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
        recordsToDelete.put(myTopicPartition0, RecordsToDelete.beforeOffset(3L));
        recordsToDelete.put(myTopicPartition1, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition2, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition3, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition4, RecordsToDelete.beforeOffset(10L));
        DeleteRecordsResult results = env.adminClient().deleteRecords(recordsToDelete);
        // success on records deletion for partition 0
        Map<TopicPartition, KafkaFuture<DeletedRecords>> values = results.lowWatermarks();
        KafkaFuture<DeletedRecords> myTopicPartition0Result = values.get(myTopicPartition0);
        long lowWatermark = myTopicPartition0Result.get().lowWatermark();
        assertEquals(3, lowWatermark);
// "offset out of range" failure on records deletion for partition 1
KafkaFuture<DeletedRecords> myTopicPartition1Result = values.get(myTopicPartition1);
try {
myTopicPartition1Result.get();
fail("get() should throw ExecutionException");
} catch (ExecutionException e0) {
assertTrue(e0.getCause() instanceof OffsetOutOfRangeException);
}
// "leader not available" failure on metadata request for partition 2
KafkaFuture<DeletedRecords> myTopicPartition2Result = values.get(myTopicPartition2);
try {
myTopicPartition2Result.get();
fail("get() should throw ExecutionException");
} catch (ExecutionException e1) {
assertTrue(e1.getCause() instanceof LeaderNotAvailableException);
}
// "not leader for partition" failure on records deletion for partition 3
KafkaFuture<DeletedRecords> myTopicPartition3Result = values.get(myTopicPartition3);
try {
myTopicPartition3Result.get();
fail("get() should throw ExecutionException");
} catch (ExecutionException e1) {
assertTrue(e1.getCause() instanceof NotLeaderOrFollowerException);
}
// "unknown topic or partition" failure on records deletion for partition 4
KafkaFuture<DeletedRecords> myTopicPartition4Result = values.get(myTopicPartition4);
try {
myTopicPartition4Result.get();
fail("get() should throw ExecutionException");
} catch (ExecutionException e1) {
assertTrue(e1.getCause() instanceof UnknownTopicOrPartitionException);
}
}
}
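Against a real cluster, deleteRecords has the same shape: one KafkaFuture<DeletedRecords> per requested partition, each carrying the new low watermark on success or the per-partition error on failure. A minimal sketch, assuming a broker at localhost:9092 and an illustrative topic, partition, and offset:

// Sketch only: topic, partition and offset are illustrative placeholders.
Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
try (Admin admin = Admin.create(props)) {
    Map<TopicPartition, RecordsToDelete> toDelete =
        Collections.singletonMap(new TopicPartition("my_topic", 0), RecordsToDelete.beforeOffset(100L));
    DeleteRecordsResult result = admin.deleteRecords(toDelete);
    // lowWatermarks() returns one KafkaFuture<DeletedRecords> per requested partition.
    for (Map.Entry<TopicPartition, KafkaFuture<DeletedRecords>> entry : result.lowWatermarks().entrySet()) {
        System.out.println(entry.getKey() + " low watermark is now " + entry.getValue().get().lowWatermark());
    }
}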
Use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
From the class KafkaAdminClientTest, method testDescribeConfigsUnrequested.
@Test
public void testDescribeConfigsUnrequested() throws Exception {
    ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "topic");
    ConfigResource unrequested = new ConfigResource(ConfigResource.Type.TOPIC, "unrequested");
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(new DescribeConfigsResponse(new DescribeConfigsResponseData().setResults(asList(
            new DescribeConfigsResponseData.DescribeConfigsResult()
                .setResourceName(topic.name())
                .setResourceType(topic.type().id())
                .setErrorCode(Errors.NONE.code())
                .setConfigs(emptyList()),
            new DescribeConfigsResponseData.DescribeConfigsResult()
                .setResourceName(unrequested.name())
                .setResourceType(unrequested.type().id())
                .setErrorCode(Errors.NONE.code())
                .setConfigs(emptyList())))));
        Map<ConfigResource, KafkaFuture<Config>> result = env.adminClient().describeConfigs(asList(topic)).values();
        assertEquals(new HashSet<>(asList(topic)), result.keySet());
        assertNotNull(result.get(topic).get());
        assertNull(result.get(unrequested));
    }
}
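The test above checks that describeConfigs only exposes futures for the resources that were actually requested, even if the broker response contains extras. In ordinary use the returned map is keyed by the requested ConfigResource objects. A minimal sketch, assuming a broker at localhost:9092 and an illustrative topic name:

// Sketch only: the topic name is an illustrative placeholder.
Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
try (Admin admin = Admin.create(props)) {
    ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, "my_topic");
    // values() maps each requested resource to a KafkaFuture<Config>.
    Config config = admin.describeConfigs(Collections.singleton(resource)).values().get(resource).get();
    for (ConfigEntry entry : config.entries()) {
        System.out.println(entry.name() + " = " + entry.value());
    }
}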
Use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
From the class KafkaAdminClientTest, method testDescribeBrokerAndLogConfigs.
@Test
public void testDescribeBrokerAndLogConfigs() throws Exception {
    ConfigResource brokerResource = new ConfigResource(ConfigResource.Type.BROKER, "0");
    ConfigResource brokerLoggerResource = new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "0");
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponseFrom(new DescribeConfigsResponse(new DescribeConfigsResponseData().setResults(asList(
            new DescribeConfigsResponseData.DescribeConfigsResult()
                .setResourceName(brokerResource.name())
                .setResourceType(brokerResource.type().id())
                .setErrorCode(Errors.NONE.code())
                .setConfigs(emptyList()),
            new DescribeConfigsResponseData.DescribeConfigsResult()
                .setResourceName(brokerLoggerResource.name())
                .setResourceType(brokerLoggerResource.type().id())
                .setErrorCode(Errors.NONE.code())
                .setConfigs(emptyList())))), env.cluster().nodeById(0));
        Map<ConfigResource, KafkaFuture<Config>> result = env.adminClient().describeConfigs(asList(brokerResource, brokerLoggerResource)).values();
        assertEquals(new HashSet<>(asList(brokerResource, brokerLoggerResource)), result.keySet());
        result.get(brokerResource).get();
        result.get(brokerLoggerResource).get();
    }
}
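All of the admin results above are built on KafkaFuture, which can also be consumed without blocking by chaining a completion callback. A minimal sketch, assuming a broker at localhost:9092 and using the broker-config resource as the example; the callback receives either the value or the failure cause:

// Sketch only: non-blocking consumption of a KafkaFuture via whenComplete;
// the bootstrap address is a placeholder.
Properties props = new Properties();
props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
try (Admin admin = Admin.create(props)) {
    ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "0");
    KafkaFuture<Config> future = admin.describeConfigs(Collections.singleton(broker)).values().get(broker);
    future.whenComplete((config, throwable) -> {
        if (throwable != null) {
            System.err.println("describeConfigs failed: " + throwable);
        } else {
            System.out.println("Broker 0 reports " + config.entries().size() + " config entries");
        }
    });
    // The callback fires whenever the broker responds (or the call fails).
}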