use of org.apache.kafka.common.KafkaFuture in project core-ng-project by neowu.
the class KafkaController method topics.
public Response topics(Request request) throws ExecutionException, InterruptedException {
    ControllerHelper.assertFromLocalNetwork(request.clientIP());
    List<KafkaTopic> views = Lists.newArrayList();
    try (AdminClient admin = kafka.admin()) {
        Set<String> topics = admin.listTopics().names().get();
        DescribeTopicsResult descriptions = admin.describeTopics(topics);
        for (Map.Entry<String, KafkaFuture<TopicDescription>> entry : descriptions.values().entrySet()) {
            String name = entry.getKey();
            TopicDescription description = entry.getValue().get();
            KafkaTopic view = view(name, description);
            views.add(view);
        }
    }
    return Response.bean(views);
}
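The loop above blocks on each per-topic future in turn. DescribeTopicsResult also exposes all(), which folds those futures into a single KafkaFuture<Map<String, TopicDescription>>, so one get() waits for every description at once. A minimal sketch of that variant, reusing kafka.admin(), view(), and Response.bean() from the snippet above, so it is illustrative rather than the project's actual code:
try (AdminClient admin = kafka.admin()) {
    Set<String> topics = admin.listTopics().names().get();
    // all() completes only once every per-topic description has arrived
    Map<String, TopicDescription> descriptions = admin.describeTopics(topics).all().get();
    List<KafkaTopic> views = Lists.newArrayList();
    descriptions.forEach((name, description) -> views.add(view(name, description)));
    return Response.bean(views);
}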
use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
the class DescribeUserScramCredentialsResult method description.
/**
 * @param userName the name of the user description being requested
 * @return a future indicating the description results for the given user. The future will complete exceptionally if
 * the future returned by {@link #users()} completes exceptionally. Note that if the given user does not exist in
 * the list of described users then the returned future will complete exceptionally with
 * {@link org.apache.kafka.common.errors.ResourceNotFoundException}.
 */
public KafkaFuture<UserScramCredentialsDescription> description(String userName) {
    final KafkaFutureImpl<UserScramCredentialsDescription> retval = new KafkaFutureImpl<>();
    dataFuture.whenComplete((data, throwable) -> {
        if (throwable != null) {
            retval.completeExceptionally(throwable);
        } else {
            // it is possible that there is no future for this user (for example, the original describe request was
            // for users 1, 2, and 3 but this is looking for user 4), so explicitly take care of that case
            Optional<DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult> optionalUserResult =
                    data.results().stream().filter(result -> result.user().equals(userName)).findFirst();
            if (!optionalUserResult.isPresent()) {
                retval.completeExceptionally(new ResourceNotFoundException("No such user: " + userName));
            } else {
                DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult userResult = optionalUserResult.get();
                if (userResult.errorCode() != Errors.NONE.code()) {
                    // RESOURCE_NOT_FOUND is included here
                    retval.completeExceptionally(Errors.forCode(userResult.errorCode()).exception(userResult.errorMessage()));
                } else {
                    retval.complete(new UserScramCredentialsDescription(userResult.user(), getScramCredentialInfosFor(userResult)));
                }
            }
        }
    });
    return retval;
}
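On the consuming side, the returned future completes normally only for users the broker actually described. A hypothetical usage sketch, assuming an Admin client named admin and a user "alice" (both invented here), that handles the ResourceNotFoundException the javadoc above promises:
DescribeUserScramCredentialsResult result = admin.describeUserScramCredentials();
try {
    UserScramCredentialsDescription description = result.description("alice").get();
    // each ScramCredentialInfo reports the SCRAM mechanism and its iteration count
    description.credentialInfos().forEach(info ->
            System.out.println(info.mechanism() + " iterations=" + info.iterations()));
} catch (ExecutionException e) {
    if (e.getCause() instanceof ResourceNotFoundException) {
        System.out.println("no such user: alice");
    }
}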
use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
the class MirrorCheckpointTask method refreshIdleConsumerGroupOffset.
private void refreshIdleConsumerGroupOffset() {
    Map<String, KafkaFuture<ConsumerGroupDescription>> consumerGroupsDesc =
            targetAdminClient.describeConsumerGroups(consumerGroups).describedGroups();
    for (String group : consumerGroups) {
        try {
            ConsumerGroupDescription consumerGroupDesc = consumerGroupsDesc.get(group).get();
            ConsumerGroupState consumerGroupState = consumerGroupDesc.state();
            // sync the offset only if the state of the consumer group at the target is:
            // (1) idle: the consumer at target is not actively consuming the mirrored topic
            // (2) dead: a consumer recently created at source that has never existed at target
            if (consumerGroupState.equals(ConsumerGroupState.EMPTY)) {
                idleConsumerGroupsOffset.put(group,
                        targetAdminClient.listConsumerGroupOffsets(group).partitionsToOffsetAndMetadata().get()
                                .entrySet().stream()
                                .collect(Collectors.toMap(Entry::getKey, Entry::getValue)));
            }
            // a new consumer upstream has state "DEAD" and will be identified during the offset sync-up
        } catch (InterruptedException | ExecutionException e) {
            log.error("Error querying for consumer group {} on cluster {}.", group, targetClusterAlias, e);
        }
    }
}
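The same describe-then-snapshot pattern works against any Admin client. A minimal standalone sketch, assuming an admin client and a collection named groups are in scope (both invented here) and that the checked exceptions from get() propagate to the caller:
Map<String, KafkaFuture<ConsumerGroupDescription>> described =
        admin.describeConsumerGroups(groups).describedGroups();
for (String group : groups) {
    ConsumerGroupDescription d = described.get(group).get();
    if (d.state() == ConsumerGroupState.EMPTY) {
        // snapshot committed offsets for idle groups, as MirrorCheckpointTask does above
        Map<TopicPartition, OffsetAndMetadata> offsets =
                admin.listConsumerGroupOffsets(group).partitionsToOffsetAndMetadata().get();
        System.out.println(group + " has " + offsets.size() + " committed offsets");
    }
}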
use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
the class KafkaAdminClientTest method testAlterUserScramCredentialsUnknownMechanism.
@Test
public void testAlterUserScramCredentialsUnknownMechanism() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        final String user0Name = "user0";
        ScramMechanism user0ScramMechanism0 = ScramMechanism.UNKNOWN;
        final String user1Name = "user1";
        ScramMechanism user1ScramMechanism0 = ScramMechanism.UNKNOWN;
        final String user2Name = "user2";
        ScramMechanism user2ScramMechanism0 = ScramMechanism.SCRAM_SHA_256;
        AlterUserScramCredentialsResponseData responseData = new AlterUserScramCredentialsResponseData();
        responseData.setResults(Arrays.asList(
                new AlterUserScramCredentialsResponseData.AlterUserScramCredentialsResult().setUser(user2Name)));
        env.kafkaClient().prepareResponse(new AlterUserScramCredentialsResponse(responseData));
        AlterUserScramCredentialsResult result = env.adminClient().alterUserScramCredentials(Arrays.asList(
                new UserScramCredentialDeletion(user0Name, user0ScramMechanism0),
                new UserScramCredentialUpsertion(user1Name, new ScramCredentialInfo(user1ScramMechanism0, 8192), "password"),
                new UserScramCredentialUpsertion(user2Name, new ScramCredentialInfo(user2ScramMechanism0, 4096), "password")));
        Map<String, KafkaFuture<Void>> resultData = result.values();
        assertEquals(3, resultData.size());
        Arrays.asList(user0Name, user1Name).stream().forEach(u -> {
            assertTrue(resultData.containsKey(u));
            try {
                resultData.get(u).get();
                fail("Expected request for user " + u + " to complete exceptionally, but it did not");
            } catch (Exception expected) {
                // ignore
            }
        });
        assertTrue(resultData.containsKey(user2Name));
        try {
            resultData.get(user2Name).get();
        } catch (Exception e) {
            fail("Expected request for user " + user2Name + " to NOT complete exceptionally, but it did");
        }
        try {
            result.all().get();
            fail("Expected 'result.all().get()' to throw an exception since at least one user failed, but it did not");
        } catch (final Exception expected) {
            // ignore, expected
        }
    }
}
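The try/fail/catch blocks above can be written more compactly with JUnit 5's assertThrows and assertDoesNotThrow; a sketch of the same assertions, relying on the fact that KafkaFuture.get() wraps an exceptional completion in ExecutionException:
assertThrows(ExecutionException.class, () -> resultData.get(user0Name).get());
assertThrows(ExecutionException.class, () -> resultData.get(user1Name).get());
assertDoesNotThrow(() -> resultData.get(user2Name).get());
// all() fails if any single user's alteration failed
assertThrows(ExecutionException.class, () -> result.all().get());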
use of org.apache.kafka.common.KafkaFuture in project kafka by apache.
the class KafkaAdminClientTest method testDescribeLogDirsOfflineDirDeprecated.
@SuppressWarnings("deprecation")
@Test
public void testDescribeLogDirsOfflineDirDeprecated() throws ExecutionException, InterruptedException {
    Set<Integer> brokers = singleton(0);
    String logDir = "/var/data/kafka";
    Errors error = Errors.KAFKA_STORAGE_ERROR;
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponseFrom(prepareDescribeLogDirsResponse(error, logDir, emptyList()), env.cluster().nodeById(0));
        DescribeLogDirsResult result = env.adminClient().describeLogDirs(brokers);
        Map<Integer, KafkaFuture<Map<String, DescribeLogDirsResponse.LogDirInfo>>> deprecatedValues = result.values();
        assertEquals(brokers, deprecatedValues.keySet());
        assertNotNull(deprecatedValues.get(0));
        Map<String, DescribeLogDirsResponse.LogDirInfo> valuesMap = deprecatedValues.get(0).get();
        assertEquals(singleton(logDir), valuesMap.keySet());
        assertEquals(error, valuesMap.get(logDir).error);
        assertEquals(emptySet(), valuesMap.get(logDir).replicaInfos.keySet());
        Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>> deprecatedAll = result.all().get();
        assertEquals(brokers, deprecatedAll.keySet());
        Map<String, DescribeLogDirsResponse.LogDirInfo> allMap = deprecatedAll.get(0);
        assertNotNull(allMap);
        assertEquals(singleton(logDir), allMap.keySet());
        assertEquals(error, allMap.get(logDir).error);
        assertEquals(emptySet(), allMap.get(logDir).replicaInfos.keySet());
    }
}
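The values() accessor exercised here is deprecated; since Kafka 2.7, DescribeLogDirsResult offers descriptions(), whose LogDirDescription values expose error() as an ApiException (null when the directory is healthy). A minimal sketch against the result from the test above, printing instead of asserting, purely for illustration:
Map<Integer, KafkaFuture<Map<String, LogDirDescription>>> descriptions = result.descriptions();
Map<String, LogDirDescription> dirsOnBroker0 = descriptions.get(0).get();
LogDirDescription dir = dirsOnBroker0.get("/var/data/kafka");
if (dir.error() != null) {
    // KAFKA_STORAGE_ERROR surfaces here as a KafkaStorageException
    System.out.println("offline log dir: " + dir.error().getMessage());
}
System.out.println("replicas hosted: " + dir.replicaInfos().size());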