Use of org.apache.kafka.common.KafkaFuture in the project apache-kafka-on-k8s by banzaicloud:
the class KafkaAdminClient, method alterConfigs.
@Override
public AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options) {
    // One future per requested resource; filled in by the per-request helper calls below.
    final Map<ConfigResource, KafkaFutureImpl<Void>> futuresByResource = new HashMap<>();
    // A concrete (non-default) BROKER resource must be altered by a request sent to that
    // exact broker. Everything else is batched into a single request that any broker can serve.
    final Collection<ConfigResource> sharedRequestResources = new ArrayList<>();
    for (final ConfigResource resource : configs.keySet()) {
        final boolean brokerSpecific =
            resource.type() == ConfigResource.Type.BROKER && !resource.isDefault();
        if (brokerSpecific) {
            final int brokerId = Integer.parseInt(resource.name());
            futuresByResource.putAll(
                alterConfigs(configs, options, Collections.singleton(resource),
                    new ConstantNodeIdProvider(brokerId)));
        } else {
            sharedRequestResources.add(resource);
        }
    }
    if (!sharedRequestResources.isEmpty()) {
        futuresByResource.putAll(
            alterConfigs(configs, options, sharedRequestResources, new LeastLoadedNodeProvider()));
    }
    return new AlterConfigsResult(new HashMap<ConfigResource, KafkaFuture<Void>>(futuresByResource));
}
Use of org.apache.kafka.common.KafkaFuture in the project kafka by apache:
the class KafkaAdminClientTest, method testUpdateFeatures.
/**
 * Drives {@code Admin.updateFeatures} against a mocked broker response built from
 * {@code topLevelError} and per-feature {@code featureUpdateErrors}, and verifies that each
 * returned future completes or fails with the expected exception type.
 *
 * Fix: the {@code assertEquals} calls passed arguments as (actual, expected); JUnit's
 * contract is (expected, actual), so failure messages were reversed. Arguments swapped.
 */
private void testUpdateFeatures(Map<String, FeatureUpdate> featureUpdates, ApiError topLevelError, Map<String, ApiError> featureUpdateErrors) throws Exception {
    try (final AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().prepareResponse(body -> body instanceof UpdateFeaturesRequest, UpdateFeaturesResponse.createWithErrors(topLevelError, featureUpdateErrors, 0));
        final Map<String, KafkaFuture<Void>> futures = env.adminClient().updateFeatures(featureUpdates, new UpdateFeaturesOptions().timeoutMs(10000)).values();
        for (final Map.Entry<String, KafkaFuture<Void>> entry : futures.entrySet()) {
            final KafkaFuture<Void> future = entry.getValue();
            final ApiError error = featureUpdateErrors.get(entry.getKey());
            if (topLevelError.error() == Errors.NONE) {
                // No top-level error: each feature's outcome is governed by its own ApiError.
                assertNotNull(error);
                if (error.error() == Errors.NONE) {
                    future.get();
                } else {
                    final ExecutionException e = assertThrows(ExecutionException.class, future::get);
                    // expected first, actual second (JUnit convention)
                    assertEquals(error.exception().getClass(), e.getCause().getClass());
                }
            } else {
                // A top-level error fails every feature's future with that error's exception.
                final ExecutionException e = assertThrows(ExecutionException.class, future::get);
                assertEquals(topLevelError.exception().getClass(), e.getCause().getClass());
            }
        }
    }
}
Use of org.apache.kafka.common.KafkaFuture in the project kafka by apache:
the class KafkaAdminClientTest, method testDescribeReplicaLogDirsUnexpected.
@Test
public void testDescribeReplicaLogDirsUnexpected() throws ExecutionException, InterruptedException {
    // Only `queried` is requested; the broker's response also carries data for an
    // unrequested replica, which the client folds into the future-replica fields.
    final TopicPartitionReplica queried = new TopicPartitionReplica("topic", 12, 1);
    final TopicPartitionReplica extra = new TopicPartitionReplica("topic", 12, 2);
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        final String currentDir = "/var/data/kafka0";
        final String futureDir = "/var/data/kafka1";
        final int currentSize = 987654321;
        final int currentLag = 24;
        final int futureSize = 123456789;
        final int futureLag = 4321;
        env.kafkaClient().prepareResponseFrom(
            new DescribeLogDirsResponse(new DescribeLogDirsResponseData().setResults(asList(
                prepareDescribeLogDirsResult(queried, currentDir, currentSize, currentLag, false),
                prepareDescribeLogDirsResult(extra, futureDir, futureSize, futureLag, true)))),
            env.cluster().nodeById(queried.brokerId()));
        final DescribeReplicaLogDirsResult result = env.adminClient().describeReplicaLogDirs(asList(queried));
        final Map<TopicPartitionReplica, KafkaFuture<DescribeReplicaLogDirsResult.ReplicaLogDirInfo>> values = result.values();
        assertEquals(TestUtils.toSet(asList(queried)), values.keySet());
        assertNotNull(values.get(queried));
        // Hoist the completed future's value instead of re-fetching it per assertion.
        final DescribeReplicaLogDirsResult.ReplicaLogDirInfo info = values.get(queried).get();
        assertEquals(currentDir, info.getCurrentReplicaLogDir());
        assertEquals(currentLag, info.getCurrentReplicaOffsetLag());
        assertEquals(futureDir, info.getFutureReplicaLogDir());
        assertEquals(futureLag, info.getFutureReplicaOffsetLag());
    }
}
Use of org.apache.kafka.common.KafkaFuture in the project kafka by apache:
the class KafkaAdminClientTest, method testDescribeLogDirsDeprecated.
@SuppressWarnings("deprecation")
@Test
public void testDescribeLogDirsDeprecated() throws ExecutionException, InterruptedException {
    // Exercises the deprecated values()/all() accessors of DescribeLogDirsResult.
    final Set<Integer> brokers = singleton(0);
    final TopicPartition partition = new TopicPartition("topic", 12);
    final String logDir = "/var/data/kafka";
    final Errors error = Errors.NONE;
    final int offsetLag = 24;
    final long partitionSize = 1234567890;
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponseFrom(
            prepareDescribeLogDirsResponse(error, logDir, partition, partitionSize, offsetLag),
            env.cluster().nodeById(0));
        final DescribeLogDirsResult result = env.adminClient().describeLogDirs(brokers);
        // Per-broker futures from the deprecated values() accessor.
        final Map<Integer, KafkaFuture<Map<String, DescribeLogDirsResponse.LogDirInfo>>> perBroker = result.values();
        assertEquals(brokers, perBroker.keySet());
        assertNotNull(perBroker.get(0));
        assertDescriptionContains(perBroker.get(0).get(), logDir, partition, error, offsetLag, partitionSize);
        // Combined map from the deprecated all() accessor must agree.
        final Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>> combined = result.all().get();
        assertEquals(brokers, combined.keySet());
        assertDescriptionContains(combined.get(0), logDir, partition, error, offsetLag, partitionSize);
    }
}
Use of org.apache.kafka.common.KafkaFuture in the project kafka by apache:
the class KafkaAdminClientTest, method testDescribeLogDirs.
@Test
public void testDescribeLogDirs() throws ExecutionException, InterruptedException {
    final Set<Integer> brokers = singleton(0);
    final String logDir = "/var/data/kafka";
    final TopicPartition partition = new TopicPartition("topic", 12);
    final long partitionSize = 1234567890;
    final long offsetLag = 24;
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // Happy path: one broker reports one log dir containing the partition.
        env.kafkaClient().prepareResponseFrom(
            prepareDescribeLogDirsResponse(Errors.NONE, logDir, partition, partitionSize, offsetLag),
            env.cluster().nodeById(0));
        final DescribeLogDirsResult result = env.adminClient().describeLogDirs(brokers);
        final Map<Integer, KafkaFuture<Map<String, LogDirDescription>>> perBroker = result.descriptions();
        assertEquals(brokers, perBroker.keySet());
        assertNotNull(perBroker.get(0));
        assertDescriptionContains(perBroker.get(0).get(), logDir, partition, partitionSize, offsetLag);
        final Map<Integer, Map<String, LogDirDescription>> combined = result.allDescriptions().get();
        assertEquals(brokers, combined.keySet());
        assertDescriptionContains(combined.get(0), logDir, partition, partitionSize, offsetLag);
        // Version < 3 signals "not authorized" via an empty result with no error code.
        env.kafkaClient().prepareResponseFrom(
            prepareEmptyDescribeLogDirsResponse(Optional.empty()), env.cluster().nodeById(0));
        final DescribeLogDirsResult unauthorizedResult = env.adminClient().describeLogDirs(brokers);
        ExecutionException thrown =
            assertThrows(ExecutionException.class, () -> unauthorizedResult.allDescriptions().get());
        assertTrue(thrown.getCause() instanceof ClusterAuthorizationException);
        // Version >= 3 carries an explicit error code alongside the empty result.
        env.kafkaClient().prepareResponseFrom(
            prepareEmptyDescribeLogDirsResponse(Optional.of(Errors.UNKNOWN_SERVER_ERROR)),
            env.cluster().nodeById(0));
        final DescribeLogDirsResult erroredResult = env.adminClient().describeLogDirs(brokers);
        thrown = assertThrows(ExecutionException.class, () -> erroredResult.allDescriptions().get());
        assertTrue(thrown.getCause() instanceof UnknownServerException);
    }
}
Aggregations