Use of org.apache.kafka.common.config.ConfigResource in the project apache-kafka-on-k8s by banzaicloud.
From the class KafkaAdminClientTest, method testDescribeConfigs:
@Test
public void testDescribeConfigs() throws Exception {
try (AdminClientUnitTestEnv env = mockClientEnv()) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
env.kafkaClient().setNode(env.cluster().controller());
// Stub a successful, empty config payload for broker "0".
DescribeConfigsResponse.Config emptyBrokerConfig = new DescribeConfigsResponse.Config(ApiError.NONE, Collections.<DescribeConfigsResponse.ConfigEntry>emptySet());
org.apache.kafka.common.requests.Resource brokerWireResource = new org.apache.kafka.common.requests.Resource(BROKER, "0");
env.kafkaClient().prepareResponse(new DescribeConfigsResponse(0, Collections.singletonMap(brokerWireResource, emptyBrokerConfig)));
// Describing the broker's configs should complete without throwing.
DescribeConfigsResult result = env.adminClient().describeConfigs(Collections.singleton(new ConfigResource(ConfigResource.Type.BROKER, "0")));
result.all().get();
}
}
Use of org.apache.kafka.common.config.ConfigResource in the project apache-kafka-on-k8s by banzaicloud.
From the class KafkaAdminClient, method alterConfigs:
/**
 * Sends an AlterConfigsRequest for the given resources to the node chosen by
 * {@code nodeProvider} and returns one future per resource.
 *
 * @param configs      the full config map; must contain an entry for every element of {@code resources}
 * @param options      request options (timeout, validate-only flag)
 * @param resources    the subset of {@code configs} keys to send in this single request
 * @param nodeProvider chooses the broker that receives the request
 * @return a future per resource, completed (or failed) when the response arrives
 */
private Map<ConfigResource, KafkaFutureImpl<Void>> alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options, Collection<ConfigResource> resources, NodeProvider nodeProvider) {
final Map<ConfigResource, KafkaFutureImpl<Void>> futures = new HashMap<>();
final Map<Resource, AlterConfigsRequest.Config> requestMap = new HashMap<>(resources.size());
for (ConfigResource resource : resources) {
List<AlterConfigsRequest.ConfigEntry> configEntries = new ArrayList<>();
// Translate each public ConfigEntry into its wire-protocol counterpart.
for (ConfigEntry configEntry : configs.get(resource).entries()) configEntries.add(new AlterConfigsRequest.ConfigEntry(configEntry.name(), configEntry.value()));
requestMap.put(configResourceToResource(resource), new AlterConfigsRequest.Config(configEntries));
futures.put(resource, new KafkaFutureImpl<Void>());
}
final long now = time.milliseconds();
runnable.call(new Call("alterConfigs", calcDeadlineMs(now, options.timeoutMs()), nodeProvider) {
@Override
public AbstractRequest.Builder createRequest(int timeoutMs) {
return new AlterConfigsRequest.Builder(requestMap, options.shouldValidateOnly());
}
@Override
public void handleResponse(AbstractResponse abstractResponse) {
AlterConfigsResponse response = (AlterConfigsResponse) abstractResponse;
for (Map.Entry<ConfigResource, KafkaFutureImpl<Void>> entry : futures.entrySet()) {
KafkaFutureImpl<Void> future = entry.getValue();
ApiError error = response.errors().get(configResourceToResource(entry.getKey()));
if (error == null) {
// The broker omitted this resource from the response; fail the future
// explicitly instead of throwing NullPointerException on error.exception().
future.completeExceptionally(new ApiException("The broker response did not contain a result for config resource " + entry.getKey()));
} else {
ApiException exception = error.exception();
if (exception != null) {
future.completeExceptionally(exception);
} else {
future.complete(null);
}
}
}
}
@Override
void handleFailure(Throwable throwable) {
// Transport-level failure: fail every outstanding future with the same cause.
completeAllExceptionally(futures.values(), throwable);
}
}, now);
return futures;
}
Use of org.apache.kafka.common.config.ConfigResource in the project apache-kafka-on-k8s by banzaicloud.
From the class InternalTopicManagerTest, method shouldCreateRequiredTopics:
@Test
public void shouldCreateRequiredTopics() throws Exception {
// One repartition topic, one unwindowed changelog and one windowed changelog,
// each configured with a single partition.
final InternalTopicConfig repartitionConfig = new RepartitionTopicConfig(topic, Collections.<String, String>emptyMap());
repartitionConfig.setNumberOfPartitions(1);
final InternalTopicConfig unwindowedChangelogConfig = new UnwindowedChangelogTopicConfig(topic2, Collections.<String, String>emptyMap());
unwindowedChangelogConfig.setNumberOfPartitions(1);
final InternalTopicConfig windowedChangelogConfig = new WindowedChangelogTopicConfig(topic3, Collections.<String, String>emptyMap());
windowedChangelogConfig.setNumberOfPartitions(1);
internalTopicManager.makeReady(Collections.singletonMap(topic, repartitionConfig));
internalTopicManager.makeReady(Collections.singletonMap(topic2, unwindowedChangelogConfig));
internalTopicManager.makeReady(Collections.singletonMap(topic3, windowedChangelogConfig));
// All three topics must now exist in the mock cluster.
assertEquals(Utils.mkSet(topic, topic2, topic3), mockAdminClient.listTopics().names().get());
// Each topic has exactly one partition with the expected replica placement.
assertEquals(singlePartitionDescription(topic), mockAdminClient.describeTopics(Collections.singleton(topic)).values().get(topic).get());
assertEquals(singlePartitionDescription(topic2), mockAdminClient.describeTopics(Collections.singleton(topic2)).values().get(topic2).get());
assertEquals(singlePartitionDescription(topic3), mockAdminClient.describeTopics(Collections.singleton(topic3)).values().get(topic3).get());
// Cleanup policies follow the topic kind: delete, compact, and compact+delete.
assertEquals(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE), cleanupPolicy(topic));
assertEquals(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT), cleanupPolicy(topic2));
assertEquals(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT + "," + TopicConfig.CLEANUP_POLICY_DELETE), cleanupPolicy(topic3));
}

// Expected description of a topic with one partition hosted on broker1 with a single replica.
private TopicDescription singlePartitionDescription(final String topicName) {
return new TopicDescription(topicName, false, Collections.singletonList(new TopicPartitionInfo(0, broker1, singleReplica, Collections.<Node>emptyList())));
}

// Reads the cleanup.policy config entry for the given topic from the mock admin client.
private ConfigEntry cleanupPolicy(final String topicName) throws Exception {
final ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
return mockAdminClient.describeConfigs(Collections.singleton(resource)).values().get(resource).get().get(TopicConfig.CLEANUP_POLICY_CONFIG);
}
Use of org.apache.kafka.common.config.ConfigResource in the project debezium by debezium.
From the class KafkaDatabaseHistory, method getKafkaBrokerConfig:
/**
 * Fetches the configuration of one broker in the cluster, to be used as a
 * source of default settings.
 *
 * @param admin the admin client used to query the cluster
 * @return the config of the first broker returned by describeCluster
 * @throws ConnectException if no brokers or no configs are returned
 * @throws Exception if the cluster or config query fails or times out
 */
private Config getKafkaBrokerConfig(AdminClient admin) throws Exception {
final long timeoutMillis = KAFKA_QUERY_TIMEOUT.toMillis();
final Collection<Node> nodes = admin.describeCluster().nodes().get(timeoutMillis, TimeUnit.MILLISECONDS);
if (nodes.isEmpty()) {
throw new ConnectException("No brokers available to obtain default settings");
}
// Any broker will do; query the first one returned.
final String nodeId = nodes.iterator().next().idString();
final ConfigResource brokerResource = new ConfigResource(ConfigResource.Type.BROKER, nodeId);
final Map<ConfigResource, Config> configs = admin.describeConfigs(Collections.singleton(brokerResource)).all().get(timeoutMillis, TimeUnit.MILLISECONDS);
if (configs.isEmpty()) {
throw new ConnectException("No configs have been received");
}
return configs.values().iterator().next();
}
Use of org.apache.kafka.common.config.ConfigResource in the project kafka by apache.
From the class KafkaAdminClientTest, method testDescribeBrokerConfigs:
@Test
public void testDescribeBrokerConfigs() throws Exception {
ConfigResource broker0Resource = new ConfigResource(ConfigResource.Type.BROKER, "0");
ConfigResource broker1Resource = new ConfigResource(ConfigResource.Type.BROKER, "1");
try (AdminClientUnitTestEnv env = mockClientEnv()) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
// Each broker answers for its own resource with an empty, error-free config.
for (ConfigResource brokerResource : asList(broker0Resource, broker1Resource)) {
int nodeId = Integer.parseInt(brokerResource.name());
env.kafkaClient().prepareResponseFrom(new DescribeConfigsResponse(new DescribeConfigsResponseData().setResults(asList(new DescribeConfigsResponseData.DescribeConfigsResult().setResourceName(brokerResource.name()).setResourceType(brokerResource.type().id()).setErrorCode(Errors.NONE.code()).setConfigs(emptyList())))), env.cluster().nodeById(nodeId));
}
// One future per broker resource; both should complete successfully.
Map<ConfigResource, KafkaFuture<Config>> result = env.adminClient().describeConfigs(asList(broker0Resource, broker1Resource)).values();
assertEquals(new HashSet<>(asList(broker0Resource, broker1Resource)), result.keySet());
result.get(broker0Resource).get();
result.get(broker1Resource).get();
}
}
Aggregations