
Example 1 with ConfigResource

Use of org.apache.kafka.common.config.ConfigResource in project ksql by confluentinc.

Class KafkaTopicClientImpl, method isTopicDeleteEnabled:

private static boolean isTopicDeleteEnabled(final AdminClient adminClient) {
    try {
        DescribeClusterResult describeClusterResult = adminClient.describeCluster();
        Collection<Node> nodes = describeClusterResult.nodes().get();
        if (nodes.isEmpty()) {
            log.warn("No available broker found to fetch config info.");
            throw new KsqlException("Could not fetch broker information. KSQL cannot initialize");
        }
        // Ask a single broker for its configuration and check whether topic deletion is enabled.
        ConfigResource resource = new ConfigResource(
            ConfigResource.Type.BROKER,
            String.valueOf(nodes.iterator().next().id()));
        Map<ConfigResource, Config> config = executeWithRetries(
            () -> adminClient.describeConfigs(Collections.singleton(resource)).all());
        return config.get(resource).entries().stream()
            .anyMatch(configEntry -> configEntry.name().equalsIgnoreCase("delete.topic.enable")
                && configEntry.value().equalsIgnoreCase("true"));
    } catch (final Exception e) {
        log.error("Failed to initialize TopicClient: {}", e.getMessage());
        throw new KsqlException("Could not fetch broker information. KSQL cannot initialize", e);
    }
}
Also used : DescribeClusterResult(org.apache.kafka.clients.admin.DescribeClusterResult) Config(org.apache.kafka.clients.admin.Config) TopicConfig(org.apache.kafka.common.config.TopicConfig) Node(org.apache.kafka.common.Node) ConfigResource(org.apache.kafka.common.config.ConfigResource) KafkaTopicException(io.confluent.ksql.exception.KafkaTopicException) RetriableException(org.apache.kafka.common.errors.RetriableException) KafkaResponseGetFailedException(io.confluent.ksql.exception.KafkaResponseGetFailedException) ExecutionException(java.util.concurrent.ExecutionException) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException)
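
The same check can be run as a standalone probe against a cluster. A minimal sketch, assuming a local bootstrap address ("localhost:9092") and using whichever broker describeCluster returns first; none of these specifics come from the KSQL source:

import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.config.ConfigResource;

public class TopicDeleteCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Assumed bootstrap address; adjust for your environment.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (AdminClient adminClient = AdminClient.create(props)) {
            // Pick any live broker and ask it for its own configuration.
            Node broker = adminClient.describeCluster().nodes().get().iterator().next();
            ConfigResource resource =
                new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(broker.id()));
            Map<ConfigResource, Config> configs =
                adminClient.describeConfigs(Collections.singleton(resource)).all().get();
            boolean deleteEnabled = configs.get(resource).entries().stream()
                .anyMatch(e -> e.name().equalsIgnoreCase("delete.topic.enable")
                    && e.value().equalsIgnoreCase("true"));
            System.out.println("delete.topic.enable = " + deleteEnabled);
        }
    }
}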

Example 2 with ConfigResource

Use of org.apache.kafka.common.config.ConfigResource in project apache-kafka-on-k8s by banzaicloud.

Class MockAdminClient, method describeConfigs:

@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
    Map<ConfigResource, KafkaFuture<Config>> configDescriptions = new HashMap<>();
    for (ConfigResource resource : resources) {
        if (resource.type() == ConfigResource.Type.TOPIC) {
            Map<String, String> configs = allTopics.get(resource.name()).configs;
            List<ConfigEntry> configEntries = new ArrayList<>();
            for (Map.Entry<String, String> entry : configs.entrySet()) {
                configEntries.add(new ConfigEntry(entry.getKey(), entry.getValue()));
            }
            KafkaFutureImpl<Config> future = new KafkaFutureImpl<>();
            future.complete(new Config(configEntries));
            configDescriptions.put(resource, future);
        } else {
            throw new UnsupportedOperationException("Not implemented yet");
        }
    }
    return new DescribeConfigsResult(configDescriptions);
}
Also used : KafkaFuture(org.apache.kafka.common.KafkaFuture) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) ConfigResource(org.apache.kafka.common.config.ConfigResource) Map(java.util.Map)
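
The heart of the mock is converting a plain Map<String, String> of topic configs into the admin Config type wrapped in an already-completed future. A small standalone sketch of that conversion; the class and method names here are illustrative, not from the Kafka source:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.internals.KafkaFutureImpl;

public class ConfigFutures {

    // Turn raw key/value topic configs into a completed future of Config,
    // mirroring what MockAdminClient does for TOPIC resources.
    static KafkaFuture<Config> completedConfig(Map<String, String> rawConfigs) {
        List<ConfigEntry> entries = new ArrayList<>();
        for (Map.Entry<String, String> entry : rawConfigs.entrySet()) {
            entries.add(new ConfigEntry(entry.getKey(), entry.getValue()));
        }
        KafkaFutureImpl<Config> future = new KafkaFutureImpl<>();
        future.complete(new Config(entries));
        return future;
    }
}

A test can then call completedConfig(rawConfigs).get() immediately, with no broker involved.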

Example 3 with ConfigResource

Use of org.apache.kafka.common.config.ConfigResource in project apache-kafka-on-k8s by banzaicloud.

Class KafkaAdminClient, method describeConfigs:

@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options) {
    final Map<ConfigResource, KafkaFutureImpl<Config>> unifiedRequestFutures = new HashMap<>();
    final Map<ConfigResource, KafkaFutureImpl<Config>> brokerFutures = new HashMap<>(configResources.size());
    // The BROKER resources which we want to describe.  We must make a separate DescribeConfigs
    // request for every BROKER resource we want to describe.
    final Collection<Resource> brokerResources = new ArrayList<>();
    // The non-BROKER resources which we want to describe.  These resources can be described by a
    // single, unified DescribeConfigs request.
    final Collection<Resource> unifiedRequestResources = new ArrayList<>(configResources.size());
    for (ConfigResource resource : configResources) {
        if (resource.type() == ConfigResource.Type.BROKER && !resource.isDefault()) {
            brokerFutures.put(resource, new KafkaFutureImpl<Config>());
            brokerResources.add(configResourceToResource(resource));
        } else {
            unifiedRequestFutures.put(resource, new KafkaFutureImpl<Config>());
            unifiedRequestResources.add(configResourceToResource(resource));
        }
    }
    final long now = time.milliseconds();
    if (!unifiedRequestResources.isEmpty()) {
        runnable.call(new Call("describeConfigs", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) {

            @Override
            AbstractRequest.Builder createRequest(int timeoutMs) {
                return new DescribeConfigsRequest.Builder(unifiedRequestResources).includeSynonyms(options.includeSynonyms());
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                for (Map.Entry<ConfigResource, KafkaFutureImpl<Config>> entry : unifiedRequestFutures.entrySet()) {
                    ConfigResource configResource = entry.getKey();
                    KafkaFutureImpl<Config> future = entry.getValue();
                    DescribeConfigsResponse.Config config = response.config(configResourceToResource(configResource));
                    if (config == null) {
                        future.completeExceptionally(new UnknownServerException("Malformed broker response: missing config for " + configResource));
                        continue;
                    }
                    if (config.error().isFailure()) {
                        future.completeExceptionally(config.error().exception());
                        continue;
                    }
                    List<ConfigEntry> configEntries = new ArrayList<>();
                    for (DescribeConfigsResponse.ConfigEntry configEntry : config.entries()) {
                        configEntries.add(new ConfigEntry(
                            configEntry.name(),
                            configEntry.value(),
                            configSource(configEntry.source()),
                            configEntry.isSensitive(),
                            configEntry.isReadOnly(),
                            configSynonyms(configEntry)));
                    }
                    future.complete(new Config(configEntries));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                completeAllExceptionally(unifiedRequestFutures.values(), throwable);
            }
        }, now);
    }
    for (Map.Entry<ConfigResource, KafkaFutureImpl<Config>> entry : brokerFutures.entrySet()) {
        final KafkaFutureImpl<Config> brokerFuture = entry.getValue();
        final Resource resource = configResourceToResource(entry.getKey());
        final int nodeId = Integer.parseInt(resource.name());
        runnable.call(new Call("describeBrokerConfigs", calcDeadlineMs(now, options.timeoutMs()), new ConstantNodeIdProvider(nodeId)) {

            @Override
            AbstractRequest.Builder createRequest(int timeoutMs) {
                return new DescribeConfigsRequest.Builder(Collections.singleton(resource)).includeSynonyms(options.includeSynonyms());
            }

            @Override
            void handleResponse(AbstractResponse abstractResponse) {
                DescribeConfigsResponse response = (DescribeConfigsResponse) abstractResponse;
                DescribeConfigsResponse.Config config = response.configs().get(resource);
                if (config == null) {
                    brokerFuture.completeExceptionally(new UnknownServerException("Malformed broker response: missing config for " + resource));
                    return;
                }
                if (config.error().isFailure())
                    brokerFuture.completeExceptionally(config.error().exception());
                else {
                    List<ConfigEntry> configEntries = new ArrayList<>();
                    for (DescribeConfigsResponse.ConfigEntry configEntry : config.entries()) {
                        configEntries.add(new ConfigEntry(
                            configEntry.name(),
                            configEntry.value(),
                            configSource(configEntry.source()),
                            configEntry.isSensitive(),
                            configEntry.isReadOnly(),
                            configSynonyms(configEntry)));
                    }
                    brokerFuture.complete(new Config(configEntries));
                }
            }

            @Override
            void handleFailure(Throwable throwable) {
                brokerFuture.completeExceptionally(throwable);
            }
        }, now);
    }
    final Map<ConfigResource, KafkaFuture<Config>> allFutures = new HashMap<>();
    allFutures.putAll(brokerFutures);
    allFutures.putAll(unifiedRequestFutures);
    return new DescribeConfigsResult(allFutures);
}
Also used : HashMap(java.util.HashMap) MetricConfig(org.apache.kafka.common.metrics.MetricConfig) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) ArrayList(java.util.ArrayList) List(java.util.List) LinkedList(java.util.LinkedList) KafkaFuture(org.apache.kafka.common.KafkaFuture) AbstractResponse(org.apache.kafka.common.requests.AbstractResponse) ConfigResource(org.apache.kafka.common.config.ConfigResource) Resource(org.apache.kafka.common.requests.Resource) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) UnknownServerException(org.apache.kafka.common.errors.UnknownServerException) Map(java.util.Map) DescribeConfigsResponse(org.apache.kafka.common.requests.DescribeConfigsResponse)
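
From the caller's perspective the BROKER/non-BROKER split is invisible: a single describeConfigs call can mix a TOPIC resource and a specific BROKER resource, and the admin client routes each part to an appropriate node as shown above. A minimal caller-side sketch, assuming a local bootstrap address, a topic named "my-topic", and broker id 0 (all assumptions):

import java.util.Arrays;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.DescribeConfigsOptions;
import org.apache.kafka.common.config.ConfigResource;

public class MixedDescribe {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic");
            ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "0");
            // The TOPIC resource is sent as part of the unified request; the BROKER
            // resource is routed to broker 0 by the admin client internally.
            Map<ConfigResource, Config> result = admin
                .describeConfigs(Arrays.asList(topic, broker),
                    new DescribeConfigsOptions().includeSynonyms(true))
                .all()
                .get();
            result.forEach((resource, config) ->
                System.out.println(resource + " -> " + config.entries().size() + " entries"));
        }
    }
}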

Example 4 with ConfigResource

Use of org.apache.kafka.common.config.ConfigResource in project apache-kafka-on-k8s by banzaicloud.

Class KafkaAdminClient, method alterConfigs:

@Override
public AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options) {
    final Map<ConfigResource, KafkaFutureImpl<Void>> allFutures = new HashMap<>();
    // We must make a separate AlterConfigs request for every BROKER resource we want to alter
    // and send the request to that specific broker. Other resources are grouped together into
    // a single request that may be sent to any broker.
    final Collection<ConfigResource> unifiedRequestResources = new ArrayList<>();
    for (ConfigResource resource : configs.keySet()) {
        if (resource.type() == ConfigResource.Type.BROKER && !resource.isDefault()) {
            NodeProvider nodeProvider = new ConstantNodeIdProvider(Integer.parseInt(resource.name()));
            allFutures.putAll(alterConfigs(configs, options, Collections.singleton(resource), nodeProvider));
        } else
            unifiedRequestResources.add(resource);
    }
    if (!unifiedRequestResources.isEmpty())
        allFutures.putAll(alterConfigs(configs, options, unifiedRequestResources, new LeastLoadedNodeProvider()));
    return new AlterConfigsResult(new HashMap<ConfigResource, KafkaFuture<Void>>(allFutures));
}
Also used : KafkaFuture(org.apache.kafka.common.KafkaFuture) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) ConfigResource(org.apache.kafka.common.config.ConfigResource)
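
Caller-side usage is symmetric with describeConfigs: a full Config is supplied per resource. A hedged sketch that sets a topic's retention; the topic name and retention value are assumptions, alterConfigs replaces the entire config of the resource, and newer client versions favor incrementalAlterConfigs, but alterConfigs is what the code above implements:

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigsResult;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.config.TopicConfig;

public class AlterTopicRetention {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic");
            // alterConfigs sends exactly this set of entries for the resource,
            // so real code would usually describe the existing config first.
            Config newConfig = new Config(Collections.singleton(
                new ConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "604800000")));
            AlterConfigsResult result = admin.alterConfigs(Collections.singletonMap(topic, newConfig));
            result.all().get();
        }
    }
}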

Example 5 with ConfigResource

Use of org.apache.kafka.common.config.ConfigResource in project samza by apache.

Class TestKafkaSystemAdminJava, method getTopicConfigFromKafkaBroker:

private static Map<String, String> getTopicConfigFromKafkaBroker(String topicName) throws Exception {
    List<ConfigResource> configResourceList = ImmutableList.of(new ConfigResource(ConfigResource.Type.TOPIC, topicName));
    Map<ConfigResource, org.apache.kafka.clients.admin.Config> configResourceConfigMap = adminClient().describeConfigs(configResourceList).all().get();
    Map<String, String> kafkaTopicConfig = new HashMap<>();
    configResourceConfigMap.values().forEach(configEntry -> {
        configEntry.entries().forEach(config -> {
            kafkaTopicConfig.put(config.name(), config.value());
        });
    });
    return kafkaTopicConfig;
}
Also used : HashMap(java.util.HashMap) JobConfig(org.apache.samza.config.JobConfig) ApplicationConfig(org.apache.samza.config.ApplicationConfig) MapConfig(org.apache.samza.config.MapConfig) TopicConfig(org.apache.kafka.common.config.TopicConfig) Config(org.apache.samza.config.Config) ConfigResource(org.apache.kafka.common.config.ConfigResource)
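
Outside the Samza test harness the same lookup works against any cluster once an AdminClient is created locally. A sketch, assuming a local bootstrap address and a topic named "my-topic":

import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.config.TopicConfig;

public class TopicConfigLookup {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic");
            Map<ConfigResource, Config> configs =
                admin.describeConfigs(Collections.singleton(resource)).all().get();
            // Config.get(name) returns the ConfigEntry for a single key.
            String retention = configs.get(resource).get(TopicConfig.RETENTION_MS_CONFIG).value();
            System.out.println("retention.ms = " + retention);
        }
    }
}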

Aggregations

ConfigResource (org.apache.kafka.common.config.ConfigResource) 64
HashMap (java.util.HashMap) 32
Config (org.apache.kafka.clients.admin.Config) 23
Map (java.util.Map) 22
KafkaFuture (org.apache.kafka.common.KafkaFuture) 20
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl) 20
TopicConfig (org.apache.kafka.common.config.TopicConfig) 18
ArrayList (java.util.ArrayList) 17
ConfigEntry (org.apache.kafka.clients.admin.ConfigEntry) 16
Test (org.junit.Test) 15
Collection (java.util.Collection) 14
TopicDescription (org.apache.kafka.clients.admin.TopicDescription) 13
ConsumerConfig (org.apache.kafka.clients.consumer.ConsumerConfig) 13
Node (org.apache.kafka.common.Node) 13
AdminClient (org.apache.kafka.clients.admin.AdminClient) 12
ProducerConfig (org.apache.kafka.clients.producer.ProducerConfig) 12
Collections (java.util.Collections) 11
Collectors (java.util.stream.Collectors) 11
TopicMetadataAndConfig (org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig) 11
StreamsConfig (org.apache.kafka.streams.StreamsConfig) 11