Search in sources:

Example 1 with Admin

Use of org.apache.kafka.clients.admin.Admin in the Apache Kafka project.

From the class ClientAuthenticationFailureTest, method testAdminClientWithInvalidCredentials.

// Verifies that an Admin client built with invalid SASL credentials surfaces a
// SaslAuthenticationException through the returned future rather than failing at create time.
@Test
public void testAdminClientWithInvalidCredentials() {
    Map<String, Object> adminProps = new HashMap<>(saslClientConfigs);
    // NOTE(review): ProducerConfig.BOOTSTRAP_SERVERS_CONFIG resolves to the same
    // "bootstrap.servers" key, but AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG would
    // read more naturally for an admin client — confirm and consider switching.
    adminProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + server.port());
    try (Admin client = Admin.create(adminProps)) {
        KafkaFuture<Map<String, TopicDescription>> describeFuture =
            client.describeTopics(Collections.singleton("test")).allTopicNames();
        TestUtils.assertFutureThrows(describeFuture, SaslAuthenticationException.class);
    }
}
Also used : HashMap(java.util.HashMap) Admin(org.apache.kafka.clients.admin.Admin) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.jupiter.api.Test)

Example 2 with Admin

use of org.apache.kafka.clients.admin.Admin in project kafka by apache.

the class TopicAdmin method endOffsets.

/**
 * Fetch the most recent offset for each of the supplied {@link TopicPartition} objects.
 *
 * @param partitions the topic partitions
 * @return the map of offset for each topic partition, or an empty map if the supplied partitions
 *         are null or empty
 * @throws UnsupportedVersionException if the admin client cannot read end offsets
 * @throws TimeoutException if the offset metadata could not be fetched before the amount of time allocated
 *         by {@code request.timeout.ms} expires, and this call can be retried
 * @throws LeaderNotAvailableException if the leader was not available and this call can be retried
 * @throws RetriableException if a retriable error occurs, or the thread is interrupted while attempting
 *         to perform this operation
 * @throws ConnectException if a non retriable error occurs
 */
public Map<TopicPartition, Long> endOffsets(Set<TopicPartition> partitions) {
    if (partitions == null || partitions.isEmpty()) {
        return Collections.emptyMap();
    }
    // Ask for the latest offset of every partition in a single admin request.
    Map<TopicPartition, OffsetSpec> offsetSpecMap = partitions.stream().collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.latest()));
    ListOffsetsResult resultFuture = admin.listOffsets(offsetSpecMap);
    // Get the individual result for each topic partition so we have better error messages
    Map<TopicPartition, Long> result = new HashMap<>();
    for (TopicPartition partition : partitions) {
        try {
            ListOffsetsResultInfo info = resultFuture.partitionResult(partition).get();
            result.put(partition, info.offset());
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            String topic = partition.topic();
            if (cause instanceof AuthorizationException) {
                String msg = String.format("Not authorized to get the end offsets for topic '%s' on brokers at %s", topic, bootstrapServers());
                throw new ConnectException(msg, e);
            } else if (cause instanceof UnsupportedVersionException) {
                // Should theoretically never happen, because this method is the same as what the consumer uses and therefore
                // should exist in the broker since before the admin client was added
                // (fixed duplicated "get the get the" in the original message)
                String msg = String.format("API to get the end offsets for topic '%s' is unsupported on brokers at %s", topic, bootstrapServers());
                throw new UnsupportedVersionException(msg, e);
            } else if (cause instanceof TimeoutException) {
                String msg = String.format("Timed out while waiting to get end offsets for topic '%s' on brokers at %s", topic, bootstrapServers());
                throw new TimeoutException(msg, e);
            } else if (cause instanceof LeaderNotAvailableException) {
                String msg = String.format("Unable to get end offsets during leader election for topic '%s' on brokers at %s", topic, bootstrapServers());
                throw new LeaderNotAvailableException(msg, e);
            } else if (cause instanceof org.apache.kafka.common.errors.RetriableException) {
                // Propagate retriable Kafka errors unchanged so callers can retry.
                throw (org.apache.kafka.common.errors.RetriableException) cause;
            } else {
                String msg = String.format("Error while getting end offsets for topic '%s' on brokers at %s", topic, bootstrapServers());
                throw new ConnectException(msg, e);
            }
        } catch (InterruptedException e) {
            // Restore the interrupt status so callers further up the stack can observe it.
            // The original Thread.interrupted() CLEARS the flag, which swallows the interrupt.
            Thread.currentThread().interrupt();
            String msg = String.format("Interrupted while attempting to read end offsets for topic '%s' on brokers at %s", partition.topic(), bootstrapServers());
            throw new RetriableException(msg, e);
        }
    }
    return result;
}
Also used : Config(org.apache.kafka.clients.admin.Config) Arrays(java.util.Arrays) DescribeTopicsOptions(org.apache.kafka.clients.admin.DescribeTopicsOptions) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) ConfigEntry(org.apache.kafka.clients.admin.ConfigEntry) ClusterAuthorizationException(org.apache.kafka.common.errors.ClusterAuthorizationException) LeaderNotAvailableException(org.apache.kafka.common.errors.LeaderNotAvailableException) Function(java.util.function.Function) HashSet(java.util.HashSet) ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) ConfigResource(org.apache.kafka.common.config.ConfigResource) Duration(java.time.Duration) Map(java.util.Map) Admin(org.apache.kafka.clients.admin.Admin) TopicDescription(org.apache.kafka.clients.admin.TopicDescription) TopicConfig(org.apache.kafka.common.config.TopicConfig) Utils(org.apache.kafka.common.utils.Utils) TopicPartition(org.apache.kafka.common.TopicPartition) InvalidConfigurationException(org.apache.kafka.common.errors.InvalidConfigurationException) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Logger(org.slf4j.Logger) DescribeConfigsOptions(org.apache.kafka.clients.admin.DescribeConfigsOptions) AuthorizationException(org.apache.kafka.common.errors.AuthorizationException) AdminClientConfig(org.apache.kafka.clients.admin.AdminClientConfig) Collection(java.util.Collection) NewTopic(org.apache.kafka.clients.admin.NewTopic) Set(java.util.Set) KafkaFuture(org.apache.kafka.common.KafkaFuture) ConfigException(org.apache.kafka.common.config.ConfigException) Collectors(java.util.stream.Collectors) OffsetSpec(org.apache.kafka.clients.admin.OffsetSpec) Objects(java.util.Objects) ExecutionException(java.util.concurrent.ExecutionException) ListOffsetsResultInfo(org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) RetriableException(org.apache.kafka.connect.errors.RetriableException) 
TopicExistsException(org.apache.kafka.common.errors.TopicExistsException) ConnectException(org.apache.kafka.connect.errors.ConnectException) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException) Optional(java.util.Optional) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) CreateTopicsOptions(org.apache.kafka.clients.admin.CreateTopicsOptions) Collections(java.util.Collections) ListOffsetsResultInfo(org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) HashMap(java.util.HashMap) ClusterAuthorizationException(org.apache.kafka.common.errors.ClusterAuthorizationException) AuthorizationException(org.apache.kafka.common.errors.AuthorizationException) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) LeaderNotAvailableException(org.apache.kafka.common.errors.LeaderNotAvailableException) ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetSpec(org.apache.kafka.clients.admin.OffsetSpec) ExecutionException(java.util.concurrent.ExecutionException) ConnectException(org.apache.kafka.connect.errors.ConnectException) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException) TimeoutException(org.apache.kafka.common.errors.TimeoutException) RetriableException(org.apache.kafka.connect.errors.RetriableException)

Example 3 with Admin

use of org.apache.kafka.clients.admin.Admin in project kafka by apache.

the class TransactionsCommand method execute.

/**
 * Parse the command-line arguments, resolve the requested subcommand, and run it
 * against an {@link Admin} client obtained from {@code adminSupplier}.
 *
 * @param args          raw command-line arguments
 * @param adminSupplier factory that builds an Admin client from the parsed namespace
 * @param out           stream for command output
 * @param time          clock used by the individual commands
 * @throws Exception if the selected command fails
 */
static void execute(String[] args, Function<Namespace, Admin> adminSupplier, PrintStream out, Time time) throws Exception {
    List<TransactionsCommand> commands = Arrays.asList(new ListTransactionsCommand(time), new DescribeTransactionsCommand(time), new DescribeProducersCommand(time), new AbortTransactionCommand(time), new FindHangingTransactionsCommand(time));
    ArgumentParser parser = buildBaseParser();
    Subparsers subparsers = parser.addSubparsers().dest("command").title("commands").metavar("COMMAND");
    commands.forEach(command -> command.addSubparser(subparsers));
    final Namespace ns;
    try {
        ns = parser.parseArgs(args);
    } catch (ArgumentParserException e) {
        parser.handleError(e);
        Exit.exit(1);
        return;
    }
    // Resolve the subcommand BEFORE creating the admin client so an unknown
    // command does not allocate (and leak) client resources.
    String commandName = ns.getString("command");
    Optional<TransactionsCommand> commandOpt = commands.stream().filter(cmd -> cmd.name().equals(commandName)).findFirst();
    if (!commandOpt.isPresent()) {
        printErrorAndExit("Unexpected command " + commandName);
    }
    TransactionsCommand command = commandOpt.get();
    // Admin is AutoCloseable; the original leaked it. Close it before exiting so
    // network threads and sockets are released cleanly.
    try (Admin admin = adminSupplier.apply(ns)) {
        command.execute(admin, ns, out);
    }
    Exit.exit(0);
}
Also used : DescribeProducersOptions(org.apache.kafka.clients.admin.DescribeProducersOptions) Arrays(java.util.Arrays) ProducerState(org.apache.kafka.clients.admin.ProducerState) Exit(org.apache.kafka.common.utils.Exit) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) Function(java.util.function.Function) Arguments.store(net.sourceforge.argparse4j.impl.Arguments.store) ArrayList(java.util.ArrayList) Collections.singletonList(java.util.Collections.singletonList) HashSet(java.util.HashSet) OptionalLong(java.util.OptionalLong) Collections.singleton(java.util.Collections.singleton) ArgumentParser(net.sourceforge.argparse4j.inf.ArgumentParser) ArgumentParserException(net.sourceforge.argparse4j.inf.ArgumentParserException) Namespace(net.sourceforge.argparse4j.inf.Namespace) Map(java.util.Map) ListTopicsOptions(org.apache.kafka.clients.admin.ListTopicsOptions) Admin(org.apache.kafka.clients.admin.Admin) TransactionListing(org.apache.kafka.clients.admin.TransactionListing) TopicDescription(org.apache.kafka.clients.admin.TopicDescription) AbortTransactionSpec(org.apache.kafka.clients.admin.AbortTransactionSpec) TransactionDescription(org.apache.kafka.clients.admin.TransactionDescription) DescribeProducersResult(org.apache.kafka.clients.admin.DescribeProducersResult) Utils(org.apache.kafka.common.utils.Utils) PrintStream(java.io.PrintStream) TopicPartition(org.apache.kafka.common.TopicPartition) TopicPartitionInfo(org.apache.kafka.common.TopicPartitionInfo) Subparsers(net.sourceforge.argparse4j.inf.Subparsers) Logger(org.slf4j.Logger) Properties(java.util.Properties) DescribeTransactionsResult(org.apache.kafka.clients.admin.DescribeTransactionsResult) Time(org.apache.kafka.common.utils.Time) ArgumentParsers(net.sourceforge.argparse4j.ArgumentParsers) AdminClientConfig(org.apache.kafka.clients.admin.AdminClientConfig) Collection(java.util.Collection) Subparser(net.sourceforge.argparse4j.inf.Subparser) Set(java.util.Set) IOException(java.io.IOException) 
Collectors(java.util.stream.Collectors) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) ListTransactionsOptions(org.apache.kafka.clients.admin.ListTransactionsOptions) TransactionalIdNotFoundException(org.apache.kafka.common.errors.TransactionalIdNotFoundException) Optional(java.util.Optional) ArgumentGroup(net.sourceforge.argparse4j.inf.ArgumentGroup) Collections(java.util.Collections) Admin(org.apache.kafka.clients.admin.Admin) ArgumentParser(net.sourceforge.argparse4j.inf.ArgumentParser) Namespace(net.sourceforge.argparse4j.inf.Namespace) Subparsers(net.sourceforge.argparse4j.inf.Subparsers) ArgumentParserException(net.sourceforge.argparse4j.inf.ArgumentParserException)

Example 4 with Admin

use of org.apache.kafka.clients.admin.Admin in project kafka by apache.

the class EmbeddedKafkaCluster method createTopic.

/**
 * Create a Kafka topic with the given parameters, blocking until the broker
 * acknowledges the creation.
 *
 * @param topic             The name of the topic.
 * @param partitions        The number of partitions for this topic.
 * @param replication       The replication factor for (partitions of) this topic.
 * @param topicConfig       Additional topic-level configuration settings.
 * @param adminClientConfig Additional admin client configuration settings.
 */
public void createTopic(String topic, int partitions, int replication, Map<String, String> topicConfig, Properties adminClientConfig) {
    // Fail fast: a replication factor larger than the broker count can never be satisfied.
    if (replication > brokers.length) {
        throw new InvalidReplicationFactorException("Insufficient brokers (" + brokers.length + ") for desired replication (" + replication + ")");
    }
    log.info("Creating topic { name: {}, partitions: {}, replication: {}, config: {} }", topic, partitions, replication, topicConfig);
    final NewTopic topicSpec = new NewTopic(topic, partitions, (short) replication);
    topicSpec.configs(topicConfig);
    // Wait synchronously for the creation to complete, then release the admin client.
    try (final Admin admin = createAdminClient(adminClientConfig)) {
        admin.createTopics(Collections.singletonList(topicSpec)).all().get();
    } catch (final InterruptedException | ExecutionException e) {
        throw new RuntimeException(e);
    }
}
Also used : InvalidReplicationFactorException(org.apache.kafka.common.errors.InvalidReplicationFactorException) NewTopic(org.apache.kafka.clients.admin.NewTopic) Admin(org.apache.kafka.clients.admin.Admin) ExecutionException(java.util.concurrent.ExecutionException)

Example 5 with Admin

use of org.apache.kafka.clients.admin.Admin in project kafka by apache.

the class EosIntegrationTest method shouldCommitCorrectOffsetIfInputTopicIsTransactional.

// After a simple copy run with EOS enabled on a transactional input topic, the group's
// committed offset must match both the consumer position and the topic's end offset.
@Test
public void shouldCommitCorrectOffsetIfInputTopicIsTransactional() throws Exception {
    runSimpleCopyTest(1, SINGLE_PARTITION_INPUT_TOPIC, null, SINGLE_PARTITION_OUTPUT_TOPIC, true, eosConfig);
    final Map<String, Object> adminConfig =
        mkMap(mkEntry(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()));
    final Map<String, Object> consumerConfig = mkMap(
        mkEntry(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()),
        mkEntry(ConsumerConfig.GROUP_ID_CONFIG, applicationId),
        mkEntry(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class),
        mkEntry(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class));
    try (final Admin adminClient = Admin.create(adminConfig);
        final Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerConfig)) {
        waitForEmptyConsumerGroup(adminClient, applicationId, 5 * MAX_POLL_INTERVAL_MS);
        final TopicPartition inputPartition = new TopicPartition(SINGLE_PARTITION_INPUT_TOPIC, 0);
        final Collection<TopicPartition> assignment = Collections.singleton(inputPartition);
        final long committedOffset = adminClient.listConsumerGroupOffsets(applicationId)
            .partitionsToOffsetAndMetadata().get().get(inputPartition).offset();
        consumer.assign(assignment);
        final long consumerPosition = consumer.position(inputPartition);
        final long endOffset = consumer.endOffsets(assignment).get(inputPartition);
        assertThat(committedOffset, equalTo(consumerPosition));
        assertThat(committedOffset, equalTo(endOffset));
    }
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) Admin(org.apache.kafka.clients.admin.Admin) IntegrationTest(org.apache.kafka.test.IntegrationTest) Test(org.junit.Test)

Aggregations

Admin (org.apache.kafka.clients.admin.Admin)27 ExecutionException (java.util.concurrent.ExecutionException)12 Map (java.util.Map)11 Properties (java.util.Properties)9 HashMap (java.util.HashMap)8 TopicPartition (org.apache.kafka.common.TopicPartition)8 NewTopic (org.apache.kafka.clients.admin.NewTopic)7 AdminClientConfig (org.apache.kafka.clients.admin.AdminClientConfig)6 Test (org.junit.Test)6 Collection (java.util.Collection)5 ConfigResource (org.apache.kafka.common.config.ConfigResource)5 Arrays (java.util.Arrays)4 Collections (java.util.Collections)4 Optional (java.util.Optional)4 Set (java.util.Set)4 Config (org.apache.kafka.clients.admin.Config)4 ListOffsetsResult (org.apache.kafka.clients.admin.ListOffsetsResult)4 MirrorMakerConfig (org.apache.kafka.connect.mirror.MirrorMakerConfig)4 Logger (org.slf4j.Logger)4 IOException (java.io.IOException)3