
Example 6 with AdminClient

Use of org.apache.kafka.clients.admin.AdminClient in project apache-kafka-on-k8s by banzaicloud.

From the class ClientCompatibilityTest, method testAdminClient:

void testAdminClient() throws Throwable {
    Properties adminProps = new Properties();
    adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, testConfig.bootstrapServer);
    try (final AdminClient client = AdminClient.create(adminProps)) {
        // Poll until the expected number of cluster nodes has registered.
        while (true) {
            Collection<Node> nodes = client.describeCluster().nodes().get();
            if (nodes.size() == testConfig.numClusterNodes) {
                break;
            } else if (nodes.size() > testConfig.numClusterNodes) {
                throw new KafkaException("Expected to see " + testConfig.numClusterNodes + " nodes, but saw " + nodes.size());
            }
            log.info("Saw only {} cluster nodes.  Waiting to see {}.", nodes.size(), testConfig.numClusterNodes);
            Thread.sleep(1);
        }
        tryFeature("createTopics", testConfig.createTopicsSupported, new Invoker() {

            @Override
            public void invoke() throws Throwable {
                try {
                    client.createTopics(Collections.singleton(new NewTopic("newtopic", 1, (short) 1))).all().get();
                } catch (ExecutionException e) {
                    throw e.getCause();
                }
            }
        }, new ResultTester() {

            @Override
            public void test() throws Throwable {
                // Retry until metadata for the new topic has propagated.
                while (true) {
                    try {
                        client.describeTopics(Collections.singleton("newtopic")).all().get();
                        break;
                    } catch (ExecutionException e) {
                        if (e.getCause() instanceof UnknownTopicOrPartitionException)
                            continue;
                        throw e;
                    }
                }
            }
        });
        // Poll listTopics until the new topic appears (when topic creation is supported).
        while (true) {
            Collection<TopicListing> listings = client.listTopics().listings().get();
            if (!testConfig.createTopicsSupported)
                break;
            boolean foundNewTopic = false;
            for (TopicListing listing : listings) {
                if (listing.name().equals("newtopic")) {
                    if (listing.isInternal())
                        throw new KafkaException("Did not expect newtopic to be an internal topic.");
                    foundNewTopic = true;
                }
            }
            if (foundNewTopic)
                break;
            Thread.sleep(1);
            log.info("Did not see newtopic.  Retrying listTopics...");
        }
        tryFeature("describeAclsSupported", testConfig.describeAclsSupported, new Invoker() {

            @Override
            public void invoke() throws Throwable {
                try {
                    client.describeAcls(AclBindingFilter.ANY).values().get();
                } catch (ExecutionException e) {
                    if (e.getCause() instanceof SecurityDisabledException)
                        return;
                    throw e.getCause();
                }
            }
        });
    }
}
Also used: Node (org.apache.kafka.common.Node), UnknownTopicOrPartitionException (org.apache.kafka.common.errors.UnknownTopicOrPartitionException), Properties (java.util.Properties), TopicListing (org.apache.kafka.clients.admin.TopicListing), KafkaException (org.apache.kafka.common.KafkaException), NewTopic (org.apache.kafka.clients.admin.NewTopic), ExecutionException (java.util.concurrent.ExecutionException), SecurityDisabledException (org.apache.kafka.common.errors.SecurityDisabledException), AdminClient (org.apache.kafka.clients.admin.AdminClient)
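
The same createTopics/describeTopics sequence works outside the compatibility-test harness. A minimal standalone sketch, assuming a broker at localhost:9092 (the address and class name are assumptions, not part of the test above):

import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.errors.TopicExistsException;

public class CreateTopicSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Assumed broker address; replace with your cluster's bootstrap servers.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (AdminClient client = AdminClient.create(props)) {
            try {
                // One partition, replication factor 1, as in the test above.
                client.createTopics(Collections.singleton(new NewTopic("newtopic", 1, (short) 1))).all().get();
            } catch (ExecutionException e) {
                // Tolerate re-runs: ignore "topic already exists", rethrow anything else.
                if (!(e.getCause() instanceof TopicExistsException))
                    throw e;
            }
            // A freshly created topic may take a moment to propagate; the test above retries.
            System.out.println(client.describeTopics(Collections.singleton("newtopic")).all().get());
        }
    }
}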

Example 7 with AdminClient

Use of org.apache.kafka.clients.admin.AdminClient in project eventapis by kloiasoft.

From the class Eventapis, method main:

public static void main(String[] args) throws ExecutionException, InterruptedException {
    Map<String, Object> props = new HashMap<>();
    // list of host:port pairs used for establishing the initial connections
    // to the Kafka cluster
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka-local:9092");
    // maximum time the client blocks before throwing a TimeoutException
    props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 50000);
    // close the client when done so its threads and sockets are released
    try (AdminClient adminClient = AdminClient.create(props)) {
        adminClient.describeCluster();
        Collection<TopicListing> topicListings = adminClient.listTopics().listings().get();
        System.out.println(topicListings);
    }
}
Also used: HashMap (java.util.HashMap), TopicListing (org.apache.kafka.clients.admin.TopicListing), Map (java.util.Map), AdminClient (org.apache.kafka.clients.admin.AdminClient)
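
listTopics() skips internal topics by default. If you also want internal ones such as __consumer_offsets, ListTopicsOptions has a listInternal flag; a minimal sketch, reusing the kafka-local:9092 address from the example above (the class name is an assumption):

import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListTopicsOptions;

public class ListAllTopicsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "kafka-local:9092");
        try (AdminClient client = AdminClient.create(props)) {
            // listInternal(true) includes internal topics such as __consumer_offsets.
            System.out.println(client.listTopics(new ListTopicsOptions().listInternal(true)).names().get());
        }
    }
}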

Example 8 with AdminClient

Use of org.apache.kafka.clients.admin.AdminClient in project flink by apache.

From the class KafkaEnumeratorTest, method testKafkaClientProperties:

@Test
public void testKafkaClientProperties() throws Exception {
    Properties properties = new Properties();
    String clientIdPrefix = "test-prefix";
    Integer defaultTimeoutMs = 99999;
    properties.setProperty(KafkaSourceOptions.CLIENT_ID_PREFIX.key(), clientIdPrefix);
    properties.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(defaultTimeoutMs));
    try (MockSplitEnumeratorContext<KafkaPartitionSplit> context = new MockSplitEnumeratorContext<>(NUM_SUBTASKS);
        KafkaSourceEnumerator enumerator = createEnumerator(context, ENABLE_PERIODIC_PARTITION_DISCOVERY, PRE_EXISTING_TOPICS, Collections.emptySet(), properties)) {
        enumerator.start();
        AdminClient adminClient = (AdminClient) Whitebox.getInternalState(enumerator, "adminClient");
        assertNotNull(adminClient);
        String clientId = (String) Whitebox.getInternalState(adminClient, "clientId");
        assertNotNull(clientId);
        assertTrue(clientId.startsWith(clientIdPrefix));
        assertEquals(defaultTimeoutMs, Whitebox.getInternalState(adminClient, "defaultApiTimeoutMs"));
    }
}
Also used: KafkaPartitionSplit (org.apache.flink.connector.kafka.source.split.KafkaPartitionSplit), MockSplitEnumeratorContext (org.apache.flink.api.connector.source.mocks.MockSplitEnumeratorContext), Properties (java.util.Properties), AdminClient (org.apache.kafka.clients.admin.AdminClient), Test (org.junit.Test)
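
The test reaches into the enumerator with Whitebox to verify that user-supplied properties (the client.id prefix and request.timeout.ms) make it into the internal AdminClient. The pattern under test can be sketched as below; the helper and the "-admin" suffix are illustrative assumptions, not Flink's actual code:

import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class AdminFromUserProps {
    // Illustrative helper (an assumption, not Flink's implementation): copy the
    // user's properties, which must include bootstrap.servers, and derive a
    // prefixed client.id for the admin client.
    static AdminClient createAdmin(Properties userProps, String clientIdPrefix) {
        Properties adminProps = new Properties();
        adminProps.putAll(userProps);
        adminProps.setProperty(AdminClientConfig.CLIENT_ID_CONFIG, clientIdPrefix + "-admin");
        return AdminClient.create(adminProps);
    }
}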

Example 9 with AdminClient

Use of org.apache.kafka.clients.admin.AdminClient in project flink by apache.

From the class KafkaSourceReaderTest, method testCommitOffsetsWithoutAliveFetchers:

@Test
void testCommitOffsetsWithoutAliveFetchers() throws Exception {
    final String groupId = "testCommitOffsetsWithoutAliveFetchers";
    try (KafkaSourceReader<Integer> reader = (KafkaSourceReader<Integer>) createReader(Boundedness.CONTINUOUS_UNBOUNDED, groupId)) {
        KafkaPartitionSplit split = new KafkaPartitionSplit(new TopicPartition(TOPIC, 0), 0, NUM_RECORDS_PER_SPLIT);
        reader.addSplits(Collections.singletonList(split));
        reader.notifyNoMoreSplits();
        ReaderOutput<Integer> output = new TestingReaderOutput<>();
        InputStatus status;
        do {
            status = reader.pollNext(output);
        } while (status != InputStatus.NOTHING_AVAILABLE);
        pollUntil(reader, output, () -> reader.getNumAliveFetchers() == 0, "The split fetcher did not exit before timeout.");
        reader.snapshotState(100L);
        reader.notifyCheckpointComplete(100L);
        // Due to a bug in KafkaConsumer, when the consumer closes, the offset commit callback
        // won't be fired, so the offsetsToCommit map won't be cleaned. To make the test
        // stable, we add a split whose starting offset is the log end offset, so the
        // split fetcher won't become idle and exit after commitOffsetAsync is invoked from
        // notifyCheckpointComplete().
        reader.addSplits(Collections.singletonList(new KafkaPartitionSplit(new TopicPartition(TOPIC, 0), NUM_RECORDS_PER_SPLIT)));
        pollUntil(reader, output, () -> reader.getOffsetsToCommit().isEmpty(), "The offset commit did not finish before timeout.");
    }
    // Verify the committed offsets.
    try (AdminClient adminClient = KafkaSourceTestEnv.getAdminClient()) {
        Map<TopicPartition, OffsetAndMetadata> committedOffsets = adminClient.listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata().get();
        assertThat(committedOffsets).hasSize(1);
        assertThat(committedOffsets.values()).extracting(OffsetAndMetadata::offset).allMatch(offset -> offset == NUM_RECORDS_PER_SPLIT);
    }
}
Also used: TestingReaderOutput (org.apache.flink.connector.testutils.source.reader.TestingReaderOutput), KafkaPartitionSplit (org.apache.flink.connector.kafka.source.split.KafkaPartitionSplit), TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), InputStatus (org.apache.flink.core.io.InputStatus), AdminClient (org.apache.kafka.clients.admin.AdminClient), Test (org.junit.jupiter.api.Test)
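
The verification block at the end is not test-specific: listConsumerGroupOffsets works against any consumer group. A minimal standalone sketch, assuming a broker at localhost:9092 and a hypothetical group id "my-group":

import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class ShowCommittedOffsets {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // assumed address
        try (AdminClient adminClient = AdminClient.create(props)) {
            Map<TopicPartition, OffsetAndMetadata> committed =
                adminClient.listConsumerGroupOffsets("my-group").partitionsToOffsetAndMetadata().get();
            committed.forEach((tp, om) -> System.out.println(tp + " -> " + om.offset()));
        }
    }
}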

Example 10 with AdminClient

Use of org.apache.kafka.clients.admin.AdminClient in project flink by apache.

From the class KafkaSourceReaderTest, method testOffsetCommitOnCheckpointComplete:

@Test
void testOffsetCommitOnCheckpointComplete() throws Exception {
    final String groupId = "testOffsetCommitOnCheckpointComplete";
    try (KafkaSourceReader<Integer> reader = (KafkaSourceReader<Integer>) createReader(Boundedness.CONTINUOUS_UNBOUNDED, groupId)) {
        reader.addSplits(getSplits(numSplits, NUM_RECORDS_PER_SPLIT, Boundedness.CONTINUOUS_UNBOUNDED));
        ValidatingSourceOutput output = new ValidatingSourceOutput();
        long checkpointId = 0;
        do {
            checkpointId++;
            reader.pollNext(output);
            // Create a checkpoint for each message consumption, but not complete them.
            reader.snapshotState(checkpointId);
        } while (output.count() < totalNumRecords);
        // The completion of the last checkpoint should subsume all the previous checkpoints.
        assertThat(reader.getOffsetsToCommit()).hasSize((int) checkpointId);
        long lastCheckpointId = checkpointId;
        waitUtil(() -> {
            try {
                reader.notifyCheckpointComplete(lastCheckpointId);
            } catch (Exception exception) {
                throw new RuntimeException("Caught unexpected exception when notifying checkpoint completion", exception);
            }
            return reader.getOffsetsToCommit().isEmpty();
        }, Duration.ofSeconds(60), Duration.ofSeconds(1), "The offset commit did not finish before timeout.");
    }
    // Verify the committed offsets.
    try (AdminClient adminClient = KafkaSourceTestEnv.getAdminClient()) {
        Map<TopicPartition, OffsetAndMetadata> committedOffsets = adminClient.listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata().get();
        assertThat(committedOffsets).hasSize(numSplits);
        assertThat(committedOffsets.values()).extracting(OffsetAndMetadata::offset).allMatch(offset -> offset == NUM_RECORDS_PER_SPLIT);
    }
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), AdminClient (org.apache.kafka.clients.admin.AdminClient), Test (org.junit.jupiter.api.Test)
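
AdminClient can also rewrite a group's committed offsets via alterConsumerGroupOffsets (available since Kafka 2.4), which is useful for rewinding a group between test runs. A hedged sketch with assumed broker address, topic, partition, and group id; note the call is rejected while the group still has active members:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class RewindGroupOffset {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // assumed address
        try (AdminClient adminClient = AdminClient.create(props)) {
            // Reset partition 0 of the hypothetical topic "mytopic" to offset 0
            // for the hypothetical group "my-group". Fails if the group is active.
            adminClient.alterConsumerGroupOffsets("my-group",
                    Collections.singletonMap(new TopicPartition("mytopic", 0), new OffsetAndMetadata(0L)))
                .all().get();
        }
    }
}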

Aggregations

AdminClient (org.apache.kafka.clients.admin.AdminClient): 70 uses
Test (org.junit.Test): 38 uses
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl): 31 uses
NewTopic (org.apache.kafka.clients.admin.NewTopic): 30 uses
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 29 uses
MockAdminClient (org.apache.kafka.clients.admin.MockAdminClient): 27 uses
HashMap (java.util.HashMap): 24 uses
TopicMetadataAndConfig (org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig): 18 uses
TopicDescription (org.apache.kafka.clients.admin.TopicDescription): 18 uses
Config (org.apache.kafka.clients.admin.Config): 15 uses
Map (java.util.Map): 14 uses
ConsumerConfig (org.apache.kafka.clients.consumer.ConsumerConfig): 14 uses
ProducerConfig (org.apache.kafka.clients.producer.ProducerConfig): 14 uses
TopicConfig (org.apache.kafka.common.config.TopicConfig): 13 uses
MockTime (org.apache.kafka.common.utils.MockTime): 13 uses
TopicExistsException (org.apache.kafka.common.errors.TopicExistsException): 11 uses
ArrayList (java.util.ArrayList): 10 uses
TopicPartitionInfo (org.apache.kafka.common.TopicPartitionInfo): 10 uses
ConfigResource (org.apache.kafka.common.config.ConfigResource): 10 uses
UnknownTopicOrPartitionException (org.apache.kafka.common.errors.UnknownTopicOrPartitionException): 10 uses