Example 11 with Admin

Use of org.apache.kafka.clients.admin.Admin in project kafka by apache.

From the class ClientUtilsTest, method fetchEndOffsetsShouldRethrowRuntimeExceptionAsStreamsException.

@Test
public void fetchEndOffsetsShouldRethrowRuntimeExceptionAsStreamsException() throws Exception {
    final Admin adminClient = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult result = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFuture<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = EasyMock.createMock(KafkaFuture.class);
    // Stub the admin client so that waiting on the end-offset future throws an
    // unchecked RuntimeException.
    EasyMock.expect(adminClient.listOffsets(EasyMock.anyObject())).andStubReturn(result);
    EasyMock.expect(result.all()).andStubReturn(allFuture);
    EasyMock.expect(allFuture.get()).andThrow(new RuntimeException());
    replay(adminClient, result, allFuture);
    // fetchEndOffsets is expected to wrap the failure in a StreamsException.
    assertThrows(StreamsException.class, () -> fetchEndOffsets(PARTITIONS, adminClient));
    verify(adminClient);
}
Also used: ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult), Admin(org.apache.kafka.clients.admin.Admin), Map(java.util.Map), Test(org.junit.Test)
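
The ClientUtils helper under test is not reproduced on this page. A minimal sketch of the exception-wrapping contract that this test (and the next one) pins down might look as follows; the method body, the OffsetSpec choice, and the error message are assumptions, not the actual Kafka Streams implementation:

// Hypothetical sketch of the helper under test: resolve the latest offset for each
// partition and rethrow any failure as a StreamsException.
// Needs: org.apache.kafka.clients.admin.OffsetSpec,
//        org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo,
//        org.apache.kafka.streams.errors.StreamsException
public static Map<TopicPartition, ListOffsetsResultInfo> fetchEndOffsets(final Collection<TopicPartition> partitions, final Admin adminClient) {
    final Map<TopicPartition, OffsetSpec> offsetSpecs = new HashMap<>();
    for (final TopicPartition partition : partitions) {
        offsetSpecs.put(partition, OffsetSpec.latest());
    }
    try {
        // get() blocks; RuntimeException, InterruptedException, and ExecutionException
        // all surface here, which is exactly what the tests provoke via the mocked future.
        return adminClient.listOffsets(offsetSpecs).all().get();
    } catch (final RuntimeException | InterruptedException | ExecutionException fatal) {
        throw new StreamsException("Unable to fetch end offsets", fatal);
    }
}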

Example 12 with Admin

Use of org.apache.kafka.clients.admin.Admin in project kafka by apache.

From the class ClientUtilsTest, method fetchEndOffsetsShouldRethrowExecutionExceptionAsStreamsException.

@Test
public void fetchEndOffsetsShouldRethrowExecutionExceptionAsStreamsException() throws Exception {
    final Admin adminClient = EasyMock.createMock(AdminClient.class);
    final ListOffsetsResult result = EasyMock.createNiceMock(ListOffsetsResult.class);
    final KafkaFuture<Map<TopicPartition, ListOffsetsResultInfo>> allFuture = EasyMock.createMock(KafkaFuture.class);
    // Stub the admin client so that waiting on the end-offset future fails with an
    // ExecutionException, as a real KafkaFuture would on a broker-side error.
    EasyMock.expect(adminClient.listOffsets(EasyMock.anyObject())).andStubReturn(result);
    EasyMock.expect(result.all()).andStubReturn(allFuture);
    EasyMock.expect(allFuture.get()).andThrow(new ExecutionException(new RuntimeException()));
    replay(adminClient, result, allFuture);
    // fetchEndOffsets must translate the checked failure into a StreamsException.
    assertThrows(StreamsException.class, () -> fetchEndOffsets(PARTITIONS, adminClient));
    verify(adminClient);
}
Also used: ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult), Admin(org.apache.kafka.clients.admin.Admin), ExecutionException(java.util.concurrent.ExecutionException), Map(java.util.Map), Test(org.junit.Test)
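
For contrast with the mocked interactions above, the same Admin API called against a live cluster looks roughly like this; the bootstrap address and topic name are placeholders:

import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public final class EndOffsetDemo {

    public static void main(final String[] args) throws Exception {
        // "localhost:9092" and "data" are placeholder values.
        try (Admin admin = Admin.create(Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"))) {
            final TopicPartition partition = new TopicPartition("data", 0);
            // all().get() is where an ExecutionException would propagate from, as in the test above.
            final ListOffsetsResultInfo info = admin.listOffsets(Map.of(partition, OffsetSpec.latest())).all().get().get(partition);
            System.out.println(partition + " end offset: " + info.offset());
        }
    }
}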

Example 13 with Admin

Use of org.apache.kafka.clients.admin.Admin in project kafka by apache.

From the class EosTestDriver, method verify.

public static void verify(final String kafka, final boolean withRepartitioning) {
    final Properties props = new Properties();
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "verifier");
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, IsolationLevel.READ_COMMITTED.toString().toLowerCase(Locale.ROOT));
    // First pass: confirm every transaction is closed before reading committed data.
    try (final KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
        verifyAllTransactionFinished(consumer, kafka, withRepartitioning);
    } catch (final Exception e) {
        e.printStackTrace(System.err);
        System.out.println("FAILED");
        return;
    }
    // Fetch the Streams application's committed offsets via the Admin client.
    final Map<TopicPartition, Long> committedOffsets;
    try (final Admin adminClient = Admin.create(props)) {
        ensureStreamsApplicationDown(adminClient);
        committedOffsets = getCommittedOffsets(adminClient, withRepartitioning);
    }
    final String[] allInputTopics;
    final String[] allOutputTopics;
    if (withRepartitioning) {
        allInputTopics = new String[] { "data", "repartition" };
        allOutputTopics = new String[] { "echo", "min", "sum", "repartition", "max", "cnt" };
    } else {
        allInputTopics = new String[] { "data" };
        allOutputTopics = new String[] { "echo", "min", "sum" };
    }
    // Read all input topics from the beginning up to the committed offsets.
    final Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> inputRecordsPerTopicPerPartition;
    try (final KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
        final List<TopicPartition> partitions = getAllPartitions(consumer, allInputTopics);
        consumer.assign(partitions);
        consumer.seekToBeginning(partitions);
        inputRecordsPerTopicPerPartition = getRecords(consumer, committedOffsets, withRepartitioning, true);
    } catch (final Exception e) {
        e.printStackTrace(System.err);
        System.out.println("FAILED");
        return;
    }
    // Read all output topics from the beginning up to their current end offsets.
    final Map<String, Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>>> outputRecordsPerTopicPerPartition;
    try (final KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
        final List<TopicPartition> partitions = getAllPartitions(consumer, allOutputTopics);
        consumer.assign(partitions);
        consumer.seekToBeginning(partitions);
        outputRecordsPerTopicPerPartition = getRecords(consumer, consumer.endOffsets(partitions), withRepartitioning, false);
    } catch (final Exception e) {
        e.printStackTrace(System.err);
        System.out.println("FAILED");
        return;
    }
    verifyReceivedAllRecords(inputRecordsPerTopicPerPartition.get("data"), outputRecordsPerTopicPerPartition.get("echo"));
    if (withRepartitioning) {
        verifyReceivedAllRecords(inputRecordsPerTopicPerPartition.get("data"), outputRecordsPerTopicPerPartition.get("repartition"));
    }
    verifyMin(inputRecordsPerTopicPerPartition.get("data"), outputRecordsPerTopicPerPartition.get("min"));
    verifySum(inputRecordsPerTopicPerPartition.get("data"), outputRecordsPerTopicPerPartition.get("sum"));
    if (withRepartitioning) {
        verifyMax(inputRecordsPerTopicPerPartition.get("repartition"), outputRecordsPerTopicPerPartition.get("max"));
        verifyCnt(inputRecordsPerTopicPerPartition.get("repartition"), outputRecordsPerTopicPerPartition.get("cnt"));
    }
    // do not modify: required test output
    System.out.println("ALL-RECORDS-DELIVERED");
    System.out.flush();
}
Also used: KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer), Properties(java.util.Properties), Admin(org.apache.kafka.clients.admin.Admin), TimeoutException(org.apache.kafka.common.errors.TimeoutException), ExecutionException(java.util.concurrent.ExecutionException), ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord), TopicPartition(org.apache.kafka.common.TopicPartition), HashMap(java.util.HashMap), Map(java.util.Map)
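
The getCommittedOffsets helper is not shown above. A plausible sketch of that lookup with the Admin API follows; the group id ("EosTest") and the input-topic filter are assumptions about the test driver, not verified details:

// Hypothetical sketch of getCommittedOffsets: read the application's committed
// offsets through the Admin client and keep only the input topics.
// Needs: org.apache.kafka.clients.consumer.OffsetAndMetadata
private static Map<TopicPartition, Long> getCommittedOffsets(final Admin adminClient, final boolean withRepartitioning) throws Exception {
    final Map<TopicPartition, OffsetAndMetadata> committed =
        adminClient.listConsumerGroupOffsets("EosTest").partitionsToOffsetAndMetadata().get();
    final Map<TopicPartition, Long> committedOffsets = new HashMap<>();
    for (final Map.Entry<TopicPartition, OffsetAndMetadata> entry : committed.entrySet()) {
        final String topic = entry.getKey().topic();
        if (topic.equals("data") || (withRepartitioning && topic.equals("repartition"))) {
            committedOffsets.put(entry.getKey(), entry.getValue().offset());
        }
    }
    return committedOffsets;
}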

Example 14 with Admin

Use of org.apache.kafka.clients.admin.Admin in project druid by druid-io.

From the class KafkaSupervisorTest, method addSomeEvents.

private void addSomeEvents(int numEventsPerPartition) throws Exception {
    // Create the topic manually with NUM_PARTITIONS partitions and a replication factor of 1.
    try (Admin admin = kafkaServer.newAdminClient()) {
        admin.createTopics(Collections.singletonList(new NewTopic(topic, NUM_PARTITIONS, (short) 1))).all().get();
    }
    // Produce every event inside a single transaction so that read_committed
    // consumers observe the batch atomically.
    try (final KafkaProducer<byte[], byte[]> kafkaProducer = kafkaServer.newProducer()) {
        kafkaProducer.initTransactions();
        kafkaProducer.beginTransaction();
        for (int i = 0; i < NUM_PARTITIONS; i++) {
            for (int j = 0; j < numEventsPerPartition; j++) {
                kafkaProducer.send(new ProducerRecord<>(topic, i, null, StringUtils.toUtf8(StringUtils.format("event-%d", j)))).get();
            }
        }
        kafkaProducer.commitTransaction();
    }
}
Also used: ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord), NewTopic(org.apache.kafka.clients.admin.NewTopic), Admin(org.apache.kafka.clients.admin.Admin)
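
Because all().get() only confirms that the create call completed, a follow-up assertion on the resulting partition count can be useful. A sketch reusing the same kafkaServer, topic, and NUM_PARTITIONS names (the check itself is not part of the original test):

// Sketch: verify the topic now exists with the expected number of partitions.
// Needs: org.apache.kafka.clients.admin.TopicDescription
try (Admin admin = kafkaServer.newAdminClient()) {
    final TopicDescription description = admin.describeTopics(Collections.singletonList(topic)).all().get().get(topic);
    if (description.partitions().size() != NUM_PARTITIONS) {
        throw new IllegalStateException("unexpected partition count: " + description.partitions().size());
    }
}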

Example 15 with Admin

Use of org.apache.kafka.clients.admin.Admin in project druid by druid-io.

From the class KafkaSupervisorTest, method addMoreEvents.

private void addMoreEvents(int numEventsPerPartition, int numPartitions) throws Exception {
    // Expand the existing topic from NUM_PARTITIONS to numPartitions partitions.
    try (Admin admin = kafkaServer.newAdminClient()) {
        admin.createPartitions(Collections.singletonMap(topic, NewPartitions.increaseTo(numPartitions))).all().get();
    }
    try (final KafkaProducer<byte[], byte[]> kafkaProducer = kafkaServer.newProducer()) {
        kafkaProducer.initTransactions();
        kafkaProducer.beginTransaction();
        // Only the newly added partitions receive events.
        for (int i = NUM_PARTITIONS; i < numPartitions; i++) {
            for (int j = 0; j < numEventsPerPartition; j++) {
                kafkaProducer.send(new ProducerRecord<>(topic, i, null, StringUtils.toUtf8(StringUtils.format("event-%d", j)))).get();
            }
        }
        kafkaProducer.commitTransaction();
    }
}
Also used: ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord), Admin(org.apache.kafka.clients.admin.Admin)
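
NewPartitions.increaseTo also has a two-argument form that pins the replica assignments of the newly added partitions. A sketch with placeholder broker ids, reusing the kafkaServer and topic names from above:

// Sketch: grow the topic to four partitions and place the single replica of each
// new partition explicitly; broker ids 1 and 2 are placeholders.
final NewPartitions grow = NewPartitions.increaseTo(4, Arrays.asList(
    Collections.singletonList(1),    // assignment for the first new partition
    Collections.singletonList(2)));  // assignment for the second new partition
try (Admin admin = kafkaServer.newAdminClient()) {
    admin.createPartitions(Collections.singletonMap(topic, grow)).all().get();
}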

Aggregations

Classes most often used together with Admin across these examples (usage count):

Admin (org.apache.kafka.clients.admin.Admin): 27
ExecutionException (java.util.concurrent.ExecutionException): 12
Map (java.util.Map): 11
Properties (java.util.Properties): 9
HashMap (java.util.HashMap): 8
TopicPartition (org.apache.kafka.common.TopicPartition): 8
NewTopic (org.apache.kafka.clients.admin.NewTopic): 7
AdminClientConfig (org.apache.kafka.clients.admin.AdminClientConfig): 6
Test (org.junit.Test): 6
Collection (java.util.Collection): 5
ConfigResource (org.apache.kafka.common.config.ConfigResource): 5
Arrays (java.util.Arrays): 4
Collections (java.util.Collections): 4
Optional (java.util.Optional): 4
Set (java.util.Set): 4
Config (org.apache.kafka.clients.admin.Config): 4
ListOffsetsResult (org.apache.kafka.clients.admin.ListOffsetsResult): 4
MirrorMakerConfig (org.apache.kafka.connect.mirror.MirrorMakerConfig): 4
Logger (org.slf4j.Logger): 4
IOException (java.io.IOException): 3