Example 96 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the apache/kafka project.

The class InMemorySessionStore, method init:

@Deprecated
@Override
public void init(final ProcessorContext context, final StateStore root) {
    final String threadId = Thread.currentThread().getName();
    final String taskName = context.taskId().toString();
    // The given context is not required to be an InternalProcessorContext; if it isn't, we can't record the expired-record metric.
    if (context instanceof InternalProcessorContext) {
        this.context = (InternalProcessorContext) context;
        final StreamsMetricsImpl metrics = this.context.metrics();
        expiredRecordSensor = TaskMetrics.droppedRecordsSensor(threadId, taskName, metrics);
    } else {
        this.context = null;
        expiredRecordSensor = null;
    }
    if (root != null) {
        final boolean consistencyEnabled = StreamsConfig.InternalConfig.getBoolean(context.appConfigs(), IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, false);
        context.register(root, (RecordBatchingStateRestoreCallback) records -> {
            for (final ConsumerRecord<byte[], byte[]> record : records) {
                put(SessionKeySchema.from(Bytes.wrap(record.key())), record.value());
                ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(record, consistencyEnabled, position);
            }
        });
    }
    open = true;
}
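
To show how a restore callback like the one registered above behaves, here is a minimal standalone sketch (the topic name, keys, and map-backed "store" are illustrative, not from the Kafka sources). It drives a RecordBatchingStateRestoreCallback directly with synthetic changelog records, copying key/value pairs into a plain map instead of decoding session keys:

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.processor.internals.RecordBatchingStateRestoreCallback;

public class RestoreCallbackSketch {
    public static void main(final String[] args) {
        final Map<String, String> restored = new HashMap<>();

        // Stand-in for the callback registered in init(): copy each changelog record
        // into a map instead of decoding SessionKeySchema keys.
        final RecordBatchingStateRestoreCallback callback = records -> {
            for (final ConsumerRecord<byte[], byte[]> record : records) {
                restored.put(new String(record.key(), StandardCharsets.UTF_8),
                             new String(record.value(), StandardCharsets.UTF_8));
            }
        };

        final List<ConsumerRecord<byte[], byte[]>> batch = new ArrayList<>();
        batch.add(new ConsumerRecord<>("store-changelog", 0, 0L,
                "k1".getBytes(StandardCharsets.UTF_8), "v1".getBytes(StandardCharsets.UTF_8)));
        batch.add(new ConsumerRecord<>("store-changelog", 0, 1L,
                "k2".getBytes(StandardCharsets.UTF_8), "v2".getBytes(StandardCharsets.UTF_8)));

        callback.restoreBatch(batch);
        // Prints {k1=v1, k2=v2}
        System.out.println(restored);
    }
}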

Example 97 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the apache/kafka project.

The class RocksDBStoreTest, method shouldRestoreRecordsAndConsistencyVectorMultipleTopics:

@Test
public void shouldRestoreRecordsAndConsistencyVectorMultipleTopics() {
    final List<ConsumerRecord<byte[], byte[]>> entries = getChangelogRecordsMultipleTopics();
    final Properties props = StreamsTestUtils.getStreamsConfig();
    props.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, MockRocksDbConfigSetter.class);
    props.put(InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, true);
    dir = TestUtils.tempDirectory();
    context = new InternalMockProcessorContext<>(dir, Serdes.String(), Serdes.String(), new StreamsConfig(props));
    rocksDBStore.init((StateStoreContext) context, rocksDBStore);
    context.restoreWithHeaders(rocksDBStore.name(), entries);
    assertEquals("a", stringDeserializer.deserialize(null, rocksDBStore.get(new Bytes(stringSerializer.serialize(null, "1")))));
    assertEquals("b", stringDeserializer.deserialize(null, rocksDBStore.get(new Bytes(stringSerializer.serialize(null, "2")))));
    assertEquals("c", stringDeserializer.deserialize(null, rocksDBStore.get(new Bytes(stringSerializer.serialize(null, "3")))));
    assertThat(rocksDBStore.getPosition(), Matchers.notNullValue());
    assertThat(rocksDBStore.getPosition().getPartitionPositions("A"), Matchers.notNullValue());
    assertThat(rocksDBStore.getPosition().getPartitionPositions("A"), hasEntry(0, 3L));
    assertThat(rocksDBStore.getPosition().getPartitionPositions("B"), Matchers.notNullValue());
    assertThat(rocksDBStore.getPosition().getPartitionPositions("B"), hasEntry(0, 2L));
}
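
The consistency vector asserted at the end of this test is a Position keyed by topic and partition. A minimal standalone sketch (topic names and offsets here are illustrative) of how such a vector accumulates the restored offset per partition:

import java.util.Map;

import org.apache.kafka.streams.query.Position;

public class PositionSketch {
    public static void main(final String[] args) {
        final Position position = Position.emptyPosition()
                .withComponent("A", 0, 1L)
                // advancing the same topic-partition to a later offset leaves the vector at 3
                .withComponent("A", 0, 3L)
                .withComponent("B", 0, 2L);

        final Map<Integer, Long> topicA = position.getPartitionPositions("A");
        final Map<Integer, Long> topicB = position.getPartitionPositions("B");
        // Prints {0=3} and {0=2}, matching the hasEntry assertions above.
        System.out.println(topicA);
        System.out.println(topicB);
    }
}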

Example 98 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the apache/kafka project.

The class TransactionalMessageCopier, method runEventLoop:

public static void runEventLoop(Namespace parsedArgs) {
    final String transactionalId = parsedArgs.getString("transactionalId");
    final String outputTopic = parsedArgs.getString("outputTopic");
    String consumerGroup = parsedArgs.getString("consumerGroup");
    final KafkaProducer<String, String> producer = createProducer(parsedArgs);
    final KafkaConsumer<String, String> consumer = createConsumer(parsedArgs);
    final AtomicLong remainingMessages = new AtomicLong(parsedArgs.getInt("maxMessages") == -1 ? Long.MAX_VALUE : parsedArgs.getInt("maxMessages"));
    boolean groupMode = parsedArgs.getBoolean("groupMode");
    String topicName = parsedArgs.getString("inputTopic");
    final AtomicLong numMessagesProcessedSinceLastRebalance = new AtomicLong(0);
    final AtomicLong totalMessageProcessed = new AtomicLong(0);
    if (groupMode) {
        consumer.subscribe(Collections.singleton(topicName), new ConsumerRebalanceListener() {

            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                remainingMessages.set(partitions.stream().mapToLong(partition -> messagesRemaining(consumer, partition)).sum());
                numMessagesProcessedSinceLastRebalance.set(0);
                // We use message cap for remaining here as the remainingMessages are not set yet.
                System.out.println(statusAsJson(totalMessageProcessed.get(), numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId, "RebalanceComplete"));
            }
        });
    } else {
        TopicPartition inputPartition = new TopicPartition(topicName, parsedArgs.getInt("inputPartition"));
        consumer.assign(singleton(inputPartition));
        remainingMessages.set(Math.min(messagesRemaining(consumer, inputPartition), remainingMessages.get()));
    }
    final boolean enableRandomAborts = parsedArgs.getBoolean("enableRandomAborts");
    producer.initTransactions();
    final AtomicBoolean isShuttingDown = new AtomicBoolean(false);
    Exit.addShutdownHook("transactional-message-copier-shutdown-hook", () -> {
        isShuttingDown.set(true);
        consumer.wakeup();
        System.out.println(shutDownString(totalMessageProcessed.get(), numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId));
    });
    final boolean useGroupMetadata = parsedArgs.getBoolean("useGroupMetadata");
    try {
        Random random = new Random();
        while (!isShuttingDown.get() && remainingMessages.get() > 0) {
            System.out.println(statusAsJson(totalMessageProcessed.get(), numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId, "ProcessLoop"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
            if (records.count() > 0) {
                try {
                    producer.beginTransaction();
                    for (ConsumerRecord<String, String> record : records) {
                        producer.send(producerRecordFromConsumerRecord(outputTopic, record));
                    }
                    long messagesSentWithinCurrentTxn = records.count();
                    ConsumerGroupMetadata groupMetadata = useGroupMetadata ? consumer.groupMetadata() : new ConsumerGroupMetadata(consumerGroup);
                    producer.sendOffsetsToTransaction(consumerPositions(consumer), groupMetadata);
                    if (enableRandomAborts && random.nextInt() % 3 == 0) {
                        abortTransactionAndResetPosition(producer, consumer);
                    } else {
                        producer.commitTransaction();
                        remainingMessages.getAndAdd(-messagesSentWithinCurrentTxn);
                        numMessagesProcessedSinceLastRebalance.getAndAdd(messagesSentWithinCurrentTxn);
                        totalMessageProcessed.getAndAdd(messagesSentWithinCurrentTxn);
                    }
                } catch (ProducerFencedException e) {
                    throw new KafkaException(String.format("The transactional.id %s has been claimed by another process", transactionalId), e);
                } catch (KafkaException e) {
                    log.debug("Aborting transaction after catching exception", e);
                    abortTransactionAndResetPosition(producer, consumer);
                }
            }
        }
    } catch (WakeupException e) {
        if (!isShuttingDown.get()) {
            // Re-throw the wakeup exception if it was not raised as part of shutdown.
            throw e;
        }
    } finally {
        Utils.closeQuietly(producer, "producer");
        Utils.closeQuietly(consumer, "consumer");
    }
}
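
The loop above calls two small helpers whose bodies are not shown in this excerpt. A plausible sketch of them (the exact upstream implementations may differ): one re-publishes a consumed record to the output topic, the other captures the consumer's current offsets for sendOffsetsToTransaction:

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;

final class MessageCopierHelpersSketch {

    static ProducerRecord<String, String> producerRecordFromConsumerRecord(final String outputTopic,
                                                                           final ConsumerRecord<String, String> record) {
        // Preserve the source partition, key, and value when copying to the output topic.
        return new ProducerRecord<>(outputTopic, record.partition(), record.key(), record.value());
    }

    static Map<TopicPartition, OffsetAndMetadata> consumerPositions(final KafkaConsumer<String, String> consumer) {
        // The offset to commit for each assigned partition is the consumer's next fetch position.
        final Map<TopicPartition, OffsetAndMetadata> positions = new HashMap<>();
        for (final TopicPartition partition : consumer.assignment()) {
            positions.put(partition, new OffsetAndMetadata(consumer.position(partition)));
        }
        return positions;
    }
}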

Example 99 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the apache/kafka project.

The class SmokeTestDriver, method verifyAll:

private static VerificationResult verifyAll(final Map<String, Set<Integer>> inputs, final Map<String, Map<String, LinkedList<ConsumerRecord<String, Number>>>> events, final boolean printResults) {
    final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    boolean pass;
    try (final PrintStream resultStream = new PrintStream(byteArrayOutputStream)) {
        pass = verifyTAgg(resultStream, inputs, events.get("tagg"), printResults);
        pass &= verifySuppressed(resultStream, "min-suppressed", events, printResults);
        pass &= verify(resultStream, "min-suppressed", inputs, events, windowedKey -> {
            final String unwindowedKey = windowedKey.substring(1, windowedKey.length() - 1).replaceAll("@.*", "");
            return getMin(unwindowedKey);
        }, printResults);
        pass &= verifySuppressed(resultStream, "sws-suppressed", events, printResults);
        pass &= verify(resultStream, "min", inputs, events, SmokeTestDriver::getMin, printResults);
        pass &= verify(resultStream, "max", inputs, events, SmokeTestDriver::getMax, printResults);
        pass &= verify(resultStream, "dif", inputs, events, key -> getMax(key).intValue() - getMin(key).intValue(), printResults);
        pass &= verify(resultStream, "sum", inputs, events, SmokeTestDriver::getSum, printResults);
        pass &= verify(resultStream, "cnt", inputs, events, key1 -> getMax(key1).intValue() - getMin(key1).intValue() + 1L, printResults);
        pass &= verify(resultStream, "avg", inputs, events, SmokeTestDriver::getAvg, printResults);
    }
    return new VerificationResult(pass, new String(byteArrayOutputStream.toByteArray(), StandardCharsets.UTF_8));
}
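
verifyAll expects its events argument to be a nested map of topic to record key to records in arrival order. A minimal sketch (the collector name is illustrative) of how such a map could be built from polled records:

import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;

final class EventCollectorSketch {

    // Appends every polled record to events.get(topic).get(key), creating the inner
    // structures on first use so per-key arrival order is preserved.
    static void collect(final ConsumerRecords<String, Number> polled,
                        final Map<String, Map<String, LinkedList<ConsumerRecord<String, Number>>>> events) {
        for (final ConsumerRecord<String, Number> record : polled) {
            events.computeIfAbsent(record.topic(), topic -> new HashMap<>())
                  .computeIfAbsent(record.key(), key -> new LinkedList<>())
                  .add(record);
        }
    }
}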

Example 100 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the apache/kafka project.

The class EmbeddedKafkaCluster, method consume:

/**
 * Consume at least n records in a given duration or throw an exception.
 *
 * @param n the number of expected records in this topic.
 * @param maxDuration the max duration to wait for these records (in milliseconds).
 * @param topics the topics to subscribe and consume records from.
 * @return a {@link ConsumerRecords} collection containing at least n records.
 */
public ConsumerRecords<byte[], byte[]> consume(int n, long maxDuration, String... topics) {
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>();
    int consumedRecords = 0;
    try (KafkaConsumer<byte[], byte[]> consumer = createConsumerAndSubscribeTo(Collections.emptyMap(), topics)) {
        final long startMillis = System.currentTimeMillis();
        long allowedDuration = maxDuration;
        while (allowedDuration > 0) {
            log.debug("Consuming from {} for {} millis.", Arrays.toString(topics), allowedDuration);
            ConsumerRecords<byte[], byte[]> rec = consumer.poll(Duration.ofMillis(allowedDuration));
            if (rec.isEmpty()) {
                allowedDuration = maxDuration - (System.currentTimeMillis() - startMillis);
                continue;
            }
            for (TopicPartition partition : rec.partitions()) {
                final List<ConsumerRecord<byte[], byte[]>> r = rec.records(partition);
                records.computeIfAbsent(partition, t -> new ArrayList<>()).addAll(r);
                consumedRecords += r.size();
            }
            if (consumedRecords >= n) {
                return new ConsumerRecords<>(records);
            }
            allowedDuration = maxDuration - (System.currentTimeMillis() - startMillis);
        }
    }
    throw new RuntimeException("Could not find enough records. found " + consumedRecords + ", expected " + n);
}
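
A short usage sketch of the method above (the cluster field and topic name are illustrative assumptions; the snippet is meant to sit inside a test that owns an EmbeddedKafkaCluster): wait up to five seconds for at least ten records, then inspect them.

// Blocks until at least 10 records arrive on "test-topic" or 5 seconds elapse,
// throwing a RuntimeException on timeout as documented above.
ConsumerRecords<byte[], byte[]> consumed = cluster.consume(10, 5_000L, "test-topic");
for (ConsumerRecord<byte[], byte[]> record : consumed) {
    System.out.printf("consumed offset %d from %s-%d (%d value bytes)%n",
            record.offset(), record.topic(), record.partition(),
            record.value() == null ? 0 : record.value().length);
}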
