
Example 81 with ConsumerRecord

use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

the class MirrorSourceTaskTest method testPoll.

@Test
public void testPoll() {
    // Create a consumer mock
    byte[] key1 = "abc".getBytes();
    byte[] value1 = "fgh".getBytes();
    byte[] key2 = "123".getBytes();
    byte[] value2 = "456".getBytes();
    List<ConsumerRecord<byte[], byte[]>> consumerRecordsList = new ArrayList<>();
    String topicName = "test";
    String headerKey = "key";
    RecordHeaders headers = new RecordHeaders(new Header[] { new RecordHeader(headerKey, "value".getBytes()) });
    consumerRecordsList.add(new ConsumerRecord<>(topicName, 0, 0, System.currentTimeMillis(), TimestampType.CREATE_TIME, key1.length, value1.length, key1, value1, headers, Optional.empty()));
    consumerRecordsList.add(new ConsumerRecord<>(topicName, 1, 1, System.currentTimeMillis(), TimestampType.CREATE_TIME, key2.length, value2.length, key2, value2, headers, Optional.empty()));
    ConsumerRecords<byte[], byte[]> consumerRecords = new ConsumerRecords<>(Collections.singletonMap(new TopicPartition(topicName, 0), consumerRecordsList));
    @SuppressWarnings("unchecked") KafkaConsumer<byte[], byte[]> consumer = mock(KafkaConsumer.class);
    when(consumer.poll(any())).thenReturn(consumerRecords);
    MirrorMetrics metrics = mock(MirrorMetrics.class);
    String sourceClusterName = "cluster1";
    ReplicationPolicy replicationPolicy = new DefaultReplicationPolicy();
    MirrorSourceTask mirrorSourceTask = new MirrorSourceTask(consumer, metrics, sourceClusterName, replicationPolicy, 50);
    List<SourceRecord> sourceRecords = mirrorSourceTask.poll();
    assertEquals(2, sourceRecords.size());
    for (int i = 0; i < sourceRecords.size(); i++) {
        SourceRecord sourceRecord = sourceRecords.get(i);
        ConsumerRecord<byte[], byte[]> consumerRecord = consumerRecordsList.get(i);
        assertEquals(consumerRecord.key(), sourceRecord.key(), "consumerRecord key does not equal sourceRecord key");
        assertEquals(consumerRecord.value(), sourceRecord.value(), "consumerRecord value does not equal sourceRecord value");
        // We expect that the topic name will be based on the replication policy currently used
        assertEquals(replicationPolicy.formatRemoteTopic(sourceClusterName, topicName), sourceRecord.topic(), "topicName not the same as the current replicationPolicy");
        // We expect that MirrorMaker will keep the same partition assignment
        assertEquals(consumerRecord.partition(), sourceRecord.kafkaPartition().intValue(), "partition assignment not the same as the current replicationPolicy");
        // Check header values
        List<Header> expectedHeaders = new ArrayList<>();
        consumerRecord.headers().forEach(expectedHeaders::add);
        List<org.apache.kafka.connect.header.Header> taskHeaders = new ArrayList<>();
        sourceRecord.headers().forEach(taskHeaders::add);
        compareHeaders(expectedHeaders, taskHeaders);
    }
}
Also used : ArrayList(java.util.ArrayList) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) SourceRecord(org.apache.kafka.connect.source.SourceRecord) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) Header(org.apache.kafka.common.header.Header) TopicPartition(org.apache.kafka.common.TopicPartition) Test(org.junit.jupiter.api.Test)
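
The compareHeaders helper called at the end of the loop is not shown in this snippet. A minimal sketch of what such a pairwise comparison could look like follows; the method body is an assumption for illustration, not the actual MirrorSourceTaskTest implementation:

private static void compareHeaders(List<Header> expectedHeaders, List<org.apache.kafka.connect.header.Header> taskHeaders) {
    // Hypothetical sketch: the real helper in MirrorSourceTaskTest may differ.
    assertEquals(expectedHeaders.size(), taskHeaders.size(), "header counts do not match");
    for (int i = 0; i < expectedHeaders.size(); i++) {
        Header kafkaHeader = expectedHeaders.get(i);
        org.apache.kafka.connect.header.Header connectHeader = taskHeaders.get(i);
        assertEquals(kafkaHeader.key(), connectHeader.key(), "header keys do not match");
        // MirrorSourceTask copies header values as byte[], so a byte-wise comparison is assumed here
        assertArrayEquals(kafkaHeader.value(), (byte[]) connectHeader.value(), "header values do not match");
    }
}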

Example 82 with ConsumerRecord

use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

the class OffsetSyncTest method testSerde.

@Test
public void testSerde() {
    OffsetSync offsetSync = new OffsetSync(new TopicPartition("topic-1", 2), 3, 4);
    byte[] key = offsetSync.recordKey();
    byte[] value = offsetSync.recordValue();
    ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>("any-topic", 6, 7, key, value);
    OffsetSync deserialized = OffsetSync.deserializeRecord(record);
    assertEquals(offsetSync.topicPartition(), deserialized.topicPartition(), "Failure on offset sync topic partition serde");
    assertEquals(offsetSync.upstreamOffset(), deserialized.upstreamOffset(), "Failure on upstream offset serde");
    assertEquals(offsetSync.downstreamOffset(), deserialized.downstreamOffset(), "Failure on downstream offset serde");
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Test(org.junit.jupiter.api.Test)
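
The key/value pair produced by recordKey() and recordValue() can also be read back outside a unit test. A minimal sketch of deserializing offset syncs from polled records, assuming a plain byte[]/byte[] consumer; the topic name and configuration values are illustrative assumptions, not MirrorMaker's actual defaults:

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");                 // assumption
props.put(ConsumerConfig.GROUP_ID_CONFIG, "offset-sync-reader");                      // assumption
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
    consumer.subscribe(Collections.singletonList("mm2-offset-syncs"));                // topic name is illustrative
    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
    for (ConsumerRecord<byte[], byte[]> record : records) {
        // Same deserialization entry point as the test above
        OffsetSync sync = OffsetSync.deserializeRecord(record);
        System.out.printf("%s upstream=%d downstream=%d%n",
                sync.topicPartition(), sync.upstreamOffset(), sync.downstreamOffset());
    }
}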

Example 83 with ConsumerRecord

use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

the class CheckpointTest method testSerde.

@Test
public void testSerde() {
    Checkpoint checkpoint = new Checkpoint("group-1", new TopicPartition("topic-2", 3), 4, 5, "metadata-6");
    byte[] key = checkpoint.recordKey();
    byte[] value = checkpoint.recordValue();
    ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>("any-topic", 7, 8, key, value);
    Checkpoint deserialized = Checkpoint.deserializeRecord(record);
    assertEquals(checkpoint.consumerGroupId(), deserialized.consumerGroupId(), "Failure on checkpoint consumerGroupId serde");
    assertEquals(checkpoint.topicPartition(), deserialized.topicPartition(), "Failure on checkpoint topicPartition serde");
    assertEquals(checkpoint.upstreamOffset(), deserialized.upstreamOffset(), "Failure on checkpoint upstreamOffset serde");
    assertEquals(checkpoint.downstreamOffset(), deserialized.downstreamOffset(), "Failure on checkpoint downstreamOffset serde");
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Test(org.junit.jupiter.api.Test)
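
For the producing side of the same serde, a minimal sketch of publishing a checkpoint with a plain byte[]/byte[] producer; the topic name and producer configuration are illustrative assumptions, not MirrorMaker's actual settings:

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");                 // assumption
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
Checkpoint checkpoint = new Checkpoint("group-1", new TopicPartition("topic-2", 3), 4, 5, "metadata-6");
try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
    // recordKey()/recordValue() are the same serialization calls exercised by the test
    producer.send(new ProducerRecord<>("cluster1.checkpoints.internal",                // topic name is illustrative
            checkpoint.recordKey(), checkpoint.recordValue()));
    producer.flush();
}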

Example 84 with ConsumerRecord

use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

the class FetcherTest method testReadCommittedWithCompactedTopic.

@Test
public void testReadCommittedWithCompactedTopic() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new StringDeserializer(), new StringDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    long pid1 = 1L;
    long pid2 = 2L;
    long pid3 = 3L;
    appendTransactionalRecords(buffer, pid3, 3L, new SimpleRecord("3".getBytes(), "value".getBytes()), new SimpleRecord("4".getBytes(), "value".getBytes()));
    appendTransactionalRecords(buffer, pid2, 15L, new SimpleRecord("15".getBytes(), "value".getBytes()), new SimpleRecord("16".getBytes(), "value".getBytes()), new SimpleRecord("17".getBytes(), "value".getBytes()));
    appendTransactionalRecords(buffer, pid1, 22L, new SimpleRecord("22".getBytes(), "value".getBytes()), new SimpleRecord("23".getBytes(), "value".getBytes()));
    abortTransaction(buffer, pid2, 28L);
    appendTransactionalRecords(buffer, pid3, 30L, new SimpleRecord("30".getBytes(), "value".getBytes()), new SimpleRecord("31".getBytes(), "value".getBytes()), new SimpleRecord("32".getBytes(), "value".getBytes()));
    commitTransaction(buffer, pid3, 35L);
    appendTransactionalRecords(buffer, pid1, 39L, new SimpleRecord("39".getBytes(), "value".getBytes()), new SimpleRecord("40".getBytes(), "value".getBytes()));
    // transaction from pid1 is aborted, but the marker is not included in the fetch
    buffer.flip();
    // send the fetch
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    // prepare the response. the aborted transactions begin at offsets which are no longer in the log
    List<FetchResponseData.AbortedTransaction> abortedTransactions = Arrays.asList(new FetchResponseData.AbortedTransaction().setProducerId(pid2).setFirstOffset(6), new FetchResponseData.AbortedTransaction().setProducerId(pid1).setFirstOffset(0));
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(MemoryRecords.readableRecords(buffer), abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<String, String>>> allFetchedRecords = fetchedRecords();
    assertTrue(allFetchedRecords.containsKey(tp0));
    List<ConsumerRecord<String, String>> fetchedRecords = allFetchedRecords.get(tp0);
    assertEquals(5, fetchedRecords.size());
    assertEquals(Arrays.asList(3L, 4L, 30L, 31L, 32L), collectRecordOffsets(fetchedRecords));
}
Also used : StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) ByteBuffer(java.nio.ByteBuffer) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) Collections.singletonList(java.util.Collections.singletonList) Arrays.asList(java.util.Arrays.asList) ArrayList(java.util.ArrayList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) Test(org.junit.jupiter.api.Test)
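
The behaviour exercised here corresponds to a standard consumer setting in application code: with isolation.level=read_committed the consumer only returns records from committed transactions, which is why only offsets 3, 4, 30, 31 and 32 survive above. A minimal configuration sketch; the bootstrap servers and group id are placeholder assumptions:

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");                 // assumption
props.put(ConsumerConfig.GROUP_ID_CONFIG, "read-committed-example");                  // assumption
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
// Only records from committed transactions are delivered; aborted batches and
// transaction markers are filtered out by the fetcher.
props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);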

Example 85 with ConsumerRecord

use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache.

the class FetcherTest method testFetchResponseMetrics.

@Test
public void testFetchResponseMetrics() {
    buildFetcher();
    String topic1 = "foo";
    String topic2 = "bar";
    TopicPartition tp1 = new TopicPartition(topic1, 0);
    TopicPartition tp2 = new TopicPartition(topic2, 0);
    subscriptions.assignFromUser(mkSet(tp1, tp2));
    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put(topic1, 1);
    partitionCounts.put(topic2, 1);
    topicIds.put(topic1, Uuid.randomUuid());
    topicIds.put(topic2, Uuid.randomUuid());
    TopicIdPartition tidp1 = new TopicIdPartition(topicIds.get(topic1), tp1);
    TopicIdPartition tidp2 = new TopicIdPartition(topicIds.get(topic2), tp2);
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, partitionCounts, tp -> validLeaderEpoch, topicIds));
    int expectedBytes = 0;
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> fetchPartitionData = new LinkedHashMap<>();
    for (TopicIdPartition tp : mkSet(tidp1, tidp2)) {
        subscriptions.seek(tp.topicPartition(), 0);
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();
        for (Record record : records.records()) expectedBytes += record.sizeInBytes();
        fetchPartitionData.put(tp, new FetchResponseData.PartitionData().setPartitionIndex(tp.topicPartition().partition()).setHighWatermark(15).setLogStartOffset(0).setRecords(records));
    }
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData));
    consumerClient.poll(time.timer(0));
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertEquals(3, fetchedRecords.get(tp1).size());
    assertEquals(3, fetchedRecords.get(tp2).size());
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
    assertEquals(6, (Double) recordsCountAverage.metricValue(), EPSILON);
}
Also used : BeforeEach(org.junit.jupiter.api.BeforeEach) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) ListOffsetsRequest(org.apache.kafka.common.requests.ListOffsetsRequest) SerializationException(org.apache.kafka.common.errors.SerializationException) KafkaException(org.apache.kafka.common.KafkaException) DefaultRecordBatch(org.apache.kafka.common.record.DefaultRecordBatch) OffsetResetStrategy(org.apache.kafka.clients.consumer.OffsetResetStrategy) Collections.singletonList(java.util.Collections.singletonList) ClientUtils(org.apache.kafka.clients.ClientUtils) Cluster(org.apache.kafka.common.Cluster) Future(java.util.concurrent.Future) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) DataOutputStream(java.io.DataOutputStream) ApiVersionsResponse(org.apache.kafka.common.requests.ApiVersionsResponse) Arrays.asList(java.util.Arrays.asList) RecordBatch(org.apache.kafka.common.record.RecordBatch) ListOffsetsResponse(org.apache.kafka.common.requests.ListOffsetsResponse) LogContext(org.apache.kafka.common.utils.LogContext) Duration(java.time.Duration) Map(java.util.Map) FetchResponse(org.apache.kafka.common.requests.FetchResponse) TimestampType(org.apache.kafka.common.record.TimestampType) Sensor(org.apache.kafka.common.metrics.Sensor) CompressionType(org.apache.kafka.common.record.CompressionType) TestUtils(org.apache.kafka.test.TestUtils) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) Set(java.util.Set) PartitionInfo(org.apache.kafka.common.PartitionInfo) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) StandardCharsets(java.nio.charset.StandardCharsets) Executors(java.util.concurrent.Executors) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Metrics(org.apache.kafka.common.metrics.Metrics) ApiMessageType(org.apache.kafka.common.message.ApiMessageType) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) ListOffsetsTopicResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse) Errors(org.apache.kafka.common.protocol.Errors) Node(org.apache.kafka.common.Node) FetchRequest(org.apache.kafka.common.requests.FetchRequest) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) Records(org.apache.kafka.common.record.Records) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) Assertions.fail(org.junit.jupiter.api.Assertions.fail) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) OffsetForLeaderPartition(org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition) ClientDnsLookup(org.apache.kafka.clients.ClientDnsLookup) Assertions.assertNull(org.junit.jupiter.api.Assertions.assertNull) RequestTestUtils(org.apache.kafka.common.requests.RequestTestUtils) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) ListOffsetsPartitionResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse) OffsetForLeaderEpochRequestData(org.apache.kafka.common.message.OffsetForLeaderEpochRequestData) INVALID_SESSION_ID(org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID) ArrayList(java.util.ArrayList) LinkedHashMap(java.util.LinkedHashMap) 
UNDEFINED_EPOCH(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH) NetworkClient(org.apache.kafka.clients.NetworkClient) Deserializer(org.apache.kafka.common.serialization.Deserializer) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) TestUtils.assertOptional(org.apache.kafka.test.TestUtils.assertOptional) OffsetOutOfRangeException(org.apache.kafka.clients.consumer.OffsetOutOfRangeException) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) MockSelector(org.apache.kafka.test.MockSelector) Field(java.lang.reflect.Field) ApiVersions(org.apache.kafka.clients.ApiVersions) MetricNameTemplate(org.apache.kafka.common.MetricNameTemplate) OffsetForLeaderEpochResponseData(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData) Assertions.assertArrayEquals(org.junit.jupiter.api.Assertions.assertArrayEquals) AfterEach(org.junit.jupiter.api.AfterEach) NetworkReceive(org.apache.kafka.common.network.NetworkReceive) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) PartitionData(org.apache.kafka.common.requests.FetchRequest.PartitionData) BytesDeserializer(org.apache.kafka.common.serialization.BytesDeserializer) ByteBufferOutputStream(org.apache.kafka.common.utils.ByteBufferOutputStream) LogTruncationException(org.apache.kafka.clients.consumer.LogTruncationException) Assertions.assertNotEquals(org.junit.jupiter.api.Assertions.assertNotEquals) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) ControlRecordType(org.apache.kafka.common.record.ControlRecordType) ByteBuffer(java.nio.ByteBuffer) ClientRequest(org.apache.kafka.clients.ClientRequest) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) Record(org.apache.kafka.common.record.Record) Collections.singleton(java.util.Collections.singleton) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) BufferSupplier(org.apache.kafka.common.utils.BufferSupplier) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MetricName(org.apache.kafka.common.MetricName) OffsetForLeaderTopicResult(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.OffsetForLeaderTopicResult) ListOffsetsTopic(org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsTopic) TopicPartition(org.apache.kafka.common.TopicPartition) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) LegacyRecord(org.apache.kafka.common.record.LegacyRecord) Collections.emptyList(java.util.Collections.emptyList) MetricConfig(org.apache.kafka.common.metrics.MetricConfig) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) Collectors(java.util.stream.Collectors) ListOffsetsResponseData(org.apache.kafka.common.message.ListOffsetsResponseData) Test(org.junit.jupiter.api.Test) ConsumerRebalanceListener(org.apache.kafka.clients.consumer.ConsumerRebalanceListener) List(java.util.List) Header(org.apache.kafka.common.header.Header) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) RecordTooLargeException(org.apache.kafka.common.errors.RecordTooLargeException) Optional(java.util.Optional) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) OffsetsForLeaderEpochResponse(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse) Uuid(org.apache.kafka.common.Uuid) EpochEndOffset(org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset) Metadata(org.apache.kafka.clients.Metadata) 
EndTransactionMarker(org.apache.kafka.common.record.EndTransactionMarker) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) FetchSessionHandler(org.apache.kafka.clients.FetchSessionHandler) HashMap(java.util.HashMap) Function(java.util.function.Function) HashSet(java.util.HashSet) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) Collections.singletonMap(java.util.Collections.singletonMap) ExecutorService(java.util.concurrent.ExecutorService) Utils(org.apache.kafka.common.utils.Utils) UNDEFINED_EPOCH_OFFSET(org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET) Collections.emptyMap(java.util.Collections.emptyMap) TimeoutException(org.apache.kafka.common.errors.TimeoutException) MockClient(org.apache.kafka.clients.MockClient) ListOffsetsPartition(org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition) Iterator(java.util.Iterator) ApiKeys(org.apache.kafka.common.protocol.ApiKeys) TimeUnit(java.util.concurrent.TimeUnit) IsolationLevel(org.apache.kafka.common.IsolationLevel) DelayedReceive(org.apache.kafka.test.DelayedReceive) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) OffsetsForLeaderEpochRequest(org.apache.kafka.common.requests.OffsetsForLeaderEpochRequest) Collections(java.util.Collections) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) LinkedHashMap(java.util.LinkedHashMap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MetricName(org.apache.kafka.common.MetricName) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) PartitionData(org.apache.kafka.common.requests.FetchRequest.PartitionData) TopicPartition(org.apache.kafka.common.TopicPartition) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) Record(org.apache.kafka.common.record.Record) LegacyRecord(org.apache.kafka.common.record.LegacyRecord) Collections.singletonList(java.util.Collections.singletonList) Arrays.asList(java.util.Arrays.asList) ArrayList(java.util.ArrayList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.jupiter.api.Test)
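
On a live consumer, comparable figures can be read back through KafkaConsumer.metrics(). A minimal sketch, assuming the standard client metric names fetch-size-avg and records-per-request-avg in the consumer-fetch-manager-metrics group (the names are stated here as an assumption, not taken from the test above):

static void printFetchMetrics(KafkaConsumer<byte[], byte[]> consumer) {
    for (Map.Entry<MetricName, ? extends Metric> entry : consumer.metrics().entrySet()) {
        MetricName name = entry.getKey();
        // Group and metric names assumed from the standard Kafka client metrics registry
        if ("consumer-fetch-manager-metrics".equals(name.group())
                && ("fetch-size-avg".equals(name.name()) || "records-per-request-avg".equals(name.name()))) {
            System.out.println(name.name() + " = " + entry.getValue().metricValue());
        }
    }
}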

Aggregations

ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)314 TopicPartition (org.apache.kafka.common.TopicPartition)160 Test (org.junit.Test)145 ArrayList (java.util.ArrayList)123 List (java.util.List)100 HashMap (java.util.HashMap)98 Map (java.util.Map)70 RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders)61 ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords)51 Test (org.junit.jupiter.api.Test)35 PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest)33 KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer)31 OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata)31 LinkedHashMap (java.util.LinkedHashMap)30 Header (org.apache.kafka.common.header.Header)29 RecordHeader (org.apache.kafka.common.header.internals.RecordHeader)28 TimeUnit (java.util.concurrent.TimeUnit)27 Set (java.util.Set)24 Collectors (java.util.stream.Collectors)24 ByteBuffer (java.nio.ByteBuffer)22