
Example 86 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the apache/kafka project.

From the class FetcherTest, method testFetcherSessionEpochUpdate.

@Test
public void testFetcherSessionEpochUpdate() throws Exception {
    buildFetcher(2);
    MetadataResponse initialMetadataResponse = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, 1), topicIds);
    client.updateMetadata(initialMetadataResponse);
    assignFromUser(Collections.singleton(tp0));
    subscriptions.seek(tp0, 0L);
    AtomicInteger fetchesRemaining = new AtomicInteger(1000);
    executorService = Executors.newSingleThreadExecutor();
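    // Background thread: respond to each queued fetch request, checking that the
    // session epoch is either the initial 0 or the expected next epoch.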
    Future<?> future = executorService.submit(() -> {
        long nextOffset = 0;
        long nextEpoch = 0;
        while (fetchesRemaining.get() > 0) {
            synchronized (consumerClient) {
                if (!client.requests().isEmpty()) {
                    ClientRequest request = client.requests().peek();
                    FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
                    int epoch = fetchRequest.metadata().epoch();
                    assertTrue(epoch == 0 || epoch == nextEpoch, String.format("Unexpected epoch expected %d got %d", nextEpoch, epoch));
                    nextEpoch++;
                    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
                    responseMap.put(tidp0, new FetchResponseData.PartitionData().setPartitionIndex(tp0.partition()).setHighWatermark(nextOffset + 2).setLastStableOffset(nextOffset + 2).setLogStartOffset(0).setRecords(buildRecords(nextOffset, 2, nextOffset)));
                    nextOffset += 2;
                    client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
                    consumerClient.poll(time.timer(0));
                }
            }
        }
        return fetchesRemaining.get();
    });
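    // Foreground loop: keep sending fetches and verify that each polled batch
    // contains two records at consecutive offsets.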
    long nextFetchOffset = 0;
    while (fetchesRemaining.get() > 0 && !future.isDone()) {
        if (fetcher.sendFetches() == 1) {
            synchronized (consumerClient) {
                consumerClient.poll(time.timer(0));
            }
        }
        if (fetcher.hasCompletedFetches()) {
            Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
            if (!fetchedRecords.isEmpty()) {
                fetchesRemaining.decrementAndGet();
                List<ConsumerRecord<byte[], byte[]>> records = fetchedRecords.get(tp0);
                assertEquals(2, records.size());
                assertEquals(nextFetchOffset, records.get(0).offset());
                assertEquals(nextFetchOffset + 1, records.get(1).offset());
                nextFetchOffset += 2;
            }
            assertTrue(fetchedRecords().isEmpty());
        }
    }
    assertEquals(0, future.get());
}
Also used: TopicIdPartition (org.apache.kafka.common.TopicIdPartition), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), LinkedHashMap (java.util.LinkedHashMap), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), PartitionData (org.apache.kafka.common.requests.FetchRequest.PartitionData), TopicPartition (org.apache.kafka.common.TopicPartition), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse), FetchRequest (org.apache.kafka.common.requests.FetchRequest), Collections.singletonList (java.util.Collections.singletonList), Arrays.asList (java.util.Arrays.asList), ArrayList (java.util.ArrayList), Collections.emptyList (java.util.Collections.emptyList), List (java.util.List), ClientRequest (org.apache.kafka.clients.ClientRequest), Test (org.junit.jupiter.api.Test)
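
The test above drives the Fetcher and its session epochs directly; application code gets the same incremental fetch sessions for free through KafkaConsumer. A minimal sketch of that user-facing poll loop, assuming a local broker and hypothetical group and topic names:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class PollLoopSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");  // assumption: local broker
        props.put("group.id", "fetch-session-demo");       // hypothetical group id
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singleton("demo-topic"));  // hypothetical topic
            while (true) {
                // Each poll may reuse the broker-side incremental fetch session;
                // the epoch bookkeeping asserted in the test happens inside the Fetcher.
                ConsumerRecords<byte[], byte[]> batch = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<byte[], byte[]> record : batch)
                    System.out.printf("offset=%d%n", record.offset());
            }
        }
    }
}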

Example 87 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the apache/kafka project.

From the class FetcherTest, method testHeaders.

@Test
public void testHeaders() {
    buildFetcher();
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 1L);
    builder.append(0L, "key".getBytes(), "value-1".getBytes());
    Header[] headersArray = new Header[1];
    headersArray[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-2".getBytes(), headersArray);
    Header[] headersArray2 = new Header[2];
    headersArray2[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    headersArray2[1] = new RecordHeader("headerKey", "headerValue2".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-3".getBytes(), headersArray2);
    MemoryRecords memoryRecords = builder.build();
    List<ConsumerRecord<byte[], byte[]>> records;
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);
    client.prepareResponse(matchesOffset(tidp0, 1), fullFetchResponse(tidp0, memoryRecords, Errors.NONE, 100L, 0));
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(time.timer(0));
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsByPartition = fetchedRecords();
    records = recordsByPartition.get(tp0);
    assertEquals(3, records.size());
    Iterator<ConsumerRecord<byte[], byte[]>> recordIterator = records.iterator();
    ConsumerRecord<byte[], byte[]> record = recordIterator.next();
    assertNull(record.headers().lastHeader("headerKey"));
    record = recordIterator.next();
    assertEquals("headerValue", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
    record = recordIterator.next();
    assertEquals("headerValue2", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
}
Also used: ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), RecordHeader (org.apache.kafka.common.header.internals.RecordHeader), Header (org.apache.kafka.common.header.Header), TopicPartition (org.apache.kafka.common.TopicPartition), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), Collections.singletonList (java.util.Collections.singletonList), Arrays.asList (java.util.Arrays.asList), ArrayList (java.util.ArrayList), Collections.emptyList (java.util.Collections.emptyList), List (java.util.List), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.jupiter.api.Test)
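
On the consuming side, the headers written above surface through ConsumerRecord.headers(). A small sketch of reading them with the public Header API; the dumpHeaders name is illustrative. lastHeader returns the most recently appended value for a repeated key, which is exactly what the assertions above rely on:

import java.nio.charset.StandardCharsets;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.Header;

public class HeaderSketch {
    // Illustrative helper: print every header on a record in append order.
    static void dumpHeaders(ConsumerRecord<byte[], byte[]> record) {
        for (Header header : record.headers())
            System.out.printf("%s=%s%n", header.key(),
                    new String(header.value(), StandardCharsets.UTF_8));
        // lastHeader returns null when the key is absent, as the first record above shows.
        Header last = record.headers().lastHeader("headerKey");
        if (last != null)
            System.out.println("last headerKey: " + new String(last.value(), StandardCharsets.UTF_8));
    }
}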

Example 88 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the apache/kafka project.

From the class FetcherTest, method testConsumingViaIncrementalFetchRequests.

@Test
public void testConsumingViaIncrementalFetchRequests() {
    buildFetcher(2);
    List<ConsumerRecord<byte[], byte[]>> records;
    assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1)));
    subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), metadata.currentLeader(tp0)));
    subscriptions.seekValidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1)));
    // Fetch some records and establish an incremental fetch session.
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions1 = new LinkedHashMap<>();
    partitions1.put(tidp0, new FetchResponseData.PartitionData().setPartitionIndex(tp0.partition()).setHighWatermark(2).setLastStableOffset(2).setLogStartOffset(0).setRecords(this.records));
    partitions1.put(tidp1, new FetchResponseData.PartitionData().setPartitionIndex(tp1.partition()).setHighWatermark(100).setLogStartOffset(0).setRecords(emptyRecords));
    FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, 123, partitions1);
    client.prepareResponse(resp1);
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertFalse(fetchedRecords.containsKey(tp1));
    records = fetchedRecords.get(tp0);
    assertEquals(2, records.size());
    assertEquals(3L, subscriptions.position(tp0).offset);
    assertEquals(1L, subscriptions.position(tp1).offset);
    assertEquals(1, records.get(0).offset());
    assertEquals(2, records.get(1).offset());
    // There is still a buffered record.
    assertEquals(0, fetcher.sendFetches());
    fetchedRecords = fetchedRecords();
    assertFalse(fetchedRecords.containsKey(tp1));
    records = fetchedRecords.get(tp0);
    assertEquals(1, records.size());
    assertEquals(3, records.get(0).offset());
    assertEquals(4L, subscriptions.position(tp0).offset);
    // The second response contains no new records.
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions2 = new LinkedHashMap<>();
    FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, partitions2);
    client.prepareResponse(resp2);
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(time.timer(0));
    fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.isEmpty());
    assertEquals(4L, subscriptions.position(tp0).offset);
    assertEquals(1L, subscriptions.position(tp1).offset);
    // The third response contains some new records for tp0.
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions3 = new LinkedHashMap<>();
    partitions3.put(tidp0, new FetchResponseData.PartitionData().setPartitionIndex(tp0.partition()).setHighWatermark(100).setLastStableOffset(4).setLogStartOffset(0).setRecords(this.nextRecords));
    FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, 123, partitions3);
    client.prepareResponse(resp3);
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(time.timer(0));
    fetchedRecords = fetchedRecords();
    assertFalse(fetchedRecords.containsKey(tp1));
    records = fetchedRecords.get(tp0);
    assertEquals(2, records.size());
    assertEquals(6L, subscriptions.position(tp0).offset);
    assertEquals(1L, subscriptions.position(tp1).offset);
    assertEquals(4, records.get(0).offset());
    assertEquals(5, records.get(1).offset());
}
Also used: TopicIdPartition (org.apache.kafka.common.TopicIdPartition), FetchResponse (org.apache.kafka.common.requests.FetchResponse), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), LinkedHashMap (java.util.LinkedHashMap), FetchResponseData (org.apache.kafka.common.message.FetchResponseData), PartitionData (org.apache.kafka.common.requests.FetchRequest.PartitionData), TopicPartition (org.apache.kafka.common.TopicPartition), Collections.singletonList (java.util.Collections.singletonList), Arrays.asList (java.util.Arrays.asList), ArrayList (java.util.ArrayList), Collections.emptyList (java.util.Collections.emptyList), List (java.util.List), Test (org.junit.jupiter.api.Test)
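
The per-step response construction above is repetitive. A hedged helper sketch distilled from this test's pattern (the method name is illustrative; FetchResponse.of and FetchResponseData.PartitionData are the same builders used above) that returns a response carrying only the partitions that changed, which is the essence of an incremental fetch session:

import java.util.LinkedHashMap;
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.requests.FetchResponse;

public class IncrementalResponseSketch {
    // Partitions omitted from the map are implicitly "unchanged" in the session,
    // which is why resp2 above could carry an empty map.
    static FetchResponse incrementalResponse(TopicIdPartition tidp, MemoryRecords records,
                                             long highWatermark, int sessionId) {
        LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
        partitions.put(tidp, new FetchResponseData.PartitionData()
                .setPartitionIndex(tidp.topicPartition().partition())
                .setHighWatermark(highWatermark)
                .setLogStartOffset(0)
                .setRecords(records));
        return FetchResponse.of(Errors.NONE, 0, sessionId, partitions);
    }
}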

Example 89 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the apache/kafka project.

From the class FetcherTest, method testFetcherIgnoresControlRecords.

@Test
public void testFetcherIgnoresControlRecords() {
    buildFetcher();
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    long producerId = 1;
    short producerEpoch = 0;
    int baseSequence = 0;
    int partitionLeaderEpoch = 0;
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    MemoryRecordsBuilder builder = MemoryRecords.idempotentBuilder(buffer, CompressionType.NONE, 0L, producerId, producerEpoch, baseSequence);
    builder.append(0L, "key".getBytes(), null);
    builder.close();
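    // Append an abort marker as a control batch; the fetcher must skip it while
    // still advancing the fetch position past it (hence position 2 below).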
    MemoryRecords.writeEndTransactionalMarker(buffer, 1L, time.milliseconds(), partitionLeaderEpoch, producerId, producerEpoch, new EndTransactionMarker(ControlRecordType.ABORT, 0));
    buffer.flip();
    client.prepareResponse(fullFetchResponse(tidp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchedRecords();
    assertTrue(partitionRecords.containsKey(tp0));
    List<ConsumerRecord<byte[], byte[]>> records = partitionRecords.get(tp0);
    assertEquals(1, records.size());
    assertEquals(2L, subscriptions.position(tp0).offset);
    ConsumerRecord<byte[], byte[]> record = records.get(0);
    assertArrayEquals("key".getBytes(), record.key());
}
Also used: EndTransactionMarker (org.apache.kafka.common.record.EndTransactionMarker), TopicPartition (org.apache.kafka.common.TopicPartition), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), Collections.singletonList (java.util.Collections.singletonList), Arrays.asList (java.util.Arrays.asList), ArrayList (java.util.ArrayList), Collections.emptyList (java.util.Collections.emptyList), List (java.util.List), ByteBuffer (java.nio.ByteBuffer), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Test (org.junit.jupiter.api.Test)
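
Control records such as the abort marker above are never delivered to applications in either isolation mode; isolation.level only decides whether data from aborted transactions is filtered out as well. A minimal configuration sketch, with broker address and group id as assumed placeholders:

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class ReadCommittedSketch {
    static Properties readCommittedConfig() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // assumed broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "txn-demo");                 // hypothetical group
        // read_committed additionally hides records from aborted transactions;
        // the transaction markers themselves are skipped in either mode.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        return props;
    }
}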

Example 90 with ConsumerRecord

Use of org.apache.kafka.clients.consumer.ConsumerRecord in the apache/kafka project.

From the class FetcherTest, method testFetchWithTopicId.

@Test
public void testFetchWithTopicId() {
    buildFetcher();
    TopicIdPartition tp = new TopicIdPartition(topicId, new TopicPartition(topicName, 0));
    assignFromUser(singleton(tp.topicPartition()));
    subscriptions.seek(tp.topicPartition(), 0);
    // Fetch should use latest version
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fetchRequestMatcher(ApiKeys.FETCH.latestVersion(), tp, 0, Optional.of(validLeaderEpoch)), fullFetchResponse(tp, this.records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchedRecords();
    assertTrue(partitionRecords.containsKey(tp.topicPartition()));
    List<ConsumerRecord<byte[], byte[]>> records = partitionRecords.get(tp.topicPartition());
    assertEquals(3, records.size());
    // this is the next fetching position
    assertEquals(4L, subscriptions.position(tp.topicPartition()).offset);
    long offset = 1;
    for (ConsumerRecord<byte[], byte[]> record : records) {
        assertEquals(offset, record.offset());
        offset += 1;
    }
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), Collections.singletonList (java.util.Collections.singletonList), Arrays.asList (java.util.Arrays.asList), ArrayList (java.util.ArrayList), Collections.emptyList (java.util.Collections.emptyList), List (java.util.List), TopicIdPartition (org.apache.kafka.common.TopicIdPartition), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Test (org.junit.jupiter.api.Test)
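
TopicIdPartition pairs the broker-assigned topic ID (a Uuid) with the name-based TopicPartition, which keeps fetches unambiguous even if a topic is deleted and re-created under the same name. A tiny sketch; in practice the ID comes from cluster metadata, so randomUuid() here is purely a placeholder:

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;

public class TopicIdSketch {
    public static void main(String[] args) {
        // Placeholder ID; a real client learns topic IDs from metadata responses.
        TopicIdPartition tidp = new TopicIdPartition(Uuid.randomUuid(),
                new TopicPartition("demo-topic", 0));  // hypothetical topic
        System.out.println(tidp.topicId() + " -> " + tidp.topicPartition());
    }
}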

Aggregations

ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 314
TopicPartition (org.apache.kafka.common.TopicPartition): 160
Test (org.junit.Test): 145
ArrayList (java.util.ArrayList): 123
List (java.util.List): 100
HashMap (java.util.HashMap): 98
Map (java.util.Map): 70
RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders): 61
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 51
Test (org.junit.jupiter.api.Test): 35
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 33
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 31
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 31
LinkedHashMap (java.util.LinkedHashMap): 30
Header (org.apache.kafka.common.header.Header): 29
RecordHeader (org.apache.kafka.common.header.internals.RecordHeader): 28
TimeUnit (java.util.concurrent.TimeUnit): 27
Set (java.util.Set): 24
Collectors (java.util.stream.Collectors): 24
ByteBuffer (java.nio.ByteBuffer): 22