Example 21 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

The class FetcherTest, method testReturnCommittedTransactions.

@Test
public void testReturnCommittedTransactions() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));
    commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(body -> {
        FetchRequest request = (FetchRequest) body;
        assertEquals(IsolationLevel.READ_COMMITTED, request.isolationLevel());
        return true;
    }, fullFetchResponseWithAbortedTransactions(records, Collections.emptyList(), Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(2, fetchedRecords.get(tp0).size());
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), SimpleRecord (org.apache.kafka.common.record.SimpleRecord), FetchRequest (org.apache.kafka.common.requests.FetchRequest), Collections.singletonList (java.util.Collections.singletonList), Arrays.asList (java.util.Arrays.asList), ArrayList (java.util.ArrayList), Collections.emptyList (java.util.Collections.emptyList), List (java.util.List), ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer), ByteBuffer (java.nio.ByteBuffer), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.jupiter.api.Test)
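For reference, the fetcher behavior exercised by this test corresponds to a plain KafkaConsumer configured with read_committed isolation. A minimal sketch, assuming an illustrative bootstrap address, group id, and topic name (none of these appear in the test):

import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class ReadCommittedConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder address, group, and topic: adjust for your environment.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Matches IsolationLevel.READ_COMMITTED in the test: poll() only
        // returns records from committed transactions.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        try (KafkaConsumer<byte[], byte[]> consumer =
                 new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            consumer.subscribe(Collections.singleton("example-topic"));
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<byte[], byte[]> record : records)
                System.out.printf("offset=%d key=%s%n", record.offset(),
                        new String(record.key(), StandardCharsets.UTF_8));
        }
    }
}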

Example 22 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

The class FetcherTest, method testFetchPositionAfterException.

@Test
public void testFetchPositionAfterException() {
    // Verify that the next fetch offset advances by the number of fetched records when
    // some fetched partitions cause an exception. This ensures the consumer won't lose records upon exception.
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0, tp1));
    subscriptions.seek(tp0, 1);
    subscriptions.seek(tp1, 1);
    assertEquals(1, fetcher.sendFetches());
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData().setPartitionIndex(tp1.partition()).setHighWatermark(100).setRecords(records));
    partitions.put(tidp0, new FetchResponseData.PartitionData().setPartitionIndex(tp0.partition()).setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()).setHighWatermark(100));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    List<ConsumerRecord<byte[], byte[]>> allFetchedRecords = new ArrayList<>();
    fetchRecordsInto(allFetchedRecords);
    assertEquals(1, subscriptions.position(tp0).offset);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, allFetchedRecords.size());
    OffsetOutOfRangeException e = assertThrows(OffsetOutOfRangeException.class, () -> fetchRecordsInto(allFetchedRecords));
    assertEquals(singleton(tp0), e.offsetOutOfRangePartitions().keySet());
    assertEquals(1L, e.offsetOutOfRangePartitions().get(tp0).longValue());
    assertEquals(1, subscriptions.position(tp0).offset);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, allFetchedRecords.size());
}
Also used: PartitionData (org.apache.kafka.common.requests.FetchRequest.PartitionData), ArrayList (java.util.ArrayList), TopicIdPartition (org.apache.kafka.common.TopicIdPartition), ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer), OffsetOutOfRangeException (org.apache.kafka.clients.consumer.OffsetOutOfRangeException), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), LinkedHashMap (java.util.LinkedHashMap), Test (org.junit.jupiter.api.Test)
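The position guarantee verified above is what makes OffsetOutOfRangeException recoverable in application code: records fetched from healthy partitions are kept, and the failed partition's position is unchanged. A hedged sketch of one recovery strategy (the pollWithRecovery helper is illustrative, not part of the Kafka API):

import java.time.Duration;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;

public class OffsetRecoverySketch {
    // Poll once; if a partition's position is out of range (thrown when
    // auto.offset.reset=none, as in the test), seek it back to the earliest
    // valid offset and poll again. Other partitions are unaffected.
    static ConsumerRecords<byte[], byte[]> pollWithRecovery(Consumer<byte[], byte[]> consumer) {
        try {
            return consumer.poll(Duration.ofSeconds(1));
        } catch (OffsetOutOfRangeException e) {
            // offsetOutOfRangePartitions() maps each failed partition to the
            // out-of-range offset (tp0 -> 1L in the test above).
            consumer.seekToBeginning(e.offsetOutOfRangePartitions().keySet());
            return consumer.poll(Duration.ofSeconds(1));
        }
    }
}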

Example 23 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

The class FetcherTest, method testSkippingAbortedTransactions.

@Test
public void testSkippingAbortedTransactions() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));
    abortTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Fetch<byte[], byte[]> fetch = collectFetch();
    assertEquals(emptyMap(), fetch.records());
    assertTrue(fetch.positionAdvanced());
}
Also used: FetchResponseData (org.apache.kafka.common.message.FetchResponseData), SimpleRecord (org.apache.kafka.common.record.SimpleRecord), ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer), ByteBuffer (java.nio.ByteBuffer), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.jupiter.api.Test)
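The aborted batch that the test builds by hand with appendTransactionalRecords and abortTransaction corresponds, on a real cluster, to a transactional producer calling abortTransaction(). A minimal producer-side sketch, assuming an illustrative transactional id, topic, and bootstrap address:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class AbortedTransactionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder address and transactional id; setting a transactional id
        // also enables idempotence on modern clients.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "example-txn-id");
        try (KafkaProducer<byte[], byte[]> producer =
                 new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer())) {
            producer.initTransactions();
            producer.beginTransaction();
            producer.send(new ProducerRecord<>("example-topic", "key".getBytes(), "value".getBytes()));
            // Aborting writes an abort marker; a read_committed consumer skips
            // these records but still advances its position past them.
            producer.abortTransaction();
        }
    }
}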

Example 24 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

The class FetcherTest, method testReadCommittedWithCommittedAndAbortedTransactions.

@Test
public void testReadCommittedWithCommittedAndAbortedTransactions() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    List<FetchResponseData.AbortedTransaction> abortedTransactions = new ArrayList<>();
    long pid1 = 1L;
    long pid2 = 2L;
    // Appends for producer 1 (eventually committed)
    appendTransactionalRecords(buffer, pid1, 0L, new SimpleRecord("commit1-1".getBytes(), "value".getBytes()), new SimpleRecord("commit1-2".getBytes(), "value".getBytes()));
    // Appends for producer 2 (eventually aborted)
    appendTransactionalRecords(buffer, pid2, 2L, new SimpleRecord("abort2-1".getBytes(), "value".getBytes()));
    // commit producer 1
    commitTransaction(buffer, pid1, 3L);
    // append more for producer 2 (eventually aborted)
    appendTransactionalRecords(buffer, pid2, 4L, new SimpleRecord("abort2-2".getBytes(), "value".getBytes()));
    // abort producer 2
    abortTransaction(buffer, pid2, 5L);
    abortedTransactions.add(new FetchResponseData.AbortedTransaction().setProducerId(pid2).setFirstOffset(2L));
    // New transaction for producer 1 (eventually aborted)
    appendTransactionalRecords(buffer, pid1, 6L, new SimpleRecord("abort1-1".getBytes(), "value".getBytes()));
    // New transaction for producer 2 (eventually committed)
    appendTransactionalRecords(buffer, pid2, 7L, new SimpleRecord("commit2-1".getBytes(), "value".getBytes()));
    // Add messages for producer 1 (eventually aborted)
    appendTransactionalRecords(buffer, pid1, 8L, new SimpleRecord("abort1-2".getBytes(), "value".getBytes()));
    // abort producer 1
    abortTransaction(buffer, pid1, 9L);
    abortedTransactions.add(new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(6));
    // commit producer 2
    commitTransaction(buffer, pid2, 10L);
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    // There are only 3 committed records
    List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
    Set<String> fetchedKeys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
        fetchedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
    }
    assertEquals(mkSet("commit1-1", "commit1-2", "commit2-1"), fetchedKeys);
}
Also used: ArrayList (java.util.ArrayList), ByteBuffer (java.nio.ByteBuffer), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), FetchResponseData (org.apache.kafka.common.message.FetchResponseData), TopicPartition (org.apache.kafka.common.TopicPartition), SimpleRecord (org.apache.kafka.common.record.SimpleRecord), Collections.singletonList (java.util.Collections.singletonList), Arrays.asList (java.util.Arrays.asList), Collections.emptyList (java.util.Collections.emptyList), List (java.util.List), ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), HashSet (java.util.HashSet), Test (org.junit.jupiter.api.Test)
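The key-collection loop at the end of the test is a reusable pattern for byte[] consumers. A small sketch, assuming UTF-8 encoded keys (the distinctKeys helper is illustrative):

import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Set;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;

public class KeyDecodingSketch {
    // Collect the distinct UTF-8 keys from a batch of byte[] records, mirroring
    // the assertion on mkSet("commit1-1", "commit1-2", "commit2-1") above.
    static Set<String> distinctKeys(ConsumerRecords<byte[], byte[]> records) {
        Set<String> keys = new HashSet<>();
        for (ConsumerRecord<byte[], byte[]> record : records)
            keys.add(new String(record.key(), StandardCharsets.UTF_8));
        return keys;
    }
}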

Example 25 with ByteArrayDeserializer

Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.

The class FetcherTest, method testConsumerPositionUpdatedWhenSkippingAbortedTransactions.

@Test
public void testConsumerPositionUpdatedWhenSkippingAbortedTransactions() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    long currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    // Ensure that we don't return any of the aborted records, but still advance the consumer position.
    assertFalse(fetchedRecords.containsKey(tp0));
    assertEquals(currentOffset, subscriptions.position(tp0).offset);
}
Also used: FetchResponseData (org.apache.kafka.common.message.FetchResponseData), TopicPartition (org.apache.kafka.common.TopicPartition), SimpleRecord (org.apache.kafka.common.record.SimpleRecord), Collections.singletonList (java.util.Collections.singletonList), Arrays.asList (java.util.Arrays.asList), ArrayList (java.util.ArrayList), Collections.emptyList (java.util.Collections.emptyList), List (java.util.List), ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer), ByteBuffer (java.nio.ByteBuffer), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.jupiter.api.Test)
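An application can observe the same position advancement that this test asserts: a poll that returns no records can still move the consumer past a skipped aborted batch. A minimal sketch, assuming the partition is already assigned (the helper name is illustrative):

import java.time.Duration;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;

public class PositionAdvanceSketch {
    // Even when poll() returns no records (all of them were aborted), the
    // consumer's position moves past the skipped batch, as the test asserts.
    static void pollAndReportPosition(Consumer<byte[], byte[]> consumer, TopicPartition tp) {
        ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
        System.out.printf("records=%d position=%d%n", records.count(), consumer.position(tp));
    }
}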

Aggregations

ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer): 59
TopicPartition (org.apache.kafka.common.TopicPartition): 24
ArrayList (java.util.ArrayList): 22
Test (org.junit.Test): 22
Test (org.junit.jupiter.api.Test): 22
List (java.util.List): 17
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 17
HashMap (java.util.HashMap): 16
ByteBuffer (java.nio.ByteBuffer): 14
LinkedHashMap (java.util.LinkedHashMap): 14
MemoryRecords (org.apache.kafka.common.record.MemoryRecords): 14
SimpleRecord (org.apache.kafka.common.record.SimpleRecord): 14
HashSet (java.util.HashSet): 10
Properties (java.util.Properties): 10
Metrics (org.apache.kafka.common.metrics.Metrics): 10
Arrays.asList (java.util.Arrays.asList): 9
Collections.emptyList (java.util.Collections.emptyList): 9
Collections.singletonList (java.util.Collections.singletonList): 9
Map (java.util.Map): 9
ConsumerRebalanceListener (org.apache.kafka.clients.consumer.ConsumerRebalanceListener): 7