
Example 6 with OffsetOutOfRangeException

Use of org.apache.kafka.clients.consumer.OffsetOutOfRangeException in project apache-kafka-on-k8s by banzaicloud.

From class FetcherTest, method testFetchPositionAfterException.

@Test
public void testFetchPositionAfterException() {
    // Verify that the advancement of the next fetch offset equals the number of fetched records when
    // some fetched partitions cause an exception. This ensures that the consumer won't lose records upon exception.
    subscriptionsNoAutoReset.assignFromUser(Utils.mkSet(tp0, tp1));
    subscriptionsNoAutoReset.seek(tp0, 1);
    subscriptionsNoAutoReset.seek(tp1, 1);
    assertEquals(1, fetcherNoAutoReset.sendFetches());
    Map<TopicPartition, FetchResponse.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(tp1, new FetchResponse.PartitionData(Errors.NONE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, records));
    partitions.put(tp0, new FetchResponse.PartitionData(Errors.OFFSET_OUT_OF_RANGE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY));
    client.prepareResponse(new FetchResponse(Errors.NONE, new LinkedHashMap<>(partitions), 0, INVALID_SESSION_ID));
    consumerClient.poll(0);
    List<ConsumerRecord<byte[], byte[]>> fetchedRecords = new ArrayList<>();
    List<OffsetOutOfRangeException> exceptions = new ArrayList<>();
    for (List<ConsumerRecord<byte[], byte[]>> records : fetcherNoAutoReset.fetchedRecords().values()) fetchedRecords.addAll(records);
    assertEquals(fetchedRecords.size(), subscriptionsNoAutoReset.position(tp1) - 1);
    try {
        for (List<ConsumerRecord<byte[], byte[]>> records : fetcherNoAutoReset.fetchedRecords().values()) fetchedRecords.addAll(records);
    } catch (OffsetOutOfRangeException e) {
        exceptions.add(e);
    }
    assertEquals(4, subscriptionsNoAutoReset.position(tp1).longValue());
    assertEquals(3, fetchedRecords.size());
    // Should have received one OffsetOutOfRangeException for partition tp0
    assertEquals(1, exceptions.size());
    OffsetOutOfRangeException e = exceptions.get(0);
    assertTrue(e.offsetOutOfRangePartitions().containsKey(tp0));
    assertEquals(1, e.offsetOutOfRangePartitions().size());
}
Also used : PartitionData(org.apache.kafka.common.requests.FetchRequest.PartitionData) TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) FetchResponse(org.apache.kafka.common.requests.FetchResponse) OffsetOutOfRangeException(org.apache.kafka.clients.consumer.OffsetOutOfRangeException) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) LinkedHashMap(java.util.LinkedHashMap) Test(org.junit.Test)
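
The test above verifies that OffsetOutOfRangeException surfaces from Fetcher.fetchedRecords() without losing records for the healthy partition. As a companion, here is a minimal application-level sketch (not part of the test) of catching the same exception from a KafkaConsumer poll loop when auto.offset.reset=none; the broker address, group id, topic name, and the seekToBeginning recovery policy are assumptions for illustration.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetOutOfRangeHandlingSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Assumed connection settings, for illustration only.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");
        // With "none", an invalid position is reported as OffsetOutOfRangeException
        // instead of being reset silently, which is the behavior the test relies on.
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic")); // assumed topic
            while (true) {
                try {
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                    for (ConsumerRecord<byte[], byte[]> record : records)
                        System.out.printf("partition=%d offset=%d%n", record.partition(), record.offset());
                } catch (OffsetOutOfRangeException e) {
                    // Only the offending partitions are reported; the others keep their positions,
                    // matching the "no lost records" property asserted in the test above.
                    System.err.println("Out-of-range positions: " + e.offsetOutOfRangePartitions());
                    consumer.seekToBeginning(e.partitions()); // assumed recovery policy
                }
            }
        }
    }
}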

Example 7 with OffsetOutOfRangeException

Use of org.apache.kafka.clients.consumer.OffsetOutOfRangeException in project kafka by apache.

From class FetcherTest, method testCompletedFetchRemoval.

@Test
public void testCompletedFetchRemoval() {
    // Ensure that completed fetches which cause an exception are removed if and only if they contain empty records.
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0, tp1, tp2, tp3));
    subscriptions.seek(tp0, 1);
    subscriptions.seek(tp1, 1);
    subscriptions.seek(tp2, 1);
    subscriptions.seek(tp3, 1);
    assertEquals(1, fetcher.sendFetches());
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData().setPartitionIndex(tp1.partition()).setHighWatermark(100).setRecords(records));
    partitions.put(tidp0, new FetchResponseData.PartitionData().setPartitionIndex(tp0.partition()).setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()).setHighWatermark(100));
    partitions.put(tidp2, new FetchResponseData.PartitionData().setPartitionIndex(tp2.partition()).setHighWatermark(100).setLastStableOffset(4).setLogStartOffset(0).setRecords(nextRecords));
    partitions.put(tidp3, new FetchResponseData.PartitionData().setPartitionIndex(tp3.partition()).setHighWatermark(100).setLastStableOffset(4).setLogStartOffset(0).setRecords(partialRecords));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    List<ConsumerRecord<byte[], byte[]>> fetchedRecords = new ArrayList<>();
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsByPartition = fetchedRecords();
    for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) fetchedRecords.addAll(records);
    assertEquals(fetchedRecords.size(), subscriptions.position(tp1).offset - 1);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, fetchedRecords.size());
    List<OffsetOutOfRangeException> oorExceptions = new ArrayList<>();
    try {
        recordsByPartition = fetchedRecords();
        for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) fetchedRecords.addAll(records);
    } catch (OffsetOutOfRangeException oor) {
        oorExceptions.add(oor);
    }
    // Should have received one OffsetOutOfRangeException for partition tp0
    assertEquals(1, oorExceptions.size());
    OffsetOutOfRangeException oor = oorExceptions.get(0);
    assertTrue(oor.offsetOutOfRangePartitions().containsKey(tp0));
    assertEquals(1, oor.offsetOutOfRangePartitions().size());
    recordsByPartition = fetchedRecords();
    for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) fetchedRecords.addAll(records);
    // Should not have received an exception for tp2.
    assertEquals(6, subscriptions.position(tp2).offset);
    assertEquals(5, fetchedRecords.size());
    int numExceptionsExpected = 3;
    List<KafkaException> kafkaExceptions = new ArrayList<>();
    for (int i = 1; i <= numExceptionsExpected; i++) {
        try {
            recordsByPartition = fetchedRecords();
            for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values()) fetchedRecords.addAll(records);
        } catch (KafkaException e) {
            kafkaExceptions.add(e);
        }
    }
    // Should have received exactly numExceptionsExpected KafkaExceptions for tp3.
    assertEquals(numExceptionsExpected, kafkaExceptions.size());
}
Also used : ArrayList(java.util.ArrayList) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) LinkedHashMap(java.util.LinkedHashMap) PartitionData(org.apache.kafka.common.requests.FetchRequest.PartitionData) TopicPartition(org.apache.kafka.common.TopicPartition) Collections.singletonList(java.util.Collections.singletonList) Arrays.asList(java.util.Arrays.asList) ArrayList(java.util.ArrayList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) KafkaException(org.apache.kafka.common.KafkaException) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) OffsetOutOfRangeException(org.apache.kafka.clients.consumer.OffsetOutOfRangeException) Test(org.junit.jupiter.api.Test)
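
The test above also shows that a completed fetch whose records are non-empty but unparseable (tp3's partialRecords) keeps rethrowing a KafkaException on each fetchedRecords() call, while the out-of-range fetch with empty records is removed after throwing once. The helper below is a hypothetical sketch of how an application might triage these two failure modes at poll time; the helper's name and the seekToEnd recovery policy are assumptions, not something the test prescribes.

import java.time.Duration;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;
import org.apache.kafka.common.KafkaException;

public class FetchExceptionTriageSketch {
    // Hypothetical helper: one poll attempt that separates the two failure modes
    // exercised above. The recovery policies here are illustrative assumptions.
    static ConsumerRecords<byte[], byte[]> pollOnce(Consumer<byte[], byte[]> consumer) {
        try {
            return consumer.poll(Duration.ofMillis(500));
        } catch (OffsetOutOfRangeException oor) {
            // Recoverable with auto.offset.reset=none: reset only the partitions whose
            // fetch position fell outside the log, then try again on the next call.
            consumer.seekToEnd(oor.partitions());
            return ConsumerRecords.empty();
        } catch (KafkaException e) {
            // Other fetch-level failures (e.g. an incomplete record batch, as with the
            // partialRecords for tp3 above) surface again on subsequent polls, so whether
            // to skip past the bad offset or fail fast is an application decision.
            System.err.println("Fetch failed: " + e.getMessage());
            throw e;
        }
    }
}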

Aggregations

OffsetOutOfRangeException (org.apache.kafka.clients.consumer.OffsetOutOfRangeException) 7
LinkedHashMap (java.util.LinkedHashMap) 5
ArrayList (java.util.ArrayList) 4
TopicPartition (org.apache.kafka.common.TopicPartition) 4
Map (java.util.Map) 3
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord) 3
KafkaException (org.apache.kafka.common.KafkaException) 3
PartitionData (org.apache.kafka.common.requests.FetchRequest.PartitionData) 3
FetchResponse (org.apache.kafka.common.requests.FetchResponse) 3
ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer) 3
Test (org.junit.jupiter.api.Test) 3
HashMap (java.util.HashMap) 2
List (java.util.List) 2
TopicIdPartition (org.apache.kafka.common.TopicIdPartition) 2
RecordTooLargeException (org.apache.kafka.common.errors.RecordTooLargeException) 2
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException) 2
Errors (org.apache.kafka.common.protocol.Errors) 2
Supplier (com.google.common.base.Supplier) 1
ImmutableMap (com.google.common.collect.ImmutableMap) 1
Committer (io.druid.data.input.Committer) 1