Use of org.apache.kafka.clients.consumer.OffsetOutOfRangeException in project apache-kafka-on-k8s by banzaicloud.
The class FetcherTest, method testFetchPositionAfterException.
@Test
public void testFetchPositionAfterException() {
    // Verify that the advancement of the next fetch offset equals the number of fetched records when
    // some fetched partitions cause an exception. This ensures the consumer won't lose records upon an exception.
    subscriptionsNoAutoReset.assignFromUser(Utils.mkSet(tp0, tp1));
    subscriptionsNoAutoReset.seek(tp0, 1);
    subscriptionsNoAutoReset.seek(tp1, 1);
    assertEquals(1, fetcherNoAutoReset.sendFetches());
    Map<TopicPartition, FetchResponse.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(tp1, new FetchResponse.PartitionData(Errors.NONE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, records));
    partitions.put(tp0, new FetchResponse.PartitionData(Errors.OFFSET_OUT_OF_RANGE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, FetchResponse.INVALID_LOG_START_OFFSET, null, MemoryRecords.EMPTY));
    client.prepareResponse(new FetchResponse(Errors.NONE, new LinkedHashMap<>(partitions), 0, INVALID_SESSION_ID));
    consumerClient.poll(0);
    List<ConsumerRecord<byte[], byte[]>> fetchedRecords = new ArrayList<>();
    List<OffsetOutOfRangeException> exceptions = new ArrayList<>();
    for (List<ConsumerRecord<byte[], byte[]>> records : fetcherNoAutoReset.fetchedRecords().values())
        fetchedRecords.addAll(records);
    assertEquals(fetchedRecords.size(), subscriptionsNoAutoReset.position(tp1) - 1);
    try {
        for (List<ConsumerRecord<byte[], byte[]>> records : fetcherNoAutoReset.fetchedRecords().values())
            fetchedRecords.addAll(records);
    } catch (OffsetOutOfRangeException e) {
        exceptions.add(e);
    }
    assertEquals(4, subscriptionsNoAutoReset.position(tp1).longValue());
    assertEquals(3, fetchedRecords.size());
    // Should have received one OffsetOutOfRangeException for partition tp0.
    assertEquals(1, exceptions.size());
    OffsetOutOfRangeException e = exceptions.get(0);
    assertTrue(e.offsetOutOfRangePartitions().containsKey(tp0));
    assertEquals(1, e.offsetOutOfRangePartitions().size());
}
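The test above drives the internal Fetcher directly; at the application level the same condition surfaces from KafkaConsumer.poll() when auto.offset.reset=none. Below is a minimal sketch of how a consumer loop might recover, assuming a hypothetical broker address, topic "orders", and group "demo-group". Only the partitions reported by OffsetOutOfRangeException.offsetOutOfRangePartitions() are reset, which matches the test's assertion that healthy partitions keep their positions and their records are not lost.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.NoOffsetForPartitionException;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetOutOfRangeHandlingSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");              // hypothetical group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // With "none", the consumer throws instead of resetting the position silently.
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("orders")); // hypothetical topic
            while (true) {
                try {
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                    for (ConsumerRecord<byte[], byte[]> record : records) {
                        // Records already fetched from healthy partitions are delivered;
                        // the exception for the failed partition surfaces separately.
                        System.out.printf("%s-%d@%d%n", record.topic(), record.partition(), record.offset());
                    }
                } catch (OffsetOutOfRangeException e) {
                    // Reset only the partitions named in the exception; other positions are untouched.
                    consumer.seekToBeginning(e.offsetOutOfRangePartitions().keySet());
                } catch (NoOffsetForPartitionException e) {
                    // Thrown instead when the group has no committed offset at all for a partition.
                    consumer.seekToBeginning(e.partitions());
                }
            }
        }
    }
}

seekToBeginning is only one possible policy; an application might instead seek to the end or fail fast, depending on whether reprocessing or skipping data is the lesser evil.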
Use of org.apache.kafka.clients.consumer.OffsetOutOfRangeException in project kafka by apache.
The class FetcherTest, method testCompletedFetchRemoval.
@Test
public void testCompletedFetchRemoval() {
    // Ensure that completed fetches which cause an exception are removed if and only if they contain empty records.
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0, tp1, tp2, tp3));
    subscriptions.seek(tp0, 1);
    subscriptions.seek(tp1, 1);
    subscriptions.seek(tp2, 1);
    subscriptions.seek(tp3, 1);
    assertEquals(1, fetcher.sendFetches());
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData().setPartitionIndex(tp1.partition()).setHighWatermark(100).setRecords(records));
    partitions.put(tidp0, new FetchResponseData.PartitionData().setPartitionIndex(tp0.partition()).setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()).setHighWatermark(100));
    partitions.put(tidp2, new FetchResponseData.PartitionData().setPartitionIndex(tp2.partition()).setHighWatermark(100).setLastStableOffset(4).setLogStartOffset(0).setRecords(nextRecords));
    partitions.put(tidp3, new FetchResponseData.PartitionData().setPartitionIndex(tp3.partition()).setHighWatermark(100).setLastStableOffset(4).setLogStartOffset(0).setRecords(partialRecords));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    List<ConsumerRecord<byte[], byte[]>> fetchedRecords = new ArrayList<>();
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsByPartition = fetchedRecords();
    for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values())
        fetchedRecords.addAll(records);
    assertEquals(fetchedRecords.size(), subscriptions.position(tp1).offset - 1);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, fetchedRecords.size());
    List<OffsetOutOfRangeException> oorExceptions = new ArrayList<>();
    try {
        recordsByPartition = fetchedRecords();
        for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values())
            fetchedRecords.addAll(records);
    } catch (OffsetOutOfRangeException oor) {
        oorExceptions.add(oor);
    }
    // Should have received one OffsetOutOfRangeException for partition tp0.
    assertEquals(1, oorExceptions.size());
    OffsetOutOfRangeException oor = oorExceptions.get(0);
    assertTrue(oor.offsetOutOfRangePartitions().containsKey(tp0));
    assertEquals(1, oor.offsetOutOfRangePartitions().size());
    recordsByPartition = fetchedRecords();
    for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values())
        fetchedRecords.addAll(records);
    // Should not have received an exception for tp2.
    assertEquals(6, subscriptions.position(tp2).offset);
    assertEquals(5, fetchedRecords.size());
    int numExceptionsExpected = 3;
    List<KafkaException> kafkaExceptions = new ArrayList<>();
    for (int i = 1; i <= numExceptionsExpected; i++) {
        try {
            recordsByPartition = fetchedRecords();
            for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values())
                fetchedRecords.addAll(records);
        } catch (KafkaException e) {
            kafkaExceptions.add(e);
        }
    }
    // Should have received numExceptionsExpected KafkaExceptions for tp3: the completed fetch with
    // partial records is non-empty, so it is not removed and each call throws again.
    assertEquals(numExceptionsExpected, kafkaExceptions.size());
}
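The tests above exercise the internal Fetcher against prepared FetchResponses. The same exception path can also be exercised in application-level tests without a broker. Below is a minimal sketch using MockConsumer, assuming a hypothetical topic "test-topic"; MockConsumer#setPollException injects the exception on the next poll, so the recovery code can be verified in isolation.

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerOffsetOutOfRangeSketch {
    public static void main(String[] args) {
        TopicPartition tp0 = new TopicPartition("test-topic", 0); // hypothetical topic
        MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.NONE);
        consumer.assign(Collections.singletonList(tp0));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp0, 5L)); // log start offset is 5
        consumer.seek(tp0, 1L); // position below the log start offset

        // Inject the exception the real consumer would raise for an out-of-range position.
        Map<TopicPartition, Long> outOfRange = new HashMap<>();
        outOfRange.put(tp0, 1L);
        consumer.setPollException(new OffsetOutOfRangeException(outOfRange));

        try {
            consumer.poll(Duration.ofMillis(100));
        } catch (OffsetOutOfRangeException e) {
            // Recover the same way an application would against a real broker:
            // reset only the partitions reported by the exception.
            consumer.seekToBeginning(e.offsetOutOfRangePartitions().keySet());
        }
        System.out.println("position after recovery: " + consumer.position(tp0)); // 5
    }
}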