Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache: class FetcherTest, method testFetchPositionAfterException.
@Test
public void testFetchPositionAfterException() {
    // Verify that the next fetch offset advances by the number of fetched records when
    // some fetched partitions cause an exception. This ensures the consumer won't lose records upon an exception.
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0, tp1));
    subscriptions.seek(tp0, 1);
    subscriptions.seek(tp1, 1);

    assertEquals(1, fetcher.sendFetches());

    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp1.partition())
            .setHighWatermark(100)
            .setRecords(records));
    partitions.put(tidp0, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp0.partition())
            .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
            .setHighWatermark(100));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));

    List<ConsumerRecord<byte[], byte[]>> allFetchedRecords = new ArrayList<>();
    fetchRecordsInto(allFetchedRecords);

    // tp0 hit OFFSET_OUT_OF_RANGE, so its position is unchanged; tp1 returned 3 records starting at offset 1.
    assertEquals(1, subscriptions.position(tp0).offset);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, allFetchedRecords.size());

    OffsetOutOfRangeException e = assertThrows(OffsetOutOfRangeException.class, () -> fetchRecordsInto(allFetchedRecords));
    assertEquals(singleton(tp0), e.offsetOutOfRangePartitions().keySet());
    assertEquals(1L, e.offsetOutOfRangePartitions().get(tp0).longValue());

    // Positions and the fetched record count are unchanged after the exception.
    assertEquals(1, subscriptions.position(tp0).offset);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, allFetchedRecords.size());
}
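The behavior verified above is what applications see with auto.offset.reset=none: poll() surfaces an OffsetOutOfRangeException while positions stay where they were, so the caller decides where to resume. A minimal sketch of such recovery, assuming a hypothetical broker address, group (with committed offsets), and topic name:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetRecoveryExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group"); // hypothetical group
        // "none" means out-of-range positions raise instead of silently resetting.
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
        try (KafkaConsumer<byte[], byte[]> consumer =
                 new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            consumer.subscribe(Collections.singletonList("example-topic")); // hypothetical topic
            while (true) {
                try {
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
                    records.forEach(r -> System.out.printf("offset=%d%n", r.offset()));
                } catch (OffsetOutOfRangeException e) {
                    // Positions are untouched, mirroring the test above;
                    // choose explicitly where to resume for the affected partitions.
                    consumer.seekToBeginning(e.partitions());
                }
            }
        }
    }
}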
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache: class FetcherTest, method testUpdatePositionWithLastRecordMissingFromBatch.
@Test
public void testUpdatePositionWithLastRecordMissingFromBatch() {
    buildFetcher();

    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
            new SimpleRecord("0".getBytes(), "v".getBytes()),
            new SimpleRecord("1".getBytes(), "v".getBytes()),
            new SimpleRecord("2".getBytes(), "v".getBytes()),
            new SimpleRecord(null, "value".getBytes()));

    // Remove the last record to simulate compaction
    MemoryRecords.FilterResult result = records.filterTo(tp0, new MemoryRecords.RecordFilter(0, 0) {
        @Override
        protected BatchRetentionResult checkBatchRetention(RecordBatch batch) {
            return new BatchRetentionResult(BatchRetention.DELETE_EMPTY, false);
        }

        @Override
        protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) {
            return record.key() != null;
        }
    }, ByteBuffer.allocate(1024), Integer.MAX_VALUE, BufferSupplier.NO_CACHING);
    result.outputBuffer().flip();
    MemoryRecords compactedRecords = MemoryRecords.readableRecords(result.outputBuffer());

    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, compactedRecords, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> allFetchedRecords = fetchedRecords();
    assertTrue(allFetchedRecords.containsKey(tp0));
    List<ConsumerRecord<byte[], byte[]>> fetchedRecords = allFetchedRecords.get(tp0);
    assertEquals(3, fetchedRecords.size());
    for (int i = 0; i < 3; i++) {
        assertEquals(Integer.toString(i), new String(fetchedRecords.get(i).key()));
    }

    // The next offset should point to the next batch
    assertEquals(4L, subscriptions.position(tp0).offset);
}
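The position lands on 4 rather than 3 because compaction removes records but preserves the batch's lastOffset, and the fetcher resumes from lastOffset + 1 once a batch is drained. A standalone sketch, under the assumption that the same org.apache.kafka.common.record classes behave as in the test above, that makes the offset gap visible (the partition name is hypothetical):

import java.nio.ByteBuffer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.utils.BufferSupplier;

public class CompactedBatchOffsets {
    public static void main(String[] args) {
        MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
                new SimpleRecord("0".getBytes(), "v".getBytes()),
                new SimpleRecord("1".getBytes(), "v".getBytes()),
                new SimpleRecord(null, "v".getBytes())); // will be filtered out

        MemoryRecords.FilterResult result = records.filterTo(
                new TopicPartition("demo", 0), // hypothetical partition
                new MemoryRecords.RecordFilter(0, 0) {
                    @Override
                    protected BatchRetentionResult checkBatchRetention(RecordBatch batch) {
                        return new BatchRetentionResult(BatchRetention.DELETE_EMPTY, false);
                    }

                    @Override
                    protected boolean shouldRetainRecord(RecordBatch batch, Record record) {
                        return record.key() != null; // mimic compaction of null-key records
                    }
                }, ByteBuffer.allocate(1024), Integer.MAX_VALUE, BufferSupplier.NO_CACHING);
        result.outputBuffer().flip();

        MemoryRecords compacted = MemoryRecords.readableRecords(result.outputBuffer());
        for (RecordBatch batch : compacted.batches()) {
            // lastOffset() still reports the removed record's offset (2),
            // so a consumer draining this batch resumes from offset 3.
            System.out.printf("batch lastOffset=%d%n", batch.lastOffset());
            for (Record record : batch) {
                System.out.printf("  record offset=%d%n", record.offset());
            }
        }
    }
}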
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache: class FetcherTest, method testFetchWithNoTopicId.
@Test
public void testFetchWithNoTopicId() {
    // Should work and default to using the old request type.
    buildFetcher();

    TopicIdPartition noId = new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("noId", 0));
    assignFromUserNoId(singleton(noId.topicPartition()));
    subscriptions.seek(noId.topicPartition(), 0);

    // Fetch should use request version 12
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    client.prepareResponse(
            fetchRequestMatcher((short) 12, noId, 0, Optional.of(validLeaderEpoch)),
            fullFetchResponse(noId, this.records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchedRecords();
    assertTrue(partitionRecords.containsKey(noId.topicPartition()));

    List<ConsumerRecord<byte[], byte[]>> records = partitionRecords.get(noId.topicPartition());
    assertEquals(3, records.size());
    // This is the next fetch position
    assertEquals(4L, subscriptions.position(noId.topicPartition()).offset);

    long offset = 1;
    for (ConsumerRecord<byte[], byte[]> record : records) {
        assertEquals(offset, record.offset());
        offset += 1;
    }
}
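The test relies on Uuid.ZERO_UUID acting as the sentinel for "topic ID unknown", which pins the fetch to version 12, the newest version addressed by topic name; version 13 and later address partitions by topic ID (KIP-516). A hedged sketch of that version-selection idea, where pickFetchVersion is a hypothetical helper and not Kafka's actual internal logic:

import java.util.Arrays;
import java.util.Collection;
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;

public class FetchVersionSelection {
    /**
     * Hypothetical helper: if any partition lacks a real topic ID, fall back to
     * version 12, the last fetch version keyed by topic name; otherwise a
     * topic-ID-based version (13+) can be used.
     */
    static short pickFetchVersion(Collection<TopicIdPartition> partitions) {
        boolean allHaveIds = partitions.stream()
                .noneMatch(tp -> tp.topicId().equals(Uuid.ZERO_UUID));
        return allHaveIds ? (short) 13 : (short) 12;
    }

    public static void main(String[] args) {
        TopicIdPartition noId = new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("noId", 0));
        TopicIdPartition withId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("withId", 0));

        System.out.println(pickFetchVersion(Arrays.asList(noId, withId))); // 12
        System.out.println(pickFetchVersion(Arrays.asList(withId)));       // 13
    }
}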
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache: class FetcherTest, method testReadCommittedWithCommittedAndAbortedTransactions.
@Test
public void testReadCommittedWithCommittedAndAbortedTransactions() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    List<FetchResponseData.AbortedTransaction> abortedTransactions = new ArrayList<>();
    long pid1 = 1L;
    long pid2 = 2L;

    // Appends for producer 1 (eventually committed)
    appendTransactionalRecords(buffer, pid1, 0L,
            new SimpleRecord("commit1-1".getBytes(), "value".getBytes()),
            new SimpleRecord("commit1-2".getBytes(), "value".getBytes()));

    // Appends for producer 2 (eventually aborted)
    appendTransactionalRecords(buffer, pid2, 2L, new SimpleRecord("abort2-1".getBytes(), "value".getBytes()));

    // Commit producer 1
    commitTransaction(buffer, pid1, 3L);

    // Append more for producer 2 (eventually aborted)
    appendTransactionalRecords(buffer, pid2, 4L, new SimpleRecord("abort2-2".getBytes(), "value".getBytes()));

    // Abort producer 2
    abortTransaction(buffer, pid2, 5L);
    abortedTransactions.add(new FetchResponseData.AbortedTransaction().setProducerId(pid2).setFirstOffset(2L));

    // New transaction for producer 1 (eventually aborted)
    appendTransactionalRecords(buffer, pid1, 6L, new SimpleRecord("abort1-1".getBytes(), "value".getBytes()));

    // New transaction for producer 2 (eventually committed)
    appendTransactionalRecords(buffer, pid2, 7L, new SimpleRecord("commit2-1".getBytes(), "value".getBytes()));

    // Add more records for producer 1 (eventually aborted)
    appendTransactionalRecords(buffer, pid1, 8L, new SimpleRecord("abort1-2".getBytes(), "value".getBytes()));

    // Abort producer 1
    abortTransaction(buffer, pid1, 9L);
    abortedTransactions.add(new FetchResponseData.AbortedTransaction().setProducerId(pid1).setFirstOffset(6L));

    // Commit producer 2
    commitTransaction(buffer, pid2, 10L);

    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    // Normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));

    // Only the 3 committed records should be returned
    List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
    Set<String> fetchedKeys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
        fetchedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
    }
    assertEquals(mkSet("commit1-1", "commit1-2", "commit2-1"), fetchedKeys);
}
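On the application side, the filtering verified here is enabled purely through configuration. A minimal sketch of a read_committed consumer, assuming a hypothetical broker address, group, and topic; against data like the above, only the committed transactional records (the three commit* keys) would ever be returned by poll():

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class ReadCommittedConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "read-committed-group"); // hypothetical group
        // Records from aborted transactions (and open ones past the LSO) are never returned.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        try (KafkaConsumer<byte[], byte[]> consumer =
                 new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            consumer.subscribe(Collections.singletonList("transactional-topic")); // hypothetical topic
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
            records.forEach(r -> System.out.println(new String(r.key())));
        }
    }
}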
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project kafka by apache: class InMemoryWindowStore, method init.
@Deprecated
@Override
public void init(final ProcessorContext context, final StateStore root) {
    this.context = context;

    final StreamsMetricsImpl metrics = ProcessorContextUtils.getMetricsImpl(context);
    final String threadId = Thread.currentThread().getName();
    final String taskName = context.taskId().toString();
    expiredRecordSensor = TaskMetrics.droppedRecordsSensor(threadId, taskName, metrics);

    if (root != null) {
        final boolean consistencyEnabled = StreamsConfig.InternalConfig.getBoolean(
                context.appConfigs(), IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, false);
        // Register the store with a restore callback that replays changelog
        // records back into the in-memory windows.
        context.register(root, (RecordBatchingStateRestoreCallback) records -> {
            for (final ConsumerRecord<byte[], byte[]> record : records) {
                put(Bytes.wrap(extractStoreKeyBytes(record.key())), record.value(), extractStoreTimestamp(record.key()));
                ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(record, consistencyEnabled, position);
            }
        });
    }
    open = true;
}
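For a custom store outside Kafka's internals, the same restore wiring can be done with the public StateRestoreCallback instead of the internal RecordBatchingStateRestoreCallback, which hands over records one at a time rather than in batches. A sketch under that assumption, with InMemoryStore standing in for a hypothetical store class:

import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.processor.StateRestoreCallback;
import org.apache.kafka.streams.processor.StateStore;

/** Hypothetical minimal key-value store showing public-API restore registration. */
public class InMemoryStore implements StateStore {
    private final Map<Bytes, byte[]> data = new ConcurrentSkipListMap<>();
    private volatile boolean open = false;

    @Override public String name() { return "in-memory-store"; }

    @Deprecated
    @Override
    public void init(final ProcessorContext context, final StateStore root) {
        // Replay each changelog record into the map; mirrors the loop in
        // InMemoryWindowStore.init above, minus window-timestamp extraction.
        final StateRestoreCallback restoreCallback = (key, value) -> {
            if (value == null) {
                data.remove(Bytes.wrap(key)); // tombstone: delete the key
            } else {
                data.put(Bytes.wrap(key), value);
            }
        };
        context.register(root, restoreCallback);
        open = true;
    }

    @Override public void flush() { }
    @Override public void close() { open = false; data.clear(); }
    @Override public boolean persistent() { return false; }
    @Override public boolean isOpen() { return open; }
}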