Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class FetcherTest, method testFetchNonContinuousRecords.
@Test
public void testFetchNonContinuousRecords() {
    // if we are fetching from a compacted topic, there may be gaps in the returned records
    // this test verifies the fetcher updates the current fetched/consumed positions correctly for this case
    buildFetcher();
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    builder.appendWithOffset(15L, 0L, "key".getBytes(), "value-1".getBytes());
    builder.appendWithOffset(20L, 0L, "key".getBytes(), "value-2".getBytes());
    builder.appendWithOffset(30L, 0L, "key".getBytes(), "value-3".getBytes());
    MemoryRecords records = builder.build();

    List<ConsumerRecord<byte[], byte[]>> consumerRecords;
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsByPartition = fetchedRecords();
    consumerRecords = recordsByPartition.get(tp0);
    assertEquals(3, consumerRecords.size());
    // this is the next fetching position
    assertEquals(31L, subscriptions.position(tp0).offset);
    assertEquals(15L, consumerRecords.get(0).offset());
    assertEquals(20L, consumerRecords.get(1).offset());
    assertEquals(30L, consumerRecords.get(2).offset());
}
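The test relies on FetcherTest's harness (buildFetcher, assignFromUser, fetchedRecords), but the MemoryRecords part stands on its own: appendWithOffset lets you write records whose offsets have gaps, exactly as a fetch from a compacted topic might return. The snippet below is a minimal, harness-free sketch of that idea. Note that MemoryRecordsBuilder and MemoryRecords are internal clients/common classes rather than public API, so the exact signatures may differ between Kafka versions; the class name and main method here are only for illustration.

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.TimestampType;

public class NonContiguousOffsetsSketch {
    public static void main(String[] args) {
        // Build a batch whose record offsets have gaps (15, 20, 30),
        // as a compacted topic might return them.
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
                CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        builder.appendWithOffset(15L, 0L, "key".getBytes(), "value-1".getBytes());
        builder.appendWithOffset(20L, 0L, "key".getBytes(), "value-2".getBytes());
        builder.appendWithOffset(30L, 0L, "key".getBytes(), "value-3".getBytes());
        MemoryRecords records = builder.build();

        // Reading the batch back preserves the explicit offsets; the last offset is 30,
        // which is why the fetcher in the test advances its position to 31.
        for (Record record : records.records())
            System.out.println("offset=" + record.offset());
    }
}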
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class FetcherTest, method testMultipleAbortMarkers.
@Test
public void testMultipleAbortMarkers() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(),
            Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // Duplicate abort -- should be ignored.
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // Now commit a transaction.
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "commit1-1".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "commit1-2".getBytes(), "value".getBytes()));
    commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();

    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
            new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(fetchedRecords.get(tp0).size(), 2);
    List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
    Set<String> committedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2"));
    Set<String> actuallyCommittedKeys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
        actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
    }
    assertEquals(actuallyCommittedKeys, committedKeys);
}
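Here appendTransactionalRecords, abortTransaction, and commitTransaction are private helpers of FetcherTest that write transactional batches and control markers directly into the buffer; they are not public API. Outside the test harness, the behaviour being verified (records from aborted transactions are filtered out) comes from running the consumer with the read_committed isolation level. A minimal consumer-side sketch, where the bootstrap server, group id, and topic name are placeholders:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class ReadCommittedConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");              // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // Only records from committed transactions are returned; aborted data and
        // transaction markers are skipped by the fetcher.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic"));      // placeholder topic
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<byte[], byte[]> record : records)
                System.out.println("committed record at offset " + record.offset());
        }
    }
}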
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class FetcherTest, method testParseInvalidRecordBatch.
@Test
public void testParseInvalidRecordBatch() {
    buildFetcher();
    MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L, CompressionType.NONE, TimestampType.CREATE_TIME,
            new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
            new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
            new SimpleRecord(3L, "c".getBytes(), "3".getBytes()));
    ByteBuffer buffer = records.buffer();
    // flip some bits to fail the crc
    buffer.putInt(32, buffer.get(32) ^ 87238423);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    try {
        fetcher.collectFetch();
        fail("fetchedRecords should have raised");
    } catch (KafkaException e) {
        // the position should not advance since no data has been returned
        assertEquals(0, subscriptions.position(tp0).offset);
    }
}
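The corruption can also be observed directly on the MemoryRecords, without going through the fetcher. The sketch below assumes RecordBatch.isValid(), which in current Kafka versions recomputes the batch CRC and compares it to the stored value; these are internal classes, so treat the exact methods and the corrupted byte position as assumptions to verify against your version.

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.record.TimestampType;

public class CorruptBatchSketch {
    public static void main(String[] args) {
        MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
                CompressionType.NONE, TimestampType.CREATE_TIME,
                new SimpleRecord(1L, "a".getBytes(), "1".getBytes()));

        // Corrupt a byte inside the CRC-protected part of the batch,
        // mirroring what the test does to the fetch response payload.
        ByteBuffer buffer = records.buffer();
        buffer.putInt(32, buffer.get(32) ^ 87238423);

        // isValid() should now report the batch as corrupt, which is the same
        // condition that makes fetcher.collectFetch() raise a KafkaException.
        for (RecordBatch batch : MemoryRecords.readableRecords(buffer).batches())
            System.out.println("batch valid? " + batch.isValid());
    }
}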
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class FetcherTest, method makeFetchRequestWithIncompleteRecord.
private void makeFetchRequestWithIncompleteRecord() {
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    // 8 zero bytes are shorter than a record batch header, so the response carries
    // only an incomplete (partial) record for the partition
    MemoryRecords partialRecord = MemoryRecords.readableRecords(ByteBuffer.wrap(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }));
    client.prepareResponse(fullFetchResponse(tidp0, partialRecord, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
}
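A quick way to see why that buffer counts as an incomplete record: it cannot contain even one complete batch, so iterating its batches yields nothing. The sketch below illustrates this under the assumption (true for current Kafka) that MemoryRecords.batches() simply stops when the remaining bytes are too few to form a batch; class names here are for illustration only.

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.MemoryRecords;

public class IncompleteRecordSketch {
    public static void main(String[] args) {
        // 8 bytes is far less than a v2 record batch header, so this buffer can
        // only represent a truncated, partial record batch.
        MemoryRecords partial = MemoryRecords.readableRecords(
                ByteBuffer.wrap(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }));

        // No complete batch can be parsed from the truncated buffer.
        boolean hasCompleteBatch = partial.batches().iterator().hasNext();
        System.out.println("has a complete batch? " + hasCompleteBatch); // expected: false
    }
}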
Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
The class FetcherTest, method testFetchResponseMetricsPartialResponse.
@Test
public void testFetchResponseMetricsPartialResponse() {
    buildFetcher();
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));

    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();

    // the fetch position is 1, so only the records at offsets 1 and 2 count toward the metrics
    int expectedBytes = 0;
    for (Record record : records.records()) {
        if (record.offset() >= 1)
            expectedBytes += record.sizeInBytes();
    }

    fetchRecords(tidp0, records, Errors.NONE, 100L, 0);
    assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
    assertEquals(2, (Double) recordsCountAverage.metricValue(), EPSILON);
}
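The test reads the averages from the internal Metrics registry wired into the Fetcher. Against a running consumer, the same values are exposed through KafkaConsumer.metrics(). The group and metric names used below ("consumer-fetch-manager-metrics", "fetch-size-avg", "records-per-request-avg") match what current Kafka clients register, but treat them as assumptions to verify against your version; broker address and group id are placeholders.

import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class FetchMetricsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "metrics-demo");            // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // After some fetches have completed, print the average fetch size in bytes
            // and the average number of records returned per fetch request.
            for (Map.Entry<MetricName, ? extends Metric> entry : consumer.metrics().entrySet()) {
                MetricName name = entry.getKey();
                if (name.group().equals("consumer-fetch-manager-metrics")
                        && (name.name().equals("fetch-size-avg") || name.name().equals("records-per-request-avg"))) {
                    System.out.println(name.name() + " = " + entry.getValue().metricValue());
                }
            }
        }
    }
}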