Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project kafka by apache.
From class FetcherTest, method testFetchResponseMetrics.
@Test
public void testFetchResponseMetrics() {
    buildFetcher();

    String topic1 = "foo";
    String topic2 = "bar";
    TopicPartition tp1 = new TopicPartition(topic1, 0);
    TopicPartition tp2 = new TopicPartition(topic2, 0);

    subscriptions.assignFromUser(mkSet(tp1, tp2));

    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put(topic1, 1);
    partitionCounts.put(topic2, 1);

    topicIds.put(topic1, Uuid.randomUuid());
    topicIds.put(topic2, Uuid.randomUuid());
    TopicIdPartition tidp1 = new TopicIdPartition(topicIds.get(topic1), tp1);
    TopicIdPartition tidp2 = new TopicIdPartition(topicIds.get(topic2), tp2);
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, partitionCounts, tp -> validLeaderEpoch, topicIds));

    int expectedBytes = 0;
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> fetchPartitionData = new LinkedHashMap<>();
    for (TopicIdPartition tp : mkSet(tidp1, tidp2)) {
        subscriptions.seek(tp.topicPartition(), 0);

        // append three records per partition and accumulate the expected byte count
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
                TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++)
            builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();

        for (Record record : records.records())
            expectedBytes += record.sizeInBytes();

        fetchPartitionData.put(tp, new FetchResponseData.PartitionData()
                .setPartitionIndex(tp.topicPartition().partition())
                .setHighWatermark(15)
                .setLogStartOffset(0)
                .setRecords(records));
    }

    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData));
    consumerClient.poll(time.timer(0));

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertEquals(3, fetchedRecords.get(tp1).size());
    assertEquals(3, fetchedRecords.get(tp2).size());

    // the fetch-size and records-per-request averages should reflect both partitions
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
    assertEquals(6, (Double) recordsCountAverage.metricValue(), EPSILON);
}
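The pattern above (allocate a buffer, append records, call build(), then attach the resulting MemoryRecords to a fetch response) recurs throughout these tests. Below is a minimal standalone sketch of just the builder portion, using only the calls exercised in the test; the buffer size and payloads are arbitrary.

// Sketch: build a small batch and sum per-record sizes, as the test does for expectedBytes.
MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
        CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
for (int v = 0; v < 3; v++)
    builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
MemoryRecords records = builder.build();

int totalBytes = 0;
for (Record record : records.records())
    totalBytes += record.sizeInBytes();   // this total is what the fetch-size metric should report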
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project kafka by apache.
From class FetcherTest, method testMissingLeaderEpochInRecords.
@Test
public void testMissingLeaderEpochInRecords() {
    buildFetcher();

    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    // magic v0 batches carry no partition leader epoch
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V0, CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L, System.currentTimeMillis(), RecordBatch.NO_PARTITION_LEADER_EPOCH);
    builder.append(0L, "key".getBytes(), "1".getBytes());
    builder.append(0L, "key".getBytes(), "2".getBytes());
    MemoryRecords records = builder.build();

    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchedRecords();
    assertTrue(partitionRecords.containsKey(tp0));
    assertEquals(2, partitionRecords.get(tp0).size());

    // the consumer should surface an empty leader epoch rather than a bogus value
    for (ConsumerRecord<byte[], byte[]> record : partitionRecords.get(tp0)) {
        assertEquals(Optional.empty(), record.leaderEpoch());
    }
}
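The missing epoch can also be observed directly on the built batches, without going through the consumer. A short sketch, assuming the RecordBatch accessors magic() and partitionLeaderEpoch() behave as their names suggest:

// Sketch: a magic v0 batch reports NO_PARTITION_LEADER_EPOCH, which the consumer
// maps to Optional.empty() on ConsumerRecord.leaderEpoch().
ByteBuffer buf = ByteBuffer.allocate(1024);
MemoryRecordsBuilder v0Builder = MemoryRecords.builder(buf, RecordBatch.MAGIC_VALUE_V0, CompressionType.NONE,
        TimestampType.CREATE_TIME, 0L, System.currentTimeMillis(), RecordBatch.NO_PARTITION_LEADER_EPOCH);
v0Builder.append(0L, "key".getBytes(), "1".getBytes());
MemoryRecords v0Records = v0Builder.build();

for (RecordBatch batch : v0Records.batches()) {
    assert batch.magic() == RecordBatch.MAGIC_VALUE_V0;
    assert batch.partitionLeaderEpoch() == RecordBatch.NO_PARTITION_LEADER_EPOCH;
}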
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project kafka by apache.
From class FetcherTest, method testHeaders.
@Test
public void testHeaders() {
    buildFetcher();

    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 1L);
    builder.append(0L, "key".getBytes(), "value-1".getBytes());

    Header[] headersArray = new Header[1];
    headersArray[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-2".getBytes(), headersArray);

    Header[] headersArray2 = new Header[2];
    headersArray2[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    headersArray2[1] = new RecordHeader("headerKey", "headerValue2".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-3".getBytes(), headersArray2);

    MemoryRecords memoryRecords = builder.build();
    List<ConsumerRecord<byte[], byte[]>> records;

    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);

    client.prepareResponse(matchesOffset(tidp0, 1), fullFetchResponse(tidp0, memoryRecords, Errors.NONE, 100L, 0));

    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(time.timer(0));

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsByPartition = fetchedRecords();
    records = recordsByPartition.get(tp0);
    assertEquals(3, records.size());

    Iterator<ConsumerRecord<byte[], byte[]>> recordIterator = records.iterator();

    // the first record was appended without headers
    ConsumerRecord<byte[], byte[]> record = recordIterator.next();
    assertNull(record.headers().lastHeader("headerKey"));

    record = recordIterator.next();
    assertEquals("headerValue", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());

    // lastHeader returns the most recently appended header for the given key
    record = recordIterator.next();
    assertEquals("headerValue2", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
}
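Headers survive the round trip through MemoryRecords, so they can also be inspected straight off the built records rather than via the consumer. A small sketch, assuming Record.headers() returns the Header[] passed to append():

// Sketch: dump every header key/value from the records built above.
for (Record r : memoryRecords.records()) {
    for (Header header : r.headers()) {
        System.out.println(header.key() + " = " + new String(header.value(), StandardCharsets.UTF_8));
    }
}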
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project kafka by apache.
From class FetcherTest, method testFetcherIgnoresControlRecords.
@Test
public void testFetcherIgnoresControlRecords() {
    buildFetcher();

    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);

    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    long producerId = 1;
    short producerEpoch = 0;
    int baseSequence = 0;
    int partitionLeaderEpoch = 0;

    ByteBuffer buffer = ByteBuffer.allocate(1024);
    MemoryRecordsBuilder builder = MemoryRecords.idempotentBuilder(buffer, CompressionType.NONE, 0L, producerId,
            producerEpoch, baseSequence);
    builder.append(0L, "key".getBytes(), null);
    builder.close();

    MemoryRecords.writeEndTransactionalMarker(buffer, 1L, time.milliseconds(), partitionLeaderEpoch, producerId,
            producerEpoch, new EndTransactionMarker(ControlRecordType.ABORT, 0));

    buffer.flip();

    client.prepareResponse(fullFetchResponse(tidp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchedRecords();
    assertTrue(partitionRecords.containsKey(tp0));

    List<ConsumerRecord<byte[], byte[]>> records = partitionRecords.get(tp0);
    assertEquals(1, records.size());
    assertEquals(2L, subscriptions.position(tp0).offset);

    ConsumerRecord<byte[], byte[]> record = records.get(0);
    assertArrayEquals("key".getBytes(), record.key());
}
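The abort marker written by writeEndTransactionalMarker lives in a control batch; the Fetcher drops control records, which is why only the single data record comes back and the position advances past the marker to offset 2. A minimal sketch of how control batches can be recognized when iterating the raw MemoryRecords (it relies on RecordBatch.isControlBatch(), which the test itself does not call):

// Sketch: skip control batches the way the Fetcher does when iterating raw records.
for (RecordBatch batch : MemoryRecords.readableRecords(buffer).batches()) {
    if (batch.isControlBatch())
        continue;                         // e.g. the ABORT marker written above
    for (Record r : batch)
        System.out.println("data record at offset " + r.offset());
}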
Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project kafka by apache.
From class FetcherTest, method testFetchResponseMetricsWithOnePartitionAtTheWrongOffset.
@Test
public void testFetchResponseMetricsWithOnePartitionAtTheWrongOffset() {
    buildFetcher();

    assignFromUser(mkSet(tp0, tp1));
    subscriptions.seek(tp0, 0);
    subscriptions.seek(tp1, 0);

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));

    // send the fetch and then seek to a new offset
    assertEquals(1, fetcher.sendFetches());
    subscriptions.seek(tp1, 5);

    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
            TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();

    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new HashMap<>();
    partitions.put(tidp0, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp0.partition())
            .setHighWatermark(100)
            .setLogStartOffset(0)
            .setRecords(records));
    partitions.put(tidp1, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp1.partition())
            .setHighWatermark(100)
            .setLogStartOffset(0)
            .setRecords(MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("val".getBytes()))));

    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    fetcher.collectFetch();

    // we should have ignored the record at the wrong offset
    int expectedBytes = 0;
    for (Record record : records.records())
        expectedBytes += record.sizeInBytes();

    assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
    assertEquals(3, (Double) recordsCountAverage.metricValue(), EPSILON);
}
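Note that the payload for tidp1 is built with MemoryRecords.withRecords rather than an explicit MemoryRecordsBuilder. A rough sketch of the relationship between the two forms, under the assumption that MemoryRecordsBuilder.append(SimpleRecord) is available:

// Sketch: withRecords is a one-line convenience for small fixed payloads ...
MemoryRecords viaShortcut = MemoryRecords.withRecords(CompressionType.NONE,
        new SimpleRecord("val".getBytes()));

// ... roughly equivalent to driving the builder by hand (base offset 0, create-time timestamps).
MemoryRecordsBuilder b = MemoryRecords.builder(ByteBuffer.allocate(128), CompressionType.NONE,
        TimestampType.CREATE_TIME, 0L);
b.append(new SimpleRecord("val".getBytes()));
MemoryRecords viaBuilder = b.build();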