Use of org.apache.kafka.common.record.Records in project kafka by apache.
The class FetchResponse, method addPartitionData.
private static void addPartitionData(String dest, List<Send> sends, Struct partitionData) {
    Struct header = partitionData.getStruct(PARTITION_HEADER_KEY_NAME);
    Records records = partitionData.getRecords(RECORD_SET_KEY_NAME);

    // Include the partition header and the size of the record set
    ByteBuffer buffer = ByteBuffer.allocate(header.sizeOf() + 4);
    header.writeTo(buffer);
    buffer.putInt(records.sizeInBytes());
    buffer.rewind();
    sends.add(new ByteBufferSend(dest, buffer));

    // Finally the send for the record set itself
    sends.add(new RecordsSend(dest, records));
}
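For context, a receiver reverses this framing: after consuming the header, it reads the 4-byte size prefix and then that many bytes of record data. A minimal sketch of that parse, assuming the header bytes have already been consumed from the buffer; readRecordSet is a hypothetical helper, not part of Kafka's API:

// Hypothetical helper: parse the record set out of a frame produced by
// addPartitionData, assuming the header bytes were already consumed.
private static MemoryRecords readRecordSet(ByteBuffer frame) {
    int size = frame.getInt();        // the 4-byte size prefix written above
    ByteBuffer slice = frame.slice(); // view over the bytes that follow
    slice.limit(size);                // restrict the view to the record set
    return MemoryRecords.readableRecords(slice);
}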
Use of org.apache.kafka.common.record.Records in project apache-kafka-on-k8s by banzaicloud.
The class FetchResponse, method addPartitionData.
private static void addPartitionData(String dest, Queue<Send> sends, Struct partitionData) {
    Struct header = partitionData.getStruct(PARTITION_HEADER_KEY_NAME);
    Records records = partitionData.getRecords(RECORD_SET_KEY_NAME);

    // Include the partition header and the size of the record set
    ByteBuffer buffer = ByteBuffer.allocate(header.sizeOf() + 4);
    header.writeTo(buffer);
    buffer.putInt(records.sizeInBytes());
    buffer.rewind();
    sends.add(new ByteBufferSend(dest, buffer));

    // Finally the send for the record set itself
    sends.add(new RecordsSend(dest, records));
}
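This fork is byte-for-byte the same as the upstream version above except that it accumulates into a Queue<Send> rather than a List<Send>. A plausible reason, offered here as an assumption, is that the queued sends are drained in order by a composite send such as org.apache.kafka.common.record.MultiRecordsSend; a hedged usage sketch:

// Hedged sketch, not code from either project. The MultiRecordsSend
// constructor shown is an assumption about the version at hand, and
// partitionStructs is a hypothetical stand-in for the response's partitions.
Queue<Send> sends = new ArrayDeque<>();
for (Struct partitionStruct : partitionStructs)
    addPartitionData(dest, sends, partitionStruct);
Send combined = new MultiRecordsSend(dest, sends);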
Use of org.apache.kafka.common.record.Records in project kafka by apache.
The class FetcherTest, method testFetchResponseMetrics.
@Test
public void testFetchResponseMetrics() {
    buildFetcher();

    String topic1 = "foo";
    String topic2 = "bar";
    TopicPartition tp1 = new TopicPartition(topic1, 0);
    TopicPartition tp2 = new TopicPartition(topic2, 0);
    subscriptions.assignFromUser(mkSet(tp1, tp2));

    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put(topic1, 1);
    partitionCounts.put(topic2, 1);
    topicIds.put(topic1, Uuid.randomUuid());
    topicIds.put(topic2, Uuid.randomUuid());
    TopicIdPartition tidp1 = new TopicIdPartition(topicIds.get(topic1), tp1);
    TopicIdPartition tidp2 = new TopicIdPartition(topicIds.get(topic2), tp2);
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, partitionCounts, tp -> validLeaderEpoch, topicIds));

    int expectedBytes = 0;
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> fetchPartitionData = new LinkedHashMap<>();

    for (TopicIdPartition tp : mkSet(tidp1, tidp2)) {
        subscriptions.seek(tp.topicPartition(), 0);

        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE,
                TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++)
            builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();

        for (Record record : records.records())
            expectedBytes += record.sizeInBytes();

        fetchPartitionData.put(tp, new FetchResponseData.PartitionData()
                .setPartitionIndex(tp.topicPartition().partition())
                .setHighWatermark(15)
                .setLogStartOffset(0)
                .setRecords(records));
    }

    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData));
    consumerClient.poll(time.timer(0));

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertEquals(3, fetchedRecords.get(tp1).size());
    assertEquals(3, fetchedRecords.get(tp2).size());

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
    assertEquals(6, (Double) recordsCountAverage.metricValue(), EPSILON);
}
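The final two assertions read plain Avg stats out of the metrics registry. For readers unfamiliar with that machinery, here is a minimal standalone sketch of the same mechanism using org.apache.kafka.common.metrics directly; the sensor and group names are illustrative, not the ones the consumer actually registers:

// Minimal sketch of an Avg-backed metric like fetch-size-avg; the names
// "fetch-size" and "demo-group" are illustrative assumptions.
Metrics metrics = new Metrics();
Sensor fetchSize = metrics.sensor("fetch-size");
MetricName avgName = metrics.metricName("fetch-size-avg", "demo-group");
fetchSize.add(avgName, new Avg());
fetchSize.record(512.0); // bytes from one fetch response
fetchSize.record(256.0); // bytes from another
double avg = (double) metrics.metric(avgName).metricValue(); // 384.0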
Use of org.apache.kafka.common.record.Records in project kafka by apache.
The class MockLog, method read.
@Override
public LogFetchInfo read(long startOffset, Isolation isolation) {
    OptionalLong maxOffsetOpt = isolation == Isolation.COMMITTED
        ? OptionalLong.of(highWatermark.offset)
        : OptionalLong.empty();

    verifyOffsetInRange(startOffset);

    long maxOffset = maxOffsetOpt.orElse(endOffset().offset);
    if (startOffset >= maxOffset) {
        return new LogFetchInfo(MemoryRecords.EMPTY, new LogOffsetMetadata(startOffset, metadataForOffset(startOffset)));
    }

    ByteBuffer buffer = ByteBuffer.allocate(512);
    int batchCount = 0;
    LogOffsetMetadata batchStartOffset = null;

    for (LogBatch batch : batches) {
        // The start offset is inclusive while the max offset is exclusive. Only complete
        // batches are returned, so a batch whose last offset reaches the max offset is
        // filtered out rather than returned as a truncated batch in a fetch response.
        if (batch.lastOffset() >= startOffset && batch.lastOffset() < maxOffset && !batch.entries.isEmpty()) {
            buffer = batch.writeTo(buffer);
            if (batchStartOffset == null) {
                batchStartOffset = batch.entries.get(0).logOffsetMetadata();
            }

            // A read on the mock log returns at most two batches. This is a simple way
            // to exercise interesting partial-read scenarios in tests.
            batchCount += 1;
            if (batchCount >= 2) {
                break;
            }
        }
    }

    buffer.flip();
    Records records = MemoryRecords.readableRecords(buffer);

    if (batchStartOffset == null) {
        throw new RuntimeException("Expected to find at least one entry starting from offset "
            + startOffset + " but found none");
    }

    return new LogFetchInfo(records, batchStartOffset);
}
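A short usage sketch of the two isolation levels, assuming a MockLog instance named log as in the test below:

// Hedged sketch: COMMITTED reads stop at the high watermark, while
// UNCOMMITTED reads run to the log end offset.
LogFetchInfo committed = log.read(0L, Isolation.COMMITTED);
LogFetchInfo uncommitted = log.read(0L, Isolation.UNCOMMITTED);
for (RecordBatch batch : uncommitted.records.batches()) {
    System.out.println("batch " + batch.baseOffset() + ".." + batch.lastOffset());
}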
Use of org.apache.kafka.common.record.Records in project kafka by apache.
The class MockLogTest, method testReadRecords.
@Test
public void testReadRecords() {
    int epoch = 2;

    ByteBuffer recordOneBuffer = ByteBuffer.allocate(4);
    recordOneBuffer.putInt(1);
    SimpleRecord recordOne = new SimpleRecord(recordOneBuffer);

    ByteBuffer recordTwoBuffer = ByteBuffer.allocate(4);
    recordTwoBuffer.putInt(2);
    SimpleRecord recordTwo = new SimpleRecord(recordTwoBuffer);

    appendAsLeader(Arrays.asList(recordOne, recordTwo), epoch);

    Records records = log.read(0, Isolation.UNCOMMITTED).records;

    List<ByteBuffer> extractRecords = new ArrayList<>();
    for (Record record : records.records()) {
        extractRecords.add(record.value());
    }
    assertEquals(Arrays.asList(recordOne.value(), recordTwo.value()), extractRecords);
}
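appendAsLeader is a private helper defined elsewhere in MockLogTest. A plausible reconstruction, offered as an assumption rather than the verbatim helper, wraps the SimpleRecords in a batch at the current end offset and appends it to the log:

// Assumed shape of the test helper, not the verbatim code from MockLogTest.
private void appendAsLeader(Collection<SimpleRecord> records, int epoch) {
    log.appendAsLeader(
        MemoryRecords.withRecords(log.endOffset().offset, CompressionType.NONE,
            records.toArray(new SimpleRecord[0])),
        epoch);
}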