use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
the class FetcherTest method testHeaders.
@Test
public void testHeaders() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(time));

    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
        CompressionType.NONE, TimestampType.CREATE_TIME, 1L);
    builder.append(0L, "key".getBytes(), "value-1".getBytes());

    Header[] headersArray = new Header[1];
    headersArray[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-2".getBytes(), headersArray);

    Header[] headersArray2 = new Header[2];
    headersArray2[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    headersArray2[1] = new RecordHeader("headerKey", "headerValue2".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-3".getBytes(), headersArray2);

    MemoryRecords memoryRecords = builder.build();

    List<ConsumerRecord<byte[], byte[]>> records;
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);
    client.prepareResponse(matchesOffset(tp0, 1), fullFetchResponse(tp0, memoryRecords, Errors.NONE, 100L, 0));

    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(0);
    records = fetcher.fetchedRecords().get(tp0);
    assertEquals(3, records.size());

    Iterator<ConsumerRecord<byte[], byte[]>> recordIterator = records.iterator();

    // the first record was appended without headers
    ConsumerRecord<byte[], byte[]> record = recordIterator.next();
    assertNull(record.headers().lastHeader("headerKey"));

    record = recordIterator.next();
    assertEquals("headerValue", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());

    // lastHeader returns the most recently appended header for a duplicated key
    record = recordIterator.next();
    assertEquals("headerValue2", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
}
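For reference outside the Fetcher test harness, the header round-trip this test relies on can be exercised directly against MemoryRecords. A minimal, hypothetical sketch using only the record classes the test already imports:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.TimestampType;

public class HeaderRoundTrip {
    public static void main(String[] args) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
            CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        Header[] headers = { new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8)) };
        builder.append(0L, "key".getBytes(), "value".getBytes(), headers);
        MemoryRecords records = builder.build();

        // each Record exposes the headers it was written with
        for (Record record : records.records())
            System.out.println(record.headers()[0].key()); // prints "headerKey"
    }
}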
use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
the class FetcherTest method makeFetchRequestWithIncompleteRecord.
private void makeFetchRequestWithIncompleteRecord() {
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    // 8 zero bytes is too short to hold even one complete record batch
    MemoryRecords partialRecord = MemoryRecords.readableRecords(
        ByteBuffer.wrap(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }));
    client.prepareResponse(fullFetchResponse(tp0, partialRecord, Errors.NONE, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
}
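The truncated buffer above is readable but contains no complete batch: 8 bytes is less than the 12-byte log overhead (an 8-byte offset plus a 4-byte size field), so batch iteration yields nothing. A small standalone sketch of that behavior:

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.MemoryRecords;

public class IncompleteRecordCheck {
    public static void main(String[] args) {
        // shorter than the log overhead, so no batch can be parsed out of it
        MemoryRecords partial = MemoryRecords.readableRecords(ByteBuffer.wrap(new byte[8]));
        System.out.println(partial.sizeInBytes());                  // 8
        System.out.println(partial.batches().iterator().hasNext()); // false: no complete batch
    }
}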
use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
the class FetcherTest method testFetchResponseMetrics.
@Test
public void testFetchResponseMetrics() {
    String topic1 = "foo";
    String topic2 = "bar";
    TopicPartition tp1 = new TopicPartition(topic1, 0);
    TopicPartition tp2 = new TopicPartition(topic2, 0);

    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put(topic1, 1);
    partitionCounts.put(topic2, 1);
    Cluster cluster = TestUtils.clusterWith(1, partitionCounts);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    subscriptions.assignFromUser(Utils.mkSet(tp1, tp2));

    int expectedBytes = 0;
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData> fetchPartitionData = new LinkedHashMap<>();

    for (TopicPartition tp : Utils.mkSet(tp1, tp2)) {
        subscriptions.seek(tp, 0);

        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
            CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++)
            builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();

        for (Record record : records.records())
            expectedBytes += record.sizeInBytes();

        fetchPartitionData.put(tp, new FetchResponse.PartitionData(Errors.NONE, 15L,
            FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    }

    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(new FetchResponse(Errors.NONE, fetchPartitionData, 0, INVALID_SESSION_ID));
    consumerClient.poll(0);

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetcher.fetchedRecords();
    assertEquals(3, fetchedRecords.get(tp1).size());
    assertEquals(3, fetchedRecords.get(tp2).size());

    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON);
    assertEquals(6, recordsCountAverage.value(), EPSILON);
}
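Note that expectedBytes sums the sizes of the individual records, which is smaller than the full serialized size of the record set, since each batch carries header overhead on top of its records. A standalone sketch of the distinction, using only APIs already shown in the test:

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.TimestampType;

public class RecordSizeAccounting {
    public static void main(String[] args) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
            CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++)
            builder.append(0L, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();

        int perRecordSum = 0;
        for (Record record : records.records())
            perRecordSum += record.sizeInBytes();

        // the serialized size exceeds the per-record sum by the batch header overhead
        System.out.println(perRecordSum + " < " + records.sizeInBytes());
    }
}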
use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.
the class Sender method sendProduceRequest.
/**
 * Create a produce request from the given record batches
 */
private void sendProduceRequest(long now, int destination, short acks, int timeout, List<ProducerBatch> batches) {
    if (batches.isEmpty())
        return;

    Map<TopicPartition, MemoryRecords> produceRecordsByPartition = new HashMap<>(batches.size());
    final Map<TopicPartition, ProducerBatch> recordsByPartition = new HashMap<>(batches.size());

    // find the minimum magic version used when creating the record sets
    byte minUsedMagic = apiVersions.maxUsableProduceMagic();
    for (ProducerBatch batch : batches) {
        if (batch.magic() < minUsedMagic)
            minUsedMagic = batch.magic();
    }

    for (ProducerBatch batch : batches) {
        TopicPartition tp = batch.topicPartition;
        MemoryRecords records = batch.records();

        // down-convert to the minimum magic if this batch was built with a newer one; for example,
        // if the leader changes from a broker which is supporting the new magic version to one
        // which doesn't, then we will need to convert.
        if (!records.hasMatchingMagic(minUsedMagic))
            records = batch.records().downConvert(minUsedMagic, 0, time).records();
        produceRecordsByPartition.put(tp, records);
        recordsByPartition.put(tp, batch);
    }

    String transactionalId = null;
    if (transactionManager != null && transactionManager.isTransactional()) {
        transactionalId = transactionManager.transactionalId();
    }

    ProduceRequest.Builder requestBuilder = ProduceRequest.Builder.forMagic(minUsedMagic, acks, timeout,
        produceRecordsByPartition, transactionalId);
    RequestCompletionHandler callback = new RequestCompletionHandler() {
        public void onComplete(ClientResponse response) {
            handleProduceResponse(response, recordsByPartition, time.milliseconds());
        }
    };

    String nodeId = Integer.toString(destination);
    ClientRequest clientRequest = client.newClientRequest(nodeId, requestBuilder, now, acks != 0, callback);
    client.send(clientRequest, now);
    log.trace("Sent produce request to {}: {}", nodeId, requestBuilder);
}
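The down-conversion step is plain MemoryRecords API. Below is a minimal, standalone sketch of hasMatchingMagic and downConvert in isolation; Time.SYSTEM and the v1 target magic are illustrative stand-ins for the sender's time field and its negotiated minimum:

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.utils.Time;

public class DownConvertSketch {
    public static void main(String[] args) {
        // this builder overload uses the current (v2) magic
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
            CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        builder.append(0L, "key".getBytes(), "value".getBytes());
        MemoryRecords v2Records = builder.build();

        byte minUsedMagic = RecordBatch.MAGIC_VALUE_V1;
        if (!v2Records.hasMatchingMagic(minUsedMagic)) {
            // downConvert returns a ConvertedRecords wrapper; records() unwraps the converted set
            MemoryRecords v1Records = v2Records.downConvert(minUsedMagic, 0, Time.SYSTEM).records();
            System.out.println(v1Records.batches().iterator().next().magic()); // 1
        }
    }
}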
use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.
the class SendBuilderTest method getRecords.
private MemoryRecords getRecords(ByteBuffer buffer, int size) {
    int initialPosition = buffer.position();
    int initialLimit = buffer.limit();
    int recordsLimit = initialPosition + size;

    // temporarily cap the limit so the slice covers only the records region
    buffer.limit(recordsLimit);
    MemoryRecords records = MemoryRecords.readableRecords(buffer.slice());

    // advance past the records and restore the original limit
    buffer.position(recordsLimit);
    buffer.limit(initialLimit);
    return records;
}
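A hypothetical caller might use the helper like this: write one record set into a larger buffer, append trailing bytes, and check that the slice is bounded to the records region while the cursor advances past it. The helper body is reproduced (as static) so the sketch compiles on its own:

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.SimpleRecord;

public class GetRecordsUsage {
    public static void main(String[] args) {
        // serialize one record set, then append trailing bytes that must not leak into the slice
        MemoryRecords written = MemoryRecords.withRecords(CompressionType.NONE,
            new SimpleRecord("key".getBytes(), "value".getBytes()));
        ByteBuffer buffer = ByteBuffer.allocate(written.sizeInBytes() + 16);
        buffer.put(written.buffer());
        buffer.putInt(0xdeadbeef);
        buffer.flip();

        MemoryRecords sliced = getRecords(buffer, written.sizeInBytes());
        System.out.println(sliced.sizeInBytes()); // == written.sizeInBytes()
        System.out.println(buffer.position());    // == written.sizeInBytes(): cursor moved past the records

    }

    // same logic as the test helper above
    private static MemoryRecords getRecords(ByteBuffer buffer, int size) {
        int recordsLimit = buffer.position() + size;
        int initialLimit = buffer.limit();
        buffer.limit(recordsLimit);
        MemoryRecords records = MemoryRecords.readableRecords(buffer.slice());
        buffer.position(recordsLimit);
        buffer.limit(initialLimit);
        return records;
    }
}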