
Example 21 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.

From the class FetcherTest, method testHeaders.

@Test
public void testHeaders() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(time));
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 1L);
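    // The first record is appended without headers; the assertions below expect lastHeader("headerKey") to return null for it.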
    builder.append(0L, "key".getBytes(), "value-1".getBytes());
    Header[] headersArray = new Header[1];
    headersArray[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-2".getBytes(), headersArray);
    Header[] headersArray2 = new Header[2];
    headersArray2[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    headersArray2[1] = new RecordHeader("headerKey", "headerValue2".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-3".getBytes(), headersArray2);
    MemoryRecords memoryRecords = builder.build();
    List<ConsumerRecord<byte[], byte[]>> records;
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);
    client.prepareResponse(matchesOffset(tp0, 1), fullFetchResponse(tp0, memoryRecords, Errors.NONE, 100L, 0));
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(0);
    records = fetcher.fetchedRecords().get(tp0);
    assertEquals(3, records.size());
    Iterator<ConsumerRecord<byte[], byte[]>> recordIterator = records.iterator();
    ConsumerRecord<byte[], byte[]> record = recordIterator.next();
    assertNull(record.headers().lastHeader("headerKey"));
    record = recordIterator.next();
    assertEquals("headerValue", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
    record = recordIterator.next();
    assertEquals("headerValue2", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
}
Also used : Metrics (org.apache.kafka.common.metrics.Metrics), Header (org.apache.kafka.common.header.Header), RecordHeader (org.apache.kafka.common.header.internals.RecordHeader), ResponseHeader (org.apache.kafka.common.requests.ResponseHeader), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.Test)
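A minimal, standalone sketch of the header round trip the test exercises, with the fetcher plumbing removed (the class name HeaderRoundTrip is invented for this illustration; it assumes only the kafka-clients jar on the classpath):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.TimestampType;

public class HeaderRoundTrip {
    public static void main(String[] args) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        Header[] headers = { new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8)) };
        builder.append(0L, "key".getBytes(), "value".getBytes(), headers);
        MemoryRecords records = builder.build();
        // Read the headers straight back out of the built record set.
        for (Record record : records.records()) {
            for (Header header : record.headers()) {
                System.out.println(header.key() + " = " + new String(header.value(), StandardCharsets.UTF_8));
            }
        }
    }
}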

Example 22 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.

From the class FetcherTest, method makeFetchRequestWithIncompleteRecord.

private void makeFetchRequestWithIncompleteRecord() {
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
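    // Only 8 zero bytes: the base-offset field of a record batch, truncated before the 4-byte size field, so no complete record can be parsed.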
    MemoryRecords partialRecord = MemoryRecords.readableRecords(ByteBuffer.wrap(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0 }));
    client.prepareResponse(fullFetchResponse(tp0, partialRecord, Errors.NONE, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
}
Also used : MemoryRecords(org.apache.kafka.common.record.MemoryRecords)
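The behaviour the test leans on can be reproduced with MemoryRecords alone: readableRecords wraps a buffer without validating it, and a truncated buffer simply yields no complete batches. A minimal sketch (the class name PartialRecordSketch is invented here):

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.MemoryRecords;

public class PartialRecordSketch {
    public static void main(String[] args) {
        // 8 zero bytes: a batch's base offset with the size field cut off.
        MemoryRecords partial = MemoryRecords.readableRecords(ByteBuffer.wrap(new byte[8]));
        System.out.println(partial.sizeInBytes()); // 8: the raw buffer size
        System.out.println(partial.batches().iterator().hasNext()); // false: no complete batch
    }
}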

Example 23 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.

From the class FetcherTest, method testFetchResponseMetrics.

@Test
public void testFetchResponseMetrics() {
    String topic1 = "foo";
    String topic2 = "bar";
    TopicPartition tp1 = new TopicPartition(topic1, 0);
    TopicPartition tp2 = new TopicPartition(topic2, 0);
    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put(topic1, 1);
    partitionCounts.put(topic2, 1);
    Cluster cluster = TestUtils.clusterWith(1, partitionCounts);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    subscriptions.assignFromUser(Utils.mkSet(tp1, tp2));
    int expectedBytes = 0;
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData> fetchPartitionData = new LinkedHashMap<>();
    for (TopicPartition tp : Utils.mkSet(tp1, tp2)) {
        subscriptions.seek(tp, 0);
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++)
            builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();
        for (Record record : records.records())
            expectedBytes += record.sizeInBytes();
        fetchPartitionData.put(tp, new FetchResponse.PartitionData(Errors.NONE, 15L, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    }
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(new FetchResponse(Errors.NONE, fetchPartitionData, 0, INVALID_SESSION_ID));
    consumerClient.poll(0);
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetcher.fetchedRecords();
    assertEquals(3, fetchedRecords.get(tp1).size());
    assertEquals(3, fetchedRecords.get(tp2).size());
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON);
    assertEquals(6, recordsCountAverage.value(), EPSILON);
}
Also used : HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), Cluster (org.apache.kafka.common.Cluster), FetchResponse (org.apache.kafka.common.requests.FetchResponse), KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric), MetricName (org.apache.kafka.common.MetricName), PartitionData (org.apache.kafka.common.requests.FetchRequest.PartitionData), TopicPartition (org.apache.kafka.common.TopicPartition), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), Record (org.apache.kafka.common.record.Record), LegacyRecord (org.apache.kafka.common.record.LegacyRecord), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), SimpleRecord (org.apache.kafka.common.record.SimpleRecord), List (java.util.List), ArrayList (java.util.ArrayList), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.Test)
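One detail worth noting in the test: expectedBytes sums per-record sizes, which exclude the record-batch header, so it is smaller than records.sizeInBytes() for the same buffer. A standalone sketch of that accounting (the class name SizeAccounting is invented here):

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class SizeAccounting {
    public static void main(String[] args) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++)
            builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();
        int recordBytes = 0;
        for (Record record : records.records())
            recordBytes += record.sizeInBytes();
        // The per-record sum excludes the batch header, so it is smaller than the buffer total.
        System.out.println("sum of record sizes: " + recordBytes);
        System.out.println("total buffer size:   " + records.sizeInBytes());
    }
}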

Example 24 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in project apache-kafka-on-k8s by banzaicloud.

From the class Sender, method sendProduceRequest.

/**
 * Create a produce request from the given record batches
 */
private void sendProduceRequest(long now, int destination, short acks, int timeout, List<ProducerBatch> batches) {
    if (batches.isEmpty())
        return;
    Map<TopicPartition, MemoryRecords> produceRecordsByPartition = new HashMap<>(batches.size());
    final Map<TopicPartition, ProducerBatch> recordsByPartition = new HashMap<>(batches.size());
    // find the minimum magic version used when creating the record sets
    byte minUsedMagic = apiVersions.maxUsableProduceMagic();
    for (ProducerBatch batch : batches) {
        if (batch.magic() < minUsedMagic)
            minUsedMagic = batch.magic();
    }
    for (ProducerBatch batch : batches) {
        TopicPartition tp = batch.topicPartition;
        MemoryRecords records = batch.records();
        // If the partition leader changed from a broker which is supporting the new magic version to one
        // which doesn't, then we will need to convert.
        if (!records.hasMatchingMagic(minUsedMagic))
            records = batch.records().downConvert(minUsedMagic, 0, time).records();
        produceRecordsByPartition.put(tp, records);
        recordsByPartition.put(tp, batch);
    }
    String transactionalId = null;
    if (transactionManager != null && transactionManager.isTransactional()) {
        transactionalId = transactionManager.transactionalId();
    }
    ProduceRequest.Builder requestBuilder = ProduceRequest.Builder.forMagic(minUsedMagic, acks, timeout, produceRecordsByPartition, transactionalId);
    RequestCompletionHandler callback = new RequestCompletionHandler() {

        public void onComplete(ClientResponse response) {
            handleProduceResponse(response, recordsByPartition, time.milliseconds());
        }
    };
    String nodeId = Integer.toString(destination);
    ClientRequest clientRequest = client.newClientRequest(nodeId, requestBuilder, now, acks != 0, callback);
    client.send(clientRequest, now);
    log.trace("Sent produce request to {}: {}", nodeId, requestBuilder);
}
Also used : ClientResponse (org.apache.kafka.clients.ClientResponse), HashMap (java.util.HashMap), ProduceRequest (org.apache.kafka.common.requests.ProduceRequest), RequestCompletionHandler (org.apache.kafka.clients.RequestCompletionHandler), TopicPartition (org.apache.kafka.common.TopicPartition), ClientRequest (org.apache.kafka.clients.ClientRequest), MemoryRecords (org.apache.kafka.common.record.MemoryRecords)
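Taken on its own, the down-conversion step above amounts to the following (a minimal sketch; the class name DownConvertSketch is invented here, and Time.SYSTEM stands in for the sender's injected clock):

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.utils.Time;

public class DownConvertSketch {
    public static void main(String[] args) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        builder.append(0L, "key".getBytes(), "value".getBytes());
        MemoryRecords v2 = builder.build();
        // Down-convert the whole record set to the older magic format, as the sender
        // does when a broker in the cluster cannot accept the newer format.
        MemoryRecords v1 = v2.downConvert(RecordBatch.MAGIC_VALUE_V1, 0L, Time.SYSTEM).records();
        System.out.println(v1.hasMatchingMagic(RecordBatch.MAGIC_VALUE_V1)); // true
    }
}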

Example 25 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.

From the class SendBuilderTest, method getRecords.

private MemoryRecords getRecords(ByteBuffer buffer, int size) {
    int initialPosition = buffer.position();
    int initialLimit = buffer.limit();
    int recordsLimit = initialPosition + size;
    buffer.limit(recordsLimit);
    MemoryRecords records = MemoryRecords.readableRecords(buffer.slice());
    buffer.position(recordsLimit);
    buffer.limit(initialLimit);
    return records;
}
Also used : MemoryRecords (org.apache.kafka.common.record.MemoryRecords), UnalignedMemoryRecords (org.apache.kafka.common.record.UnalignedMemoryRecords)
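The slice returns a zero-copy view over the backing buffer and leaves the source positioned just past the consumed bytes, so consecutive record sets can be carved out in sequence. A small usage sketch (the class name SliceSketch and the helper sliceRecords are invented here):

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.SimpleRecord;

public class SliceSketch {
    // Same slice-then-advance pattern as getRecords above, shown standalone.
    static MemoryRecords sliceRecords(ByteBuffer buffer, int size) {
        int recordsLimit = buffer.position() + size;
        int originalLimit = buffer.limit();
        buffer.limit(recordsLimit); // bound the zero-copy view
        MemoryRecords records = MemoryRecords.readableRecords(buffer.slice());
        buffer.position(recordsLimit); // step past the consumed bytes
        buffer.limit(originalLimit); // restore the outer limit
        return records;
    }

    public static void main(String[] args) {
        MemoryRecords source = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("value".getBytes()));
        ByteBuffer buffer = source.buffer().duplicate();
        MemoryRecords view = sliceRecords(buffer, source.sizeInBytes());
        System.out.println(view.sizeInBytes() == source.sizeInBytes()); // true
        System.out.println(buffer.hasRemaining()); // false: positioned past the slice
    }
}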

Aggregations

MemoryRecords (org.apache.kafka.common.record.MemoryRecords): 108
TopicPartition (org.apache.kafka.common.TopicPartition): 59
Test (org.junit.jupiter.api.Test): 43
SimpleRecord (org.apache.kafka.common.record.SimpleRecord): 40
ByteBuffer (java.nio.ByteBuffer): 34
ArrayList (java.util.ArrayList): 28
List (java.util.List): 27
Test (org.junit.Test): 27
HashMap (java.util.HashMap): 26
LinkedHashMap (java.util.LinkedHashMap): 23
MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder): 23
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 18
FetchResponseData (org.apache.kafka.common.message.FetchResponseData): 16
Collections.singletonList (java.util.Collections.singletonList): 15
Record (org.apache.kafka.common.record.Record): 15
Arrays.asList (java.util.Arrays.asList): 14
Collections.emptyList (java.util.Collections.emptyList): 14
ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer): 14
Metrics (org.apache.kafka.common.metrics.Metrics): 12
MutableRecordBatch (org.apache.kafka.common.record.MutableRecordBatch): 11