Search in sources:

Example 21 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the project apache-kafka-on-k8s by banzaicloud.

From class ProduceRequestTest, method testBuildWithOldMessageFormat.

@Test
public void testBuildWithOldMessageFormat() {
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V1, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    builder.append(10L, null, "a".getBytes());
    Map<TopicPartition, MemoryRecords> produceData = new HashMap<>();
    produceData.put(new TopicPartition("test", 0), builder.build());
    ProduceRequest.Builder requestBuilder = ProduceRequest.Builder.forMagic(RecordBatch.MAGIC_VALUE_V1, (short) 1, 5000, produceData, null);
    assertEquals(2, requestBuilder.oldestAllowedVersion());
    assertEquals(2, requestBuilder.latestAllowedVersion());
}
Also used: HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), ByteBuffer (java.nio.ByteBuffer), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.Test)
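
Both assertions expect version 2 because, in this Kafka line, produce request version 2 is the version that carries message format v1 without up- or down-conversion, so forMagic pins both bounds to it. A minimal sketch of the same builder call outside the test harness (hedged: the read-back loop is illustrative and assumes the same imports as the example above, plus Record):

    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V1,
            CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    // Magic v1 records carry a timestamp but cannot carry headers.
    builder.append(10L, null, "a".getBytes());
    for (Record record : builder.build().records())
        System.out.println(record.offset() + " @ " + record.timestamp());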

Example 22 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the project apache-kafka-on-k8s by banzaicloud.

From class FetcherTest, method testFetcherMetrics.

/*
     * Send multiple fetch requests. Verify that the client-side lag metrics
     * (records-lag-max and the per-partition records-lag) have the right values.
     */
@Test
public void testFetcherMetrics() {
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    MetricName maxLagMetric = metrics.metricInstance(metricsRegistry.recordsLagMax);
    Map<String, String> tags = new HashMap<>();
    tags.put("topic", tp0.topic());
    tags.put("partition", String.valueOf(tp0.partition()));
    MetricName partitionLagMetric = metrics.metricName("records-lag", metricGroup, tags);
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric recordsFetchLagMax = allMetrics.get(maxLagMetric);
    // recordsFetchLagMax should be initialized to negative infinity
    assertEquals(Double.NEGATIVE_INFINITY, recordsFetchLagMax.value(), EPSILON);
    // recordsFetchLagMax should be hw - fetchOffset after receiving an empty FetchResponse
    fetchRecords(tp0, MemoryRecords.EMPTY, Errors.NONE, 100L, 0);
    assertEquals(100, recordsFetchLagMax.value(), EPSILON);
    KafkaMetric partitionLag = allMetrics.get(partitionLagMetric);
    assertEquals(100, partitionLag.value(), EPSILON);
    // recordsFetchLagMax should be hw - next fetch offset (one past the last returned message) after receiving a non-empty FetchResponse
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    fetchRecords(tp0, builder.build(), Errors.NONE, 200L, 0);
    assertEquals(197, recordsFetchLagMax.value(), EPSILON);
    assertEquals(197, partitionLag.value(), EPSILON);
    // verify de-registration of partition lag
    subscriptions.unsubscribe();
    assertFalse(allMetrics.containsKey(partitionLagMetric));
}
Also used: MetricName (org.apache.kafka.common.MetricName), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric), Test (org.junit.Test)
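
The asserted values follow from lag = high watermark - next fetch offset: after the empty response the fetcher still sits at offset 0 with hw = 100, giving a lag of 100; after consuming offsets 0 through 2 the next fetch offset is 3 and hw = 200, giving 200 - 3 = 197.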

Example 23 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the project apache-kafka-on-k8s by banzaicloud.

From class FetcherTest, method testFetchResponseMetricsWithOnePartitionError.

@Test
public void testFetchResponseMetricsWithOnePartitionError() {
    subscriptions.assignFromUser(Utils.mkSet(tp0, tp1));
    subscriptions.seek(tp0, 0);
    subscriptions.seek(tp1, 0);
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();
    Map<TopicPartition, FetchResponse.PartitionData> partitions = new HashMap<>();
    partitions.put(tp0, new FetchResponse.PartitionData(Errors.NONE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    partitions.put(tp1, new FetchResponse.PartitionData(Errors.OFFSET_OUT_OF_RANGE, 100, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, MemoryRecords.EMPTY));
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(new FetchResponse(Errors.NONE, new LinkedHashMap<>(partitions), 0, INVALID_SESSION_ID));
    consumerClient.poll(0);
    fetcher.fetchedRecords();
    int expectedBytes = 0;
    for (Record record : records.records())
        expectedBytes += record.sizeInBytes();
    assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON);
    assertEquals(3, recordsCountAverage.value(), EPSILON);
}
Also used: HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric), FetchResponse (org.apache.kafka.common.requests.FetchResponse), MetricName (org.apache.kafka.common.MetricName), PartitionData (org.apache.kafka.common.requests.FetchRequest.PartitionData), TopicPartition (org.apache.kafka.common.TopicPartition), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), Record (org.apache.kafka.common.record.Record), LegacyRecord (org.apache.kafka.common.record.LegacyRecord), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), SimpleRecord (org.apache.kafka.common.record.SimpleRecord), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.Test)
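
Only tp0 contributes records here: tp1 answers with OFFSET_OUT_OF_RANGE and MemoryRecords.EMPTY, so the fetch-size and records-per-request averages reflect the single successful partition, which is why the asserted record count is 3 rather than 6.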

Example 24 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the project apache-kafka-on-k8s by banzaicloud.

From class FetcherTest, method testHeaders.

@Test
public void testHeaders() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(time));
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 1L);
    builder.append(0L, "key".getBytes(), "value-1".getBytes());
    Header[] headersArray = new Header[1];
    headersArray[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-2".getBytes(), headersArray);
    Header[] headersArray2 = new Header[2];
    headersArray2[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    headersArray2[1] = new RecordHeader("headerKey", "headerValue2".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-3".getBytes(), headersArray2);
    MemoryRecords memoryRecords = builder.build();
    List<ConsumerRecord<byte[], byte[]>> records;
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 1);
    client.prepareResponse(matchesOffset(tp0, 1), fullFetchResponse(tp0, memoryRecords, Errors.NONE, 100L, 0));
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(0);
    records = fetcher.fetchedRecords().get(tp0);
    assertEquals(3, records.size());
    Iterator<ConsumerRecord<byte[], byte[]>> recordIterator = records.iterator();
    ConsumerRecord<byte[], byte[]> record = recordIterator.next();
    assertNull(record.headers().lastHeader("headerKey"));
    record = recordIterator.next();
    assertEquals("headerValue", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
    record = recordIterator.next();
    assertEquals("headerValue2", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
}
Also used: Metrics (org.apache.kafka.common.metrics.Metrics), Header (org.apache.kafka.common.header.Header), RecordHeader (org.apache.kafka.common.header.internals.RecordHeader), ResponseHeader (org.apache.kafka.common.requests.ResponseHeader), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.Test)
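
Headers require message format v2, which is what the magic-less MemoryRecords.builder overload used above produces (RecordBatch.CURRENT_MAGIC_VALUE). As a hedged alternative sketch, assuming the same imports plus SimpleRecord and CompressionType, an equivalent batch can be built in one call with MemoryRecords.withRecords:

    Header[] headers = new Header[] {
            new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8))
    };
    MemoryRecords withHeaders = MemoryRecords.withRecords(CompressionType.NONE,
            new SimpleRecord(0L, "key".getBytes(), "value-1".getBytes()),
            new SimpleRecord(0L, "key".getBytes(), "value-2".getBytes(), headers));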

Example 25 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the project apache-kafka-on-k8s by banzaicloud.

From class FetcherTest, method testFetchResponseMetrics.

@Test
public void testFetchResponseMetrics() {
    String topic1 = "foo";
    String topic2 = "bar";
    TopicPartition tp1 = new TopicPartition(topic1, 0);
    TopicPartition tp2 = new TopicPartition(topic2, 0);
    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put(topic1, 1);
    partitionCounts.put(topic2, 1);
    Cluster cluster = TestUtils.clusterWith(1, partitionCounts);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    subscriptions.assignFromUser(Utils.mkSet(tp1, tp2));
    int expectedBytes = 0;
    LinkedHashMap<TopicPartition, FetchResponse.PartitionData> fetchPartitionData = new LinkedHashMap<>();
    for (TopicPartition tp : Utils.mkSet(tp1, tp2)) {
        subscriptions.seek(tp, 0);
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++)
            builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();
        for (Record record : records.records())
            expectedBytes += record.sizeInBytes();
        fetchPartitionData.put(tp, new FetchResponse.PartitionData(Errors.NONE, 15L, FetchResponse.INVALID_LAST_STABLE_OFFSET, 0L, null, records));
    }
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(new FetchResponse(Errors.NONE, fetchPartitionData, 0, INVALID_SESSION_ID));
    consumerClient.poll(0);
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetcher.fetchedRecords();
    assertEquals(3, fetchedRecords.get(tp1).size());
    assertEquals(3, fetchedRecords.get(tp2).size());
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON);
    assertEquals(6, recordsCountAverage.value(), EPSILON);
}
Also used: HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), Cluster (org.apache.kafka.common.Cluster), FetchResponse (org.apache.kafka.common.requests.FetchResponse), KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric), MetricName (org.apache.kafka.common.MetricName), PartitionData (org.apache.kafka.common.requests.FetchRequest.PartitionData), TopicPartition (org.apache.kafka.common.TopicPartition), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), Record (org.apache.kafka.common.record.Record), LegacyRecord (org.apache.kafka.common.record.LegacyRecord), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), SimpleRecord (org.apache.kafka.common.record.SimpleRecord), List (java.util.List), ArrayList (java.util.ArrayList), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.Test)
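
Here both partitions succeed, so expectedBytes spans all six records across the two topics and recordsPerRequestAvg comes out to 6; with only a single fetch response recorded, each average equals the one observed value.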

Aggregations

MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder): 96 usages
ByteBuffer (java.nio.ByteBuffer): 50 usages
MemoryRecords (org.apache.kafka.common.record.MemoryRecords): 36 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 29 usages
HashMap (java.util.HashMap): 26 usages
SimpleRecord (org.apache.kafka.common.record.SimpleRecord): 25 usages
Test (org.junit.jupiter.api.Test): 25 usages
Test (org.junit.Test): 20 usages
ArrayList (java.util.ArrayList): 17 usages
List (java.util.List): 17 usages
Record (org.apache.kafka.common.record.Record): 17 usages
LinkedHashMap (java.util.LinkedHashMap): 16 usages
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 14 usages
MetricName (org.apache.kafka.common.MetricName): 14 usages
KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric): 14 usages
Arrays.asList (java.util.Arrays.asList): 10 usages
CompressionType (org.apache.kafka.common.record.CompressionType): 10 usages
RecordBatch (org.apache.kafka.common.record.RecordBatch): 10 usages
Header (org.apache.kafka.common.header.Header): 9 usages
EndTransactionMarker (org.apache.kafka.common.record.EndTransactionMarker): 9 usages
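
Across these examples the pattern is constant: allocate a ByteBuffer, obtain a MemoryRecordsBuilder via MemoryRecords.builder, append records, and call build() exactly once to get an immutable MemoryRecords. A minimal self-contained sketch of that pattern (the class name and printed output are illustrative, not from the project; only the Kafka clients jar is assumed):

import java.nio.ByteBuffer;

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.TimestampType;

public class MemoryRecordsBuilderSketch {

    public static void main(String[] args) {
        // The builder frames appended records into a single batch inside this buffer.
        ByteBuffer buffer = ByteBuffer.allocate(1024);
        MemoryRecordsBuilder builder = MemoryRecords.builder(
                buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L /* base offset */);
        for (int i = 0; i < 3; i++)
            builder.append(System.currentTimeMillis(), ("key-" + i).getBytes(), ("value-" + i).getBytes());
        // build() closes the batch; the builder must not be appended to afterwards.
        MemoryRecords records = builder.build();
        for (Record record : records.records())
            System.out.println(record.offset() + ": " + record.sizeInBytes() + " bytes");
    }
}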