Search in sources:

Example 46 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the Apache Kafka project.

From class ProduceRequestTest, method testV3AndAboveShouldContainOnlyOneRecordBatch.

@Test
public void testV3AndAboveShouldContainOnlyOneRecordBatch() {
    // Write two consecutive record batches into a single buffer; produce
    // requests at version 3 and above must reject multi-batch partition data.
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder firstBatch = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    firstBatch.append(10L, null, "a".getBytes());
    firstBatch.close();
    MemoryRecordsBuilder secondBatch = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 1L);
    secondBatch.append(11L, "1".getBytes(), "b".getBytes());
    secondBatch.append(12L, null, "c".getBytes());
    secondBatch.close();
    buffer.flip();
    // Build the request payload step by step instead of one nested expression.
    ProduceRequestData.PartitionProduceData partitionData = new ProduceRequestData.PartitionProduceData()
            .setIndex(0)
            .setRecords(MemoryRecords.readableRecords(buffer));
    ProduceRequestData.TopicProduceData topicData = new ProduceRequestData.TopicProduceData()
            .setName("test")
            .setPartitionData(Collections.singletonList(partitionData));
    ProduceRequestData requestData = new ProduceRequestData()
            .setTopicData(new ProduceRequestData.TopicProduceDataCollection(
                    Collections.singletonList(topicData).iterator()))
            .setAcks((short) 1)
            .setTimeoutMs(5000);
    ProduceRequest.Builder requestBuilder = ProduceRequest.forCurrentMagic(requestData);
    assertThrowsForAllVersions(requestBuilder, InvalidRecordException.class);
}
Also used : ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.jupiter.api.Test)

Example 47 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the Apache Kafka project.

From class ProduceRequestTest, method testV3AndAboveCannotUseMagicV1.

@Test
public void testV3AndAboveCannotUseMagicV1() {
    // A batch written with magic v1 must be rejected by produce requests at
    // version 3 and above, which require the current (v2) record format.
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V1, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    builder.append(10L, null, "a".getBytes());
    // Assemble the request payload via named intermediates for readability.
    ProduceRequestData.PartitionProduceData partitionData = new ProduceRequestData.PartitionProduceData()
            .setIndex(0)
            .setRecords(builder.build());
    ProduceRequestData.TopicProduceData topicData = new ProduceRequestData.TopicProduceData()
            .setName("test")
            .setPartitionData(Collections.singletonList(partitionData));
    ProduceRequestData requestData = new ProduceRequestData()
            .setTopicData(new ProduceRequestData.TopicProduceDataCollection(
                    Collections.singletonList(topicData).iterator()))
            .setAcks((short) 1)
            .setTimeoutMs(5000);
    ProduceRequest.Builder requestBuilder = ProduceRequest.forCurrentMagic(requestData);
    assertThrowsForAllVersions(requestBuilder, InvalidRecordException.class);
}
Also used : ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.jupiter.api.Test)

Example 48 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the Apache Kafka project.

From class ProduceRequestTest, method testV3AndAboveCannotUseMagicV0.

@Test
public void testV3AndAboveCannotUseMagicV0() {
    // A batch written with magic v0 must be rejected by produce requests at
    // version 3 and above, which require the current (v2) record format.
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V0, CompressionType.NONE, TimestampType.NO_TIMESTAMP_TYPE, 0L);
    builder.append(10L, null, "a".getBytes());
    // Assemble the request payload via named intermediates for readability.
    ProduceRequestData.PartitionProduceData partitionData = new ProduceRequestData.PartitionProduceData()
            .setIndex(0)
            .setRecords(builder.build());
    ProduceRequestData.TopicProduceData topicData = new ProduceRequestData.TopicProduceData()
            .setName("test")
            .setPartitionData(Collections.singletonList(partitionData));
    ProduceRequestData requestData = new ProduceRequestData()
            .setTopicData(new ProduceRequestData.TopicProduceDataCollection(
                    Collections.singletonList(topicData).iterator()))
            .setAcks((short) 1)
            .setTimeoutMs(5000);
    ProduceRequest.Builder requestBuilder = ProduceRequest.forCurrentMagic(requestData);
    assertThrowsForAllVersions(requestBuilder, InvalidRecordException.class);
}
Also used : ProduceRequestData(org.apache.kafka.common.message.ProduceRequestData) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.jupiter.api.Test)

Example 49 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the Apache Kafka project.

From class FetcherTest, method testFetchNonContinuousRecords.

@Test
public void testFetchNonContinuousRecords() {
    // Fetching from a compacted topic can return records with gaps between
    // offsets; the fetcher must report each record's real offset and advance
    // the consumed position to one past the last returned offset.
    buildFetcher();
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    long[] gappedOffsets = {15L, 20L, 30L};
    for (int i = 0; i < gappedOffsets.length; i++) {
        builder.appendWithOffset(gappedOffsets[i], 0L, "key".getBytes(), ("value-" + (i + 1)).getBytes());
    }
    MemoryRecords records = builder.build();
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // Issue a normal fetch and feed back a canned full response.
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsByPartition = fetchedRecords();
    List<ConsumerRecord<byte[], byte[]>> fetched = recordsByPartition.get(tp0);
    assertEquals(3, fetched.size());
    // The next fetch position is one past the last (gapped) offset.
    assertEquals(31L, subscriptions.position(tp0).offset);
    for (int i = 0; i < gappedOffsets.length; i++) {
        assertEquals(gappedOffsets[i], fetched.get(i).offset());
    }
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) Collections.singletonList(java.util.Collections.singletonList) Arrays.asList(java.util.Arrays.asList) ArrayList(java.util.ArrayList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.jupiter.api.Test)

Example 50 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in the Apache Kafka project.

From class FetcherTest, method testReadCommittedLagMetric.

@Test
public void testReadCommittedLagMetric() {
    // Under READ_COMMITTED isolation, the records-lag metrics are measured
    // against the last stable offset (LSO) returned in the fetch response.
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    Map<String, String> partitionTags = new HashMap<>();
    partitionTags.put("topic", tp0.topic());
    partitionTags.put("partition", String.valueOf(tp0.partition()));
    MetricName maxLagName = metrics.metricInstance(metricsRegistry.recordsLagMax);
    MetricName partitionLagName = metrics.metricName("records-lag", metricGroup, partitionTags);
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric maxLag = allMetrics.get(maxLagName);
    // Before any fetch completes, the max-lag metric reports NaN.
    assertEquals(Double.NaN, (Double) maxLag.metricValue(), EPSILON);
    // An empty response with lso = 50 yields lag = lso - fetchOffset = 50.
    fetchRecords(tidp0, MemoryRecords.EMPTY, Errors.NONE, 100L, 50L, 0);
    assertEquals(50, (Double) maxLag.metricValue(), EPSILON);
    KafkaMetric partitionLag = allMetrics.get(partitionLagName);
    assertEquals(50, (Double) partitionLag.metricValue(), EPSILON);
    // With records at offsets 0..2 and lso = 150, lag = 150 - 3 = 147.
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++) {
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    }
    fetchRecords(tidp0, builder.build(), Errors.NONE, 200L, 150L, 0);
    assertEquals(147, (Double) maxLag.metricValue(), EPSILON);
    assertEquals(147, (Double) partitionLag.metricValue(), EPSILON);
    // Unsubscribing should de-register the per-partition lag metric on the
    // next sendFetches call.
    subscriptions.unsubscribe();
    fetcher.sendFetches();
    assertFalse(allMetrics.containsKey(partitionLagName));
}
Also used : MetricName(org.apache.kafka.common.MetricName) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) MemoryRecordsBuilder(org.apache.kafka.common.record.MemoryRecordsBuilder) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) Test(org.junit.jupiter.api.Test)

Aggregations

MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder)60 ByteBuffer (java.nio.ByteBuffer)28 TopicPartition (org.apache.kafka.common.TopicPartition)25 Test (org.junit.jupiter.api.Test)25 MemoryRecords (org.apache.kafka.common.record.MemoryRecords)24 HashMap (java.util.HashMap)22 Test (org.junit.Test)20 LinkedHashMap (java.util.LinkedHashMap)16 ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)14 MetricName (org.apache.kafka.common.MetricName)14 KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric)14 ArrayList (java.util.ArrayList)13 List (java.util.List)13 Record (org.apache.kafka.common.record.Record)11 SimpleRecord (org.apache.kafka.common.record.SimpleRecord)11 Arrays.asList (java.util.Arrays.asList)10 LegacyRecord (org.apache.kafka.common.record.LegacyRecord)9 Collections.emptyList (java.util.Collections.emptyList)8 Collections.singletonList (java.util.Collections.singletonList)8 KafkaException (org.apache.kafka.common.KafkaException)7