
Example 56 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project kafka by apache.

From class FetcherTest, method appendTransactionalRecords.

private int appendTransactionalRecords(ByteBuffer buffer, long pid, long baseOffset, int baseSequence, SimpleRecord... records) {
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, TimestampType.CREATE_TIME, baseOffset, time.milliseconds(), pid, (short) 0, baseSequence, true, RecordBatch.NO_PARTITION_LEADER_EPOCH);
    for (SimpleRecord record : records) {
        builder.append(record);
    }
    builder.build();
    return records.length;
}
Also used : SimpleRecord (org.apache.kafka.common.record.SimpleRecord), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder)
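
For reference, a minimal standalone sketch of the same builder call outside the test harness; the buffer size, producer id, and record payload are illustrative values, not taken from FetcherTest:

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.record.TimestampType;

public class TransactionalBuilderSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(1024);
        long producerId = 1000L;   // illustrative producer id
        short producerEpoch = 0;
        int baseSequence = 0;
        // isTransactional = true marks every batch written by this builder as part of a transaction
        MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE,
                CompressionType.NONE, TimestampType.CREATE_TIME, 0L, System.currentTimeMillis(),
                producerId, producerEpoch, baseSequence, true, RecordBatch.NO_PARTITION_LEADER_EPOCH);
        builder.append(new SimpleRecord("key".getBytes(), "value".getBytes()));
        MemoryRecords records = builder.build();
        System.out.println("transactional batch size in bytes: " + records.sizeInBytes());
    }
}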

Example 57 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project kafka by apache.

From class FetcherTest, method testTruncationDetected.

@Test
public void testTruncationDetected() {
    // Create some records with leader epoch 1; the record epoch is earlier than the leader epoch on the client
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, TimestampType.CREATE_TIME, 0L, RecordBatch.NO_TIMESTAMP, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false, 1);
    builder.appendWithOffset(0L, 0L, "key".getBytes(), "value-1".getBytes());
    builder.appendWithOffset(1L, 0L, "key".getBytes(), "value-2".getBytes());
    builder.appendWithOffset(2L, 0L, "key".getBytes(), "value-3".getBytes());
    MemoryRecords records = builder.build();
    buildFetcher();
    assignFromUser(singleton(tp0));
    // Initialize the epoch=2
    Map<String, Integer> partitionCounts = new HashMap<>();
    partitionCounts.put(tp0.topic(), 4);
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), partitionCounts, tp -> 2, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L);
    // Offset validation requires OffsetForLeaderEpoch request v3 or higher
    Node node = metadata.fetch().nodes().get(0);
    apiVersions.update(node.idString(), NodeApiVersions.create());
    // Seek
    Metadata.LeaderAndEpoch leaderAndEpoch = new Metadata.LeaderAndEpoch(metadata.currentLeader(tp0).leader, Optional.of(1));
    subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.of(1), leaderAndEpoch));
    // Check for truncation, this should cause tp0 to go into validation
    fetcher.validateOffsetsIfNeeded();
    // No fetches sent since we entered validation
    assertEquals(0, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    assertTrue(subscriptions.awaitingValidation(tp0));
    // Prepare OffsetForEpoch response then check that we update the subscription position correctly.
    client.prepareResponse(prepareOffsetsForLeaderEpochResponse(tp0, Errors.NONE, 1, 10L));
    consumerClient.pollNoWakeup();
    assertFalse(subscriptions.awaitingValidation(tp0));
    // Fetch again, now it works
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
    consumerClient.pollNoWakeup();
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchedRecords();
    assertTrue(partitionRecords.containsKey(tp0));
    assertEquals(3L, subscriptions.position(tp0).offset);
    assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(1, value.intValue()));
}
Also used : LinkedHashMap (java.util.LinkedHashMap), HashMap (java.util.HashMap), Node (org.apache.kafka.common.Node), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), Metadata (org.apache.kafka.clients.Metadata), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), TopicPartition (org.apache.kafka.common.TopicPartition), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), Collections.singletonList (java.util.Collections.singletonList), Arrays.asList (java.util.Arrays.asList), ArrayList (java.util.ArrayList), Collections.emptyList (java.util.Collections.emptyList), List (java.util.List), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.jupiter.api.Test)
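
A minimal sketch isolating just the record-building step of the test above: the partition leader epoch handed to MemoryRecords.builder (1, as in the test) is stamped onto the written batch and can be read back from it. The print statement is illustrative, not part of the original test.

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class LeaderEpochBatchSketch {
    public static void main(String[] args) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
                RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, TimestampType.CREATE_TIME,
                0L, RecordBatch.NO_TIMESTAMP, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH,
                RecordBatch.NO_SEQUENCE, false, 1); // partitionLeaderEpoch = 1
        builder.appendWithOffset(0L, 0L, "key".getBytes(), "value-1".getBytes());
        MemoryRecords records = builder.build();
        for (RecordBatch batch : records.batches()) {
            // the epoch passed to the builder is carried in the batch header
            System.out.println("partition leader epoch in batch: " + batch.partitionLeaderEpoch());
        }
    }
}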

Example 58 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project kafka by apache.

From class FetcherTest, method testInvalidDefaultRecordBatch.

@Test
public void testInvalidDefaultRecordBatch() {
    buildFetcher();
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    ByteBufferOutputStream out = new ByteBufferOutputStream(buffer);
    MemoryRecordsBuilder builder = new MemoryRecordsBuilder(out, DefaultRecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, TimestampType.CREATE_TIME, 0L, 10L, 0L, (short) 0, 0, false, false, 0, 1024);
    builder.append(10L, "key".getBytes(), "value".getBytes());
    builder.close();
    buffer.flip();
    // Garble the CRC
    buffer.position(17);
    buffer.put("beef".getBytes());
    buffer.position(0);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    // fetchedRecords() should always throw an exception because of the corrupt batch.
    for (int i = 0; i < 2; i++) {
        try {
            fetcher.collectFetch();
            fail("fetchedRecords should have raised KafkaException");
        } catch (KafkaException e) {
            assertEquals(0, subscriptions.position(tp0).offset);
        }
    }
}
Also used : ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), KafkaException (org.apache.kafka.common.KafkaException), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.jupiter.api.Test)
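
The hard-coded position 17 is where the 4-byte CRC of a v2 (default) record batch sits: the header begins with the base offset (8 bytes), batch length (4), partition leader epoch (4) and magic (1), which adds up to 17. A minimal sketch of the same corruption, checked directly against the batch rather than through the fetcher; buffer size and payload are illustrative:

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class CorruptCrcSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(1024);
        MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        builder.append(10L, "key".getBytes(), "value".getBytes());
        builder.close();
        buffer.flip();
        // overwrite the CRC field (bytes 17..20 of the v2 batch header) with garbage
        buffer.position(17);
        buffer.put("beef".getBytes());
        buffer.position(0);
        for (RecordBatch batch : MemoryRecords.readableRecords(buffer).batches()) {
            System.out.println("batch passes CRC check? " + batch.isValid()); // expected: false
        }
    }
}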

Example 59 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project kafka by apache.

From class RecordAccumulatorTest, method testSplitAndReenqueue.

@Test
public void testSplitAndReenqueue() throws ExecutionException, InterruptedException {
    long now = time.milliseconds();
    RecordAccumulator accum = createTestRecordAccumulator(1024, 10 * 1024, CompressionType.GZIP, 10);
    // Create a big batch
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    ProducerBatch batch = new ProducerBatch(tp1, builder, now, true);
    byte[] value = new byte[1024];
    final AtomicInteger acked = new AtomicInteger(0);
    Callback cb = new Callback() {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            acked.incrementAndGet();
        }
    };
    // Append two messages so the batch is too big.
    Future<RecordMetadata> future1 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now);
    Future<RecordMetadata> future2 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now);
    assertNotNull(future1);
    assertNotNull(future2);
    batch.close();
    // Enqueue the batch to the accumulator as if the batch was created by the accumulator.
    accum.reenqueue(batch, now);
    time.sleep(101L);
    // Drain the batch.
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertTrue(result.readyNodes.size() > 0, "The batch should be ready");
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertEquals(1, drained.size(), "Only node1 should be drained");
    assertEquals(1, drained.get(node1.id()).size(), "Only one batch should be drained");
    // Split and reenqueue the batch.
    accum.splitAndReenqueue(drained.get(node1.id()).get(0));
    time.sleep(101L);
    drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertFalse(drained.isEmpty());
    assertFalse(drained.get(node1.id()).isEmpty());
    drained.get(node1.id()).get(0).complete(acked.get(), 100L);
    assertEquals(1, acked.get(), "The first message should have been acked.");
    assertTrue(future1.isDone());
    assertEquals(0, future1.get().offset());
    drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertFalse(drained.isEmpty());
    assertFalse(drained.get(node1.id()).isEmpty());
    drained.get(node1.id()).get(0).complete(acked.get(), 100L);
    assertEquals(2, acked.get(), "Both messages should have been acked.");
    assertTrue(future2.isDone());
    assertEquals(1, future2.get().offset());
}
Also used : ByteBuffer (java.nio.ByteBuffer), KafkaException (org.apache.kafka.common.KafkaException), ExecutionException (java.util.concurrent.ExecutionException), UnsupportedVersionException (org.apache.kafka.common.errors.UnsupportedVersionException), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Callback (org.apache.kafka.clients.producer.Callback), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), Arrays.asList (java.util.Arrays.asList), List (java.util.List), ArrayList (java.util.ArrayList), Test (org.junit.jupiter.api.Test)
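
The splitting that splitAndReenqueue relies on happens in ProducerBatch.split. A minimal standalone sketch of that mechanism under the same conditions as the test (GZIP compression, two 1 KB values, 512-byte split target); the topic name and sizes are illustrative:

import java.nio.ByteBuffer;
import java.util.Deque;
import org.apache.kafka.clients.producer.internals.ProducerBatch;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.TimestampType;

public class SplitBatchSketch {
    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        ByteBuffer buffer = ByteBuffer.allocate(4096);
        MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.GZIP, TimestampType.CREATE_TIME, 0L);
        ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 0), builder, now, true);
        byte[] value = new byte[1024];
        // two 1 KB values exceed the 512-byte split target, so the batch splits into at least two
        batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, null, now);
        batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, null, now);
        batch.close();
        Deque<ProducerBatch> parts = batch.split(512);
        System.out.println("split into " + parts.size() + " batches");
    }
}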

Example 60 with MemoryRecordsBuilder

Use of org.apache.kafka.common.record.MemoryRecordsBuilder in project kafka by apache.

From class ProducerBatchTest, method testSplitPreservesMagicAndCompressionType.

@Test
public void testSplitPreservesMagicAndCompressionType() {
    for (byte magic : Arrays.asList(MAGIC_VALUE_V0, MAGIC_VALUE_V1, MAGIC_VALUE_V2)) {
        for (CompressionType compressionType : CompressionType.values()) {
            if (compressionType == CompressionType.NONE && magic < MAGIC_VALUE_V2)
                continue;
            if (compressionType == CompressionType.ZSTD && magic < MAGIC_VALUE_V2)
                continue;
            MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), magic, compressionType, TimestampType.CREATE_TIME, 0L);
            ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now);
            while (true) {
                FutureRecordMetadata future = batch.tryAppend(now, "hi".getBytes(), "there".getBytes(), Record.EMPTY_HEADERS, null, now);
                if (future == null)
                    break;
            }
            Deque<ProducerBatch> batches = batch.split(512);
            assertTrue(batches.size() >= 2);
            for (ProducerBatch splitProducerBatch : batches) {
                assertEquals(magic, splitProducerBatch.magic());
                assertTrue(splitProducerBatch.isSplitBatch());
                for (RecordBatch splitBatch : splitProducerBatch.records().batches()) {
                    assertEquals(magic, splitBatch.magic());
                    assertEquals(0L, splitBatch.baseOffset());
                    assertEquals(compressionType, splitBatch.compressionType());
                }
            }
        }
    }
}
Also used : TopicPartition (org.apache.kafka.common.TopicPartition), RecordBatch (org.apache.kafka.common.record.RecordBatch), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), CompressionType (org.apache.kafka.common.record.CompressionType), Test (org.junit.jupiter.api.Test)
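
The two continue guards in the loop correspond to constraints in the record layer: uncompressed v0/v1 batches cannot be split by the producer, and MemoryRecordsBuilder rejects ZSTD for any magic below v2. A minimal sketch of the ZSTD check (the buffer size is illustrative):

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class ZstdMagicSketch {
    public static void main(String[] args) {
        try {
            // MemoryRecordsBuilder rejects ZSTD with a pre-v2 magic, which is why the test skips that combination
            MemoryRecords.builder(ByteBuffer.allocate(128), RecordBatch.MAGIC_VALUE_V1,
                    CompressionType.ZSTD, TimestampType.CREATE_TIME, 0L);
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}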

Aggregations

MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder): 60
ByteBuffer (java.nio.ByteBuffer): 28
TopicPartition (org.apache.kafka.common.TopicPartition): 25
Test (org.junit.jupiter.api.Test): 25
MemoryRecords (org.apache.kafka.common.record.MemoryRecords): 24
HashMap (java.util.HashMap): 22
Test (org.junit.Test): 20
LinkedHashMap (java.util.LinkedHashMap): 16
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 14
MetricName (org.apache.kafka.common.MetricName): 14
KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric): 14
ArrayList (java.util.ArrayList): 13
List (java.util.List): 13
Record (org.apache.kafka.common.record.Record): 11
SimpleRecord (org.apache.kafka.common.record.SimpleRecord): 11
Arrays.asList (java.util.Arrays.asList): 10
LegacyRecord (org.apache.kafka.common.record.LegacyRecord): 9
Collections.emptyList (java.util.Collections.emptyList): 8
Collections.singletonList (java.util.Collections.singletonList): 8
KafkaException (org.apache.kafka.common.KafkaException): 7