Example 46 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.

Class FetcherTest, method testReadCommittedWithCommittedAndAbortedTransactions.

@Test
public void testReadCommittedWithCommittedAndAbortedTransactions() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    List<FetchResponseData.AbortedTransaction> abortedTransactions = new ArrayList<>();
    long pid1 = 1L;
    long pid2 = 2L;
    // Appends for producer 1 (eventually committed)
    appendTransactionalRecords(buffer, pid1, 0L, new SimpleRecord("commit1-1".getBytes(), "value".getBytes()), new SimpleRecord("commit1-2".getBytes(), "value".getBytes()));
    // Appends for producer 2 (eventually aborted)
    appendTransactionalRecords(buffer, pid2, 2L, new SimpleRecord("abort2-1".getBytes(), "value".getBytes()));
    // commit producer 1
    commitTransaction(buffer, pid1, 3L);
    // append more for producer 2 (eventually aborted)
    appendTransactionalRecords(buffer, pid2, 4L, new SimpleRecord("abort2-2".getBytes(), "value".getBytes()));
    // abort producer 2
    abortTransaction(buffer, pid2, 5L);
    abortedTransactions.add(new FetchResponseData.AbortedTransaction().setProducerId(pid2).setFirstOffset(2L));
    // New transaction for producer 1 (eventually aborted)
    appendTransactionalRecords(buffer, pid1, 6L, new SimpleRecord("abort1-1".getBytes(), "value".getBytes()));
    // New transaction for producer 2 (eventually committed)
    appendTransactionalRecords(buffer, pid2, 7L, new SimpleRecord("commit2-1".getBytes(), "value".getBytes()));
    // Add messages for producer 1 (eventually aborted)
    appendTransactionalRecords(buffer, pid1, 8L, new SimpleRecord("abort1-2".getBytes(), "value".getBytes()));
    // abort producer 1
    abortTransaction(buffer, pid1, 9L);
    abortedTransactions.add(new FetchResponseData.AbortedTransaction().setProducerId(pid1).setFirstOffset(6L));
    // commit producer 2
    commitTransaction(buffer, pid2, 10L);
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    // There are only 3 committed records
    List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
    Set<String> fetchedKeys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
        fetchedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
    }
    assertEquals(mkSet("commit1-1", "commit1-2", "commit2-1"), fetchedKeys);
}
Also used: ArrayList(java.util.ArrayList) ByteBuffer(java.nio.ByteBuffer) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) FetchResponseData(org.apache.kafka.common.message.FetchResponseData) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) Collections.singletonList(java.util.Collections.singletonList) Arrays.asList(java.util.Arrays.asList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) HashSet(java.util.HashSet) Test(org.junit.jupiter.api.Test)
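
The helpers appendTransactionalRecords, commitTransaction, and abortTransaction are defined elsewhere in FetcherTest and are not reproduced by this snippet. A minimal sketch of how such helpers can be built on the public MemoryRecords API (an illustration under stated assumptions, not the test's actual code: time is assumed to be the test's MockTime, and the base sequence is simplified to the base offset):

private int appendTransactionalRecords(ByteBuffer buffer, long pid, long baseOffset, SimpleRecord... records) {
    // A transactional data batch: note the isTransactional=true flag in this builder overload.
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE,
            CompressionType.NONE, TimestampType.CREATE_TIME, baseOffset, time.milliseconds(),
            pid, (short) 0, (int) baseOffset, true, RecordBatch.NO_PARTITION_LEADER_EPOCH);
    for (SimpleRecord record : records)
        builder.append(record);
    builder.build();
    return records.length;
}

private int commitTransaction(ByteBuffer buffer, long producerId, long baseOffset) {
    // A control batch holding a single end-transaction marker; it occupies one offset.
    MemoryRecords.writeEndTransactionalMarker(buffer, baseOffset, time.milliseconds(), 0,
            producerId, (short) 0, new EndTransactionMarker(ControlRecordType.COMMIT, 0));
    return 1;
}

abortTransaction would be identical except that it writes ControlRecordType.ABORT.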

Example 47 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.

Class FetcherTest, method testConsumerPositionUpdatedWhenSkippingAbortedTransactions.

@Test
public void testConsumerPositionUpdatedWhenSkippingAbortedTransactions() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    long currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    // Ensure that we don't return any of the aborted records, but yet advance the consumer position.
    assertFalse(fetchedRecords.containsKey(tp0));
    assertEquals(currentOffset, subscriptions.position(tp0).offset);
}
Also used: FetchResponseData(org.apache.kafka.common.message.FetchResponseData) TopicPartition(org.apache.kafka.common.TopicPartition) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) Collections.singletonList(java.util.Collections.singletonList) Arrays.asList(java.util.Arrays.asList) ArrayList(java.util.ArrayList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) ByteArrayDeserializer(org.apache.kafka.common.serialization.ByteArrayDeserializer) ByteBuffer(java.nio.ByteBuffer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.jupiter.api.Test)
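
When a test like this fails, dumping the batch layout of the buffer makes the aborted range easy to see. A small sketch using only the public batch iterator of MemoryRecords (illustrative, not part of the test):

MemoryRecords built = MemoryRecords.readableRecords(buffer.duplicate());
for (MutableRecordBatch batch : built.batches()) {
    // Control batches carry the commit/abort markers; data batches carry the records themselves.
    System.out.printf("offsets %d-%d producerId=%d control=%s%n",
            batch.baseOffset(), batch.lastOffset(), batch.producerId(), batch.isControlBatch());
}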

Example 48 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.

Class UncompressedRecordBatchValidationBenchmark, method measureAssignOffsetsNonCompressed.

@Benchmark
public void measureAssignOffsetsNonCompressed(Blackhole bh) {
    MemoryRecords records = MemoryRecords.readableRecords(singleBatchBuffer.duplicate());
    LogValidator.assignOffsetsNonCompressed(records, new TopicPartition("a", 0), new LongRef(startingOffset), System.currentTimeMillis(), false, TimestampType.CREATE_TIME, Long.MAX_VALUE, 0, new AppendOrigin.Client$(), messageVersion, brokerTopicStats);
}
Also used: AppendOrigin(kafka.log.AppendOrigin) TopicPartition(org.apache.kafka.common.TopicPartition) LongRef(kafka.common.LongRef) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Benchmark(org.openjdk.jmh.annotations.Benchmark)
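
The singleBatchBuffer, startingOffset, messageVersion, and brokerTopicStats fields come from the benchmark's @Setup state, which this snippet omits. A plausible setup that yields a single uncompressed batch might look like the following (a sketch under that assumption, not the benchmark's actual code):

@Setup
public void init() {
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1 << 20),
            CompressionType.NONE, TimestampType.CREATE_TIME, startingOffset);
    for (int i = 0; i < 1000; i++)
        builder.append(System.currentTimeMillis(), null, ("value-" + i).getBytes(StandardCharsets.UTF_8));
    // buffer() exposes the underlying ByteBuffer of the built MemoryRecords.
    singleBatchBuffer = builder.build().buffer();
}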

Example 49 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.

Class KafkaRaftClientTest, method testLeaderAppendSingleMemberQuorum.

@Test
public void testLeaderAppendSingleMemberQuorum() throws Exception {
    int localId = 0;
    Set<Integer> voters = Collections.singleton(localId);
    RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters).build();
    long now = context.time.milliseconds();
    context.pollUntil(() -> context.log.endOffset().offset == 1L);
    context.assertElectedLeader(1, localId);
    // We still write the leader change message
    assertEquals(OptionalLong.of(1L), context.client.highWatermark());
    String[] appendRecords = new String[] { "a", "b", "c" };
    // First poll has no high watermark advance
    context.client.poll();
    assertEquals(OptionalLong.of(1L), context.client.highWatermark());
    context.client.scheduleAppend(context.currentEpoch(), Arrays.asList(appendRecords));
    // Then poll the appended data with leader change record
    context.client.poll();
    assertEquals(OptionalLong.of(4L), context.client.highWatermark());
    // Now try reading it
    int otherNodeId = 1;
    List<MutableRecordBatch> batches = new ArrayList<>(2);
    boolean appended = true;
    // Continue to fetch until the leader returns an empty response
    while (appended) {
        long fetchOffset = 0;
        int lastFetchedEpoch = 0;
        if (!batches.isEmpty()) {
            MutableRecordBatch lastBatch = batches.get(batches.size() - 1);
            fetchOffset = lastBatch.lastOffset() + 1;
            lastFetchedEpoch = lastBatch.partitionLeaderEpoch();
        }
        context.deliverRequest(context.fetchRequest(1, otherNodeId, fetchOffset, lastFetchedEpoch, 0));
        context.pollUntilResponse();
        MemoryRecords fetchedRecords = context.assertSentFetchPartitionResponse(Errors.NONE, 1, OptionalInt.of(localId));
        List<MutableRecordBatch> fetchedBatch = Utils.toList(fetchedRecords.batchIterator());
        batches.addAll(fetchedBatch);
        appended = !fetchedBatch.isEmpty();
    }
    assertEquals(2, batches.size());
    MutableRecordBatch leaderChangeBatch = batches.get(0);
    assertTrue(leaderChangeBatch.isControlBatch());
    List<Record> readRecords = Utils.toList(leaderChangeBatch.iterator());
    assertEquals(1, readRecords.size());
    Record record = readRecords.get(0);
    assertEquals(now, record.timestamp());
    RaftClientTestContext.verifyLeaderChangeMessage(localId, Collections.singletonList(localId), Collections.singletonList(localId), record.key(), record.value());
    MutableRecordBatch batch = batches.get(1);
    assertEquals(1, batch.partitionLeaderEpoch());
    readRecords = Utils.toList(batch.iterator());
    assertEquals(3, readRecords.size());
    for (int i = 0; i < appendRecords.length; i++) {
        assertEquals(appendRecords[i], Utils.utf8(readRecords.get(i).value()));
    }
}
Also used: ArrayList(java.util.ArrayList) MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch) Record(org.apache.kafka.common.record.Record) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.jupiter.api.Test)
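
verifyLeaderChangeMessage checks the full key and value payload. If a test only needs to confirm which kind of control record a batch holds, the record key can be decoded directly with ControlRecordType (a standalone sketch, not taken from the test):

Record controlRecord = Utils.toList(leaderChangeBatch.iterator()).get(0);
// The leading bytes of a control record's key encode its type; a raft election writes LEADER_CHANGE.
assertEquals(ControlRecordType.LEADER_CHANGE, ControlRecordType.parse(controlRecord.key()));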

Example 50 with MemoryRecords

Use of org.apache.kafka.common.record.MemoryRecords in project kafka by apache.

Class BatchBuilderTest, method testBuildBatch.

@ParameterizedTest
@EnumSource(CompressionType.class)
void testBuildBatch(CompressionType compressionType) {
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    long baseOffset = 57;
    long logAppendTime = time.milliseconds();
    boolean isControlBatch = false;
    int leaderEpoch = 15;
    BatchBuilder<String> builder = new BatchBuilder<>(buffer, serde, compressionType, baseOffset, logAppendTime, isControlBatch, leaderEpoch, buffer.limit());
    List<String> records = Arrays.asList("a", "ap", "app", "appl", "apple");
    records.forEach(record -> builder.appendRecord(record, null));
    MemoryRecords builtRecordSet = builder.build();
    assertTrue(builder.bytesNeeded(Arrays.asList("a"), null).isPresent());
    assertThrows(IllegalStateException.class, () -> builder.appendRecord("a", null));
    List<MutableRecordBatch> builtBatches = Utils.toList(builtRecordSet.batchIterator());
    assertEquals(1, builtBatches.size());
    assertEquals(records, builder.records());
    MutableRecordBatch batch = builtBatches.get(0);
    assertEquals(5, batch.countOrNull());
    assertEquals(compressionType, batch.compressionType());
    assertEquals(baseOffset, batch.baseOffset());
    assertEquals(logAppendTime, batch.maxTimestamp());
    assertEquals(isControlBatch, batch.isControlBatch());
    assertEquals(leaderEpoch, batch.partitionLeaderEpoch());
    List<String> builtRecords = Utils.toList(batch).stream().map(record -> Utils.utf8(record.value())).collect(Collectors.toList());
    assertEquals(records, builtRecords);
}
Also used: Utils(org.apache.kafka.common.utils.Utils) ValueSource(org.junit.jupiter.params.provider.ValueSource) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) CompressionType(org.apache.kafka.common.record.CompressionType) EnumSource(org.junit.jupiter.params.provider.EnumSource) Collectors(java.util.stream.Collectors) ByteBuffer(java.nio.ByteBuffer) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) List(java.util.List) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals)
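
bytesNeeded returns a non-empty OptionalInt once the builder can no longer accept the given records, which is how callers decide when to roll to a fresh batch. A sketch of that pattern (illustrative; flush and newBatchBuilder are hypothetical helpers, not part of BatchBuilder):

void appendOrRoll(String nextRecord) {
    OptionalInt needed = builder.bytesNeeded(Collections.singletonList(nextRecord), null);
    if (needed.isPresent()) {
        // The current batch is full: complete it and hand it off, then start a new builder.
        flush(builder.build());
        builder = newBatchBuilder(needed.getAsInt());
    }
    builder.appendRecord(nextRecord, null);
}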

Aggregations

MemoryRecords (org.apache.kafka.common.record.MemoryRecords): 108
TopicPartition (org.apache.kafka.common.TopicPartition): 59
Test (org.junit.jupiter.api.Test): 43
SimpleRecord (org.apache.kafka.common.record.SimpleRecord): 40
ByteBuffer (java.nio.ByteBuffer): 34
ArrayList (java.util.ArrayList): 28
List (java.util.List): 27
Test (org.junit.Test): 27
HashMap (java.util.HashMap): 26
LinkedHashMap (java.util.LinkedHashMap): 23
MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder): 23
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 18
FetchResponseData (org.apache.kafka.common.message.FetchResponseData): 16
Collections.singletonList (java.util.Collections.singletonList): 15
Record (org.apache.kafka.common.record.Record): 15
Arrays.asList (java.util.Arrays.asList): 14
Collections.emptyList (java.util.Collections.emptyList): 14
ByteArrayDeserializer (org.apache.kafka.common.serialization.ByteArrayDeserializer): 14
Metrics (org.apache.kafka.common.metrics.Metrics): 12
MutableRecordBatch (org.apache.kafka.common.record.MutableRecordBatch): 11