Example 11 with Records

Use of org.apache.kafka.common.record.Records in project kafka by apache.

The class ProduceRequest, method validateRecords:

public static void validateRecords(short version, BaseRecords baseRecords) {
    if (version >= 3) {
        if (baseRecords instanceof Records) {
            Records records = (Records) baseRecords;
            Iterator<? extends RecordBatch> iterator = records.batches().iterator();
            if (!iterator.hasNext())
                throw new InvalidRecordException("Produce requests with version " + version + " must have at least one record batch");
            RecordBatch entry = iterator.next();
            if (entry.magic() != RecordBatch.MAGIC_VALUE_V2)
                throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to contain record batches with magic version 2");
            if (version < 7 && entry.compressionType() == CompressionType.ZSTD) {
                throw new UnsupportedCompressionTypeException("Produce requests with version " + version + " are not allowed to use ZStandard compression");
            }
            if (iterator.hasNext())
                throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to contain exactly one record batch");
        }
    }
    // Note that we do not do similar validation for older versions, to ensure compatibility with
    // clients which send the wrong magic version in the wrong version of the produce request. The
    // broker did not do this validation before, so we maintain that behavior here.
}
Also used: UnsupportedCompressionTypeException(org.apache.kafka.common.errors.UnsupportedCompressionTypeException) RecordBatch(org.apache.kafka.common.record.RecordBatch) Records(org.apache.kafka.common.record.Records) BaseRecords(org.apache.kafka.common.record.BaseRecords) InvalidRecordException(org.apache.kafka.common.InvalidRecordException)
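
As a rough usage sketch (the ValidateRecordsSketch class is illustrative, not from the Kafka sources; it assumes ProduceRequest from org.apache.kafka.common.requests and the MemoryRecords helpers shown in the later examples): MemoryRecords.withRecords builds magic-v2 batches by default, so a single-batch payload passes, while MemoryRecords.EMPTY contains no batches and is rejected for versions >= 3.

import org.apache.kafka.common.InvalidRecordException;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.requests.ProduceRequest;

public class ValidateRecordsSketch {
    public static void main(String[] args) {
        // One magic-v2 batch: accepted for version >= 3.
        MemoryRecords valid = MemoryRecords.withRecords(
            CompressionType.NONE, new SimpleRecord("value".getBytes()));
        ProduceRequest.validateRecords((short) 7, valid);

        // No batches at all: rejected for version >= 3.
        try {
            ProduceRequest.validateRecords((short) 7, MemoryRecords.EMPTY);
        } catch (InvalidRecordException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}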

Example 12 with Records

Use of org.apache.kafka.common.record.Records in project kafka by apache.

The class MockLogTest, method readOffsets:

private Optional<OffsetRange> readOffsets(long startOffset, Isolation isolation) {
    // The current MockLog implementation reads at most one batch
    long firstReadOffset = -1L;
    long lastReadOffset = -1L;
    long currentStart = startOffset;
    boolean foundRecord = true;
    while (foundRecord) {
        foundRecord = false;
        Records records = log.read(currentStart, isolation).records;
        for (Record record : records.records()) {
            foundRecord = true;
            if (firstReadOffset < 0L) {
                firstReadOffset = record.offset();
            }
            if (record.offset() > lastReadOffset) {
                lastReadOffset = record.offset();
            }
        }
        currentStart = lastReadOffset + 1;
    }
    if (firstReadOffset < 0) {
        return Optional.empty();
    } else {
        return Optional.of(new OffsetRange(firstReadOffset, lastReadOffset));
    }
}
Also used: Record(org.apache.kafka.common.record.Record) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) Records(org.apache.kafka.common.record.Records) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)
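
The same first/last-offset scan can be seen in isolation against an in-memory Records instance. A minimal sketch (the OffsetScanSketch class is illustrative; it uses only the MemoryRecords helpers already listed above), relying on records() flattening the records of every batch in the set:

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.Records;
import org.apache.kafka.common.record.SimpleRecord;

public class OffsetScanSketch {
    public static void main(String[] args) {
        Records records = MemoryRecords.withRecords(CompressionType.NONE,
            new SimpleRecord("a".getBytes()), new SimpleRecord("b".getBytes()));
        long first = -1L;
        long last = -1L;
        for (Record record : records.records()) {
            if (first < 0L)
                first = record.offset();
            last = Math.max(last, record.offset());
        }
        // Prints "first=0 last=1": both records land in one batch starting at offset 0.
        System.out.println("first=" + first + " last=" + last);
    }
}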

Example 13 with Records

Use of org.apache.kafka.common.record.Records in project kafka by apache.

The class MockLogTest, method testAppendAsFollower:

@Test
public void testAppendAsFollower() throws IOException {
    final long initialOffset = 5;
    final int epoch = 3;
    SimpleRecord recordFoo = new SimpleRecord("foo".getBytes());
    try (RawSnapshotWriter snapshot = log.storeSnapshot(new OffsetAndEpoch(initialOffset, 0)).get()) {
        snapshot.freeze();
    }
    log.truncateToLatestSnapshot();
    log.appendAsFollower(MemoryRecords.withRecords(initialOffset, CompressionType.NONE, epoch, recordFoo));
    assertEquals(initialOffset, log.startOffset());
    assertEquals(initialOffset + 1, log.endOffset().offset);
    assertEquals(3, log.lastFetchedEpoch());
    Records records = log.read(5L, Isolation.UNCOMMITTED).records;
    List<ByteBuffer> extractRecords = new ArrayList<>();
    for (Record record : records.records()) {
        extractRecords.add(record.value());
    }
    assertEquals(1, extractRecords.size());
    assertEquals(recordFoo.value(), extractRecords.get(0));
    assertEquals(new OffsetAndEpoch(5, 0), log.endOffsetForEpoch(0));
    assertEquals(new OffsetAndEpoch(log.endOffset().offset, epoch), log.endOffsetForEpoch(epoch));
}
Also used: RawSnapshotWriter(org.apache.kafka.snapshot.RawSnapshotWriter) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) ArrayList(java.util.ArrayList) Record(org.apache.kafka.common.record.Record) Records(org.apache.kafka.common.record.Records) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.jupiter.api.Test)
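
A quick sketch of the MemoryRecords.withRecords overload used in the append above (the WithRecordsSketch class is illustrative; offsets and epoch are the same values the test uses): the initial offset becomes the batch base offset, and the epoch is stored as the batch's partition leader epoch, which is what lastFetchedEpoch() reflects after the append.

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class WithRecordsSketch {
    public static void main(String[] args) {
        long initialOffset = 5L;
        int epoch = 3;
        MemoryRecords records = MemoryRecords.withRecords(
            initialOffset, CompressionType.NONE, epoch,
            new SimpleRecord("foo".getBytes()));
        RecordBatch batch = records.batches().iterator().next();
        System.out.println(batch.baseOffset());           // 5
        System.out.println(batch.partitionLeaderEpoch()); // 3
    }
}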

Example 14 with Records

Use of org.apache.kafka.common.record.Records in project kafka by apache.

The class MockLogTest, method validateReadRecords:

private static void validateReadRecords(List<SimpleRecord> expectedRecords, MockLog log) {
    assertEquals(0L, log.startOffset());
    assertEquals(expectedRecords.size(), log.endOffset().offset);
    int currentOffset = 0;
    while (currentOffset < log.endOffset().offset) {
        Records records = log.read(currentOffset, Isolation.UNCOMMITTED).records;
        List<? extends RecordBatch> batches = Utils.toList(records.batches().iterator());
        assertTrue(batches.size() > 0);
        for (RecordBatch batch : batches) {
            assertTrue(batch.countOrNull() > 0);
            assertEquals(currentOffset, batch.baseOffset());
            assertEquals(currentOffset + batch.countOrNull() - 1, batch.lastOffset());
            for (Record record : batch) {
                assertEquals(currentOffset, record.offset());
                assertEquals(expectedRecords.get(currentOffset), new SimpleRecord(record));
                currentOffset += 1;
            }
            assertEquals(currentOffset - 1, batch.lastOffset());
        }
    }
}
Also used: RecordBatch(org.apache.kafka.common.record.RecordBatch) SimpleRecord(org.apache.kafka.common.record.SimpleRecord) Record(org.apache.kafka.common.record.Record) Records(org.apache.kafka.common.record.Records) MemoryRecords(org.apache.kafka.common.record.MemoryRecords)
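
The batch invariants this test leans on can be checked directly. A minimal standalone sketch (the BatchInvariantsSketch class is illustrative, not part of MockLogTest) showing that for a magic-v2 batch the record count is stored in the batch header, so countOrNull() is non-null and lastOffset() equals baseOffset() + countOrNull() - 1:

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class BatchInvariantsSketch {
    public static void main(String[] args) {
        MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
            new SimpleRecord("a".getBytes()),
            new SimpleRecord("b".getBytes()),
            new SimpleRecord("c".getBytes()));
        RecordBatch batch = records.batches().iterator().next();
        System.out.println(batch.baseOffset());  // 0
        System.out.println(batch.countOrNull()); // 3
        System.out.println(batch.lastOffset());  // 2
    }
}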

Example 15 with Records

Use of org.apache.kafka.common.record.Records in project kafka by apache.

The class KafkaRaftClientTest, method testInitializeAsCandidateAndBecomeLeaderQuorumOfThree:

@Test
public void testInitializeAsCandidateAndBecomeLeaderQuorumOfThree() throws Exception {
    int localId = 0;
    final int firstNodeId = 1;
    final int secondNodeId = 2;
    Set<Integer> voters = Utils.mkSet(localId, firstNodeId, secondNodeId);
    RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters).build();
    context.assertUnknownLeader(0);
    context.time.sleep(2 * context.electionTimeoutMs());
    context.pollUntilRequest();
    context.assertVotedCandidate(1, localId);
    int correlationId = context.assertSentVoteRequest(1, 0, 0L, 2);
    context.deliverResponse(correlationId, firstNodeId, context.voteResponse(true, Optional.empty(), 1));
    // Become leader after receiving the vote
    context.pollUntil(() -> context.log.endOffset().offset == 1L);
    context.assertElectedLeader(1, localId);
    long electionTimestamp = context.time.milliseconds();
    // Leader change record appended
    assertEquals(1L, context.log.endOffset().offset);
    assertEquals(1L, context.log.lastFlushedOffset());
    // Send BeginQuorumEpoch to voters
    context.client.poll();
    context.assertSentBeginQuorumEpochRequest(1, 2);
    Records records = context.log.read(0, Isolation.UNCOMMITTED).records;
    RecordBatch batch = records.batches().iterator().next();
    assertTrue(batch.isControlBatch());
    Record record = batch.iterator().next();
    assertEquals(electionTimestamp, record.timestamp());
    RaftClientTestContext.verifyLeaderChangeMessage(localId, Arrays.asList(localId, firstNodeId, secondNodeId), Arrays.asList(firstNodeId, localId), record.key(), record.value());
}
Also used: RecordBatch(org.apache.kafka.common.record.RecordBatch) MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch) Record(org.apache.kafka.common.record.Record) Records(org.apache.kafka.common.record.Records) MemoryRecords(org.apache.kafka.common.record.MemoryRecords) Test(org.junit.jupiter.api.Test)
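
For contrast with the control batch asserted above, a short illustrative sketch (the ControlBatchSketch class is not from the Kafka sources): batches built from application records are plain data batches, so isControlBatch() returns false; only internal markers, such as the leader change record the raft client appends on election, report true.

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class ControlBatchSketch {
    public static void main(String[] args) {
        MemoryRecords data = MemoryRecords.withRecords(
            CompressionType.NONE, new SimpleRecord("x".getBytes()));
        RecordBatch batch = data.batches().iterator().next();
        System.out.println(batch.isControlBatch()); // false: plain data batch
    }
}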

Aggregations

Records (org.apache.kafka.common.record.Records) 19
MemoryRecords (org.apache.kafka.common.record.MemoryRecords) 16
Test (org.junit.jupiter.api.Test) 11
Record (org.apache.kafka.common.record.Record) 9
ByteBuffer (java.nio.ByteBuffer) 8
RecordBatch (org.apache.kafka.common.record.RecordBatch) 7
SimpleRecord (org.apache.kafka.common.record.SimpleRecord) 7
FetchResponseData (org.apache.kafka.common.message.FetchResponseData) 6
ArrayList (java.util.ArrayList) 5
KafkaException (org.apache.kafka.common.KafkaException) 3
Errors (org.apache.kafka.common.protocol.Errors) 3
DataOutputStream (java.io.DataOutputStream) 2
Field (java.lang.reflect.Field) 2
StandardCharsets (java.nio.charset.StandardCharsets) 2
Duration (java.time.Duration) 2
Arrays (java.util.Arrays) 2
Arrays.asList (java.util.Arrays.asList) 2
Collections (java.util.Collections) 2
Collections.emptyList (java.util.Collections.emptyList) 2
Collections.emptyMap (java.util.Collections.emptyMap) 2