Use of org.apache.kafka.common.record.Records in project kafka by apache.
The class ProduceRequest, method validateRecords.
public static void validateRecords(short version, BaseRecords baseRecords) {
    if (version >= 3) {
        if (baseRecords instanceof Records) {
            Records records = (Records) baseRecords;
            Iterator<? extends RecordBatch> iterator = records.batches().iterator();
            if (!iterator.hasNext())
                throw new InvalidRecordException("Produce requests with version " + version + " must have at least one record batch");
            RecordBatch entry = iterator.next();
            if (entry.magic() != RecordBatch.MAGIC_VALUE_V2)
                throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to contain record batches with magic version 2");
            if (version < 7 && entry.compressionType() == CompressionType.ZSTD) {
                throw new UnsupportedCompressionTypeException("Produce requests with version " + version + " are not allowed to use ZStandard compression");
            }
            if (iterator.hasNext())
                throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to contain exactly one record batch");
        }
    }
    // Note that we do not do similar validation for older versions to ensure compatibility with
    // clients which send the wrong magic version in earlier versions of the produce request. The
    // broker did not do this validation before, so we maintain that behavior here.
}
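As a quick illustration of the rule this enforces, here is a minimal sketch (assuming a Kafka version where validateRecords is public, as above): MemoryRecords.withRecords builds a single magic-v2 batch that passes the check, while an empty record set is rejected.

import org.apache.kafka.common.InvalidRecordException;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.requests.ProduceRequest;

public class ValidateRecordsSketch {
    public static void main(String[] args) {
        // One magic-v2 batch, no compression: accepted for version >= 3.
        MemoryRecords oneBatch = MemoryRecords.withRecords(
                CompressionType.NONE, new SimpleRecord("value".getBytes()));
        ProduceRequest.validateRecords((short) 8, oneBatch);

        // No batches at all: rejected with InvalidRecordException.
        try {
            ProduceRequest.validateRecords((short) 8, MemoryRecords.EMPTY);
        } catch (InvalidRecordException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}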
Use of org.apache.kafka.common.record.Records in project kafka by apache.
The class MockLogTest, method readOffsets.
private Optional<OffsetRange> readOffsets(long startOffset, Isolation isolation) {
    // The current MockLog implementation reads at most one batch per call,
    // so keep reading until a read returns no records.
    long firstReadOffset = -1L;
    long lastReadOffset = -1L;
    long currentStart = startOffset;
    boolean foundRecord = true;
    while (foundRecord) {
        foundRecord = false;
        Records records = log.read(currentStart, isolation).records;
        for (Record record : records.records()) {
            foundRecord = true;
            if (firstReadOffset < 0L) {
                firstReadOffset = record.offset();
            }
            if (record.offset() > lastReadOffset) {
                lastReadOffset = record.offset();
            }
        }
        currentStart = lastReadOffset + 1;
    }
    if (firstReadOffset < 0) {
        return Optional.empty();
    } else {
        return Optional.of(new OffsetRange(firstReadOffset, lastReadOffset));
    }
}
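OffsetRange is a small holder defined elsewhere in MockLogTest; a hypothetical stand-in, nested in the test class, is enough to make the helper read self-contained (the real definition may differ):

// Hypothetical stand-in for the OffsetRange holder used above;
// the actual definition in MockLogTest may differ.
private static final class OffsetRange {
    final long startOffset;
    final long endOffset;

    OffsetRange(long startOffset, long endOffset) {
        this.startOffset = startOffset;
        this.endOffset = endOffset;
    }
}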
Use of org.apache.kafka.common.record.Records in project kafka by apache.
The class MockLogTest, method testAppendAsFollower.
@Test
public void testAppendAsFollower() throws IOException {
    final long initialOffset = 5;
    final int epoch = 3;
    SimpleRecord recordFoo = new SimpleRecord("foo".getBytes());

    try (RawSnapshotWriter snapshot = log.storeSnapshot(new OffsetAndEpoch(initialOffset, 0)).get()) {
        snapshot.freeze();
    }
    log.truncateToLatestSnapshot();

    log.appendAsFollower(MemoryRecords.withRecords(initialOffset, CompressionType.NONE, epoch, recordFoo));

    assertEquals(initialOffset, log.startOffset());
    assertEquals(initialOffset + 1, log.endOffset().offset);
    assertEquals(epoch, log.lastFetchedEpoch());

    Records records = log.read(initialOffset, Isolation.UNCOMMITTED).records;
    List<ByteBuffer> extractRecords = new ArrayList<>();
    for (Record record : records.records()) {
        extractRecords.add(record.value());
    }
    assertEquals(1, extractRecords.size());
    assertEquals(recordFoo.value(), extractRecords.get(0));
    assertEquals(new OffsetAndEpoch(initialOffset, 0), log.endOffsetForEpoch(0));
    assertEquals(new OffsetAndEpoch(log.endOffset().offset, epoch), log.endOffsetForEpoch(epoch));
}
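The interesting call here is MemoryRecords.withRecords with an explicit base offset and leader epoch. A standalone sketch using only the public record API (no MockLog) shows how those values flow into the resulting batch:

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class WithRecordsSketch {
    public static void main(String[] args) {
        long baseOffset = 5L;
        int leaderEpoch = 3;
        MemoryRecords records = MemoryRecords.withRecords(
                baseOffset, CompressionType.NONE, leaderEpoch,
                new SimpleRecord("foo".getBytes()));

        RecordBatch batch = records.batches().iterator().next();
        System.out.println(batch.baseOffset());           // 5
        System.out.println(batch.partitionLeaderEpoch()); // 3
        for (Record record : batch) {
            System.out.println(record.offset());          // 5
        }
    }
}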
Use of org.apache.kafka.common.record.Records in project kafka by apache.
The class MockLogTest, method validateReadRecords.
private static void validateReadRecords(List<SimpleRecord> expectedRecords, MockLog log) {
    assertEquals(0L, log.startOffset());
    assertEquals(expectedRecords.size(), log.endOffset().offset);

    int currentOffset = 0;
    while (currentOffset < log.endOffset().offset) {
        Records records = log.read(currentOffset, Isolation.UNCOMMITTED).records;
        List<? extends RecordBatch> batches = Utils.toList(records.batches().iterator());
        assertTrue(batches.size() > 0);

        for (RecordBatch batch : batches) {
            assertTrue(batch.countOrNull() > 0);
            assertEquals(currentOffset, batch.baseOffset());
            assertEquals(currentOffset + batch.countOrNull() - 1, batch.lastOffset());
            for (Record record : batch) {
                assertEquals(currentOffset, record.offset());
                assertEquals(expectedRecords.get(currentOffset), new SimpleRecord(record));
                currentOffset += 1;
            }
            assertEquals(currentOffset - 1, batch.lastOffset());
        }
    }
}
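The batch-then-record traversal above works on any Records implementation, not just MockLog reads. A self-contained sketch over an in-memory batch:

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;

public class BatchIterationSketch {
    public static void main(String[] args) {
        MemoryRecords records = MemoryRecords.withRecords(
                CompressionType.NONE,
                new SimpleRecord("a".getBytes()),
                new SimpleRecord("b".getBytes()),
                new SimpleRecord("c".getBytes()));

        for (RecordBatch batch : records.batches()) {
            // countOrNull() is non-null for magic-v2 batches.
            System.out.printf("batch [%d..%d], count=%d%n",
                    batch.baseOffset(), batch.lastOffset(), batch.countOrNull());
            for (Record record : batch) {
                System.out.println("  offset " + record.offset());
            }
        }
    }
}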
Use of org.apache.kafka.common.record.Records in project kafka by apache.
The class KafkaRaftClientTest, method testInitializeAsCandidateAndBecomeLeaderQuorumOfThree.
@Test
public void testInitializeAsCandidateAndBecomeLeaderQuorumOfThree() throws Exception {
    int localId = 0;
    final int firstNodeId = 1;
    final int secondNodeId = 2;
    Set<Integer> voters = Utils.mkSet(localId, firstNodeId, secondNodeId);
    RaftClientTestContext context = new RaftClientTestContext.Builder(localId, voters).build();

    context.assertUnknownLeader(0);
    context.time.sleep(2 * context.electionTimeoutMs());
    context.pollUntilRequest();
    context.assertVotedCandidate(1, localId);

    int correlationId = context.assertSentVoteRequest(1, 0, 0L, 2);
    context.deliverResponse(correlationId, firstNodeId, context.voteResponse(true, Optional.empty(), 1));

    // Become leader after receiving the vote
    context.pollUntil(() -> context.log.endOffset().offset == 1L);
    context.assertElectedLeader(1, localId);
    long electionTimestamp = context.time.milliseconds();

    // Leader change record appended
    assertEquals(1L, context.log.endOffset().offset);
    assertEquals(1L, context.log.lastFlushedOffset());

    // Send BeginQuorumEpoch to voters
    context.client.poll();
    context.assertSentBeginQuorumEpochRequest(1, 2);

    Records records = context.log.read(0, Isolation.UNCOMMITTED).records;
    RecordBatch batch = records.batches().iterator().next();
    assertTrue(batch.isControlBatch());
    Record record = batch.iterator().next();
    assertEquals(electionTimestamp, record.timestamp());
    RaftClientTestContext.verifyLeaderChangeMessage(localId, Arrays.asList(localId, firstNodeId, secondNodeId), Arrays.asList(firstNodeId, localId), record.key(), record.value());
}
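The isControlBatch assertion works because the leader-change entry is a control record whose key encodes its type. A hedged sketch of inspecting that directly, assuming the public ControlRecordType API:

import org.apache.kafka.common.record.ControlRecordType;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;

public class ControlBatchSketch {
    // Given a batch read back from the log, report the control type of each record.
    static void inspectControlBatch(RecordBatch batch) {
        if (!batch.isControlBatch()) {
            return;
        }
        for (Record record : batch) {
            // The record key identifies the control type, e.g. LEADER_CHANGE
            // for the record appended when a new leader is elected.
            ControlRecordType type = ControlRecordType.parse(record.key());
            System.out.println("control record type: " + type);
        }
    }
}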