Use of org.apache.kafka.raft.Batch in project kafka by apache:
the class SnapshotFileReader, method handleMetadataBatch.
/**
 * Decodes every record in the given batch and delivers the result to the
 * listener as a single committed in-memory batch.
 *
 * <p>Records that cannot be deserialized are logged and skipped so that one
 * corrupt record does not abort processing of the rest of the batch.
 *
 * @param batch the raw file-channel record batch to decode
 */
private void handleMetadataBatch(FileChannelRecordBatch batch) {
    List<ApiMessageAndVersion> decoded = new ArrayList<>();
    for (Record record : batch) {
        // The accessor wraps the record payload for the serde to consume.
        ByteBufferAccessor accessor = new ByteBufferAccessor(record.value());
        try {
            decoded.add(serde.read(accessor, record.valueSize()));
        } catch (Throwable e) {
            // Best-effort: skip the unreadable record and keep going.
            log.error("unable to read metadata record at offset {}", record.offset(), e);
        }
    }
    // Re-package the decoded records with the original batch's metadata
    // (offsets, epoch, timestamp, size) and hand them to the listener.
    // The empty lambda is the reader's close callback; nothing to clean up.
    Batch<ApiMessageAndVersion> data = Batch.data(
        batch.baseOffset(),
        batch.partitionLeaderEpoch(),
        batch.maxTimestamp(),
        batch.sizeInBytes(),
        decoded);
    listener.handleCommit(MemoryBatchReader.of(Collections.singletonList(data), reader -> {
    }));
}
Use of org.apache.kafka.raft.Batch in project kafka by apache:
the class LocalLogManager, method scheduleAppend.
/**
 * Appends a batch non-atomically by splitting it in half and appending each
 * record as its own single-record atomic append.
 *
 * <p>If {@code resignAfterNonAtomicCommit} is armed, leadership loss is
 * emulated after the first half has been written: the second half is never
 * appended, the leader resigns, and the offset returned is the one the full
 * batch <em>would</em> have reached.
 *
 * @param epoch the leader epoch to append under
 * @param batch the records to append; must not be empty
 * @return the offset of the last record written (or, in the resign case,
 *         the offset the batch would have reached)
 * @throws IllegalArgumentException if {@code batch} is empty
 */
@Override
public long scheduleAppend(int epoch, List<ApiMessageAndVersion> batch) {
    if (batch.isEmpty()) {
        throw new IllegalArgumentException("Batch cannot be empty");
    }
    int mid = batch.size() / 2;
    List<ApiMessageAndVersion> firstHalf = batch.subList(0, mid);
    List<ApiMessageAndVersion> secondHalf = batch.subList(mid, batch.size());
    assertEquals(batch.size(), firstHalf.size() + secondHalf.size());
    // Integer division guarantees the second half always has at least one record.
    assertFalse(secondHalf.isEmpty());
    // Append the first half one record at a time, tracking the highest offset.
    // Empty when firstHalf is empty (i.e. batch.size() == 1).
    OptionalLong maxFirstOffset = OptionalLong.empty();
    for (ApiMessageAndVersion record : firstHalf) {
        long offset = scheduleAtomicAppend(epoch, Collections.singletonList(record));
        maxFirstOffset = OptionalLong.of(
            maxFirstOffset.isPresent() ? Math.max(maxFirstOffset.getAsLong(), offset) : offset);
    }
    // NOTE: the && must short-circuit — getAndSet(false) should only fire
    // when something from the first half was actually written, matching the
    // original ordering of the presence check before the flag consumption.
    if (maxFirstOffset.isPresent() && resignAfterNonAtomicCommit.getAndSet(false)) {
        // Emulate losing leadership in the middle of a non-atomic append by not writing
        // the rest of the batch and instead writing a leader change message
        resign(leader.epoch());
        return maxFirstOffset.getAsLong() + secondHalf.size();
    }
    // Append the second half; it is non-empty (asserted above), so the
    // running maximum is always updated at least once.
    long lastOffset = Long.MIN_VALUE;
    for (ApiMessageAndVersion record : secondHalf) {
        lastOffset = Math.max(lastOffset, scheduleAtomicAppend(epoch, Collections.singletonList(record)));
    }
    return lastOffset;
}
Use of org.apache.kafka.raft.Batch in project kafka by apache:
the class QuorumControllerTest, method checkSnapshotSubcontent.
/**
 * Checks that the records produced by the iterator form an ordered subset
 * (subsequence) of the expected list.
 *
 * This is needed because, when generating snapshots through configuration,
 * it is difficult to control exactly when a snapshot will be generated and
 * which committed offset will be included in the snapshot.
 */
private void checkSnapshotSubcontent(List<ApiMessageAndVersion> expected, Iterator<Batch<ApiMessageAndVersion>> iterator) throws Exception {
    RecordTestUtils.deepSortRecords(expected);
    // Flatten all batches into a single record list.
    List<ApiMessageAndVersion> actual = new ArrayList<>();
    while (iterator.hasNext()) {
        actual.addAll(iterator.next().records());
    }
    RecordTestUtils.deepSortRecords(actual);
    // Walk expected with a cursor; each actual record must be found at or
    // after the cursor position, preserving relative order.
    int cursor = 0;
    for (ApiMessageAndVersion record : actual) {
        while (cursor < expected.size() && !expected.get(cursor).equals(record)) {
            cursor += 1;
        }
        cursor += 1;
    }
    // If any actual record was not matched, the cursor overshoots the end
    // of expected by at least one and the assertion fails.
    assertTrue(cursor <= expected.size(), String.format("actual is not a subset of expected: expected = %s; actual = %s", expected, actual));
}
Aggregations