Use of org.apache.kafka.metadata.MetadataRecordSerde in project kafka by apache.

From the class LocalLogManager, the method scheduleLogCheck. It schedules an event-queue task that catches each registered listener up with the shared log, delivering snapshots, leader changes, and committed record batches; MetadataRecordSerde is used where raw snapshot bytes are decoded into metadata records.
private void scheduleLogCheck() {
    eventQueue.append(() -> {
        try {
            log.debug("Node {}: running log check.", nodeId);
            int numEntriesFound = 0;
            for (MetaLogListenerData listenerData : listeners.values()) {
                while (true) {
                    // Load the snapshot if needed and we are not the leader
                    LeaderAndEpoch notifiedLeader = listenerData.notifiedLeader();
                    if (!OptionalInt.of(nodeId).equals(notifiedLeader.leaderId())) {
                        Optional<RawSnapshotReader> snapshot =
                            shared.nextSnapshot(listenerData.offset());
                        if (snapshot.isPresent()) {
                            log.trace("Node {}: handling snapshot with id {}.",
                                nodeId, snapshot.get().snapshotId());
                            // MetadataRecordSerde decodes the raw snapshot bytes
                            // into metadata records for the listener.
                            listenerData.handleSnapshot(RecordsSnapshotReader.of(
                                snapshot.get(),
                                new MetadataRecordSerde(),
                                BufferSupplier.create(),
                                Integer.MAX_VALUE));
                        }
                    }
                    Entry<Long, LocalBatch> entry = shared.nextBatch(listenerData.offset());
                    if (entry == null) {
                        // Caught up with the end of the shared log.
                        log.trace("Node {}: reached the end of the log after finding {} entries.",
                            nodeId, numEntriesFound);
                        break;
                    }
                    long entryOffset = entry.getKey();
                    if (entryOffset > maxReadOffset) {
                        log.trace("Node {}: after {} entries, not reading the next entry " +
                            "because its offset is {}, and maxReadOffset is {}.",
                            nodeId, numEntriesFound, entryOffset, maxReadOffset);
                        break;
                    }
                    if (entry.getValue() instanceof LeaderChangeBatch) {
                        LeaderChangeBatch batch = (LeaderChangeBatch) entry.getValue();
                        log.trace("Node {}: handling LeaderChange to {}.",
                            nodeId, batch.newLeader);
                        // Only notify the listener if it equals the shared leader state
                        LeaderAndEpoch sharedLeader = shared.leaderAndEpoch();
                        if (batch.newLeader.equals(sharedLeader)) {
                            listenerData.handleLeaderChange(entryOffset, batch.newLeader);
                            if (batch.newLeader.epoch() > leader.epoch()) {
                                leader = batch.newLeader;
                            }
                        } else {
                            log.debug("Node {}: Ignoring {} since it doesn't match the latest known leader {}",
                                nodeId, batch.newLeader, sharedLeader);
                            listenerData.setOffset(entryOffset);
                        }
                    } else if (entry.getValue() instanceof LocalRecordBatch) {
                        LocalRecordBatch batch = (LocalRecordBatch) entry.getValue();
                        log.trace("Node {}: handling LocalRecordBatch with offset {}.",
                            nodeId, entryOffset);
                        ObjectSerializationCache objectCache = new ObjectSerializationCache();
                        // Wrap the records in a single in-memory batch; the base offset
                        // is derived from the last offset and the batch size.
                        listenerData.handleCommit(MemoryBatchReader.of(
                            Collections.singletonList(Batch.data(
                                entryOffset - batch.records.size() + 1,
                                batch.leaderEpoch,
                                batch.appendTimestamp,
                                batch.records.stream()
                                    .mapToInt(record -> messageSize(record, objectCache))
                                    .sum(),
                                batch.records)),
                            reader -> { }));
                    }
                    numEntriesFound++;
                }
            }
            log.trace("Completed log check for node " + nodeId);
        } catch (Exception e) {
            log.error("Exception while handling log check", e);
        }
    });
}
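For context, MetadataRecordSerde is the serde the KRaft metadata log uses to frame and decode ApiMessageAndVersion payloads. The standalone round-trip below is a minimal sketch, not taken from the Kafka sources: the TopicRecord payload and version are arbitrary choices, and package locations (e.g. ApiMessageAndVersion) have moved between Kafka versions, so the imports may need adjusting for a given release.

import java.nio.ByteBuffer;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.metadata.TopicRecord;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.metadata.MetadataRecordSerde;
import org.apache.kafka.server.common.ApiMessageAndVersion;

public class MetadataRecordSerdeRoundTrip {
    public static void main(String[] args) {
        MetadataRecordSerde serde = new MetadataRecordSerde();
        ApiMessageAndVersion message = new ApiMessageAndVersion(
            new TopicRecord().setName("foo").setTopicId(Uuid.randomUuid()),
            (short) 0);

        // The serde needs the serialized size up front, computed against a
        // shared ObjectSerializationCache that is reused by write().
        ObjectSerializationCache cache = new ObjectSerializationCache();
        int size = serde.recordSize(message, cache);
        ByteBuffer buffer = ByteBuffer.allocate(size);
        serde.write(message, cache, new ByteBufferAccessor(buffer));
        buffer.flip();

        // Reading the same bytes yields back an ApiMessageAndVersion
        // wrapping the concrete generated record type.
        ApiMessageAndVersion roundTripped =
            serde.read(new ByteBufferAccessor(buffer), size);
        System.out.println(roundTripped.message());
    }
}

RecordsSnapshotReader.of in the method above plugs this same serde into snapshot reading: the reader walks the snapshot's record batches and uses the serde to decode each record into an ApiMessageAndVersion before handing it to the listener.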