Use of org.apache.kafka.common.protocol.ObjectSerializationCache in project kafka by apache.
In the class LocalLogManager, method scheduleLogCheck:
private void scheduleLogCheck() {
    eventQueue.append(() -> {
        try {
            log.debug("Node {}: running log check.", nodeId);
            int numEntriesFound = 0;
            for (MetaLogListenerData listenerData : listeners.values()) {
                while (true) {
                    // Load the snapshot if needed and we are not the leader.
                    LeaderAndEpoch notifiedLeader = listenerData.notifiedLeader();
                    if (!OptionalInt.of(nodeId).equals(notifiedLeader.leaderId())) {
                        Optional<RawSnapshotReader> snapshot = shared.nextSnapshot(listenerData.offset());
                        if (snapshot.isPresent()) {
                            log.trace("Node {}: handling snapshot with id {}.", nodeId, snapshot.get().snapshotId());
                            listenerData.handleSnapshot(RecordsSnapshotReader.of(
                                snapshot.get(),
                                new MetadataRecordSerde(),
                                BufferSupplier.create(),
                                Integer.MAX_VALUE));
                        }
                    }
                    Entry<Long, LocalBatch> entry = shared.nextBatch(listenerData.offset());
                    if (entry == null) {
                        log.trace("Node {}: reached the end of the log after finding {} entries.",
                            nodeId, numEntriesFound);
                        break;
                    }
                    long entryOffset = entry.getKey();
                    if (entryOffset > maxReadOffset) {
                        log.trace("Node {}: after {} entries, not reading the next entry because " +
                            "its offset is {}, and maxReadOffset is {}.",
                            nodeId, numEntriesFound, entryOffset, maxReadOffset);
                        break;
                    }
                    if (entry.getValue() instanceof LeaderChangeBatch) {
                        LeaderChangeBatch batch = (LeaderChangeBatch) entry.getValue();
                        log.trace("Node {}: handling LeaderChange to {}.", nodeId, batch.newLeader);
                        // Only notify the listener if the new leader matches the shared leader state.
                        LeaderAndEpoch sharedLeader = shared.leaderAndEpoch();
                        if (batch.newLeader.equals(sharedLeader)) {
                            listenerData.handleLeaderChange(entryOffset, batch.newLeader);
                            if (batch.newLeader.epoch() > leader.epoch()) {
                                leader = batch.newLeader;
                            }
                        } else {
                            log.debug("Node {}: ignoring {} since it doesn't match the latest known leader {}",
                                nodeId, batch.newLeader, sharedLeader);
                            listenerData.setOffset(entryOffset);
                        }
                    } else if (entry.getValue() instanceof LocalRecordBatch) {
                        LocalRecordBatch batch = (LocalRecordBatch) entry.getValue();
                        log.trace("Node {}: handling LocalRecordBatch with offset {}.", nodeId, entryOffset);
                        // One cache per batch: messageSize memoizes each record's serialized
                        // length in it while computing the total batch size.
                        ObjectSerializationCache objectCache = new ObjectSerializationCache();
                        listenerData.handleCommit(MemoryBatchReader.of(
                            Collections.singletonList(Batch.data(
                                entryOffset - batch.records.size() + 1,
                                batch.leaderEpoch,
                                batch.appendTimestamp,
                                batch.records.stream().mapToInt(record -> messageSize(record, objectCache)).sum(),
                                batch.records)),
                            reader -> { }));
                    }
                    numEntriesFound++;
                }
            }
            log.trace("Completed log check for node {}.", nodeId);
        } catch (Exception e) {
            log.error("Exception while handling log check", e);
        }
    });
}
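The messageSize helper called in the stream above is not shown. A minimal sketch of it, assuming it delegates to MetadataRecordSerde the way the other snippets on this page do (the actual LocalLogManager helper may differ):

private static final MetadataRecordSerde SERDE = new MetadataRecordSerde();

// Sizing with the shared objectCache memoizes each record's serialized length,
// so a later write of the same record does not recompute it.
static int messageSize(ApiMessageAndVersion message, ObjectSerializationCache objectCache) {
    return SERDE.recordSize(message, objectCache);
}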
Use of org.apache.kafka.common.protocol.ObjectSerializationCache in project kafka by apache.
In the class RecordTestUtils, method sizeInBytes:
private static int sizeInBytes(List<ApiMessageAndVersion> records) {
    int size = 0;
    for (ApiMessageAndVersion record : records) {
        // A fresh cache per record is fine here: each record is sized exactly once
        // and never serialized through this method.
        ObjectSerializationCache cache = new ObjectSerializationCache();
        size += MetadataRecordSerde.INSTANCE.recordSize(record, cache);
    }
    return size;
}
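When sizing precedes a write, the same cache instance must instead be passed to both calls, so the lengths memoized during sizing remain valid. A minimal size-then-write sketch, reusing the serde and record types from these snippets:

ApiMessageAndVersion record = new ApiMessageAndVersion(
    new RegisterBrokerRecord().setBrokerId(1).setBrokerEpoch(2), (short) 0);
ObjectSerializationCache cache = new ObjectSerializationCache();
// The same cache flows into both calls: recordSize fills it, write reads from it.
int size = MetadataRecordSerde.INSTANCE.recordSize(record, cache);
ByteBuffer buffer = ByteBuffer.allocate(size);
MetadataRecordSerde.INSTANCE.write(record, cache, new ByteBufferAccessor(buffer));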
Use of org.apache.kafka.common.protocol.ObjectSerializationCache in project kafka by apache.
In the class MetadataRecordSerdeTest, method testParsingRecordWithGarbageAtEnd:
/**
 * Test attempting to parse a record that has trailing garbage bytes after the
 * serialized message.
 */
@Test
public void testParsingRecordWithGarbageAtEnd() {
    MetadataRecordSerde serde = new MetadataRecordSerde();
    RegisterBrokerRecord message = new RegisterBrokerRecord().setBrokerId(1).setBrokerEpoch(2);
    ObjectSerializationCache cache = new ObjectSerializationCache();
    ApiMessageAndVersion messageAndVersion = new ApiMessageAndVersion(message, (short) 0);
    int size = serde.recordSize(messageAndVersion, cache);
    // Allocate one byte more than the record needs so the reader sees garbage at the end.
    ByteBuffer buffer = ByteBuffer.allocate(size + 1);
    serde.write(messageAndVersion, cache, new ByteBufferAccessor(buffer));
    buffer.clear();
    assertStartsWith("Found 1 byte(s) of garbage after",
        assertThrows(MetadataParseException.class,
            () -> serde.read(new ByteBufferAccessor(buffer), size + 1)).getMessage());
}
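For contrast, a buffer of exactly recordSize bytes round-trips cleanly. A minimal happy-path sketch using the same serde and record as the test above:

MetadataRecordSerde serde = new MetadataRecordSerde();
ObjectSerializationCache cache = new ObjectSerializationCache();
ApiMessageAndVersion original = new ApiMessageAndVersion(
    new RegisterBrokerRecord().setBrokerId(1).setBrokerEpoch(2), (short) 0);
int size = serde.recordSize(original, cache);
ByteBuffer buffer = ByteBuffer.allocate(size); // exact size, so no trailing garbage
serde.write(original, cache, new ByteBufferAccessor(buffer));
buffer.flip();
assertEquals(original, serde.read(new ByteBufferAccessor(buffer), size));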
Use of org.apache.kafka.common.protocol.ObjectSerializationCache in project kafka by apache.
In the class BatchAccumulator, method append:
private long append(int epoch, List<T> records, boolean isAtomic) {
    if (epoch < this.epoch) {
        throw new NotLeaderException("Append failed because the epoch doesn't match");
    } else if (epoch > this.epoch) {
        throw new IllegalArgumentException("Attempt to append from epoch " + epoch +
            " which is larger than the current epoch " + this.epoch);
    }
    // One cache per append call: sizes computed while allocating the batch are
    // memoized and reused when each record is actually written.
    ObjectSerializationCache serializationCache = new ObjectSerializationCache();
    appendLock.lock();
    try {
        maybeCompleteDrain();
        BatchBuilder<T> batch = null;
        if (isAtomic) {
            batch = maybeAllocateBatch(records, serializationCache);
        }
        for (T record : records) {
            if (!isAtomic) {
                batch = maybeAllocateBatch(Collections.singleton(record), serializationCache);
            }
            if (batch == null) {
                throw new BufferAllocationException(
                    "Append failed because we failed to allocate memory to write the batch");
            }
            batch.appendRecord(record, serializationCache);
            nextOffset += 1;
        }
        maybeResetLinger();
        return nextOffset - 1;
    } finally {
        appendLock.unlock();
    }
}
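This private method is reached through the accumulator's public entry points. A hedged caller-side sketch (append and appendAtomic are the public names in the Kafka raft module, though signatures have varied between versions; accumulator, currentEpoch, and records are assumed to be in scope):

// Both calls funnel into append(epoch, records, isAtomic), so each append sizes
// and writes its records through a single ObjectSerializationCache.
long lastOffset = accumulator.append(currentEpoch, records);             // may split into several batches
long lastAtomicOffset = accumulator.appendAtomic(currentEpoch, records); // all records in one batch or none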
Use of org.apache.kafka.common.protocol.ObjectSerializationCache in project kafka by apache.
In the class MessageTest, method verifyWriteRaisesUve:
private void verifyWriteRaisesUve(short version, String problemText, Message message) {
    ObjectSerializationCache cache = new ObjectSerializationCache();
    UnsupportedVersionException e = assertThrows(UnsupportedVersionException.class, () -> {
        int size = message.size(cache, version);
        ByteBuffer buf = ByteBuffer.allocate(size);
        ByteBufferAccessor byteBufferAccessor = new ByteBufferAccessor(buf);
        message.write(byteBufferAccessor, cache, version);
    });
    assertTrue(e.getMessage().contains(problemText),
        "Expected to get an error message about " + problemText + ", but got: " + e.getMessage());
}
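A hypothetical call site, assuming CreateTopicsRequestData's validateOnly field is only writable from version 1 on (as in the CreateTopics v1 schema):

// Writing a v1-only field at version 0 should raise an UnsupportedVersionException
// whose message names the offending field.
verifyWriteRaisesUve((short) 0, "validateOnly", new CreateTopicsRequestData().setValidateOnly(true));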