Use of org.apache.kafka.raft.LeaderAndEpoch in project kafka by apache.
From the class LocalLogManager, method resign.
@Override
public void resign(int epoch) {
    LeaderAndEpoch curLeader = leader;
    // Hand off leadership by appending a leader-change record with no leader
    // and a bumped epoch; a follower can then claim the new epoch.
    LeaderAndEpoch nextLeader = new LeaderAndEpoch(OptionalInt.empty(), curLeader.epoch() + 1);
    shared.tryAppend(nodeId, curLeader.epoch(), new LeaderChangeBatch(nextLeader));
}
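The resignation path models "no leader yet" as a LeaderAndEpoch with an empty leader id and a bumped epoch. A minimal standalone sketch of that construction, using only the public LeaderAndEpoch value type (the ResignSketch class name is ours, for illustration only):

import java.util.OptionalInt;

import org.apache.kafka.raft.LeaderAndEpoch;

public class ResignSketch {
    public static void main(String[] args) {
        // A known leader: node 0 at epoch 1.
        LeaderAndEpoch curLeader = new LeaderAndEpoch(OptionalInt.of(0), 1);
        // Resignation is represented as "no leader, epoch + 1", the same
        // nextLeader value that resign() above appends to the shared log.
        LeaderAndEpoch nextLeader = new LeaderAndEpoch(OptionalInt.empty(), curLeader.epoch() + 1);
        System.out.println(nextLeader.leaderId().isPresent()); // false
        System.out.println(nextLeader.epoch());                // 2
    }
}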
Use of org.apache.kafka.raft.LeaderAndEpoch in project kafka by apache.
From the class LocalLogManager, method scheduleLogCheck.
private void scheduleLogCheck() {
    eventQueue.append(() -> {
        try {
            log.debug("Node {}: running log check.", nodeId);
            int numEntriesFound = 0;
            for (MetaLogListenerData listenerData : listeners.values()) {
                while (true) {
                    // Load the snapshot if needed and we are not the leader
                    LeaderAndEpoch notifiedLeader = listenerData.notifiedLeader();
                    if (!OptionalInt.of(nodeId).equals(notifiedLeader.leaderId())) {
                        Optional<RawSnapshotReader> snapshot = shared.nextSnapshot(listenerData.offset());
                        if (snapshot.isPresent()) {
                            log.trace("Node {}: handling snapshot with id {}.", nodeId, snapshot.get().snapshotId());
                            listenerData.handleSnapshot(RecordsSnapshotReader.of(
                                snapshot.get(),
                                new MetadataRecordSerde(),
                                BufferSupplier.create(),
                                Integer.MAX_VALUE));
                        }
                    }
                    Entry<Long, LocalBatch> entry = shared.nextBatch(listenerData.offset());
                    if (entry == null) {
                        log.trace("Node {}: reached the end of the log after finding {} entries.",
                            nodeId, numEntriesFound);
                        break;
                    }
                    long entryOffset = entry.getKey();
                    if (entryOffset > maxReadOffset) {
                        log.trace("Node {}: after {} entries, not reading the next entry because its offset is {}, and maxReadOffset is {}.",
                            nodeId, numEntriesFound, entryOffset, maxReadOffset);
                        break;
                    }
                    if (entry.getValue() instanceof LeaderChangeBatch) {
                        LeaderChangeBatch batch = (LeaderChangeBatch) entry.getValue();
                        log.trace("Node {}: handling LeaderChange to {}.", nodeId, batch.newLeader);
                        // Only notify the listener if it equals the shared leader state
                        LeaderAndEpoch sharedLeader = shared.leaderAndEpoch();
                        if (batch.newLeader.equals(sharedLeader)) {
                            listenerData.handleLeaderChange(entryOffset, batch.newLeader);
                            if (batch.newLeader.epoch() > leader.epoch()) {
                                leader = batch.newLeader;
                            }
                        } else {
                            log.debug("Node {}: Ignoring {} since it doesn't match the latest known leader {}",
                                nodeId, batch.newLeader, sharedLeader);
                            listenerData.setOffset(entryOffset);
                        }
                    } else if (entry.getValue() instanceof LocalRecordBatch) {
                        LocalRecordBatch batch = (LocalRecordBatch) entry.getValue();
                        log.trace("Node {}: handling LocalRecordBatch with offset {}.", nodeId, entryOffset);
                        ObjectSerializationCache objectCache = new ObjectSerializationCache();
                        listenerData.handleCommit(MemoryBatchReader.of(
                            Collections.singletonList(Batch.data(
                                entryOffset - batch.records.size() + 1,
                                batch.leaderEpoch,
                                batch.appendTimestamp,
                                batch.records.stream().mapToInt(record -> messageSize(record, objectCache)).sum(),
                                batch.records)),
                            reader -> { }));
                    }
                    numEntriesFound++;
                }
            }
            log.trace("Completed log check for node " + nodeId);
        } catch (Exception e) {
            log.error("Exception while handling log check", e);
        }
    });
}
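The snapshot guard above hinges on one comparison: is this node the leader that the listener was last notified about? A standalone sketch of that check (the LeaderCheckSketch class name is ours; recent LeaderAndEpoch versions also expose an isLeader(int) helper for the same test, though that is an assumption about your Kafka version):

import java.util.OptionalInt;

import org.apache.kafka.raft.LeaderAndEpoch;

public class LeaderCheckSketch {
    public static void main(String[] args) {
        int nodeId = 1;
        LeaderAndEpoch notifiedLeader = new LeaderAndEpoch(OptionalInt.of(0), 5);
        // The same comparison scheduleLogCheck() uses before loading a snapshot:
        boolean thisNodeLeads = OptionalInt.of(nodeId).equals(notifiedLeader.leaderId());
        System.out.println(thisNodeLeads); // false: node 0 is the notified leader
    }
}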
Use of org.apache.kafka.raft.LeaderAndEpoch in project kafka by apache.
From the class LocalLogManagerTest, method testClaimsLeadership.
/**
 * Test that the local log manager will claim leadership.
 */
@Test
public void testClaimsLeadership() throws Exception {
    try (LocalLogManagerTestEnv env = LocalLogManagerTestEnv.createWithMockListeners(1, Optional.empty())) {
        assertEquals(new LeaderAndEpoch(OptionalInt.of(0), 1), env.waitForLeader());
        env.close();
        assertEquals(null, env.firstError.get());
    }
}
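The assertEquals call works because LeaderAndEpoch is a value type: its equals() compares the leader id and the epoch together, which is what the tests in this section rely on. A minimal illustration (class name ours):

import java.util.OptionalInt;

import org.apache.kafka.raft.LeaderAndEpoch;

public class EqualitySketch {
    public static void main(String[] args) {
        LeaderAndEpoch a = new LeaderAndEpoch(OptionalInt.of(0), 1);
        LeaderAndEpoch b = new LeaderAndEpoch(OptionalInt.of(0), 1);
        LeaderAndEpoch c = new LeaderAndEpoch(OptionalInt.of(0), 2);
        System.out.println(a.equals(b)); // true: same leader id and epoch
        System.out.println(a.equals(c)); // false: epochs differ
    }
}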
Use of org.apache.kafka.raft.LeaderAndEpoch in project kafka by apache.
From the class LocalLogManagerTest, method testPassLeadership.
/**
 * Test that we can pass leadership back and forth between log managers.
 */
@Test
public void testPassLeadership() throws Exception {
    try (LocalLogManagerTestEnv env = LocalLogManagerTestEnv.createWithMockListeners(3, Optional.empty())) {
        LeaderAndEpoch first = env.waitForLeader();
        LeaderAndEpoch cur = first;
        do {
            int currentLeaderId = cur.leaderId().orElseThrow(
                () -> new AssertionError("Current leader is undefined"));
            env.logManagers().get(currentLeaderId).resign(cur.epoch());
            LeaderAndEpoch next = env.waitForLeader();
            while (next.epoch() == cur.epoch()) {
                Thread.sleep(1);
                next = env.waitForLeader();
            }
            long expectedNextEpoch = cur.epoch() + 2;
            assertEquals(expectedNextEpoch, next.epoch(),
                "Expected next epoch to be " + expectedNextEpoch + ", but found " + next);
            cur = next;
        } while (cur.leaderId().equals(first.leaderId()));
        env.close();
        assertEquals(null, env.firstError.get());
    }
}
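The expected epoch of cur.epoch() + 2 follows from the resign() implementation shown earlier: resignation appends a no-leader state at epoch + 1, and the subsequent election installs the new leader one epoch later. A standalone sketch of the two transitions (class name and node ids are illustrative):

import java.util.OptionalInt;

import org.apache.kafka.raft.LeaderAndEpoch;

public class EpochBumpSketch {
    public static void main(String[] args) {
        LeaderAndEpoch cur = new LeaderAndEpoch(OptionalInt.of(0), 1);
        // Step 1: resign() appends "no leader" at epoch + 1.
        LeaderAndEpoch resigned = new LeaderAndEpoch(OptionalInt.empty(), cur.epoch() + 1);
        // Step 2: another node claims leadership, bumping the epoch again.
        LeaderAndEpoch next = new LeaderAndEpoch(OptionalInt.of(1), resigned.epoch() + 1);
        System.out.println(next.epoch() == cur.epoch() + 2); // true
    }
}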
Use of org.apache.kafka.raft.LeaderAndEpoch in project kafka by apache.
From the class LocalLogManagerTest, method testCommits.
/**
 * Test that all the log managers see all the commits.
 */
@Test
public void testCommits() throws Exception {
    try (LocalLogManagerTestEnv env = LocalLogManagerTestEnv.createWithMockListeners(3, Optional.empty())) {
        LeaderAndEpoch leaderInfo = env.waitForLeader();
        int leaderId = leaderInfo.leaderId().orElseThrow(
            () -> new AssertionError("Current leader is undefined"));
        LocalLogManager activeLogManager = env.logManagers().get(leaderId);
        int epoch = activeLogManager.leaderAndEpoch().epoch();
        List<ApiMessageAndVersion> messages = Arrays.asList(
            new ApiMessageAndVersion(new RegisterBrokerRecord().setBrokerId(0), (short) 0),
            new ApiMessageAndVersion(new RegisterBrokerRecord().setBrokerId(1), (short) 0),
            new ApiMessageAndVersion(new RegisterBrokerRecord().setBrokerId(2), (short) 0));
        assertEquals(3, activeLogManager.scheduleAppend(epoch, messages));
        for (LocalLogManager logManager : env.logManagers()) {
            waitForLastCommittedOffset(3, logManager);
        }
        List<MockMetaLogManagerListener> listeners = env.logManagers().stream()
            .map(m -> (MockMetaLogManagerListener) m.listeners().get(0))
            .collect(Collectors.toList());
        env.close();
        for (MockMetaLogManagerListener listener : listeners) {
            List<String> events = listener.serializedEvents();
            assertEquals(SHUTDOWN, events.get(events.size() - 1));
            int foundIndex = 0;
            for (String event : events) {
                if (event.startsWith(COMMIT)) {
                    assertEquals(messages.get(foundIndex).message().toString(),
                        event.substring(COMMIT.length() + 1));
                    foundIndex++;
                }
            }
            assertEquals(messages.size(), foundIndex);
        }
    }
}
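The final loop replays each mock listener's serialized events to check that every manager saw all three registrations in order, followed by SHUTDOWN. A hedged sketch of a bare-bones listener under the same RaftClient.Listener contract that LocalLogManager drives (CollectingListener and its fields are our names, not Kafka's, and the ApiMessageAndVersion import assumes a 3.x source tree; older releases place it elsewhere):

import java.util.ArrayList;
import java.util.List;
import java.util.OptionalInt;

import org.apache.kafka.raft.Batch;
import org.apache.kafka.raft.BatchReader;
import org.apache.kafka.raft.LeaderAndEpoch;
import org.apache.kafka.raft.RaftClient;
import org.apache.kafka.server.common.ApiMessageAndVersion;
import org.apache.kafka.snapshot.SnapshotReader;

// Collects every committed record and tracks the latest LeaderAndEpoch,
// roughly what MockMetaLogManagerListener does with its COMMIT events.
class CollectingListener implements RaftClient.Listener<ApiMessageAndVersion> {
    final List<ApiMessageAndVersion> committed = new ArrayList<>();
    volatile LeaderAndEpoch leader = new LeaderAndEpoch(OptionalInt.empty(), 0);

    @Override
    public void handleCommit(BatchReader<ApiMessageAndVersion> reader) {
        try {
            while (reader.hasNext()) {
                Batch<ApiMessageAndVersion> batch = reader.next();
                committed.addAll(batch.records());
            }
        } finally {
            reader.close(); // the listener owns the reader and must close it
        }
    }

    @Override
    public void handleSnapshot(SnapshotReader<ApiMessageAndVersion> reader) {
        reader.close(); // snapshots are ignored in this sketch
    }

    @Override
    public void handleLeaderChange(LeaderAndEpoch newLeader) {
        leader = newLeader;
    }
}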