Usage of org.apache.kafka.common.message.LeaderChangeMessage in the Apache Kafka project — class LeaderState, method appendLeaderChangeMessage:
/**
 * Builds a {@link LeaderChangeMessage} describing the current election
 * (leader id, full voter set, and the voters that granted their vote) and
 * hands it to the accumulator as a control record, then forces a drain so
 * the record is flushed promptly rather than waiting for the linger timer.
 *
 * @param currentTimeMs the current time in milliseconds, used as the
 *                      control record's append timestamp
 */
public void appendLeaderChangeMessage(long currentTimeMs) {
    List<Voter> allVoters = convertToVoters(voterStates.keySet());
    List<Voter> granting = convertToVoters(this.grantingVoters());

    LeaderChangeMessage message = new LeaderChangeMessage()
        .setVersion(ControlRecordUtils.LEADER_CHANGE_SCHEMA_HIGHEST_VERSION)
        .setLeaderId(this.election().leaderId())
        .setVoters(allVoters)
        .setGrantingVoters(granting);

    accumulator.appendLeaderChangeMessage(message, currentTimeMs);
    accumulator.forceDrain();
}
Usage of org.apache.kafka.common.message.LeaderChangeMessage in the Apache Kafka project — class BatchAccumulatorTest, method testForceDrainBeforeAppendLeaderChangeMessage:
// Verifies that appending a LeaderChangeMessage forces the accumulator to
// become drainable: the pending data batch and the control batch are both
// completed, and draining resets the accumulator's drain state.
@Test
public void testForceDrainBeforeAppendLeaderChangeMessage() {
// Exercise both append paths (non-atomic and atomic) with the same scenario.
asList(APPEND, APPEND_ATOMIC).forEach(appender -> {
int leaderEpoch = 17;
long baseOffset = 157;
int lingerMs = 50;
int maxBatchSize = 512;
// Stub the memory pool for the data batch allocation (maxBatchSize) and the
// smaller 256-byte buffer the accumulator requests for the control batch.
Mockito.when(memoryPool.tryAllocate(maxBatchSize)).thenReturn(ByteBuffer.allocate(maxBatchSize));
Mockito.when(memoryPool.tryAllocate(256)).thenReturn(ByteBuffer.allocate(256));
BatchAccumulator<String> acc = buildAccumulator(leaderEpoch, baseOffset, lingerMs, maxBatchSize);
List<String> records = asList("a", "b", "c", "d", "e", "f", "g", "h", "i");
// Append records in sub-lists; each call returns the last offset assigned,
// so the expected value is baseOffset plus the index of the last record so far.
assertEquals(baseOffset, appender.call(acc, leaderEpoch, records.subList(0, 1)));
assertEquals(baseOffset + 2, appender.call(acc, leaderEpoch, records.subList(1, 3)));
assertEquals(baseOffset + 5, appender.call(acc, leaderEpoch, records.subList(3, 6)));
assertEquals(baseOffset + 7, appender.call(acc, leaderEpoch, records.subList(6, 8)));
assertEquals(baseOffset + 8, appender.call(acc, leaderEpoch, records.subList(8, 9)));
// Linger time has not elapsed, so plain appends alone do not trigger a drain.
assertFalse(acc.needsDrain(time.milliseconds()));
// Append a leader change message; this must force an immediate drain.
acc.appendLeaderChangeMessage(new LeaderChangeMessage(), time.milliseconds());
assertTrue(acc.needsDrain(time.milliseconds()));
// Test that drain status is FINISHED (no remaining wait time).
assertEquals(0, acc.timeUntilDrain(time.milliseconds()));
// Drain completed batches
List<BatchAccumulator.CompletedBatch<String>> batches = acc.drain();
// Should have 2 batches, one consisting of `records` and one `leaderChangeMessage`
assertEquals(2, batches.size());
// After draining, the accumulator is idle again: nothing to drain and the
// time-until-drain is effectively unbounded.
assertFalse(acc.needsDrain(time.milliseconds()));
assertEquals(Long.MAX_VALUE - time.milliseconds(), acc.timeUntilDrain(time.milliseconds()));
// The first drained batch holds all the plain records at the base offset.
BatchAccumulator.CompletedBatch<String> batch = batches.get(0);
assertEquals(records, batch.records.get());
assertEquals(baseOffset, batch.baseOffset);
assertEquals(time.milliseconds(), batch.appendTimestamp());
});
}
Usage of org.apache.kafka.common.message.LeaderChangeMessage in the Apache Kafka project — class MemoryRecordsTest, method testBuildLeaderChangeMessage:
/**
 * Round-trips a LeaderChangeMessage through
 * {@code MemoryRecords.withLeaderChangeMessage}: the result must be a single
 * valid control batch containing exactly one control record whose payload
 * deserializes back to the original leader id and voter list.
 */
@Test
public void testBuildLeaderChangeMessage() {
    final int expectedLeaderId = 5;
    final int epoch = 20;
    final int singleVoterId = 6;
    final long startOffset = 983L;

    LeaderChangeMessage message = new LeaderChangeMessage()
        .setLeaderId(expectedLeaderId)
        .setVoters(Collections.singletonList(new Voter().setVoterId(singleVoterId)));

    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecords records = MemoryRecords.withLeaderChangeMessage(
        startOffset, System.currentTimeMillis(), epoch, buffer, message);

    // Exactly one batch, flagged as a control batch with the expected metadata.
    List<MutableRecordBatch> batches = TestUtils.toList(records.batches());
    assertEquals(1, batches.size());
    RecordBatch onlyBatch = batches.get(0);
    assertTrue(onlyBatch.isControlBatch());
    assertEquals(startOffset, onlyBatch.baseOffset());
    assertEquals(epoch, onlyBatch.partitionLeaderEpoch());
    assertTrue(onlyBatch.isValid());

    // The batch carries a single valid record keyed as LEADER_CHANGE.
    List<Record> written = TestUtils.toList(onlyBatch);
    assertEquals(1, written.size());
    Record controlRecord = written.get(0);
    controlRecord.ensureValid();
    assertEquals(ControlRecordType.LEADER_CHANGE, ControlRecordType.parse(controlRecord.key()));

    // The payload deserializes back to the original message contents.
    LeaderChangeMessage roundTripped = ControlRecordUtils.deserializeLeaderChangeMessage(controlRecord);
    assertEquals(expectedLeaderId, roundTripped.leaderId());
    assertEquals(1, roundTripped.voters().size());
    assertEquals(singleVoterId, roundTripped.voters().get(0).voterId());
}
Usage of org.apache.kafka.common.message.LeaderChangeMessage in the Apache Kafka project — class MemoryRecordsBuilderTest, method testWriteLeaderChangeControlBatch:
/**
 * Verifies that a MemoryRecordsBuilder configured as a control batch can
 * write a leader-change record: pre-v2 magic must be rejected at
 * construction time, and for v2+ the single written record must
 * deserialize back to the original leader id and voter ids.
 */
@ParameterizedTest
@ArgumentsSource(MemoryRecordsBuilderArgumentsProvider.class)
public void testWriteLeaderChangeControlBatch(Args args) {
    ByteBuffer buffer = allocateBuffer(128, args);
    final int newLeaderId = 1;
    final int epoch = 5;
    final List<Integer> voterIds = Arrays.asList(2, 3);

    Supplier<MemoryRecordsBuilder> builderSupplier = () -> new MemoryRecordsBuilder(
        buffer, args.magic, args.compressionType, TimestampType.CREATE_TIME,
        0L, 0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH,
        RecordBatch.NO_SEQUENCE, false, true, epoch, buffer.capacity());

    // Control batches are only supported by the v2 message format.
    if (args.magic < MAGIC_VALUE_V2) {
        assertThrows(IllegalArgumentException.class, builderSupplier::get);
        return;
    }

    MemoryRecordsBuilder builder = builderSupplier.get();
    List<Voter> voterList = voterIds.stream()
        .map(id -> new Voter().setVoterId(id))
        .collect(Collectors.toList());
    builder.appendLeaderChangeMessage(
        RecordBatch.NO_TIMESTAMP,
        new LeaderChangeMessage().setLeaderId(newLeaderId).setVoters(voterList));

    // Exactly one record was written, and it round-trips to the same contents.
    List<Record> written = TestUtils.toList(builder.build().records());
    assertEquals(1, written.size());
    LeaderChangeMessage deserialized = ControlRecordUtils.deserializeLeaderChangeMessage(written.get(0));
    assertEquals(newLeaderId, deserialized.leaderId());
    assertEquals(voterIds, deserialized.voters().stream().map(Voter::voterId).collect(Collectors.toList()));
}
Usage of org.apache.kafka.common.message.LeaderChangeMessage in the Apache Kafka project — class SnapshotFileReader, method handleControlBatch:
/**
 * Dispatches each control record in the given batch to the listener.
 * Only LEADER_CHANGE records are handled; any other control record type is
 * logged and skipped. A failure while reading one record is logged and does
 * not abort processing of the remaining records (best-effort snapshot read).
 *
 * @param batch the control batch whose records should be processed
 */
private void handleControlBatch(FileChannelRecordBatch batch) {
    for (Iterator<Record> iter = batch.iterator(); iter.hasNext(); ) {
        Record record = iter.next();
        try {
            short typeId = ControlRecordType.parseTypeId(record.key());
            ControlRecordType type = ControlRecordType.fromTypeId(typeId);
            switch (type) {
                case LEADER_CHANGE:
                    // Use the version-aware helper rather than reading at a
                    // hard-coded schema version 0, so records written with a
                    // newer LeaderChangeMessage version deserialize correctly
                    // (matches how the rest of the codebase reads this record).
                    LeaderChangeMessage message = ControlRecordUtils.deserializeLeaderChangeMessage(record);
                    listener.handleLeaderChange(
                        new LeaderAndEpoch(OptionalInt.of(message.leaderId()), batch.partitionLeaderEpoch()));
                    break;
                default:
                    log.error("Ignoring control record with type {} at offset {}", type, record.offset());
            }
        } catch (Throwable e) {
            // Deliberately broad: a single corrupt record must not stop the
            // snapshot read; log with the cause and continue.
            log.error("Unable to read control record at offset {}", record.offset(), e);
        }
    }
}
End of aggregated LeaderChangeMessage usage examples.