Example 6 with LeaderChangeMessage

Use of org.apache.kafka.common.message.LeaderChangeMessage in the Apache Kafka project.

From the class LeaderState, method appendLeaderChangeMessage.

public void appendLeaderChangeMessage(long currentTimeMs) {
    List<Voter> voters = convertToVoters(voterStates.keySet());
    List<Voter> grantingVoters = convertToVoters(this.grantingVoters());
    LeaderChangeMessage leaderChangeMessage = new LeaderChangeMessage()
        .setVersion(ControlRecordUtils.LEADER_CHANGE_SCHEMA_HIGHEST_VERSION)
        .setLeaderId(this.election().leaderId())
        .setVoters(voters)
        .setGrantingVoters(grantingVoters);
    accumulator.appendLeaderChangeMessage(leaderChangeMessage, currentTimeMs);
    accumulator.forceDrain();
}
Also used: LeaderChangeMessage(org.apache.kafka.common.message.LeaderChangeMessage), Voter(org.apache.kafka.common.message.LeaderChangeMessage.Voter)
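
The convertToVoters helper called above is not shown in this listing. A minimal sketch of what such a conversion could look like, assuming the voter IDs are plain integers and using only the Voter.setVoterId setter that appears in the later examples (an illustration under those assumptions, not the actual LeaderState implementation; it relies on java.util.Set, java.util.List and java.util.stream.Collectors being imported):

private static List<Voter> convertToVoters(Set<Integer> voterIds) {
    // Map each raw voter id to a LeaderChangeMessage.Voter entry.
    return voterIds.stream()
        .map(voterId -> new Voter().setVoterId(voterId))
        .collect(Collectors.toList());
}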

Example 7 with LeaderChangeMessage

Use of org.apache.kafka.common.message.LeaderChangeMessage in the Apache Kafka project.

From the class BatchAccumulatorTest, method testForceDrainBeforeAppendLeaderChangeMessage.

@Test
public void testForceDrainBeforeAppendLeaderChangeMessage() {
    asList(APPEND, APPEND_ATOMIC).forEach(appender -> {
        int leaderEpoch = 17;
        long baseOffset = 157;
        int lingerMs = 50;
        int maxBatchSize = 512;
        Mockito.when(memoryPool.tryAllocate(maxBatchSize)).thenReturn(ByteBuffer.allocate(maxBatchSize));
        Mockito.when(memoryPool.tryAllocate(256)).thenReturn(ByteBuffer.allocate(256));
        BatchAccumulator<String> acc = buildAccumulator(leaderEpoch, baseOffset, lingerMs, maxBatchSize);
        List<String> records = asList("a", "b", "c", "d", "e", "f", "g", "h", "i");
        // Append records
        assertEquals(baseOffset, appender.call(acc, leaderEpoch, records.subList(0, 1)));
        assertEquals(baseOffset + 2, appender.call(acc, leaderEpoch, records.subList(1, 3)));
        assertEquals(baseOffset + 5, appender.call(acc, leaderEpoch, records.subList(3, 6)));
        assertEquals(baseOffset + 7, appender.call(acc, leaderEpoch, records.subList(6, 8)));
        assertEquals(baseOffset + 8, appender.call(acc, leaderEpoch, records.subList(8, 9)));
        assertFalse(acc.needsDrain(time.milliseconds()));
        // Append a leader change message
        acc.appendLeaderChangeMessage(new LeaderChangeMessage(), time.milliseconds());
        assertTrue(acc.needsDrain(time.milliseconds()));
        // Test that drain status is FINISHED
        assertEquals(0, acc.timeUntilDrain(time.milliseconds()));
        // Drain completed batches
        List<BatchAccumulator.CompletedBatch<String>> batches = acc.drain();
        // Should have 2 batches, one consisting of `records` and one `leaderChangeMessage`
        assertEquals(2, batches.size());
        assertFalse(acc.needsDrain(time.milliseconds()));
        assertEquals(Long.MAX_VALUE - time.milliseconds(), acc.timeUntilDrain(time.milliseconds()));
        BatchAccumulator.CompletedBatch<String> batch = batches.get(0);
        assertEquals(records, batch.records.get());
        assertEquals(baseOffset, batch.baseOffset);
        assertEquals(time.milliseconds(), batch.appendTimestamp());
    });
}
Also used: LeaderChangeMessage(org.apache.kafka.common.message.LeaderChangeMessage), Test(org.junit.jupiter.api.Test)
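
Note that the test appends an empty LeaderChangeMessage: what it exercises is the accumulator's drain behavior, not the message contents. As the assertions show, appending a leader change message immediately marks the accumulator as needing a drain (needsDrain returns true and timeUntilDrain returns 0), and the subsequent drain() yields two completed batches, one holding the previously appended records and one holding the control message.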

Example 8 with LeaderChangeMessage

Use of org.apache.kafka.common.message.LeaderChangeMessage in the Apache Kafka project.

From the class MemoryRecordsTest, method testBuildLeaderChangeMessage.

@Test
public void testBuildLeaderChangeMessage() {
    final int leaderId = 5;
    final int leaderEpoch = 20;
    final int voterId = 6;
    long initialOffset = 983L;
    LeaderChangeMessage leaderChangeMessage = new LeaderChangeMessage()
        .setLeaderId(leaderId)
        .setVoters(Collections.singletonList(new Voter().setVoterId(voterId)));
    ByteBuffer buffer = ByteBuffer.allocate(256);
    MemoryRecords records = MemoryRecords.withLeaderChangeMessage(
        initialOffset, System.currentTimeMillis(), leaderEpoch, buffer, leaderChangeMessage);
    List<MutableRecordBatch> batches = TestUtils.toList(records.batches());
    assertEquals(1, batches.size());
    RecordBatch batch = batches.get(0);
    assertTrue(batch.isControlBatch());
    assertEquals(initialOffset, batch.baseOffset());
    assertEquals(leaderEpoch, batch.partitionLeaderEpoch());
    assertTrue(batch.isValid());
    List<Record> createdRecords = TestUtils.toList(batch);
    assertEquals(1, createdRecords.size());
    Record record = createdRecords.get(0);
    record.ensureValid();
    assertEquals(ControlRecordType.LEADER_CHANGE, ControlRecordType.parse(record.key()));
    LeaderChangeMessage deserializedMessage = ControlRecordUtils.deserializeLeaderChangeMessage(record);
    assertEquals(leaderId, deserializedMessage.leaderId());
    assertEquals(1, deserializedMessage.voters().size());
    assertEquals(voterId, deserializedMessage.voters().get(0).voterId());
}
Also used: LeaderChangeMessage(org.apache.kafka.common.message.LeaderChangeMessage), Voter(org.apache.kafka.common.message.LeaderChangeMessage.Voter), ByteBuffer(java.nio.ByteBuffer), Test(org.junit.jupiter.api.Test), ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
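
As the assertions illustrate, the leader change message is written as a single control batch: the batch reports isControlBatch(), the record's key identifies the control record type (ControlRecordType.parse(record.key()) yields LEADER_CHANGE), and the record's value carries the serialized LeaderChangeMessage, which ControlRecordUtils.deserializeLeaderChangeMessage turns back into the original leader id and voter list.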

Example 9 with LeaderChangeMessage

Use of org.apache.kafka.common.message.LeaderChangeMessage in the Apache Kafka project.

From the class MemoryRecordsBuilderTest, method testWriteLeaderChangeControlBatch.

@ParameterizedTest
@ArgumentsSource(MemoryRecordsBuilderArgumentsProvider.class)
public void testWriteLeaderChangeControlBatch(Args args) {
    ByteBuffer buffer = allocateBuffer(128, args);
    final int leaderId = 1;
    final int leaderEpoch = 5;
    final List<Integer> voters = Arrays.asList(2, 3);
    Supplier<MemoryRecordsBuilder> supplier = () -> new MemoryRecordsBuilder(
        buffer, args.magic, args.compressionType, TimestampType.CREATE_TIME, 0L, 0L,
        RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE,
        false, true, leaderEpoch, buffer.capacity());
    if (args.magic < MAGIC_VALUE_V2) {
        assertThrows(IllegalArgumentException.class, supplier::get);
    } else {
        MemoryRecordsBuilder builder = supplier.get();
        builder.appendLeaderChangeMessage(
            RecordBatch.NO_TIMESTAMP,
            new LeaderChangeMessage()
                .setLeaderId(leaderId)
                .setVoters(voters.stream()
                    .map(voterId -> new Voter().setVoterId(voterId))
                    .collect(Collectors.toList())));
        MemoryRecords built = builder.build();
        List<Record> records = TestUtils.toList(built.records());
        assertEquals(1, records.size());
        LeaderChangeMessage leaderChangeMessage = ControlRecordUtils.deserializeLeaderChangeMessage(records.get(0));
        assertEquals(leaderId, leaderChangeMessage.leaderId());
        assertEquals(voters, leaderChangeMessage.voters().stream().map(Voter::voterId).collect(Collectors.toList()));
    }
}
Also used: Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows), Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull), Arrays(java.util.Arrays), MAGIC_VALUE_V2(org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V2), MAGIC_VALUE_V1(org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V1), BiFunction(java.util.function.BiFunction), EnumSource(org.junit.jupiter.params.provider.EnumSource), Random(java.util.Random), ExtensionContext(org.junit.jupiter.api.extension.ExtensionContext), MAGIC_VALUE_V0(org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V0), Supplier(java.util.function.Supplier), ByteBuffer(java.nio.ByteBuffer), ArrayList(java.util.ArrayList), OptionalLong(java.util.OptionalLong), Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse), BufferSupplier(org.apache.kafka.common.utils.BufferSupplier), Arrays.asList(java.util.Arrays.asList), ArgumentsProvider(org.junit.jupiter.params.provider.ArgumentsProvider), Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals), LeaderChangeMessage(org.apache.kafka.common.message.LeaderChangeMessage), Utils(org.apache.kafka.common.utils.Utils), Time(org.apache.kafka.common.utils.Time), TestUtils(org.apache.kafka.test.TestUtils), Utils.utf8(org.apache.kafka.common.utils.Utils.utf8), Voter(org.apache.kafka.common.message.LeaderChangeMessage.Voter), Arguments(org.junit.jupiter.params.provider.Arguments), Collectors(java.util.stream.Collectors), CloseableIterator(org.apache.kafka.common.utils.CloseableIterator), Test(org.junit.jupiter.api.Test), UnsupportedCompressionTypeException(org.apache.kafka.common.errors.UnsupportedCompressionTypeException), ParameterizedTest(org.junit.jupiter.params.ParameterizedTest), List(java.util.List), Stream(java.util.stream.Stream), Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue), ByteBufferOutputStream(org.apache.kafka.common.utils.ByteBufferOutputStream), ArgumentsSource(org.junit.jupiter.params.provider.ArgumentsSource), Collections(java.util.Collections)
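
The supplier-based construction lets one parameterized test cover both outcomes: the builder here is configured to write a control batch, which older message formats do not support, so for args.magic below MAGIC_VALUE_V2 constructing the MemoryRecordsBuilder is expected to throw IllegalArgumentException, while for v2 the appended leader change message round-trips through ControlRecordUtils.deserializeLeaderChangeMessage.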

Example 10 with LeaderChangeMessage

Use of org.apache.kafka.common.message.LeaderChangeMessage in the Apache Kafka project.

From the class SnapshotFileReader, method handleControlBatch.

private void handleControlBatch(FileChannelRecordBatch batch) {
    for (Iterator<Record> iter = batch.iterator(); iter.hasNext(); ) {
        Record record = iter.next();
        try {
            short typeId = ControlRecordType.parseTypeId(record.key());
            ControlRecordType type = ControlRecordType.fromTypeId(typeId);
            switch(type) {
                case LEADER_CHANGE:
                    LeaderChangeMessage message = new LeaderChangeMessage();
                    message.read(new ByteBufferAccessor(record.value()), (short) 0);
                    listener.handleLeaderChange(new LeaderAndEpoch(OptionalInt.of(message.leaderId()), batch.partitionLeaderEpoch()));
                    break;
                default:
                    log.error("Ignoring control record with type {} at offset {}", type, record.offset());
            }
        } catch (Throwable e) {
            log.error("unable to read control record at offset {}", record.offset(), e);
        }
    }
}
Also used: LeaderChangeMessage(org.apache.kafka.common.message.LeaderChangeMessage), Record(org.apache.kafka.common.record.Record), LeaderAndEpoch(org.apache.kafka.raft.LeaderAndEpoch), ByteBufferAccessor(org.apache.kafka.common.protocol.ByteBufferAccessor), ControlRecordType(org.apache.kafka.common.record.ControlRecordType)
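
Example 10 deserializes the control record value by hand with a ByteBufferAccessor at schema version 0. A minimal alternative sketch of the LEADER_CHANGE case using the ControlRecordUtils helper from Example 8 (an illustration that assumes the helper selects the schema version itself; it is not the actual SnapshotFileReader code):

case LEADER_CHANGE:
    // Helper-based deserialization instead of the manual message.read(...) above.
    LeaderChangeMessage message = ControlRecordUtils.deserializeLeaderChangeMessage(record);
    listener.handleLeaderChange(new LeaderAndEpoch(OptionalInt.of(message.leaderId()), batch.partitionLeaderEpoch()));
    break;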

Aggregations

LeaderChangeMessage (org.apache.kafka.common.message.LeaderChangeMessage): 10
ByteBuffer (java.nio.ByteBuffer): 6
Voter (org.apache.kafka.common.message.LeaderChangeMessage.Voter): 5
Test (org.junit.jupiter.api.Test): 5
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 3
ArrayList (java.util.ArrayList): 2
ByteBufferAccessor (org.apache.kafka.common.protocol.ByteBufferAccessor): 2
Record (org.apache.kafka.common.record.Record): 2
ArgumentsSource (org.junit.jupiter.params.provider.ArgumentsSource): 2
Arrays (java.util.Arrays): 1
Arrays.asList (java.util.Arrays.asList): 1
Collections (java.util.Collections): 1
List (java.util.List): 1
OptionalLong (java.util.OptionalLong): 1
Random (java.util.Random): 1
BiFunction (java.util.function.BiFunction): 1
Supplier (java.util.function.Supplier): 1
Collectors (java.util.stream.Collectors): 1
Stream (java.util.stream.Stream): 1
UnsupportedCompressionTypeException (org.apache.kafka.common.errors.UnsupportedCompressionTypeException): 1