
Example 1 with Batch

Use of org.apache.kafka.raft.Batch in project kafka by apache.

From the class SnapshotFileReader, method handleMetadataBatch:

private void handleMetadataBatch(FileChannelRecordBatch batch) {
    List<ApiMessageAndVersion> messages = new ArrayList<>();
    for (Record record : batch) {
        ByteBufferAccessor accessor = new ByteBufferAccessor(record.value());
        try {
            ApiMessageAndVersion messageAndVersion = serde.read(accessor, record.valueSize());
            messages.add(messageAndVersion);
        } catch (Throwable e) {
            log.error("unable to read metadata record at offset {}", record.offset(), e);
        }
    }
    listener.handleCommit(MemoryBatchReader.of(
        Collections.singletonList(
            Batch.data(
                batch.baseOffset(),
                batch.partitionLeaderEpoch(),
                batch.maxTimestamp(),
                batch.sizeInBytes(),
                messages)),
        reader -> { }));
}
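
For readers who want to try the pattern in isolation, here is a minimal sketch (not taken from the Kafka sources) that builds one data Batch and drains it through a MemoryBatchReader, mirroring the handleCommit call above. The TopicRecord payload, offsets, epoch, and size are made-up placeholders, and the class name BatchSketch is invented for illustration.

import java.util.Collections;
import java.util.List;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.metadata.TopicRecord;
import org.apache.kafka.raft.Batch;
import org.apache.kafka.raft.internals.MemoryBatchReader;
import org.apache.kafka.server.common.ApiMessageAndVersion;

public class BatchSketch {
    public static void main(String[] args) throws Exception {
        // One made-up metadata record; any ApiMessage would work here.
        List<ApiMessageAndVersion> messages = Collections.singletonList(
            new ApiMessageAndVersion(
                new TopicRecord().setName("foo").setTopicId(Uuid.randomUuid()), (short) 0));

        // Batch.data(baseOffset, epoch, appendTimestamp, sizeInBytes, records); the timestamp and
        // sizeInBytes are placeholders because this sketch never serializes the records.
        Batch<ApiMessageAndVersion> batch = Batch.data(0L, 1, 0L, 0, messages);

        // MemoryBatchReader wraps the batches; the lambda is the close listener, as in the snippet above.
        MemoryBatchReader<ApiMessageAndVersion> reader =
            MemoryBatchReader.of(Collections.singletonList(batch), r -> { });

        while (reader.hasNext()) {
            Batch<ApiMessageAndVersion> b = reader.next();
            System.out.println("offset " + b.baseOffset() + ": " + b.records());
        }
        reader.close();
    }
}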
Also used: MemoryBatchReader(org.apache.kafka.raft.internals.MemoryBatchReader) ByteBufferAccessor(org.apache.kafka.common.protocol.ByteBufferAccessor) LoggerFactory(org.slf4j.LoggerFactory) CompletableFuture(java.util.concurrent.CompletableFuture) ControlRecordType(org.apache.kafka.common.record.ControlRecordType) OptionalInt(java.util.OptionalInt) Record(org.apache.kafka.common.record.Record) ArrayList(java.util.ArrayList) KafkaEventQueue(org.apache.kafka.queue.KafkaEventQueue) LogContext(org.apache.kafka.common.utils.LogContext) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) FileRecords(org.apache.kafka.common.record.FileRecords) LeaderChangeMessage(org.apache.kafka.common.message.LeaderChangeMessage) MetadataRecordSerde(org.apache.kafka.metadata.MetadataRecordSerde) Logger(org.slf4j.Logger) Time(org.apache.kafka.common.utils.Time) Iterator(java.util.Iterator) FileChannelRecordBatch(org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch) File(java.io.File) Batch(org.apache.kafka.raft.Batch) List(java.util.List) EventQueue(org.apache.kafka.queue.EventQueue) LeaderAndEpoch(org.apache.kafka.raft.LeaderAndEpoch) RaftClient(org.apache.kafka.raft.RaftClient) Collections(java.util.Collections)

Example 2 with Batch

Use of org.apache.kafka.raft.Batch in project kafka by apache.

From the class LocalLogManager, method scheduleAppend:

@Override
public long scheduleAppend(int epoch, List<ApiMessageAndVersion> batch) {
    if (batch.isEmpty()) {
        throw new IllegalArgumentException("Batch cannot be empty");
    }
    List<ApiMessageAndVersion> first = batch.subList(0, batch.size() / 2);
    List<ApiMessageAndVersion> second = batch.subList(batch.size() / 2, batch.size());
    assertEquals(batch.size(), first.size() + second.size());
    assertFalse(second.isEmpty());
    OptionalLong firstOffset = first
        .stream()
        .mapToLong(record -> scheduleAtomicAppend(epoch, Collections.singletonList(record)))
        .max();
    if (firstOffset.isPresent() && resignAfterNonAtomicCommit.getAndSet(false)) {
        // Emulate losing leadership in the middle of a non-atomic append by not writing
        // the rest of the batch and instead writing a leader change message
        resign(leader.epoch());
        return firstOffset.getAsLong() + second.size();
    } else {
        return second
            .stream()
            .mapToLong(record -> scheduleAtomicAppend(epoch, Collections.singletonList(record)))
            .max()
            .getAsLong();
    }
}
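
The return value in the resign branch is easy to misread: only the first half of the batch is written, but the caller is still told the offset at which the full batch would have ended (firstOffset.getAsLong() + second.size()). Below is a self-contained, hypothetical sketch of that bookkeeping, with no Kafka dependencies; the names NonAtomicAppendSketch and appendOne are invented for illustration.

import java.util.ArrayList;
import java.util.List;

public class NonAtomicAppendSketch {
    private final List<String> log = new ArrayList<>();

    // Hypothetical stand-in for scheduleAtomicAppend: appends one record and returns its offset.
    long appendOne(String record) {
        log.add(record);
        return log.size() - 1;
    }

    // Appends the first half record by record, optionally "loses leadership", and reports the
    // offset the last record of the batch would occupy.
    long scheduleAppend(List<String> batch, boolean resignAfterFirstHalf) {
        List<String> first = batch.subList(0, batch.size() / 2);
        List<String> second = batch.subList(batch.size() / 2, batch.size());

        long lastOffsetOfFirstHalf = -1;
        for (String record : first) {
            lastOffsetOfFirstHalf = appendOne(record);
        }
        if (resignAfterFirstHalf && !first.isEmpty()) {
            // The second half is never written, but the caller still learns where the batch
            // would have ended, matching firstOffset.getAsLong() + second.size() above.
            return lastOffsetOfFirstHalf + second.size();
        }
        long lastOffset = lastOffsetOfFirstHalf;
        for (String record : second) {
            lastOffset = appendOne(record);
        }
        return lastOffset;
    }

    public static void main(String[] args) {
        NonAtomicAppendSketch sketch = new NonAtomicAppendSketch();
        long last = sketch.scheduleAppend(List.of("a", "b", "c", "d"), true);
        // Prints: reported last offset: 3, records actually written: [a, b]
        System.out.println("reported last offset: " + last + ", records actually written: " + sketch.log);
    }
}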
Also used: IntStream(java.util.stream.IntStream) MockTime(org.apache.kafka.common.utils.MockTime) MockRawSnapshotWriter(org.apache.kafka.snapshot.MockRawSnapshotWriter) MemoryBatchReader(org.apache.kafka.raft.internals.MemoryBatchReader) LoggerFactory(org.slf4j.LoggerFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) SimpleImmutableEntry(java.util.AbstractMap.SimpleImmutableEntry) RecordsSnapshotWriter(org.apache.kafka.snapshot.RecordsSnapshotWriter) OptionalInt(java.util.OptionalInt) MockRawSnapshotReader(org.apache.kafka.snapshot.MockRawSnapshotReader) OptionalLong(java.util.OptionalLong) MemoryPool(org.apache.kafka.common.memory.MemoryPool) SnapshotWriter(org.apache.kafka.snapshot.SnapshotWriter) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) BufferSupplier(org.apache.kafka.common.utils.BufferSupplier) KafkaEventQueue(org.apache.kafka.queue.KafkaEventQueue) LogContext(org.apache.kafka.common.utils.LogContext) RawSnapshotWriter(org.apache.kafka.snapshot.RawSnapshotWriter) Map(java.util.Map) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) SnapshotReader(org.apache.kafka.snapshot.SnapshotReader) OffsetAndEpoch(org.apache.kafka.raft.OffsetAndEpoch) RecordsSnapshotReader(org.apache.kafka.snapshot.RecordsSnapshotReader) MetadataRecordSerde(org.apache.kafka.metadata.MetadataRecordSerde) CompressionType(org.apache.kafka.common.record.CompressionType) RawSnapshotReader(org.apache.kafka.snapshot.RawSnapshotReader) Logger(org.slf4j.Logger) IdentityHashMap(java.util.IdentityHashMap) Time(org.apache.kafka.common.utils.Time) Iterator(java.util.Iterator) NavigableMap(java.util.NavigableMap) Collectors(java.util.stream.Collectors) Batch(org.apache.kafka.raft.Batch) Objects(java.util.Objects) ExecutionException(java.util.concurrent.ExecutionException) List(java.util.List) ObjectSerializationCache(org.apache.kafka.common.protocol.ObjectSerializationCache) TreeMap(java.util.TreeMap) EventQueue(org.apache.kafka.queue.EventQueue) Entry(java.util.Map.Entry) Optional(java.util.Optional) LeaderAndEpoch(org.apache.kafka.raft.LeaderAndEpoch) RaftClient(org.apache.kafka.raft.RaftClient) Collections(java.util.Collections)

Example 3 with Batch

Use of org.apache.kafka.raft.Batch in project kafka by apache.

From the class QuorumControllerTest, method checkSnapshotSubcontent:

/**
 * This function checks that the records in the iterator are a subset of the expected list.
 *
 * This is needed because, when snapshots are generated through configuration, it is difficult to control
 * exactly when a snapshot will be generated and which committed offset will be included in it.
 */
private void checkSnapshotSubcontent(List<ApiMessageAndVersion> expected, Iterator<Batch<ApiMessageAndVersion>> iterator) throws Exception {
    RecordTestUtils.deepSortRecords(expected);
    List<ApiMessageAndVersion> actual = StreamSupport
        .stream(Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED), false)
        .flatMap(batch -> batch.records().stream())
        .collect(Collectors.toList());
    RecordTestUtils.deepSortRecords(actual);
    int expectedIndex = 0;
    for (ApiMessageAndVersion current : actual) {
        while (expectedIndex < expected.size() && !expected.get(expectedIndex).equals(current)) {
            expectedIndex += 1;
        }
        expectedIndex += 1;
    }
    assertTrue(expectedIndex <= expected.size(), String.format("actual is not a subset of expected: expected = %s; actual = %s", expected, actual));
}
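
The loop above is an ordered-subset check: it walks expected forward looking for each element of actual and fails if it runs off the end. Here is a small, self-contained illustration of the same idea on plain strings; the names OrderedSubsetSketch and isOrderedSubset are invented and are not part of the Kafka test.

import java.util.List;

public class OrderedSubsetSketch {
    // Returns true if every element of actual appears in expected, in the same relative order.
    static <T> boolean isOrderedSubset(List<T> expected, List<T> actual) {
        int expectedIndex = 0;
        for (T current : actual) {
            // Advance through expected until the current element is found or expected is exhausted.
            while (expectedIndex < expected.size() && !expected.get(expectedIndex).equals(current)) {
                expectedIndex += 1;
            }
            expectedIndex += 1;
        }
        // If the index ran past the end, some element of actual was never matched.
        return expectedIndex <= expected.size();
    }

    public static void main(String[] args) {
        List<String> expected = List.of("a", "b", "c", "d");
        System.out.println(isOrderedSubset(expected, List.of("b", "d"))); // true
        System.out.println(isOrderedSubset(expected, List.of("d", "b"))); // false: order violated
    }
}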
Also used: Arrays(java.util.Arrays) BrokerRegistrationRequestData(org.apache.kafka.common.message.BrokerRegistrationRequestData) BrokerIdNotRegisteredException(org.apache.kafka.common.errors.BrokerIdNotRegisteredException) ConfigurationControlManagerTest.entry(org.apache.kafka.controller.ConfigurationControlManagerTest.entry) Spliterators(java.util.Spliterators) Assertions.assertNotEquals(org.junit.jupiter.api.Assertions.assertNotEquals) ElectLeadersRequestData(org.apache.kafka.common.message.ElectLeadersRequestData) Listener(org.apache.kafka.common.message.BrokerRegistrationRequestData.Listener) CreatePartitionsTopicResult(org.apache.kafka.common.message.CreatePartitionsResponseData.CreatePartitionsTopicResult) Future(java.util.concurrent.Future) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) CreatePartitionsTopic(org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic) BufferSupplier(org.apache.kafka.common.utils.BufferSupplier) Map(java.util.Map) SET(org.apache.kafka.clients.admin.AlterConfigOp.OpType.SET) CreatableReplicaAssignmentCollection(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignmentCollection) ListPartitionReassignmentsRequestData(org.apache.kafka.common.message.ListPartitionReassignmentsRequestData) TopicRecord(org.apache.kafka.common.metadata.TopicRecord) RawSnapshotReader(org.apache.kafka.snapshot.RawSnapshotReader) TestUtils(org.apache.kafka.test.TestUtils) PartitionRegistration(org.apache.kafka.metadata.PartitionRegistration) Collectors(java.util.stream.Collectors) Test(org.junit.jupiter.api.Test) CountDownLatch(java.util.concurrent.CountDownLatch) ConfigResourceExistenceChecker(org.apache.kafka.controller.QuorumController.ConfigResourceExistenceChecker) List(java.util.List) TopicIdPartition(org.apache.kafka.controller.BrokersToIsrs.TopicIdPartition) AllocateProducerIdsRequestData(org.apache.kafka.common.message.AllocateProducerIdsRequestData) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) BrokerEndpoint(org.apache.kafka.common.metadata.RegisterBrokerRecord.BrokerEndpoint) Optional(java.util.Optional) Errors(org.apache.kafka.common.protocol.Errors) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) CreateTopicsResponseData(org.apache.kafka.common.message.CreateTopicsResponseData) Spliterator(java.util.Spliterator) IntStream(java.util.stream.IntStream) Uuid(org.apache.kafka.common.Uuid) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) CONFIGS(org.apache.kafka.controller.ConfigurationControlManagerTest.CONFIGS) ListPartitionReassignmentsResponseData(org.apache.kafka.common.message.ListPartitionReassignmentsResponseData) LocalLogManagerTestEnv(org.apache.kafka.metalog.LocalLogManagerTestEnv) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) CreatableTopic(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic) ApiError(org.apache.kafka.common.requests.ApiError) TOPIC(org.apache.kafka.common.config.ConfigResource.Type.TOPIC) ConfigResource(org.apache.kafka.common.config.ConfigResource) BrokerHeartbeatReply(org.apache.kafka.metadata.BrokerHeartbeatReply) AlterPartitionReassignmentsRequestData(org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData) ApiMessageAndVersion(org.apache.kafka.server.common.ApiMessageAndVersion) StreamSupport(java.util.stream.StreamSupport) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) SnapshotReader(org.apache.kafka.snapshot.SnapshotReader) Utils(org.apache.kafka.common.utils.Utils) RecordsSnapshotReader(org.apache.kafka.snapshot.RecordsSnapshotReader) MetadataRecordSerde(org.apache.kafka.metadata.MetadataRecordSerde) BROKER0(org.apache.kafka.controller.ConfigurationControlManagerTest.BROKER0) TimeoutException(org.apache.kafka.common.errors.TimeoutException) CreatableReplicaAssignment(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableReplicaAssignment) Iterator(java.util.Iterator) BrokerRegistrationReply(org.apache.kafka.metadata.BrokerRegistrationReply) ElectLeadersResponseData(org.apache.kafka.common.message.ElectLeadersResponseData) BrokerHeartbeatRequestData(org.apache.kafka.common.message.BrokerHeartbeatRequestData) BROKER(org.apache.kafka.common.config.ConfigResource.Type.BROKER) Batch(org.apache.kafka.raft.Batch) Assertions.assertSame(org.junit.jupiter.api.Assertions.assertSame) ExecutionException(java.util.concurrent.ExecutionException) AlterPartitionReassignmentsResponseData(org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData) CreatableTopicCollection(org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCollection) CreateTopicsRequestData(org.apache.kafka.common.message.CreateTopicsRequestData) RecordTestUtils(org.apache.kafka.metadata.RecordTestUtils) ReassignableTopic(org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignableTopic) PartitionRecord(org.apache.kafka.common.metadata.PartitionRecord) AlterIsrRequestData(org.apache.kafka.common.message.AlterIsrRequestData) RegisterBrokerRecord(org.apache.kafka.common.metadata.RegisterBrokerRecord) HOURS(java.util.concurrent.TimeUnit.HOURS) Collections(java.util.Collections) Timeout(org.junit.jupiter.api.Timeout) ListenerCollection(org.apache.kafka.common.message.BrokerRegistrationRequestData.ListenerCollection) BrokerEndpointCollection(org.apache.kafka.common.metadata.RegisterBrokerRecord.BrokerEndpointCollection) ProducerIdsRecord(org.apache.kafka.common.metadata.ProducerIdsRecord)

Aggregations

Collections (java.util.Collections) 3
Iterator (java.util.Iterator) 3
List (java.util.List) 3
CompletableFuture (java.util.concurrent.CompletableFuture) 3
MetadataRecordSerde (org.apache.kafka.metadata.MetadataRecordSerde) 3
Batch (org.apache.kafka.raft.Batch) 3
ApiMessageAndVersion (org.apache.kafka.server.common.ApiMessageAndVersion) 3
HashMap (java.util.HashMap) 2
Map (java.util.Map) 2
Optional (java.util.Optional) 2
OptionalInt (java.util.OptionalInt) 2
ExecutionException (java.util.concurrent.ExecutionException) 2
Collectors (java.util.stream.Collectors) 2
IntStream (java.util.stream.IntStream) 2
BufferSupplier (org.apache.kafka.common.utils.BufferSupplier) 2
RawSnapshotReader (org.apache.kafka.snapshot.RawSnapshotReader) 2
RecordsSnapshotReader (org.apache.kafka.snapshot.RecordsSnapshotReader) 2
SnapshotReader (org.apache.kafka.snapshot.SnapshotReader) 2
Assertions.assertEquals (org.junit.jupiter.api.Assertions.assertEquals) 2
Assertions.assertFalse (org.junit.jupiter.api.Assertions.assertFalse) 2