Example 16 with RemoteLogSegmentId

Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.

The class TopicBasedRemoteLogMetadataManagerRestartTest, method testRLMMAPIsAfterRestart.

@Test
public void testRLMMAPIsAfterRestart() throws Exception {
    // Create topics.
    String leaderTopic = "new-leader";
    HashMap<Object, Seq<Object>> assignedLeaderTopicReplicas = new HashMap<>();
    List<Object> leaderTopicReplicas = new ArrayList<>();
    // Set broker id 0 as the first entry, which is taken as the leader.
    leaderTopicReplicas.add(0);
    leaderTopicReplicas.add(1);
    leaderTopicReplicas.add(2);
    assignedLeaderTopicReplicas.put(0, JavaConverters.asScalaBuffer(leaderTopicReplicas));
    remoteLogMetadataManagerHarness.createTopicWithAssignment(leaderTopic, JavaConverters.mapAsScalaMap(assignedLeaderTopicReplicas), remoteLogMetadataManagerHarness.listenerName());
    String followerTopic = "new-follower";
    HashMap<Object, Seq<Object>> assignedFollowerTopicReplicas = new HashMap<>();
    List<Object> followerTopicReplicas = new ArrayList<>();
    // Set broker id 1 as the first entry, which is taken as the leader.
    followerTopicReplicas.add(1);
    followerTopicReplicas.add(2);
    followerTopicReplicas.add(0);
    assignedFollowerTopicReplicas.put(0, JavaConverters.asScalaBuffer(followerTopicReplicas));
    remoteLogMetadataManagerHarness.createTopicWithAssignment(followerTopic, JavaConverters.mapAsScalaMap(assignedFollowerTopicReplicas), remoteLogMetadataManagerHarness.listenerName());
    final TopicIdPartition leaderTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(leaderTopic, 0));
    final TopicIdPartition followerTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(followerTopic, 0));
    // Register these partitions to RLMM.
    topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition));
    // Add segments for these partitions; they are not available yet because the respective metadata partitions have not been subscribed to.
    RemoteLogSegmentMetadata leaderSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata).get();
    RemoteLogSegmentMetadata followerSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(followerTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(followerSegmentMetadata).get();
    // Stop TopicBasedRemoteLogMetadataManager only.
    stopTopicBasedRemoteLogMetadataManagerHarness();
    // Start TopicBasedRemoteLogMetadataManager, but do not start the consumer thread, in order to check whether the
    // stored metadata is loaded successfully.
    startTopicBasedRemoteLogMetadataManagerHarness(false);
    // Register these partitions to RLMM, which loads the respective metadata snapshots.
    topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition));
    // Check for the stored entries from the earlier run.
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Collections.singleton(leaderSegmentMetadata).iterator(), topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)));
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Collections.singleton(followerSegmentMetadata).iterator(), topicBasedRlmm().listRemoteLogSegments(followerTopicIdPartition)));
    // Check whether the check-pointed consumer offsets are stored or not.
    Path committedOffsetsPath = new File(logDir, COMMITTED_OFFSETS_FILE_NAME).toPath();
    Assertions.assertTrue(committedOffsetsPath.toFile().exists());
    CommittedOffsetsFile committedOffsetsFile = new CommittedOffsetsFile(committedOffsetsPath.toFile());
    int metadataPartition1 = topicBasedRlmm().metadataPartition(leaderTopicIdPartition);
    int metadataPartition2 = topicBasedRlmm().metadataPartition(followerTopicIdPartition);
    Optional<Long> receivedOffsetForPartition1 = topicBasedRlmm().receivedOffsetForPartition(metadataPartition1);
    Optional<Long> receivedOffsetForPartition2 = topicBasedRlmm().receivedOffsetForPartition(metadataPartition2);
    Assertions.assertTrue(receivedOffsetForPartition1.isPresent());
    Assertions.assertTrue(receivedOffsetForPartition2.isPresent());
    // Make sure these offsets are at least 0.
    Assertions.assertTrue(receivedOffsetForPartition1.get() >= 0);
    Assertions.assertTrue(receivedOffsetForPartition2.get() >= 0);
    // Check that the stored entries and the offsets set on the consumer are the same.
    Map<Integer, Long> partitionToOffset = committedOffsetsFile.readEntries();
    Assertions.assertEquals(partitionToOffset.get(metadataPartition1), receivedOffsetForPartition1.get());
    Assertions.assertEquals(partitionToOffset.get(metadataPartition2), receivedOffsetForPartition2.get());
    // Start Consumer thread
    topicBasedRlmm().startConsumerThread();
    // Add one more segment
    RemoteLogSegmentMetadata leaderSegmentMetadata2 = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), 101, 200, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 101L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata2).get();
    // Check that both the stored segment and recently added segment are available.
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Arrays.asList(leaderSegmentMetadata, leaderSegmentMetadata2).iterator(), topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)));
}
Also used : Path(java.nio.file.Path) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) TopicPartition(org.apache.kafka.common.TopicPartition) RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId) File(java.io.File) Seq(scala.collection.Seq) RemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata) Test(org.junit.jupiter.api.Test)
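
The eight positional arguments to the RemoteLogSegmentMetadata constructor above are hard to read in-line. Below is a commented restatement of the leader segment, assuming the constructor's parameter order (segment id, start offset, end offset, max timestamp, broker id, event timestamp, segment size, leader-epoch map); the trailing comments are descriptive only, not part of the API:

RemoteLogSegmentMetadata leaderSegmentMetadata = new RemoteLogSegmentMetadata(
        new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()),
        0,                                 // startOffset
        100,                               // endOffset
        -1L,                               // maxTimestampMs (unknown in this test)
        0,                                 // brokerId
        time.milliseconds(),               // eventTimestampMs
        SEG_SIZE,                          // segmentSizeInBytes
        Collections.singletonMap(0, 0L));  // leaderEpoch -> segment start offset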

Example 17 with RemoteLogSegmentId

Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.

The class RemoteLogLeaderEpochState, method collectConvertedIdToMetadata.

private void collectConvertedIdToMetadata(Collection<RemoteLogSegmentId> segmentIds, Map<RemoteLogSegmentId, RemoteLogSegmentMetadata> idToSegmentMetadata, Collection<RemoteLogSegmentMetadata> result) throws RemoteResourceNotFoundException {
    for (RemoteLogSegmentId id : segmentIds) {
        RemoteLogSegmentMetadata metadata = idToSegmentMetadata.get(id);
        if (metadata == null) {
            throw new RemoteResourceNotFoundException("No remote log segment metadata found for :" + id);
        }
        result.add(metadata);
    }
}
Also used : RemoteResourceNotFoundException(org.apache.kafka.server.log.remote.storage.RemoteResourceNotFoundException) RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId) RemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata)
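
A hypothetical call site for this helper; the variable names unreferencedSegmentIds and idToSegmentMetadata are illustrative, not taken from the original class:

// Illustrative only: collect metadata for a set of segment ids into a caller-owned list.
List<RemoteLogSegmentMetadata> converted = new ArrayList<>();
collectConvertedIdToMetadata(unreferencedSegmentIds, idToSegmentMetadata, converted);
// On return, converted holds one metadata entry per id; a missing id raises
// RemoteResourceNotFoundException instead of being silently skipped.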

Example 18 with RemoteLogSegmentId

Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.

The class RemoteLogMetadataCache, method doHandleSegmentStateTransitionForLeaderEpochs.

private void doHandleSegmentStateTransitionForLeaderEpochs(RemoteLogSegmentMetadata remoteLogSegmentMetadata, RemoteLogLeaderEpochState.Action action) {
    RemoteLogSegmentId remoteLogSegmentId = remoteLogSegmentMetadata.remoteLogSegmentId();
    Map<Integer, Long> leaderEpochToOffset = remoteLogSegmentMetadata.segmentLeaderEpochs();
    // Go through all the leader epochs and apply the given action.
    for (Map.Entry<Integer, Long> entry : leaderEpochToOffset.entrySet()) {
        Integer leaderEpoch = entry.getKey();
        Long startOffset = entry.getValue();
        // leaderEpochEntries will be empty when restoring the metadata from a snapshot.
        RemoteLogLeaderEpochState remoteLogLeaderEpochState = leaderEpochEntries.computeIfAbsent(leaderEpoch, x -> new RemoteLogLeaderEpochState());
        action.accept(leaderEpoch, remoteLogLeaderEpochState, startOffset, remoteLogSegmentId);
    }
}
Also used : RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) NavigableMap(java.util.NavigableMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) Map(java.util.Map)
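
The shape of the Action callback can be read off the accept(...) call above. A plausible declaration, inferred from that call site (the exact modifiers in Kafka's source may differ):

@FunctionalInterface
interface Action {
    // Applied once per leader epoch covered by the segment.
    void accept(int leaderEpoch,
                RemoteLogLeaderEpochState remoteLogLeaderEpochState,
                long startOffset,
                RemoteLogSegmentId remoteLogSegmentId);
}

This lets a single epoch loop serve every state transition: callers pass a different per-epoch handler for each transition instead of duplicating the iteration.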

Example 19 with RemoteLogSegmentId

Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.

The class RemoteLogMetadataCache, method addCopyInProgressSegment.

/**
 * This method tracks the given remote segment as not yet available for reads. It does not add the segment
 * leader epoch offset mapping until this segment reaches COPY_SEGMENT_FINISHED state.
 *
 * @param remoteLogSegmentMetadata RemoteLogSegmentMetadata instance
 */
public void addCopyInProgressSegment(RemoteLogSegmentMetadata remoteLogSegmentMetadata) {
    log.debug("Adding to in-progress state: [{}]", remoteLogSegmentMetadata);
    Objects.requireNonNull(remoteLogSegmentMetadata, "remoteLogSegmentMetadata can not be null");
    // This method is allowed only to add a remote log segment with the initial state (RemoteLogSegmentState.COPY_SEGMENT_STARTED),
    // but not to update the existing remote log segment metadata.
    if (remoteLogSegmentMetadata.state() != RemoteLogSegmentState.COPY_SEGMENT_STARTED) {
        throw new IllegalArgumentException("Given remoteLogSegmentMetadata:" + remoteLogSegmentMetadata + " should have state as " + RemoteLogSegmentState.COPY_SEGMENT_STARTED + " but it contains state as: " + remoteLogSegmentMetadata.state());
    }
    RemoteLogSegmentId remoteLogSegmentId = remoteLogSegmentMetadata.remoteLogSegmentId();
    RemoteLogSegmentMetadata existingMetadata = idToSegmentMetadata.get(remoteLogSegmentId);
    checkStateTransition(existingMetadata != null ? existingMetadata.state() : null, remoteLogSegmentMetadata.state());
    for (Integer epoch : remoteLogSegmentMetadata.segmentLeaderEpochs().keySet()) {
        leaderEpochEntries.computeIfAbsent(epoch, leaderEpoch -> new RemoteLogLeaderEpochState()).handleSegmentWithCopySegmentStartedState(remoteLogSegmentId);
    }
    idToSegmentMetadata.put(remoteLogSegmentId, remoteLogSegmentMetadata);
}
Also used : RemoteLogSegmentMetadataUpdate(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate) RemoteLogSegmentState(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) LoggerFactory(org.slf4j.LoggerFactory) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) NavigableMap(java.util.NavigableMap) RemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata) ConcurrentMap(java.util.concurrent.ConcurrentMap) Objects(java.util.Objects) Map(java.util.Map) Optional(java.util.Optional) Collections(java.util.Collections) RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId) RemoteResourceNotFoundException(org.apache.kafka.server.log.remote.storage.RemoteResourceNotFoundException)
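
checkStateTransition is not shown in this example. A minimal sketch of what it plausibly does, assuming the static helper RemoteLogSegmentState.isValidTransition(srcState, targetState) from the remote storage API; treat it as illustrative rather than Kafka's actual implementation:

private void checkStateTransition(RemoteLogSegmentState existingState,
                                  RemoteLogSegmentState targetState) {
    // A null existingState means the segment is being seen for the first time.
    if (!RemoteLogSegmentState.isValidTransition(existingState, targetState)) {
        throw new IllegalStateException("Current state: " + existingState
                + " can not transition to: " + targetState);
    }
}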

Example 20 with RemoteLogSegmentId

Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.

The class RemoteLogSegmentMetadataUpdateTransform, method fromApiMessageAndVersion.

public RemoteLogSegmentMetadataUpdate fromApiMessageAndVersion(ApiMessageAndVersion apiMessageAndVersion) {
    RemoteLogSegmentMetadataUpdateRecord record = (RemoteLogSegmentMetadataUpdateRecord) apiMessageAndVersion.message();
    RemoteLogSegmentMetadataUpdateRecord.RemoteLogSegmentIdEntry entry = record.remoteLogSegmentId();
    TopicIdPartition topicIdPartition = new TopicIdPartition(entry.topicIdPartition().id(), new TopicPartition(entry.topicIdPartition().name(), entry.topicIdPartition().partition()));
    return new RemoteLogSegmentMetadataUpdate(new RemoteLogSegmentId(topicIdPartition, entry.id()), record.eventTimestampMs(), RemoteLogSegmentState.forId(record.remoteLogSegmentState()), record.brokerId());
}
Also used : RemoteLogSegmentMetadataUpdate(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate) TopicPartition(org.apache.kafka.common.TopicPartition) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId) RemoteLogSegmentMetadataUpdateRecord(org.apache.kafka.server.log.remote.metadata.storage.generated.RemoteLogSegmentMetadataUpdateRecord)
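
For symmetry, a sketch of the forward transform. The fluent setters follow the conventions of Kafka's generated record classes, and createRemoteLogSegmentIdEntry is a hypothetical helper for building the nested id entry, so this is an assumption-laden sketch rather than the actual Kafka code:

public ApiMessageAndVersion toApiMessageAndVersion(RemoteLogSegmentMetadataUpdate update) {
    RemoteLogSegmentMetadataUpdateRecord record = new RemoteLogSegmentMetadataUpdateRecord()
            .setRemoteLogSegmentId(createRemoteLogSegmentIdEntry(update))  // hypothetical helper
            .setEventTimestampMs(update.eventTimestampMs())
            .setRemoteLogSegmentState(update.state().id())
            .setBrokerId(update.brokerId());
    return new ApiMessageAndVersion(record, record.highestSupportedVersion());
}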

Aggregations

RemoteLogSegmentId (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId): 21 uses
RemoteLogSegmentMetadata (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata): 14 uses
RemoteLogSegmentMetadataUpdate (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate): 9 uses
TopicIdPartition (org.apache.kafka.common.TopicIdPartition): 6 uses
HashMap (java.util.HashMap): 5 uses
Test (org.junit.jupiter.api.Test): 5 uses
Map (java.util.Map): 4 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 4 uses
RemoteResourceNotFoundException (org.apache.kafka.server.log.remote.storage.RemoteResourceNotFoundException): 4 uses
ArrayList (java.util.ArrayList): 3 uses
NavigableMap (java.util.NavigableMap): 3 uses
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 3 uses
ConcurrentMap (java.util.concurrent.ConcurrentMap): 3 uses
RemoteLogSegmentState (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState): 3 uses
Path (java.nio.file.Path): 2 uses
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 2 uses
MethodSource (org.junit.jupiter.params.provider.MethodSource): 2 uses
Seq (scala.collection.Seq): 2 uses
File (java.io.File): 1 use
Collections (java.util.Collections): 1 use