Example 16 with RemoteLogSegmentMetadata

use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata in project kafka by apache.

the class RemoteLogSegmentLifecycleTest method testCacheSegmentWithCopySegmentFinishedState.

@ParameterizedTest(name = "remoteLogSegmentLifecycleManager = {0}")
@MethodSource("remoteLogSegmentLifecycleManagers")
public void testCacheSegmentWithCopySegmentFinishedState(RemoteLogSegmentLifecycleManager remoteLogSegmentLifecycleManager) throws Exception {
    try {
        remoteLogSegmentLifecycleManager.initialize(topicIdPartition);
        // Create a segment and move it to the COPY_SEGMENT_FINISHED state, then verify both searching
        // for that segment and listing the segments.
        RemoteLogSegmentMetadata segmentMetadata = createSegmentUpdateWithState(remoteLogSegmentLifecycleManager, Collections.singletonMap(0, 101L), 101L, 200L, RemoteLogSegmentState.COPY_SEGMENT_FINISHED);
        // Search should return the above segment.
        Optional<RemoteLogSegmentMetadata> segMetadataForOffset150 = remoteLogSegmentLifecycleManager.remoteLogSegmentMetadata(0, 150);
        Assertions.assertEquals(Optional.of(segmentMetadata), segMetadataForOffset150);
        // cache.listRemoteLogSegments should contain the above segment.
        checkListSegments(remoteLogSegmentLifecycleManager, 0, segmentMetadata);
    } finally {
        Utils.closeQuietly(remoteLogSegmentLifecycleManager, "RemoteLogSegmentLifecycleManager");
    }
}
Also used : RemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MethodSource(org.junit.jupiter.params.provider.MethodSource)
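
The createSegmentUpdateWithState helper is not shown on this page. Below is a minimal sketch of what it plausibly does, reusing the RemoteLogSegmentMetadata and RemoteLogSegmentMetadataUpdate constructors seen in Examples 17 and 20; the add/update methods on RemoteLogSegmentLifecycleManager and the time and SEG_SIZE test fixtures are assumptions, not confirmed by this page.

// Hypothetical reconstruction: create a segment in the initial COPY_SEGMENT_STARTED state,
// then move it to the requested target state and return the updated metadata.
private RemoteLogSegmentMetadata createSegmentUpdateWithState(RemoteLogSegmentLifecycleManager manager,
        Map<Integer, Long> segmentLeaderEpochs, long startOffset, long endOffset,
        RemoteLogSegmentState targetState) throws Exception {
    RemoteLogSegmentId segmentId = new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid());
    RemoteLogSegmentMetadata started = new RemoteLogSegmentMetadata(segmentId, startOffset, endOffset,
            -1L, 0, time.milliseconds(), SEG_SIZE, segmentLeaderEpochs);
    manager.addRemoteLogSegmentMetadata(started);
    // Apply the state transition so searches and listings can see the segment.
    RemoteLogSegmentMetadataUpdate update =
            new RemoteLogSegmentMetadataUpdate(segmentId, time.milliseconds(), targetState, 0);
    manager.updateRemoteLogSegmentMetadata(update);
    return started.createWithUpdates(update);
}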

Example 17 with RemoteLogSegmentMetadata

use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata in project kafka by apache.

the class TopicBasedRemoteLogMetadataManagerRestartTest method testRLMMAPIsAfterRestart.

@Test
public void testRLMMAPIsAfterRestart() throws Exception {
    // Create topics.
    String leaderTopic = "new-leader";
    HashMap<Object, Seq<Object>> assignedLeaderTopicReplicas = new HashMap<>();
    List<Object> leaderTopicReplicas = new ArrayList<>();
    // Set broker id 0 as the first entry which is taken as the leader.
    leaderTopicReplicas.add(0);
    leaderTopicReplicas.add(1);
    leaderTopicReplicas.add(2);
    assignedLeaderTopicReplicas.put(0, JavaConverters.asScalaBuffer(leaderTopicReplicas));
    remoteLogMetadataManagerHarness.createTopicWithAssignment(leaderTopic, JavaConverters.mapAsScalaMap(assignedLeaderTopicReplicas), remoteLogMetadataManagerHarness.listenerName());
    String followerTopic = "new-follower";
    HashMap<Object, Seq<Object>> assignedFollowerTopicReplicas = new HashMap<>();
    List<Object> followerTopicReplicas = new ArrayList<>();
    // Set broker id 1 as the first entry which is taken as the leader.
    followerTopicReplicas.add(1);
    followerTopicReplicas.add(2);
    followerTopicReplicas.add(0);
    assignedFollowerTopicReplicas.put(0, JavaConverters.asScalaBuffer(followerTopicReplicas));
    remoteLogMetadataManagerHarness.createTopicWithAssignment(followerTopic, JavaConverters.mapAsScalaMap(assignedFollowerTopicReplicas), remoteLogMetadataManagerHarness.listenerName());
    final TopicIdPartition leaderTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(leaderTopic, 0));
    final TopicIdPartition followerTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(followerTopic, 0));
    // Register these partitions to RLMM.
    topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition));
    // Add segments for these partitions, but they are not available yet as these partitions have not been subscribed.
    RemoteLogSegmentMetadata leaderSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata).get();
    RemoteLogSegmentMetadata followerSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(followerTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(followerSegmentMetadata).get();
    // Stop TopicBasedRemoteLogMetadataManager only.
    stopTopicBasedRemoteLogMetadataManagerHarness();
    // Start TopicBasedRemoteLogMetadataManager, but do not start the consumer thread, so that we can
    // check whether the stored metadata is loaded successfully.
    startTopicBasedRemoteLogMetadataManagerHarness(false);
    // Register these partitions to RLMM, which loads the respective metadata snapshots.
    topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition));
    // Check for the stored entries from the earlier run.
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Collections.singleton(leaderSegmentMetadata).iterator(), topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)));
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Collections.singleton(followerSegmentMetadata).iterator(), topicBasedRlmm().listRemoteLogSegments(followerTopicIdPartition)));
    // Check that the checkpointed consumer offsets were stored.
    Path committedOffsetsPath = new File(logDir, COMMITTED_OFFSETS_FILE_NAME).toPath();
    Assertions.assertTrue(committedOffsetsPath.toFile().exists());
    CommittedOffsetsFile committedOffsetsFile = new CommittedOffsetsFile(committedOffsetsPath.toFile());
    int metadataPartition1 = topicBasedRlmm().metadataPartition(leaderTopicIdPartition);
    int metadataPartition2 = topicBasedRlmm().metadataPartition(followerTopicIdPartition);
    Optional<Long> receivedOffsetForPartition1 = topicBasedRlmm().receivedOffsetForPartition(metadataPartition1);
    Optional<Long> receivedOffsetForPartition2 = topicBasedRlmm().receivedOffsetForPartition(metadataPartition2);
    Assertions.assertTrue(receivedOffsetForPartition1.isPresent());
    Assertions.assertTrue(receivedOffsetForPartition2.isPresent());
    // Make sure these offsets are at least 0.
    Assertions.assertTrue(receivedOffsetForPartition1.get() >= 0);
    Assertions.assertTrue(receivedOffsetForPartition2.get() >= 0);
    // Check that the stored entries and the offsets set on the consumer are the same.
    Map<Integer, Long> partitionToOffset = committedOffsetsFile.readEntries();
    Assertions.assertEquals(partitionToOffset.get(metadataPartition1), receivedOffsetForPartition1.get());
    Assertions.assertEquals(partitionToOffset.get(metadataPartition2), receivedOffsetForPartition2.get());
    // Start the consumer thread.
    topicBasedRlmm().startConsumerThread();
    // Add one more segment.
    RemoteLogSegmentMetadata leaderSegmentMetadata2 = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), 101, 200, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 101L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata2).get();
    // Check that both the stored segment and recently added segment are available.
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Arrays.asList(leaderSegmentMetadata, leaderSegmentMetadata2).iterator(), topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)));
}
Also used : Path(java.nio.file.Path) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) TopicPartition(org.apache.kafka.common.TopicPartition) RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId) File(java.io.File) Seq(scala.collection.Seq) RemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata) Test(org.junit.jupiter.api.Test)
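
TestUtils.sameElementsWithoutOrder, used twice above, is a Kafka test helper whose body is not shown here. A self-contained sketch of the order-insensitive comparison it performs; the name and parameters come from the call sites, while the body below is an assumption:

// Order-insensitive, multiset-style comparison of two iterators.
public static <T> boolean sameElementsWithoutOrder(Iterator<T> expected, Iterator<T> actual) {
    List<T> remaining = new ArrayList<>();
    expected.forEachRemaining(remaining::add);
    List<T> actualList = new ArrayList<>();
    actual.forEachRemaining(actualList::add);
    for (T element : actualList) {
        // Each actual element must consume exactly one expected element,
        // so duplicates are counted correctly.
        if (!remaining.remove(element)) {
            return false;
        }
    }
    return remaining.isEmpty();
}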

Example 18 with RemoteLogSegmentMetadata

use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata in project kafka by apache.

the class RemoteLogLeaderEpochState method collectConvertedIdToMetadata.

private void collectConvertedIdToMetadata(Collection<RemoteLogSegmentId> segmentIds, Map<RemoteLogSegmentId, RemoteLogSegmentMetadata> idToSegmentMetadata, Collection<RemoteLogSegmentMetadata> result) throws RemoteResourceNotFoundException {
    for (RemoteLogSegmentId id : segmentIds) {
        RemoteLogSegmentMetadata metadata = idToSegmentMetadata.get(id);
        if (metadata == null) {
            throw new RemoteResourceNotFoundException("No remote log segment metadata found for: " + id);
        }
        result.add(metadata);
    }
}
Also used : RemoteResourceNotFoundException(org.apache.kafka.server.log.remote.storage.RemoteResourceNotFoundException) RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId) RemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata)
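
A plausible caller inside the same class, showing why the helper takes a result collection: it lets one method gather metadata from several id sources in a single pass. The offsetToId and unreferencedSegmentIds field names are assumptions for illustration.

// Hypothetical caller: collect metadata for every segment id this epoch state knows about.
Collection<RemoteLogSegmentMetadata> listAllRemoteLogSegments(
        Map<RemoteLogSegmentId, RemoteLogSegmentMetadata> idToSegmentMetadata)
        throws RemoteResourceNotFoundException {
    List<RemoteLogSegmentMetadata> result =
            new ArrayList<>(offsetToId.size() + unreferencedSegmentIds.size());
    collectConvertedIdToMetadata(offsetToId.values(), idToSegmentMetadata, result);
    collectConvertedIdToMetadata(unreferencedSegmentIds, idToSegmentMetadata, result);
    return result;
}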

Example 19 with RemoteLogSegmentMetadata

use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata in project kafka by apache.

the class RemoteLogMetadataCache method addCopyInProgressSegment.

/**
 * This method tracks the given remote segment as not yet available for reads. It does not add the segment
 * leader epoch offset mapping until this segment reaches COPY_SEGMENT_FINISHED state.
 *
 * @param remoteLogSegmentMetadata RemoteLogSegmentMetadata instance
 */
public void addCopyInProgressSegment(RemoteLogSegmentMetadata remoteLogSegmentMetadata) {
    log.debug("Adding to in-progress state: [{}]", remoteLogSegmentMetadata);
    Objects.requireNonNull(remoteLogSegmentMetadata, "remoteLogSegmentMetadata can not be null");
    // This method is only allowed to add a segment with the initial state (COPY_SEGMENT_STARTED),
    // but not to update the existing remote log segment metadata.
    if (remoteLogSegmentMetadata.state() != RemoteLogSegmentState.COPY_SEGMENT_STARTED) {
        throw new IllegalArgumentException("Given remoteLogSegmentMetadata:" + remoteLogSegmentMetadata + " should have state as " + RemoteLogSegmentState.COPY_SEGMENT_STARTED + " but it contains state as: " + remoteLogSegmentMetadata.state());
    }
    RemoteLogSegmentId remoteLogSegmentId = remoteLogSegmentMetadata.remoteLogSegmentId();
    RemoteLogSegmentMetadata existingMetadata = idToSegmentMetadata.get(remoteLogSegmentId);
    checkStateTransition(existingMetadata != null ? existingMetadata.state() : null, remoteLogSegmentMetadata.state());
    for (Integer epoch : remoteLogSegmentMetadata.segmentLeaderEpochs().keySet()) {
        leaderEpochEntries.computeIfAbsent(epoch, leaderEpoch -> new RemoteLogLeaderEpochState()).handleSegmentWithCopySegmentStartedState(remoteLogSegmentId);
    }
    idToSegmentMetadata.put(remoteLogSegmentId, remoteLogSegmentMetadata);
}
Also used : RemoteLogSegmentMetadataUpdate(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate) RemoteLogSegmentState(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) LoggerFactory(org.slf4j.LoggerFactory) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) NavigableMap(java.util.NavigableMap) RemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata) ConcurrentMap(java.util.concurrent.ConcurrentMap) Objects(java.util.Objects) Map(java.util.Map) Optional(java.util.Optional) Collections(java.util.Collections) RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId) RemoteResourceNotFoundException(org.apache.kafka.server.log.remote.storage.RemoteResourceNotFoundException)
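
A usage sketch tying this method to the lifecycle its javadoc describes: the segment is tracked first as an in-progress copy, then made visible to reads by a follow-up state update. It assumes the RemoteLogSegmentMetadata constructor defaults to COPY_SEGMENT_STARTED and that the cache exposes an updateRemoteLogSegmentMetadata(RemoteLogSegmentMetadataUpdate) method; topicIdPartition, time, and SEG_SIZE stand in for caller-provided values.

// Hypothetical lifecycle: track an in-progress copy, then publish it for reads.
RemoteLogMetadataCache cache = new RemoteLogMetadataCache();
RemoteLogSegmentId segmentId = new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid());
RemoteLogSegmentMetadata started = new RemoteLogSegmentMetadata(segmentId, 0, 100, -1L, 0,
        time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
// Allowed: the metadata carries the initial COPY_SEGMENT_STARTED state.
cache.addCopyInProgressSegment(started);
// After the copy completes, the leader epoch offset mapping becomes visible to reads.
cache.updateRemoteLogSegmentMetadata(new RemoteLogSegmentMetadataUpdate(
        segmentId, time.milliseconds(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, 0));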

Example 20 with RemoteLogSegmentMetadata

use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata in project kafka by apache.

the class RemoteLogSegmentMetadataTransform method fromApiMessageAndVersion.

@Override
public RemoteLogSegmentMetadata fromApiMessageAndVersion(ApiMessageAndVersion apiMessageAndVersion) {
    RemoteLogSegmentMetadataRecord record = (RemoteLogSegmentMetadataRecord) apiMessageAndVersion.message();
    RemoteLogSegmentId remoteLogSegmentId = buildRemoteLogSegmentId(record.remoteLogSegmentId());
    Map<Integer, Long> segmentLeaderEpochs = new HashMap<>();
    for (RemoteLogSegmentMetadataRecord.SegmentLeaderEpochEntry segmentLeaderEpoch : record.segmentLeaderEpochs()) {
        segmentLeaderEpochs.put(segmentLeaderEpoch.leaderEpoch(), segmentLeaderEpoch.offset());
    }
    RemoteLogSegmentMetadata remoteLogSegmentMetadata = new RemoteLogSegmentMetadata(remoteLogSegmentId, record.startOffset(), record.endOffset(), record.maxTimestampMs(), record.brokerId(), record.eventTimestampMs(), record.segmentSizeInBytes(), segmentLeaderEpochs);
    RemoteLogSegmentMetadataUpdate rlsmUpdate = new RemoteLogSegmentMetadataUpdate(remoteLogSegmentId, record.eventTimestampMs(), RemoteLogSegmentState.forId(record.remoteLogSegmentState()), record.brokerId());
    return remoteLogSegmentMetadata.createWithUpdates(rlsmUpdate);
}
Also used : HashMap(java.util.HashMap) RemoteLogSegmentMetadataUpdate(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate) RemoteLogSegmentMetadataRecord(org.apache.kafka.server.log.remote.metadata.storage.generated.RemoteLogSegmentMetadataRecord) RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId) RemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata)
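
The transform's serializing counterpart is not shown on this page. A hedged round-trip sketch, assuming a matching toApiMessageAndVersion method exists on the same class and given an existing segmentMetadata instance:

// Hypothetical round trip: metadata -> ApiMessageAndVersion -> metadata.
RemoteLogSegmentMetadataTransform transform = new RemoteLogSegmentMetadataTransform();
ApiMessageAndVersion serialized = transform.toApiMessageAndVersion(segmentMetadata);
RemoteLogSegmentMetadata deserialized = transform.fromApiMessageAndVersion(serialized);
// createWithUpdates in fromApiMessageAndVersion reapplies the segment state,
// so the round trip should reproduce an equal metadata instance.
Assertions.assertEquals(segmentMetadata, deserialized);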

Aggregations

RemoteLogSegmentMetadata (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata): 20
RemoteLogSegmentId (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId): 14
RemoteLogSegmentMetadataUpdate (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate): 8
HashMap (java.util.HashMap): 6
Test (org.junit.jupiter.api.Test): 6
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 6
MethodSource (org.junit.jupiter.params.provider.MethodSource): 6
Map (java.util.Map): 3
TopicIdPartition (org.apache.kafka.common.TopicIdPartition): 3
TopicPartition (org.apache.kafka.common.TopicPartition): 3
RemoteLogSegmentState (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState): 3
RemoteResourceNotFoundException (org.apache.kafka.server.log.remote.storage.RemoteResourceNotFoundException): 3
Path (java.nio.file.Path): 2
ArrayList (java.util.ArrayList): 2
NavigableMap (java.util.NavigableMap): 2
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2
ConcurrentMap (java.util.concurrent.ConcurrentMap): 2
Seq (scala.collection.Seq): 2
File (java.io.File): 1
Collections (java.util.Collections): 1