
Example 1 with RemoteLogSegmentMetadataUpdate

Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate in project kafka by apache.

From the class RemoteLogMetadataSerdeTest, the method testRemoteLogSegmentMetadataUpdateSerde:

@Test
public void testRemoteLogSegmentMetadataUpdateSerde() {
    RemoteLogSegmentMetadataUpdate remoteLogSegmentMetadataUpdate = createRemoteLogSegmentMetadataUpdate();
    doTestRemoteLogMetadataSerde(remoteLogSegmentMetadataUpdate);
}
Also used: RemoteLogSegmentMetadataUpdate (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate), Test (org.junit.jupiter.api.Test)
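Neither helper is shown in this excerpt. A hedged sketch of what they might look like, assuming the test fixtures used elsewhere on this page (TP0, time, BROKER_ID_1) and a round trip through RemoteLogMetadataSerde:

private RemoteLogSegmentMetadataUpdate createRemoteLogSegmentMetadataUpdate() {
    // Build an update that marks a fresh (illustrative) segment of TP0 as successfully copied.
    RemoteLogSegmentId segmentId = new RemoteLogSegmentId(TP0, Uuid.randomUuid());
    return new RemoteLogSegmentMetadataUpdate(segmentId, time.milliseconds(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, BROKER_ID_1);
}

private void doTestRemoteLogMetadataSerde(RemoteLogMetadata remoteLogMetadata) {
    // Serialize the metadata and verify that deserializing the bytes yields an equal object.
    RemoteLogMetadataSerde serde = new RemoteLogMetadataSerde();
    byte[] serializedBytes = serde.serialize(remoteLogMetadata);
    Assertions.assertEquals(remoteLogMetadata, serde.deserialize(serializedBytes));
}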

Example 2 with RemoteLogSegmentMetadataUpdate

Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate in project kafka by apache.

From the class RemoteLogSegmentLifecycleTest, the method testRemoteLogSegmentLifeCycle:

@ParameterizedTest(name = "remoteLogSegmentLifecycleManager = {0}")
@MethodSource("remoteLogSegmentLifecycleManagers")
public void testRemoteLogSegmentLifeCycle(RemoteLogSegmentLifecycleManager remoteLogSegmentLifecycleManager) throws Exception {
    try {
        remoteLogSegmentLifecycleManager.initialize(topicIdPartition);
        // segment 0
        // offsets: [0-100]
        // leader epochs (0,0), (1,20), (2,80)
        Map<Integer, Long> segment0LeaderEpochs = new HashMap<>();
        segment0LeaderEpochs.put(0, 0L);
        segment0LeaderEpochs.put(1, 20L);
        segment0LeaderEpochs.put(2, 80L);
        RemoteLogSegmentId segment0Id = new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid());
        RemoteLogSegmentMetadata segment0Metadata = new RemoteLogSegmentMetadata(segment0Id, 0L, 100L, -1L, BROKER_ID_0, time.milliseconds(), SEG_SIZE, segment0LeaderEpochs);
        remoteLogSegmentLifecycleManager.addRemoteLogSegmentMetadata(segment0Metadata);
        // The lookup should return nothing: the segment is still being copied, and it is not
        // considered available until it reaches RemoteLogSegmentState.COPY_SEGMENT_FINISHED.
        Assertions.assertFalse(remoteLogSegmentLifecycleManager.remoteLogSegmentMetadata(40, 1).isPresent());
        // These leader epochs must not be considered by the highestOffsetForEpoch API while the segment is still being copied.
        Stream.of(0, 1, 2).forEach(epoch -> {
            try {
                Assertions.assertFalse(remoteLogSegmentLifecycleManager.highestOffsetForEpoch(epoch).isPresent());
            } catch (RemoteStorageException e) {
                Assertions.fail(e);
            }
        });
        RemoteLogSegmentMetadataUpdate segment0Update = new RemoteLogSegmentMetadataUpdate(segment0Id, time.milliseconds(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, BROKER_ID_1);
        remoteLogSegmentLifecycleManager.updateRemoteLogSegmentMetadata(segment0Update);
        RemoteLogSegmentMetadata expectedSegment0Metadata = segment0Metadata.createWithUpdates(segment0Update);
        // segment 1
        // offsets: [101 - 200]
        // no leadership changes within this segment
        // leader epochs (2, 101)
        Map<Integer, Long> segment1LeaderEpochs = Collections.singletonMap(2, 101L);
        RemoteLogSegmentMetadata segment1Metadata = createSegmentUpdateWithState(remoteLogSegmentLifecycleManager, segment1LeaderEpochs, 101L, 200L, RemoteLogSegmentState.COPY_SEGMENT_FINISHED);
        // segment 2
        // offsets: [201 - 300]
        // moved to epoch 3 in between
        // leader epochs (2, 201), (3, 240)
        Map<Integer, Long> segment2LeaderEpochs = new HashMap<>();
        segment2LeaderEpochs.put(2, 201L);
        segment2LeaderEpochs.put(3, 240L);
        RemoteLogSegmentMetadata segment2Metadata = createSegmentUpdateWithState(remoteLogSegmentLifecycleManager, segment2LeaderEpochs, 201L, 300L, RemoteLogSegmentState.COPY_SEGMENT_FINISHED);
        // segment 3
        // offsets: [250 - 400]
        // leader epochs (3, 250), (4, 370)
        Map<Integer, Long> segment3LeaderEpochs = new HashMap<>();
        segment3LeaderEpochs.put(3, 250L);
        segment3LeaderEpochs.put(4, 370L);
        RemoteLogSegmentMetadata segment3Metadata = createSegmentUpdateWithState(remoteLogSegmentLifecycleManager, segment3LeaderEpochs, 250L, 400L, RemoteLogSegmentState.COPY_SEGMENT_FINISHED);
        // ////////////////////////////////////////////////////////////////////////////////////////
        // Four segments are added with different boundaries and leader epochs.
        // Search remoteLogSegmentMetadata(leaderEpoch, offset) for different
        // epochs and offsets.
        // ////////////////////////////////////////////////////////////////////////////////////////
        HashMap<EpochOffset, RemoteLogSegmentMetadata> expectedEpochOffsetToSegmentMetadata = new HashMap<>();
        // Existing metadata entries.
        expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(1, 40), expectedSegment0Metadata);
        expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(2, 110), segment1Metadata);
        expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(3, 240), segment2Metadata);
        expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(3, 250), segment3Metadata);
        expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(4, 375), segment3Metadata);
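        // Note the overlap between segment 2 (offsets [201, 300]) and segment 3 (offsets [250, 400]):
        // for epoch 3, offset 240 resolves to segment 2 while offset 250 resolves to segment 3, because
        // lookups resolve to the segment whose start offset for that epoch is the highest one not
        // exceeding the requested offset.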
        // Non-existent metadata entries.
        // Offset 110 exists in segment 1, but only under leader epoch 2, so a lookup with epoch 1 should find nothing.
        expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(1, 110), null);
        // Search for the non-existent offset 401 with epoch 4.
        expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(4, 401), null);
        // Search for the non-existent epoch 5.
        expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(5, 301), null);
        for (Map.Entry<EpochOffset, RemoteLogSegmentMetadata> entry : expectedEpochOffsetToSegmentMetadata.entrySet()) {
            EpochOffset epochOffset = entry.getKey();
            Optional<RemoteLogSegmentMetadata> segmentMetadata = remoteLogSegmentLifecycleManager.remoteLogSegmentMetadata(epochOffset.epoch, epochOffset.offset);
            RemoteLogSegmentMetadata expectedSegmentMetadata = entry.getValue();
            log.debug("Searching for {} , result: {}, expected: {} ", epochOffset, segmentMetadata, expectedSegmentMetadata);
            if (expectedSegmentMetadata != null) {
                Assertions.assertEquals(Optional.of(expectedSegmentMetadata), segmentMetadata);
            } else {
                Assertions.assertFalse(segmentMetadata.isPresent());
            }
        }
        // Update the segment state to DELETE_SEGMENT_STARTED.
        // The segment should no longer be returned when we search for it.
        remoteLogSegmentLifecycleManager.updateRemoteLogSegmentMetadata(new RemoteLogSegmentMetadataUpdate(expectedSegment0Metadata.remoteLogSegmentId(), time.milliseconds(), RemoteLogSegmentState.DELETE_SEGMENT_STARTED, BROKER_ID_1));
        Assertions.assertFalse(remoteLogSegmentLifecycleManager.remoteLogSegmentMetadata(0, 10).isPresent());
        // Update the segment state to DELETE_SEGMENT_FINISHED.
        // The segment should still not be returned when we search for it.
        remoteLogSegmentLifecycleManager.updateRemoteLogSegmentMetadata(new RemoteLogSegmentMetadataUpdate(expectedSegment0Metadata.remoteLogSegmentId(), time.milliseconds(), RemoteLogSegmentState.DELETE_SEGMENT_FINISHED, BROKER_ID_1));
        Assertions.assertFalse(remoteLogSegmentLifecycleManager.remoteLogSegmentMetadata(0, 10).isPresent());
        // ////////////////////////////////////////////////////////////////////////////////////////
        // Search highestOffsetForEpoch(leaderEpoch) for all the leader epochs
        // ////////////////////////////////////////////////////////////////////////////////////////
        Map<Integer, Long> expectedEpochToHighestOffset = new HashMap<>();
        expectedEpochToHighestOffset.put(0, 19L);
        expectedEpochToHighestOffset.put(1, 79L);
        expectedEpochToHighestOffset.put(2, 239L);
        expectedEpochToHighestOffset.put(3, 369L);
        expectedEpochToHighestOffset.put(4, 400L);
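        // Derivation of the expected values from the segment boundaries above:
        //   epoch 0 -> 19  (epoch 1 starts at offset 20 in segment 0)
        //   epoch 1 -> 79  (epoch 2 starts at offset 80 in segment 0)
        //   epoch 2 -> 239 (spans segments 0-2; epoch 3 starts at offset 240)
        //   epoch 3 -> 369 (spans segments 2-3; epoch 4 starts at offset 370)
        //   epoch 4 -> 400 (end offset of segment 3)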
        for (Map.Entry<Integer, Long> entry : expectedEpochToHighestOffset.entrySet()) {
            Integer epoch = entry.getKey();
            Long expectedOffset = entry.getValue();
            Optional<Long> offset = remoteLogSegmentLifecycleManager.highestOffsetForEpoch(epoch);
            log.debug("Fetching highest offset for epoch: {} , returned: {} , expected: {}", epoch, offset, expectedOffset);
            Assertions.assertEquals(Optional.of(expectedOffset), offset);
        }
        // Search for a non-existent leader epoch.
        Optional<Long> highestOffsetForEpoch5 = remoteLogSegmentLifecycleManager.highestOffsetForEpoch(5);
        Assertions.assertFalse(highestOffsetForEpoch5.isPresent());
    } finally {
        Utils.closeQuietly(remoteLogSegmentLifecycleManager, "RemoteLogSegmentLifecycleManager");
    }
}
Also used: HashMap (java.util.HashMap), RemoteStorageException (org.apache.kafka.server.log.remote.storage.RemoteStorageException), RemoteLogSegmentMetadataUpdate (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate), RemoteLogSegmentId (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId), Map (java.util.Map), RemoteLogSegmentMetadata (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), MethodSource (org.junit.jupiter.params.provider.MethodSource)

Example 3 with RemoteLogSegmentMetadataUpdate

Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate in project kafka by apache.

From the class FileBasedRemoteLogMetadataCacheTest, the method testFileBasedRemoteLogMetadataCacheWithUnreferencedSegments:

@Test
public void testFileBasedRemoteLogMetadataCacheWithUnreferencedSegments() throws Exception {
    TopicIdPartition partition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("test", 0));
    int brokerId = 0;
    Path path = TestUtils.tempDirectory().toPath();
    // Create file based metadata cache.
    FileBasedRemoteLogMetadataCache cache = new FileBasedRemoteLogMetadataCache(partition, path);
    // Add a segment with start offset 0 for leader epoch 0.
    RemoteLogSegmentId segmentId1 = new RemoteLogSegmentId(partition, Uuid.randomUuid());
    RemoteLogSegmentMetadata metadata1 = new RemoteLogSegmentMetadata(segmentId1, 0, 100, System.currentTimeMillis(), brokerId, System.currentTimeMillis(), 1024 * 1024, Collections.singletonMap(0, 0L));
    cache.addCopyInProgressSegment(metadata1);
    RemoteLogSegmentMetadataUpdate metadataUpdate1 = new RemoteLogSegmentMetadataUpdate(segmentId1, System.currentTimeMillis(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, brokerId);
    cache.updateRemoteLogSegmentMetadata(metadataUpdate1);
    Optional<RemoteLogSegmentMetadata> receivedMetadata = cache.remoteLogSegmentMetadata(0, 0L);
    assertTrue(receivedMetadata.isPresent());
    assertEquals(metadata1.createWithUpdates(metadataUpdate1), receivedMetadata.get());
    // Add a new segment with start offset 0 for leader epoch 0, which should replace the earlier segment.
    RemoteLogSegmentId segmentId2 = new RemoteLogSegmentId(partition, Uuid.randomUuid());
    RemoteLogSegmentMetadata metadata2 = new RemoteLogSegmentMetadata(segmentId2, 0, 900, System.currentTimeMillis(), brokerId, System.currentTimeMillis(), 1024 * 1024, Collections.singletonMap(0, 0L));
    cache.addCopyInProgressSegment(metadata2);
    RemoteLogSegmentMetadataUpdate metadataUpdate2 = new RemoteLogSegmentMetadataUpdate(segmentId2, System.currentTimeMillis(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, brokerId);
    cache.updateRemoteLogSegmentMetadata(metadataUpdate2);
    // Fetch the segment for leader epoch 0 and start offset 0; it should be the newly added segment.
    Optional<RemoteLogSegmentMetadata> receivedMetadata2 = cache.remoteLogSegmentMetadata(0, 0L);
    assertTrue(receivedMetadata2.isPresent());
    assertEquals(metadata2.createWithUpdates(metadataUpdate2), receivedMetadata2.get());
    // Flush the cache to the snapshot file (metadata partition 0, offset 0).
    cache.flushToFile(0, 0L);
    // Create a new cache that loads from the stored path.
    FileBasedRemoteLogMetadataCache loadedCache = new FileBasedRemoteLogMetadataCache(partition, path);
    // Fetch the segment for leader epoch 0 and start offset 0; it should be metadata2.
    // This verifies that metadata ordering is preserved after loading from the stored snapshots.
    Optional<RemoteLogSegmentMetadata> receivedMetadataAfterLoad = loadedCache.remoteLogSegmentMetadata(0, 0L);
    assertTrue(receivedMetadataAfterLoad.isPresent());
    assertEquals(metadata2.createWithUpdates(metadataUpdate2), receivedMetadataAfterLoad.get());
}
Also used: Path (java.nio.file.Path), RemoteLogSegmentMetadataUpdate (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate), TopicPartition (org.apache.kafka.common.TopicPartition), TopicIdPartition (org.apache.kafka.common.TopicIdPartition), RemoteLogSegmentId (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId), RemoteLogSegmentMetadata (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata), Test (org.junit.jupiter.api.Test)
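A note on the test name: once metadata2 takes over the epoch-0 mapping at start offset 0, the first segment (metadata1) is no longer reachable through leader-epoch lookups; presumably this is what the test name calls an unreferenced segment. The flush/reload round trip then verifies that such segments do not disturb the ordering of the referenced metadata when the snapshot is loaded back.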

Example 4 with RemoteLogSegmentMetadataUpdate

Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate in project kafka by apache.

From the class RemoteLogMetadataCacheTest, the method createSegmentUpdateWithState:

private RemoteLogSegmentMetadata createSegmentUpdateWithState(RemoteLogMetadataCache cache, Map<Integer, Long> segmentLeaderEpochs, long startOffset, long endOffset, RemoteLogSegmentState state) throws RemoteResourceNotFoundException {
    RemoteLogSegmentId segmentId = new RemoteLogSegmentId(TP0, Uuid.randomUuid());
    RemoteLogSegmentMetadata segmentMetadata = new RemoteLogSegmentMetadata(segmentId, startOffset, endOffset, -1L, BROKER_ID_0, time.milliseconds(), SEG_SIZE, segmentLeaderEpochs);
    // Register the segment; newly added segments always start in COPY_SEGMENT_STARTED.
    cache.addCopyInProgressSegment(segmentMetadata);
    // Move the segment to the requested state and return the metadata with the update applied.
    RemoteLogSegmentMetadataUpdate segMetadataUpdate = new RemoteLogSegmentMetadataUpdate(segmentId, time.milliseconds(), state, BROKER_ID_1);
    cache.updateRemoteLogSegmentMetadata(segMetadataUpdate);
    return segmentMetadata.createWithUpdates(segMetadataUpdate);
}
Also used: RemoteLogSegmentMetadataUpdate (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate), RemoteLogSegmentId (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId), RemoteLogSegmentMetadata (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata)
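For context, a hedged sketch of how this helper is typically invoked (it mirrors the call in Example 5 below; the leader-epoch map and offsets here are illustrative):

RemoteLogMetadataCache cache = new RemoteLogMetadataCache();
// Create a segment covering offsets [0, 100] under leader epoch 0 and move it to COPY_SEGMENT_FINISHED.
RemoteLogSegmentMetadata segmentMetadata = createSegmentUpdateWithState(cache, Collections.singletonMap(0, 0L), 0L, 100L, RemoteLogSegmentState.COPY_SEGMENT_FINISHED);
// The returned metadata has the update applied, and the segment is now visible to lookups.
Assertions.assertTrue(cache.remoteLogSegmentMetadata(0, 50L).isPresent());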

Example 5 with RemoteLogSegmentMetadataUpdate

Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate in project kafka by apache.

From the class RemoteLogMetadataCacheTest, the method testAPIsWithInvalidArgs:

@Test
public void testAPIsWithInvalidArgs() {
    RemoteLogMetadataCache cache = new RemoteLogMetadataCache();
    Assertions.assertThrows(NullPointerException.class, () -> cache.addCopyInProgressSegment(null));
    Assertions.assertThrows(NullPointerException.class, () -> cache.updateRemoteLogSegmentMetadata(null));
    // Check invalid state arguments to the addCopyInProgressSegment method: only COPY_SEGMENT_STARTED segments are accepted.
    for (RemoteLogSegmentState state : RemoteLogSegmentState.values()) {
        if (state != RemoteLogSegmentState.COPY_SEGMENT_STARTED) {
            RemoteLogSegmentMetadata segmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(TP0, Uuid.randomUuid()), 0, 100L, -1L, BROKER_ID_0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
            RemoteLogSegmentMetadata updatedMetadata = segmentMetadata.createWithUpdates(new RemoteLogSegmentMetadataUpdate(segmentMetadata.remoteLogSegmentId(), time.milliseconds(), state, BROKER_ID_1));
            Assertions.assertThrows(IllegalArgumentException.class, () -> cache.addCopyInProgressSegment(updatedMetadata));
        }
    }
    // Check updating a non-existent segment id.
    Assertions.assertThrows(RemoteResourceNotFoundException.class, () -> {
        RemoteLogSegmentId nonExistingId = new RemoteLogSegmentId(TP0, Uuid.randomUuid());
        cache.updateRemoteLogSegmentMetadata(new RemoteLogSegmentMetadataUpdate(nonExistingId, time.milliseconds(), RemoteLogSegmentState.DELETE_SEGMENT_STARTED, BROKER_ID_1));
    });
    // Check an invalid state transition: COPY_SEGMENT_FINISHED cannot move directly to DELETE_SEGMENT_FINISHED without passing through DELETE_SEGMENT_STARTED.
    Assertions.assertThrows(IllegalStateException.class, () -> {
        RemoteLogSegmentMetadata segmentMetadata = createSegmentUpdateWithState(cache, Collections.singletonMap(0, 0L), 0, 100, RemoteLogSegmentState.COPY_SEGMENT_FINISHED);
        cache.updateRemoteLogSegmentMetadata(new RemoteLogSegmentMetadataUpdate(segmentMetadata.remoteLogSegmentId(), time.milliseconds(), RemoteLogSegmentState.DELETE_SEGMENT_FINISHED, BROKER_ID_1));
    });
}
Also used: RemoteLogSegmentMetadataUpdate (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate), RemoteLogSegmentState (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState), RemoteLogSegmentId (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId), RemoteLogSegmentMetadata (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata), Test (org.junit.jupiter.api.Test)
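Taken together, the assertions in Examples 2 and 5 imply the segment-state machine sketched below. This is reconstructed from the tests on this page rather than quoted from Kafka (java.util.Map and java.util.Set assumed imported); in particular, the COPY_SEGMENT_STARTED -> DELETE_SEGMENT_STARTED edge (deleting a segment whose copy never finished) is an assumption.

// Allowed lifecycle transitions, as exercised by the tests above (illustrative only).
static final Map<RemoteLogSegmentState, Set<RemoteLogSegmentState>> ALLOWED_TRANSITIONS = Map.of(
        RemoteLogSegmentState.COPY_SEGMENT_STARTED, Set.of(RemoteLogSegmentState.COPY_SEGMENT_FINISHED, RemoteLogSegmentState.DELETE_SEGMENT_STARTED),
        RemoteLogSegmentState.COPY_SEGMENT_FINISHED, Set.of(RemoteLogSegmentState.DELETE_SEGMENT_STARTED),
        RemoteLogSegmentState.DELETE_SEGMENT_STARTED, Set.of(RemoteLogSegmentState.DELETE_SEGMENT_FINISHED));
// The transition rejected in the test above:
// ALLOWED_TRANSITIONS.get(RemoteLogSegmentState.COPY_SEGMENT_FINISHED).contains(RemoteLogSegmentState.DELETE_SEGMENT_FINISHED) evaluates to false.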

Aggregations

RemoteLogSegmentMetadataUpdate (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate): 10 usages
RemoteLogSegmentId (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId): 8 usages
RemoteLogSegmentMetadata (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata): 7 usages
Test (org.junit.jupiter.api.Test): 4 usages
HashMap (java.util.HashMap): 2 usages
TopicIdPartition (org.apache.kafka.common.TopicIdPartition): 2 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 2 usages
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 2 usages
MethodSource (org.junit.jupiter.params.provider.MethodSource): 2 usages
Path (java.nio.file.Path): 1 usage
Map (java.util.Map): 1 usage
ApiMessageAndVersion (org.apache.kafka.server.common.ApiMessageAndVersion): 1 usage
RemoteLogSegmentMetadataRecord (org.apache.kafka.server.log.remote.metadata.storage.generated.RemoteLogSegmentMetadataRecord): 1 usage
RemoteLogSegmentMetadataUpdateRecord (org.apache.kafka.server.log.remote.metadata.storage.generated.RemoteLogSegmentMetadataUpdateRecord): 1 usage
RemoteLogSegmentMetadataUpdateTransform (org.apache.kafka.server.log.remote.metadata.storage.serialization.RemoteLogSegmentMetadataUpdateTransform): 1 usage
RemoteLogSegmentState (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState): 1 usage
RemoteStorageException (org.apache.kafka.server.log.remote.storage.RemoteStorageException): 1 usage