Search in sources :

Example 6 with RemoteLogSegmentId

use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.

In class RemoteLogLeaderEpochState, method handleSegmentWithCopySegmentFinishedState.

void handleSegmentWithCopySegmentFinishedState(Long startOffset, RemoteLogSegmentId remoteLogSegmentId, Long leaderEpochEndOffset) {
    // Register the successfully copied segment in the offset mapping,
    // capturing whichever segment id previously occupied this start offset.
    RemoteLogSegmentId replacedId = offsetToId.put(startOffset, remoteLogSegmentId);
    // The segment is now referenced through the offset mapping, so drop it
    // from the unreferenced set.
    unreferencedSegmentIds.remove(remoteLogSegmentId);
    // The displaced segment (if any) lost its offset mapping and therefore
    // becomes unreferenced.
    if (replacedId != null) {
        unreferencedSegmentIds.add(replacedId);
    }
    // Track the highest end offset observed for this leader epoch.
    if (highestLogOffset == null || leaderEpochEndOffset > highestLogOffset) {
        highestLogOffset = leaderEpochEndOffset;
    }
}
Also used : RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId)

Example 7 with RemoteLogSegmentId

use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.

In class RemoteLogMetadataCache, method remoteLogSegmentMetadata.

/**
 * Looks up the {@link RemoteLogSegmentMetadata} covering the given {@code offset} for
 * {@code leaderEpoch}, considering only segments in
 * {@link RemoteLogSegmentState#COPY_SEGMENT_FINISHED} state; returns
 * {@link Optional#empty()} when no such segment contains the offset.
 *
 * @param leaderEpoch leader epoch for the given offset
 * @param offset      offset
 * @return the requested remote log segment metadata if it exists.
 */
public Optional<RemoteLogSegmentMetadata> remoteLogSegmentMetadata(int leaderEpoch, long offset) {
    RemoteLogLeaderEpochState epochState = leaderEpochEntries.get(leaderEpoch);
    if (epochState == null) {
        return Optional.empty();
    }
    // The floor entry is the segment whose start offset is closest at-or-below the target offset.
    RemoteLogSegmentId segmentId = epochState.floorEntry(offset);
    if (segmentId == null) {
        // The target offset precedes the earliest segment known for this epoch.
        return Optional.empty();
    }
    RemoteLogSegmentMetadata segmentMetadata = idToSegmentMetadata.get(segmentId);
    // Determine where this leader epoch ends within the segment:
    // one less than the next epoch's start offset when a later epoch exists in
    // this segment, otherwise the segment's own end offset.
    Map.Entry<Integer, Long> nextEpochEntry = segmentMetadata.segmentLeaderEpochs().higherEntry(leaderEpoch);
    long epochEndOffset;
    if (nextEpochEntry == null) {
        epochEndOffset = segmentMetadata.endOffset();
    } else {
        epochEndOffset = nextEpochEntry.getValue() - 1;
    }
    // The segment only answers for offsets up to the epoch's end offset.
    if (offset > epochEndOffset) {
        return Optional.empty();
    }
    return Optional.of(segmentMetadata);
}
Also used : RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) NavigableMap(java.util.NavigableMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) Map(java.util.Map) RemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata)

Example 8 with RemoteLogSegmentId

use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.

In class RemoteLogMetadataCache, method updateRemoteLogSegmentMetadata.

/**
 * Applies {@code metadataUpdate} to the existing metadata of the targeted remote log
 * segment, after validating that the requested state transition is legal.
 *
 * @param metadataUpdate the update to apply; must not be {@code null}
 * @throws RemoteResourceNotFoundException if no metadata exists for the segment id in the update
 * @throws IllegalArgumentException if the target state is COPY_SEGMENT_STARTED (updates
 *                                  may not move a segment back to that state) or unrecognized
 */
public void updateRemoteLogSegmentMetadata(RemoteLogSegmentMetadataUpdate metadataUpdate) throws RemoteResourceNotFoundException {
    // Validate the argument before any use of it, including the log statement below.
    Objects.requireNonNull(metadataUpdate, "metadataUpdate can not be null");
    log.debug("Updating remote log segment metadata: [{}]", metadataUpdate);
    RemoteLogSegmentState targetState = metadataUpdate.state();
    RemoteLogSegmentId remoteLogSegmentId = metadataUpdate.remoteLogSegmentId();
    RemoteLogSegmentMetadata existingMetadata = idToSegmentMetadata.get(remoteLogSegmentId);
    if (existingMetadata == null) {
        throw new RemoteResourceNotFoundException("No remote log segment metadata found for :" + remoteLogSegmentId);
    }
    // Check the state transition is allowed from the segment's current state.
    checkStateTransition(existingMetadata.state(), targetState);
    switch(targetState) {
        case COPY_SEGMENT_STARTED:
            // An update can never move a segment (back) into COPY_SEGMENT_STARTED;
            // that state is only set when the segment is first registered.
            throw new IllegalArgumentException("metadataUpdate: " + metadataUpdate + " with state " + RemoteLogSegmentState.COPY_SEGMENT_STARTED + " can not be updated");
        case COPY_SEGMENT_FINISHED:
            handleSegmentWithCopySegmentFinishedState(existingMetadata.createWithUpdates(metadataUpdate));
            break;
        case DELETE_SEGMENT_STARTED:
            handleSegmentWithDeleteSegmentStartedState(existingMetadata.createWithUpdates(metadataUpdate));
            break;
        case DELETE_SEGMENT_FINISHED:
            handleSegmentWithDeleteSegmentFinishedState(existingMetadata.createWithUpdates(metadataUpdate));
            break;
        default:
            throw new IllegalArgumentException("Metadata with the state " + targetState + " is not supported");
    }
}
Also used : RemoteResourceNotFoundException(org.apache.kafka.server.log.remote.storage.RemoteResourceNotFoundException) RemoteLogSegmentState(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState) RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId) RemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata)

Example 9 with RemoteLogSegmentId

use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.

In class FileBasedRemoteLogMetadataCacheTest, method testFileBasedRemoteLogMetadataCacheWithUnreferencedSegments.

@Test
public void testFileBasedRemoteLogMetadataCacheWithUnreferencedSegments() throws Exception {
    TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("test", 0));
    int brokerId = 0;
    Path snapshotDir = TestUtils.tempDirectory().toPath();
    // Build a file-backed metadata cache rooted at the temp directory.
    FileBasedRemoteLogMetadataCache metadataCache = new FileBasedRemoteLogMetadataCache(topicIdPartition, snapshotDir);
    // Register a first segment starting at offset 0 for leader epoch 0 and
    // mark its copy as finished.
    RemoteLogSegmentId firstSegmentId = new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid());
    RemoteLogSegmentMetadata firstSegmentMetadata = new RemoteLogSegmentMetadata(firstSegmentId, 0, 100, System.currentTimeMillis(), brokerId, System.currentTimeMillis(), 1024 * 1024, Collections.singletonMap(0, 0L));
    metadataCache.addCopyInProgressSegment(firstSegmentMetadata);
    RemoteLogSegmentMetadataUpdate firstUpdate = new RemoteLogSegmentMetadataUpdate(firstSegmentId, System.currentTimeMillis(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, brokerId);
    metadataCache.updateRemoteLogSegmentMetadata(firstUpdate);
    Optional<RemoteLogSegmentMetadata> fetched = metadataCache.remoteLogSegmentMetadata(0, 0L);
    assertTrue(fetched.isPresent());
    assertEquals(firstSegmentMetadata.createWithUpdates(firstUpdate), fetched.get());
    // Register a second segment, also starting at offset 0 for leader epoch 0;
    // it should displace the first one from the offset mapping.
    RemoteLogSegmentId secondSegmentId = new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid());
    RemoteLogSegmentMetadata secondSegmentMetadata = new RemoteLogSegmentMetadata(secondSegmentId, 0, 900, System.currentTimeMillis(), brokerId, System.currentTimeMillis(), 1024 * 1024, Collections.singletonMap(0, 0L));
    metadataCache.addCopyInProgressSegment(secondSegmentMetadata);
    RemoteLogSegmentMetadataUpdate secondUpdate = new RemoteLogSegmentMetadataUpdate(secondSegmentId, System.currentTimeMillis(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, brokerId);
    metadataCache.updateRemoteLogSegmentMetadata(secondUpdate);
    // A lookup for leader epoch 0 / offset 0 must now resolve to the newer segment.
    Optional<RemoteLogSegmentMetadata> fetchedAfterReplace = metadataCache.remoteLogSegmentMetadata(0, 0L);
    assertTrue(fetchedAfterReplace.isPresent());
    assertEquals(secondSegmentMetadata.createWithUpdates(secondUpdate), fetchedAfterReplace.get());
    // Persist the cache contents to disk.
    metadataCache.flushToFile(0, 0L);
    // Rebuild a cache from the persisted snapshot files.
    FileBasedRemoteLogMetadataCache reloadedCache = new FileBasedRemoteLogMetadataCache(topicIdPartition, snapshotDir);
    // After reload, the same lookup must still resolve to the second segment,
    // proving that metadata ordering survives the snapshot round trip.
    Optional<RemoteLogSegmentMetadata> fetchedAfterReload = reloadedCache.remoteLogSegmentMetadata(0, 0L);
    assertTrue(fetchedAfterReload.isPresent());
    assertEquals(secondSegmentMetadata.createWithUpdates(secondUpdate), fetchedAfterReload.get());
}
Also used : Path(java.nio.file.Path) RemoteLogSegmentMetadataUpdate(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate) TopicPartition(org.apache.kafka.common.TopicPartition) TopicIdPartition(org.apache.kafka.common.TopicIdPartition) RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId) RemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata) Test(org.junit.jupiter.api.Test)

Example 10 with RemoteLogSegmentId

use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.

In class RemoteLogMetadataCacheTest, method createSegmentUpdateWithState.

private RemoteLogSegmentMetadata createSegmentUpdateWithState(RemoteLogMetadataCache cache, Map<Integer, Long> segmentLeaderEpochs, long startOffset, long endOffset, RemoteLogSegmentState state) throws RemoteResourceNotFoundException {
    // Register a fresh in-progress segment for TP0, then drive it to the
    // requested state via a metadata update.
    RemoteLogSegmentId newSegmentId = new RemoteLogSegmentId(TP0, Uuid.randomUuid());
    RemoteLogSegmentMetadata initialMetadata = new RemoteLogSegmentMetadata(newSegmentId, startOffset, endOffset, -1L, BROKER_ID_0, time.milliseconds(), SEG_SIZE, segmentLeaderEpochs);
    cache.addCopyInProgressSegment(initialMetadata);
    RemoteLogSegmentMetadataUpdate stateUpdate = new RemoteLogSegmentMetadataUpdate(newSegmentId, time.milliseconds(), state, BROKER_ID_1);
    cache.updateRemoteLogSegmentMetadata(stateUpdate);
    // Return what the cache should now hold for this segment.
    return initialMetadata.createWithUpdates(stateUpdate);
}
Also used : RemoteLogSegmentMetadataUpdate(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate) RemoteLogSegmentId(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId) RemoteLogSegmentMetadata(org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata)

Aggregations

RemoteLogSegmentId (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId)21 RemoteLogSegmentMetadata (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata)14 RemoteLogSegmentMetadataUpdate (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate)9 TopicIdPartition (org.apache.kafka.common.TopicIdPartition)6 HashMap (java.util.HashMap)5 Test (org.junit.jupiter.api.Test)5 Map (java.util.Map)4 TopicPartition (org.apache.kafka.common.TopicPartition)4 RemoteResourceNotFoundException (org.apache.kafka.server.log.remote.storage.RemoteResourceNotFoundException)4 ArrayList (java.util.ArrayList)3 NavigableMap (java.util.NavigableMap)3 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)3 ConcurrentMap (java.util.concurrent.ConcurrentMap)3 RemoteLogSegmentState (org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState)3 Path (java.nio.file.Path)2 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)2 MethodSource (org.junit.jupiter.params.provider.MethodSource)2 Seq (scala.collection.Seq)2 File (java.io.File)1 Collections (java.util.Collections)1