Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemoteLogLeaderEpochState, method handleSegmentWithCopySegmentFinishedState.
void handleSegmentWithCopySegmentFinishedState(Long startOffset, RemoteLogSegmentId remoteLogSegmentId,
                                               Long leaderEpochEndOffset) {
    // Add the segment epochs mapping as the segment is copied successfully.
    RemoteLogSegmentId oldEntry = offsetToId.put(startOffset, remoteLogSegmentId);

    // Remove the metadata from unreferenced entries as it is successfully copied and added to the offset mapping.
    unreferencedSegmentIds.remove(remoteLogSegmentId);

    // Add the old entry to unreferenced entries as the mapping is removed for the old entry.
    if (oldEntry != null) {
        unreferencedSegmentIds.add(oldEntry);
    }

    // Update the highest offset entry for this leader epoch as we added a new mapping.
    if (highestLogOffset == null || leaderEpochEndOffset > highestLogOffset) {
        highestLogOffset = leaderEpochEndOffset;
    }
}
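For context, a minimal sketch of the state this method mutates, assuming offsetToId is a sorted concurrent map keyed by segment start offset and unreferencedSegmentIds is a concurrent set. The field names mirror the snippet above, but the concrete types here are an assumption; the declarations in the Kafka source may differ.

import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId;

import java.util.NavigableMap;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListMap;

// Hypothetical sketch of RemoteLogLeaderEpochState's fields, for illustration only.
class LeaderEpochStateSketch {
    // Segment start offset -> segment id; sorted so floor lookups by offset work.
    private final NavigableMap<Long, RemoteLogSegmentId> offsetToId = new ConcurrentSkipListMap<>();

    // Segments no longer (or not yet) referenced by the offset mapping.
    private final Set<RemoteLogSegmentId> unreferencedSegmentIds = ConcurrentHashMap.newKeySet();

    // Highest leader-epoch end offset observed for this epoch; null until the first copy finishes.
    private volatile Long highestLogOffset;
}

Note that the return value of offsetToId.put is what lets the method detect a replaced segment: the previous id at the same start offset is handed back and parked in unreferencedSegmentIds.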
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemoteLogMetadataCache, method remoteLogSegmentMetadata.
/**
 * Returns {@link RemoteLogSegmentMetadata} if it exists for the given leader-epoch containing the offset and with
 * {@link RemoteLogSegmentState#COPY_SEGMENT_FINISHED} state, else returns {@link Optional#empty()}.
 *
 * @param leaderEpoch leader epoch for the given offset
 * @param offset      offset
 * @return the requested remote log segment metadata if it exists.
 */
public Optional<RemoteLogSegmentMetadata> remoteLogSegmentMetadata(int leaderEpoch, long offset) {
    RemoteLogLeaderEpochState remoteLogLeaderEpochState = leaderEpochEntries.get(leaderEpoch);
    if (remoteLogLeaderEpochState == null) {
        return Optional.empty();
    }

    // Look for the floor entry as the given offset may exist in this entry.
    RemoteLogSegmentId remoteLogSegmentId = remoteLogLeaderEpochState.floorEntry(offset);
    if (remoteLogSegmentId == null) {
        // If the offset is lower than the minimum offset available in metadata then return empty.
        return Optional.empty();
    }

    RemoteLogSegmentMetadata metadata = idToSegmentMetadata.get(remoteLogSegmentId);
    // Check whether the given offset with leaderEpoch exists in this segment by checking
    // the epoch's offset boundaries within this segment:
    //   1. Take the next epoch's start offset - 1, if a next epoch exists.
    //   2. If no next epoch exists, the segment end offset is the epoch's relative end offset.
    Map.Entry<Integer, Long> nextEntry = metadata.segmentLeaderEpochs().higherEntry(leaderEpoch);
    long epochEndOffset = (nextEntry != null) ? nextEntry.getValue() - 1 : metadata.endOffset();

    // Return empty when the target offset > the epoch's end offset.
    return offset > epochEndOffset ? Optional.empty() : Optional.of(metadata);
}
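A concrete (hypothetical) illustration of the boundary check: suppose the floored segment spans offsets 0..150 and its segmentLeaderEpochs map is {0=0, 1=100}. For leaderEpoch = 0, higherEntry(0) yields (1, 100), so epochEndOffset = 99; a lookup at offset 120 under epoch 0 returns empty even though the segment physically contains offset 120, because that offset belongs to epoch 1. The cache variable below is assumed to hold such a segment.

// Hypothetical lookups against a cache holding the segment described above.
Optional<RemoteLogSegmentMetadata> underEpoch0 = cache.remoteLogSegmentMetadata(0, 120L);
// Empty: epoch 0 ends at offset 99 within this segment, so offset 120 is out of range for epoch 0.

Optional<RemoteLogSegmentMetadata> underEpoch1 = cache.remoteLogSegmentMetadata(1, 120L);
// Present: offset 120 falls inside epoch 1's range (100 up to the segment end offset, 150).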
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemoteLogMetadataCache, method updateRemoteLogSegmentMetadata.
public void updateRemoteLogSegmentMetadata(RemoteLogSegmentMetadataUpdate metadataUpdate)
        throws RemoteResourceNotFoundException {
    log.debug("Updating remote log segment metadata: [{}]", metadataUpdate);
    Objects.requireNonNull(metadataUpdate, "metadataUpdate can not be null");

    RemoteLogSegmentState targetState = metadataUpdate.state();
    RemoteLogSegmentId remoteLogSegmentId = metadataUpdate.remoteLogSegmentId();
    RemoteLogSegmentMetadata existingMetadata = idToSegmentMetadata.get(remoteLogSegmentId);
    if (existingMetadata == null) {
        throw new RemoteResourceNotFoundException("No remote log segment metadata found for :" + remoteLogSegmentId);
    }

    // Check the state transition.
    checkStateTransition(existingMetadata.state(), targetState);

    switch (targetState) {
        case COPY_SEGMENT_STARTED:
            // COPY_SEGMENT_STARTED is the initial state set by addCopyInProgressSegment; it is not a valid update target.
            throw new IllegalArgumentException("metadataUpdate: " + metadataUpdate + " with state "
                    + RemoteLogSegmentState.COPY_SEGMENT_STARTED + " can not be updated");
        case COPY_SEGMENT_FINISHED:
            handleSegmentWithCopySegmentFinishedState(existingMetadata.createWithUpdates(metadataUpdate));
            break;
        case DELETE_SEGMENT_STARTED:
            handleSegmentWithDeleteSegmentStartedState(existingMetadata.createWithUpdates(metadataUpdate));
            break;
        case DELETE_SEGMENT_FINISHED:
            handleSegmentWithDeleteSegmentFinishedState(existingMetadata.createWithUpdates(metadataUpdate));
            break;
        default:
            throw new IllegalArgumentException("Metadata with the state " + targetState + " is not supported");
    }
}
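checkStateTransition is not part of this snippet. A plausible sketch, assuming RemoteLogSegmentState exposes a static isValidTransition helper (verify against the Kafka version you are reading), is:

// Plausible sketch of checkStateTransition, not the verbatim Kafka implementation.
private void checkStateTransition(RemoteLogSegmentState existingState, RemoteLogSegmentState targetState) {
    if (!RemoteLogSegmentState.isValidTransition(existingState, targetState)) {
        throw new IllegalStateException("Current state: " + existingState
                + " can not be transitioned to target state: " + targetState);
    }
}

The segment lifecycle runs from COPY_SEGMENT_STARTED through COPY_SEGMENT_FINISHED toward the delete states, which is why an update targeting COPY_SEGMENT_STARTED is rejected outright: that state is only ever set when the segment is first registered.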
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class FileBasedRemoteLogMetadataCacheTest, method testFileBasedRemoteLogMetadataCacheWithUnreferencedSegments.
@Test
public void testFileBasedRemoteLogMetadataCacheWithUnreferencedSegments() throws Exception {
    TopicIdPartition partition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("test", 0));
    int brokerId = 0;
    Path path = TestUtils.tempDirectory().toPath();

    // Create a file-based metadata cache.
    FileBasedRemoteLogMetadataCache cache = new FileBasedRemoteLogMetadataCache(partition, path);

    // Add a segment with start offset 0 for leader epoch 0.
    RemoteLogSegmentId segmentId1 = new RemoteLogSegmentId(partition, Uuid.randomUuid());
    RemoteLogSegmentMetadata metadata1 = new RemoteLogSegmentMetadata(segmentId1, 0, 100, System.currentTimeMillis(),
            brokerId, System.currentTimeMillis(), 1024 * 1024, Collections.singletonMap(0, 0L));
    cache.addCopyInProgressSegment(metadata1);
    RemoteLogSegmentMetadataUpdate metadataUpdate1 = new RemoteLogSegmentMetadataUpdate(segmentId1,
            System.currentTimeMillis(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, brokerId);
    cache.updateRemoteLogSegmentMetadata(metadataUpdate1);
    Optional<RemoteLogSegmentMetadata> receivedMetadata = cache.remoteLogSegmentMetadata(0, 0L);
    assertTrue(receivedMetadata.isPresent());
    assertEquals(metadata1.createWithUpdates(metadataUpdate1), receivedMetadata.get());

    // Add a new segment with start offset 0 for leader epoch 0, which should replace the earlier segment.
    RemoteLogSegmentId segmentId2 = new RemoteLogSegmentId(partition, Uuid.randomUuid());
    RemoteLogSegmentMetadata metadata2 = new RemoteLogSegmentMetadata(segmentId2, 0, 900, System.currentTimeMillis(),
            brokerId, System.currentTimeMillis(), 1024 * 1024, Collections.singletonMap(0, 0L));
    cache.addCopyInProgressSegment(metadata2);
    RemoteLogSegmentMetadataUpdate metadataUpdate2 = new RemoteLogSegmentMetadataUpdate(segmentId2,
            System.currentTimeMillis(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, brokerId);
    cache.updateRemoteLogSegmentMetadata(metadataUpdate2);

    // Fetch the segment for leader epoch 0 and start offset 0; it should be the newly added segment.
    Optional<RemoteLogSegmentMetadata> receivedMetadata2 = cache.remoteLogSegmentMetadata(0, 0L);
    assertTrue(receivedMetadata2.isPresent());
    assertEquals(metadata2.createWithUpdates(metadataUpdate2), receivedMetadata2.get());

    // Flush the cache to the file.
    cache.flushToFile(0, 0L);

    // Create a new cache, loading from the stored path.
    FileBasedRemoteLogMetadataCache loadedCache = new FileBasedRemoteLogMetadataCache(partition, path);

    // Fetch the segment for leader epoch 0 and start offset 0; it should be metadata2.
    // This ensures that the ordering of metadata is taken care of after loading from the stored snapshots.
    Optional<RemoteLogSegmentMetadata> receivedMetadataAfterLoad = loadedCache.remoteLogSegmentMetadata(0, 0L);
    assertTrue(receivedMetadataAfterLoad.isPresent());
    assertEquals(metadata2.createWithUpdates(metadataUpdate2), receivedMetadataAfterLoad.get());
}
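The test verifies replacement only through lookups at offset 0, where both segments overlap. A stronger check, as a hypothetical extension reusing the variables above, would probe an offset covered only by the new segment: metadata2 spans 0..900 while the replaced metadata1 stopped at 100, so a lookup past the old end offset must still resolve to metadata2.

// Hypothetical extra assertions, not part of the original test.
Optional<RemoteLogSegmentMetadata> beyondOldEnd = loadedCache.remoteLogSegmentMetadata(0, 500L);
assertTrue(beyondOldEnd.isPresent());
assertEquals(metadata2.createWithUpdates(metadataUpdate2), beyondOldEnd.get());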
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemoteLogMetadataCacheTest, method createSegmentUpdateWithState.
private RemoteLogSegmentMetadata createSegmentUpdateWithState(RemoteLogMetadataCache cache,
                                                              Map<Integer, Long> segmentLeaderEpochs,
                                                              long startOffset,
                                                              long endOffset,
                                                              RemoteLogSegmentState state)
        throws RemoteResourceNotFoundException {
    RemoteLogSegmentId segmentId = new RemoteLogSegmentId(TP0, Uuid.randomUuid());
    RemoteLogSegmentMetadata segmentMetadata = new RemoteLogSegmentMetadata(segmentId, startOffset, endOffset, -1L,
            BROKER_ID_0, time.milliseconds(), SEG_SIZE, segmentLeaderEpochs);
    cache.addCopyInProgressSegment(segmentMetadata);

    RemoteLogSegmentMetadataUpdate segMetadataUpdate = new RemoteLogSegmentMetadataUpdate(segmentId,
            time.milliseconds(), state, BROKER_ID_1);
    cache.updateRemoteLogSegmentMetadata(segMetadataUpdate);

    return segmentMetadata.createWithUpdates(segMetadataUpdate);
}
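A typical call site, hypothetical and assuming the cache plus the TP0, BROKER_ID_0, BROKER_ID_1, SEG_SIZE, and time fixtures defined elsewhere in the test class, drives a fresh segment straight to a target state:

// Hypothetical usage: create a segment covering offsets 0..100 under leader
// epoch 0 and move it to COPY_SEGMENT_FINISHED in one call.
RemoteLogSegmentMetadata copied = createSegmentUpdateWithState(cache,
        Collections.singletonMap(0, 0L), 0L, 100L, RemoteLogSegmentState.COPY_SEGMENT_FINISHED);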