use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata in project kafka by apache.
the class RemoteLogSegmentLifecycleTest method testCacheSegmentWithCopySegmentFinishedState.
@ParameterizedTest(name = "remoteLogSegmentLifecycleManager = {0}")
@MethodSource("remoteLogSegmentLifecycleManagers")
public void testCacheSegmentWithCopySegmentFinishedState(RemoteLogSegmentLifecycleManager remoteLogSegmentLifecycleManager) throws Exception {
    try {
        remoteLogSegmentLifecycleManager.initialize(topicIdPartition);
        // Create a segment, move it to state COPY_SEGMENT_FINISHED, and check both searching for
        // that segment and listing the segments.
        RemoteLogSegmentMetadata segmentMetadata = createSegmentUpdateWithState(remoteLogSegmentLifecycleManager,
                Collections.singletonMap(0, 101L), 101L, 200L, RemoteLogSegmentState.COPY_SEGMENT_FINISHED);
        // The search should return the above segment.
        Optional<RemoteLogSegmentMetadata> segMetadataForOffset150 = remoteLogSegmentLifecycleManager.remoteLogSegmentMetadata(0, 150);
        Assertions.assertEquals(Optional.of(segmentMetadata), segMetadataForOffset150);
        // cache.listRemoteLogSegments should contain the above segment.
        checkListSegments(remoteLogSegmentLifecycleManager, 0, segmentMetadata);
    } finally {
        Utils.closeQuietly(remoteLogSegmentLifecycleManager, "RemoteLogSegmentLifecycleManager");
    }
}
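createSegmentUpdateWithState is a private helper of the test class whose body is not shown here. A minimal sketch of what such a helper plausibly does, assuming the lifecycle manager mirrors the add/update methods of RemoteLogMetadataManager and that BROKER_ID, SEG_SIZE, and time are test fixtures (all assumptions, not the actual helper):

// Hypothetical sketch: create a segment in the initial COPY_SEGMENT_STARTED state,
// then apply an update that moves it to the target state.
private RemoteLogSegmentMetadata createSegmentUpdateWithState(RemoteLogSegmentLifecycleManager manager,
                                                              Map<Integer, Long> segmentLeaderEpochs,
                                                              long startOffset,
                                                              long endOffset,
                                                              RemoteLogSegmentState targetState) throws Exception {
    RemoteLogSegmentId segmentId = new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid());
    // The plain constructor always yields COPY_SEGMENT_STARTED metadata.
    RemoteLogSegmentMetadata metadata = new RemoteLogSegmentMetadata(segmentId, startOffset, endOffset,
            -1L, BROKER_ID, time.milliseconds(), SEG_SIZE, segmentLeaderEpochs);
    manager.addRemoteLogSegmentMetadata(metadata);
    // Apply an update carrying the target state and return the merged metadata.
    RemoteLogSegmentMetadataUpdate update = new RemoteLogSegmentMetadataUpdate(segmentId,
            time.milliseconds(), targetState, BROKER_ID);
    manager.updateRemoteLogSegmentMetadata(update);
    return metadata.createWithUpdates(update);
}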
use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata in project kafka by apache.
the class TopicBasedRemoteLogMetadataManagerRestartTest method testRLMMAPIsAfterRestart.
@Test
public void testRLMMAPIsAfterRestart() throws Exception {
    // Create topics.
    String leaderTopic = "new-leader";
    HashMap<Object, Seq<Object>> assignedLeaderTopicReplicas = new HashMap<>();
    List<Object> leaderTopicReplicas = new ArrayList<>();
    // Set broker id 0 as the first entry, which is taken as the leader.
    leaderTopicReplicas.add(0);
    leaderTopicReplicas.add(1);
    leaderTopicReplicas.add(2);
    assignedLeaderTopicReplicas.put(0, JavaConverters.asScalaBuffer(leaderTopicReplicas));
    remoteLogMetadataManagerHarness.createTopicWithAssignment(leaderTopic,
            JavaConverters.mapAsScalaMap(assignedLeaderTopicReplicas),
            remoteLogMetadataManagerHarness.listenerName());

    String followerTopic = "new-follower";
    HashMap<Object, Seq<Object>> assignedFollowerTopicReplicas = new HashMap<>();
    List<Object> followerTopicReplicas = new ArrayList<>();
    // Set broker id 1 as the first entry, which is taken as the leader.
    followerTopicReplicas.add(1);
    followerTopicReplicas.add(2);
    followerTopicReplicas.add(0);
    assignedFollowerTopicReplicas.put(0, JavaConverters.asScalaBuffer(followerTopicReplicas));
    remoteLogMetadataManagerHarness.createTopicWithAssignment(followerTopic,
            JavaConverters.mapAsScalaMap(assignedFollowerTopicReplicas),
            remoteLogMetadataManagerHarness.listenerName());

    final TopicIdPartition leaderTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(leaderTopic, 0));
    final TopicIdPartition followerTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(followerTopic, 0));

    // Register these partitions to RLMM.
    topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition));

    // Add segments for these partitions; they are not available for reads yet because the
    // metadata topic partitions have not been subscribed.
    RemoteLogSegmentMetadata leaderSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata).get();
    RemoteLogSegmentMetadata followerSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(followerTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(followerSegmentMetadata).get();

    // Stop TopicBasedRemoteLogMetadataManager only.
    stopTopicBasedRemoteLogMetadataManagerHarness();

    // Start TopicBasedRemoteLogMetadataManager, but do not start the consumer thread, to check
    // whether the stored metadata is loaded successfully.
    startTopicBasedRemoteLogMetadataManagerHarness(false);

    // Register these partitions to RLMM, which loads the respective metadata snapshots.
    topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition));

    // Check for the stored entries from the earlier run.
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Collections.singleton(leaderSegmentMetadata).iterator(), topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)));
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Collections.singleton(followerSegmentMetadata).iterator(), topicBasedRlmm().listRemoteLogSegments(followerTopicIdPartition)));

    // Check that the check-pointed consumer offsets were stored.
    Path committedOffsetsPath = new File(logDir, COMMITTED_OFFSETS_FILE_NAME).toPath();
    Assertions.assertTrue(committedOffsetsPath.toFile().exists());
    CommittedOffsetsFile committedOffsetsFile = new CommittedOffsetsFile(committedOffsetsPath.toFile());
    int metadataPartition1 = topicBasedRlmm().metadataPartition(leaderTopicIdPartition);
    int metadataPartition2 = topicBasedRlmm().metadataPartition(followerTopicIdPartition);
    Optional<Long> receivedOffsetForPartition1 = topicBasedRlmm().receivedOffsetForPartition(metadataPartition1);
    Optional<Long> receivedOffsetForPartition2 = topicBasedRlmm().receivedOffsetForPartition(metadataPartition2);
    Assertions.assertTrue(receivedOffsetForPartition1.isPresent());
    Assertions.assertTrue(receivedOffsetForPartition2.isPresent());

    // Make sure these offsets are at least 0.
    Assertions.assertTrue(receivedOffsetForPartition1.get() >= 0);
    Assertions.assertTrue(receivedOffsetForPartition2.get() >= 0);

    // Check that the stored entries and the offsets set on the consumer are the same.
    Map<Integer, Long> partitionToOffset = committedOffsetsFile.readEntries();
    Assertions.assertEquals(partitionToOffset.get(metadataPartition1), receivedOffsetForPartition1.get());
    Assertions.assertEquals(partitionToOffset.get(metadataPartition2), receivedOffsetForPartition2.get());

    // Start the consumer thread.
    topicBasedRlmm().startConsumerThread();

    // Add one more segment.
    RemoteLogSegmentMetadata leaderSegmentMetadata2 = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), 101, 200, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 101L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata2).get();

    // Check that both the stored segment and the recently added segment are available.
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Arrays.asList(leaderSegmentMetadata, leaderSegmentMetadata2).iterator(), topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)));
}
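The RemoteLogSegmentMetadata constructor calls above take eight positional arguments, which makes them hard to read. The annotated copy below names each one; the parameter names follow the field order visible in fromApiMessageAndVersion further down, so treat this as a reading aid rather than the authoritative signature:

// Annotated version of the constructor call used in the test above.
RemoteLogSegmentMetadata leaderSegmentMetadata = new RemoteLogSegmentMetadata(
        new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), // remoteLogSegmentId
        0,                                  // startOffset
        100,                                // endOffset
        -1L,                                // maxTimestampMs (-1L as used in the test)
        0,                                  // brokerId
        time.milliseconds(),                // eventTimestampMs
        SEG_SIZE,                           // segmentSizeInBytes
        Collections.singletonMap(0, 0L));   // segmentLeaderEpochs: leader epoch -> start offset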
use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata in project kafka by apache.
the class RemoteLogLeaderEpochState method collectConvertedIdToMetadata.
private void collectConvertedIdToMetadata(Collection<RemoteLogSegmentId> segmentIds,
                                          Map<RemoteLogSegmentId, RemoteLogSegmentMetadata> idToSegmentMetadata,
                                          Collection<RemoteLogSegmentMetadata> result) throws RemoteResourceNotFoundException {
    for (RemoteLogSegmentId id : segmentIds) {
        RemoteLogSegmentMetadata metadata = idToSegmentMetadata.get(id);
        if (metadata == null) {
            throw new RemoteResourceNotFoundException("No remote log segment metadata found for: " + id);
        }
        result.add(metadata);
    }
}
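This is a private helper, so a caller inside RemoteLogLeaderEpochState would use it to resolve every tracked segment id into its metadata, e.g. when listing all segments for an epoch. In the sketch below, offsetToId (start offset to segment id) and unreferencedSegmentIds are assumed fields of the class, not confirmed by this excerpt:

// Plausible caller (a sketch): list every segment this epoch state knows about,
// failing fast if any id has no corresponding metadata entry.
Collection<RemoteLogSegmentMetadata> listAllRemoteLogSegments(Map<RemoteLogSegmentId, RemoteLogSegmentMetadata> idToSegmentMetadata)
        throws RemoteResourceNotFoundException {
    List<RemoteLogSegmentMetadata> result = new ArrayList<>(offsetToId.size() + unreferencedSegmentIds.size());
    collectConvertedIdToMetadata(offsetToId.values(), idToSegmentMetadata, result);
    collectConvertedIdToMetadata(unreferencedSegmentIds, idToSegmentMetadata, result);
    return result;
}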
use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata in project kafka by apache.
the class RemoteLogMetadataCache method addCopyInProgressSegment.
/**
* This method tracks the given remote segment as not yet available for reads. It does not add the segment
* leader epoch offset mapping until this segment reaches COPY_SEGMENT_FINISHED state.
*
* @param remoteLogSegmentMetadata RemoteLogSegmentMetadata instance
*/
public void addCopyInProgressSegment(RemoteLogSegmentMetadata remoteLogSegmentMetadata) {
    log.debug("Adding to in-progress state: [{}]", remoteLogSegmentMetadata);
    Objects.requireNonNull(remoteLogSegmentMetadata, "remoteLogSegmentMetadata can not be null");
    // This method is only allowed to add a segment with the initial state, COPY_SEGMENT_STARTED,
    // but not to update the existing remote log segment metadata.
    if (remoteLogSegmentMetadata.state() != RemoteLogSegmentState.COPY_SEGMENT_STARTED) {
        throw new IllegalArgumentException("Given remoteLogSegmentMetadata:" + remoteLogSegmentMetadata
                + " should have state as " + RemoteLogSegmentState.COPY_SEGMENT_STARTED
                + " but it contains state as: " + remoteLogSegmentMetadata.state());
    }
    RemoteLogSegmentId remoteLogSegmentId = remoteLogSegmentMetadata.remoteLogSegmentId();
    RemoteLogSegmentMetadata existingMetadata = idToSegmentMetadata.get(remoteLogSegmentId);
    checkStateTransition(existingMetadata != null ? existingMetadata.state() : null, remoteLogSegmentMetadata.state());
    for (Integer epoch : remoteLogSegmentMetadata.segmentLeaderEpochs().keySet()) {
        leaderEpochEntries.computeIfAbsent(epoch, leaderEpoch -> new RemoteLogLeaderEpochState())
                .handleSegmentWithCopySegmentStartedState(remoteLogSegmentId);
    }
    idToSegmentMetadata.put(remoteLogSegmentId, remoteLogSegmentMetadata);
}
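The Javadoc above describes a two-phase protocol: register the segment while the copy is in flight, then make it readable with a COPY_SEGMENT_FINISHED update. A minimal sketch of that flow, assuming updateRemoteLogSegmentMetadata is the cache's companion method for state transitions and reusing topicIdPartition and SEG_SIZE from the tests above (checked exceptions elided for brevity):

// Sketch of the intended lifecycle against RemoteLogMetadataCache.
RemoteLogMetadataCache cache = new RemoteLogMetadataCache();
RemoteLogSegmentId id = new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid());
RemoteLogSegmentMetadata started = new RemoteLogSegmentMetadata(id, 0L, 100L, -1L, 0,
        System.currentTimeMillis(), SEG_SIZE, Collections.singletonMap(0, 0L));

// 1. Track the copy as in progress; the segment is not yet visible to reads.
cache.addCopyInProgressSegment(started);
assert !cache.remoteLogSegmentMetadata(0, 50).isPresent();

// 2. Mark the copy finished; only now is the leader-epoch-to-offset mapping added.
cache.updateRemoteLogSegmentMetadata(new RemoteLogSegmentMetadataUpdate(id,
        System.currentTimeMillis(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, 0));
assert cache.remoteLogSegmentMetadata(0, 50).isPresent();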
use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata in project kafka by apache.
the class RemoteLogSegmentMetadataTransform method fromApiMessageAndVersion.
@Override
public RemoteLogSegmentMetadata fromApiMessageAndVersion(ApiMessageAndVersion apiMessageAndVersion) {
    RemoteLogSegmentMetadataRecord record = (RemoteLogSegmentMetadataRecord) apiMessageAndVersion.message();
    RemoteLogSegmentId remoteLogSegmentId = buildRemoteLogSegmentId(record.remoteLogSegmentId());
    Map<Integer, Long> segmentLeaderEpochs = new HashMap<>();
    for (RemoteLogSegmentMetadataRecord.SegmentLeaderEpochEntry segmentLeaderEpoch : record.segmentLeaderEpochs()) {
        segmentLeaderEpochs.put(segmentLeaderEpoch.leaderEpoch(), segmentLeaderEpoch.offset());
    }
    RemoteLogSegmentMetadata remoteLogSegmentMetadata = new RemoteLogSegmentMetadata(remoteLogSegmentId,
            record.startOffset(), record.endOffset(), record.maxTimestampMs(), record.brokerId(),
            record.eventTimestampMs(), record.segmentSizeInBytes(), segmentLeaderEpochs);
    RemoteLogSegmentMetadataUpdate rlsmUpdate = new RemoteLogSegmentMetadataUpdate(remoteLogSegmentId,
            record.eventTimestampMs(), RemoteLogSegmentState.forId(record.remoteLogSegmentState()), record.brokerId());
    return remoteLogSegmentMetadata.createWithUpdates(rlsmUpdate);
}
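Assuming the transform also implements the serializing direction under the name toApiMessageAndVersion (an assumption suggested by this method's own naming, not confirmed by this excerpt), a round-trip sketch shows how the two directions are expected to compose; topicIdPartition is a placeholder:

// Round-trip sketch: serialize metadata to an ApiMessageAndVersion and read it back.
RemoteLogSegmentMetadataTransform transform = new RemoteLogSegmentMetadataTransform();
RemoteLogSegmentMetadata metadata = new RemoteLogSegmentMetadata(
        new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid()),
        0L, 100L, -1L, 0, System.currentTimeMillis(), 1024, Collections.singletonMap(0, 0L));

ApiMessageAndVersion serialized = transform.toApiMessageAndVersion(metadata);
RemoteLogSegmentMetadata deserialized = transform.fromApiMessageAndVersion(serialized);

// createWithUpdates above restores the record's state, so equality should hold.
assert metadata.equals(deserialized);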