Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class TopicBasedRemoteLogMetadataManagerRestartTest, method testRLMMAPIsAfterRestart.
@Test
public void testRLMMAPIsAfterRestart() throws Exception {
    // Create topics.
    String leaderTopic = "new-leader";
    HashMap<Object, Seq<Object>> assignedLeaderTopicReplicas = new HashMap<>();
    List<Object> leaderTopicReplicas = new ArrayList<>();
    // Set broker id 0 as the first entry, which is taken as the leader.
    leaderTopicReplicas.add(0);
    leaderTopicReplicas.add(1);
    leaderTopicReplicas.add(2);
    assignedLeaderTopicReplicas.put(0, JavaConverters.asScalaBuffer(leaderTopicReplicas));
    remoteLogMetadataManagerHarness.createTopicWithAssignment(leaderTopic,
            JavaConverters.mapAsScalaMap(assignedLeaderTopicReplicas),
            remoteLogMetadataManagerHarness.listenerName());

    String followerTopic = "new-follower";
    HashMap<Object, Seq<Object>> assignedFollowerTopicReplicas = new HashMap<>();
    List<Object> followerTopicReplicas = new ArrayList<>();
    // Set broker id 1 as the first entry, which is taken as the leader.
    followerTopicReplicas.add(1);
    followerTopicReplicas.add(2);
    followerTopicReplicas.add(0);
    assignedFollowerTopicReplicas.put(0, JavaConverters.asScalaBuffer(followerTopicReplicas));
    remoteLogMetadataManagerHarness.createTopicWithAssignment(followerTopic,
            JavaConverters.mapAsScalaMap(assignedFollowerTopicReplicas),
            remoteLogMetadataManagerHarness.listenerName());

    final TopicIdPartition leaderTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(leaderTopic, 0));
    final TopicIdPartition followerTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(followerTopic, 0));

    // Register these partitions to RLMM.
    topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition));

    // Add segments for these partitions, but they are not available as they have not yet been subscribed.
    RemoteLogSegmentMetadata leaderSegmentMetadata = new RemoteLogSegmentMetadata(
            new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()),
            0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata).get();

    RemoteLogSegmentMetadata followerSegmentMetadata = new RemoteLogSegmentMetadata(
            new RemoteLogSegmentId(followerTopicIdPartition, Uuid.randomUuid()),
            0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(followerSegmentMetadata).get();

    // Stop TopicBasedRemoteLogMetadataManager only.
    stopTopicBasedRemoteLogMetadataManagerHarness();

    // Start TopicBasedRemoteLogMetadataManager but do not start the consumer thread, to check whether
    // the stored metadata is loaded successfully or not.
    startTopicBasedRemoteLogMetadataManagerHarness(false);

    // Register these partitions to RLMM, which loads the respective metadata snapshots.
    topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition));

    // Check for the stored entries from the earlier run.
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Collections.singleton(leaderSegmentMetadata).iterator(),
            topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)));
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Collections.singleton(followerSegmentMetadata).iterator(),
            topicBasedRlmm().listRemoteLogSegments(followerTopicIdPartition)));

    // Check whether the checkpointed consumer offsets are stored or not.
    Path committedOffsetsPath = new File(logDir, COMMITTED_OFFSETS_FILE_NAME).toPath();
    Assertions.assertTrue(committedOffsetsPath.toFile().exists());
    CommittedOffsetsFile committedOffsetsFile = new CommittedOffsetsFile(committedOffsetsPath.toFile());

    int metadataPartition1 = topicBasedRlmm().metadataPartition(leaderTopicIdPartition);
    int metadataPartition2 = topicBasedRlmm().metadataPartition(followerTopicIdPartition);
    Optional<Long> receivedOffsetForPartition1 = topicBasedRlmm().receivedOffsetForPartition(metadataPartition1);
    Optional<Long> receivedOffsetForPartition2 = topicBasedRlmm().receivedOffsetForPartition(metadataPartition2);
    Assertions.assertTrue(receivedOffsetForPartition1.isPresent());
    Assertions.assertTrue(receivedOffsetForPartition2.isPresent());

    // Make sure these offsets are at least 0.
    Assertions.assertTrue(receivedOffsetForPartition1.get() >= 0);
    Assertions.assertTrue(receivedOffsetForPartition2.get() >= 0);

    // Check that the stored entries and the offsets that were set on the consumer are the same.
    Map<Integer, Long> partitionToOffset = committedOffsetsFile.readEntries();
    Assertions.assertEquals(partitionToOffset.get(metadataPartition1), receivedOffsetForPartition1.get());
    Assertions.assertEquals(partitionToOffset.get(metadataPartition2), receivedOffsetForPartition2.get());

    // Start the consumer thread.
    topicBasedRlmm().startConsumerThread();

    // Add one more segment.
    RemoteLogSegmentMetadata leaderSegmentMetadata2 = new RemoteLogSegmentMetadata(
            new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()),
            101, 200, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 101L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata2).get();

    // Check that both the stored segment and the recently added segment are available.
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Arrays.asList(leaderSegmentMetadata, leaderSegmentMetadata2).iterator(),
            topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)));
}
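A note on the last constructor argument: segmentLeaderEpochs maps each leader epoch covered by the segment to the first offset the segment holds for that epoch. The test only needs single-epoch maps built with Collections.singletonMap; a segment that spanned two leadership changes would be described as in the sketch below (the offset values are illustrative, not from the test):

    // Leader epoch -> first offset of that epoch within the segment (illustrative values).
    Map<Integer, Long> segmentLeaderEpochs = new HashMap<>();
    segmentLeaderEpochs.put(0, 0L);   // segment starts in epoch 0
    segmentLeaderEpochs.put(1, 120L); // leadership changed at offset 120
    segmentLeaderEpochs.put(2, 180L); // and again at offset 180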
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemoteLogLeaderEpochState, method collectConvertedIdToMetadata.
private void collectConvertedIdToMetadata(Collection<RemoteLogSegmentId> segmentIds,
                                          Map<RemoteLogSegmentId, RemoteLogSegmentMetadata> idToSegmentMetadata,
                                          Collection<RemoteLogSegmentMetadata> result) throws RemoteResourceNotFoundException {
    for (RemoteLogSegmentId id : segmentIds) {
        RemoteLogSegmentMetadata metadata = idToSegmentMetadata.get(id);
        if (metadata == null) {
            throw new RemoteResourceNotFoundException("No remote log segment metadata found for: " + id);
        }
        result.add(metadata);
    }
}
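The helper resolves a batch of segment ids against the cache's id-to-metadata index and fails fast on the first unknown id. The same resolve-or-fail shape is easy to exercise in isolation; a minimal sketch using only JDK stand-in types (not the Kafka classes):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.NoSuchElementException;

    public class ResolveOrFailDemo {
        public static void main(String[] args) {
            // Stand-in index: segment id -> metadata.
            Map<String, String> idToSegmentMetadata = new HashMap<>();
            idToSegmentMetadata.put("seg-1", "metadata-1");
            idToSegmentMetadata.put("seg-2", "metadata-2");

            // Resolve each id, failing fast on the first unknown one,
            // mirroring collectConvertedIdToMetadata above.
            List<String> result = new ArrayList<>();
            for (String id : Arrays.asList("seg-1", "seg-2")) {
                String metadata = idToSegmentMetadata.get(id);
                if (metadata == null) {
                    throw new NoSuchElementException("No remote log segment metadata found for: " + id);
                }
                result.add(metadata);
            }
            System.out.println(result); // [metadata-1, metadata-2]
        }
    }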
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemoteLogMetadataCache, method doHandleSegmentStateTransitionForLeaderEpochs.
private void doHandleSegmentStateTransitionForLeaderEpochs(RemoteLogSegmentMetadata remoteLogSegmentMetadata,
                                                           RemoteLogLeaderEpochState.Action action) {
    RemoteLogSegmentId remoteLogSegmentId = remoteLogSegmentMetadata.remoteLogSegmentId();
    Map<Integer, Long> leaderEpochToOffset = remoteLogSegmentMetadata.segmentLeaderEpochs();

    // Go through all the leader epochs and apply the given action.
    for (Map.Entry<Integer, Long> entry : leaderEpochToOffset.entrySet()) {
        Integer leaderEpoch = entry.getKey();
        Long startOffset = entry.getValue();
        // leaderEpochEntries will be empty when restoring the metadata from a snapshot.
        RemoteLogLeaderEpochState remoteLogLeaderEpochState =
                leaderEpochEntries.computeIfAbsent(leaderEpoch, x -> new RemoteLogLeaderEpochState());
        action.accept(leaderEpoch, remoteLogLeaderEpochState, startOffset, remoteLogSegmentId);
    }
}
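RemoteLogLeaderEpochState.Action is a callback that the cache invokes once per (leaderEpoch, startOffset) entry the segment covers. A self-contained sketch of that dispatch shape, using a hypothetical stand-in for the Action interface rather than the real Kafka types:

    import java.util.HashMap;
    import java.util.Map;

    public class EpochActionDemo {
        // Hypothetical stand-in for RemoteLogLeaderEpochState.Action.
        interface Action {
            void accept(int leaderEpoch, long startOffset, String segmentId);
        }

        public static void main(String[] args) {
            // Leader epoch -> start offset, as returned by segmentLeaderEpochs().
            Map<Integer, Long> leaderEpochToOffset = new HashMap<>();
            leaderEpochToOffset.put(0, 0L);
            leaderEpochToOffset.put(1, 150L);

            Action action = (epoch, offset, id) ->
                    System.out.printf("epoch=%d startOffset=%d segment=%s%n", epoch, offset, id);

            // Apply the given action to every leader epoch the segment covers.
            for (Map.Entry<Integer, Long> entry : leaderEpochToOffset.entrySet()) {
                action.accept(entry.getKey(), entry.getValue(), "seg-abc");
            }
        }
    }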
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemoteLogMetadataCache, method addCopyInProgressSegment.
/**
 * This method tracks the given remote segment as not yet available for reads. It does not add the segment
 * leader epoch offset mapping until this segment reaches the COPY_SEGMENT_FINISHED state.
 *
 * @param remoteLogSegmentMetadata RemoteLogSegmentMetadata instance
 */
public void addCopyInProgressSegment(RemoteLogSegmentMetadata remoteLogSegmentMetadata) {
    log.debug("Adding to in-progress state: [{}]", remoteLogSegmentMetadata);
    Objects.requireNonNull(remoteLogSegmentMetadata, "remoteLogSegmentMetadata can not be null");

    // This method is only allowed to add a remote log segment with the initial state (COPY_SEGMENT_STARTED),
    // but not to update the existing remote log segment metadata.
    if (remoteLogSegmentMetadata.state() != RemoteLogSegmentState.COPY_SEGMENT_STARTED) {
        throw new IllegalArgumentException("Given remoteLogSegmentMetadata: " + remoteLogSegmentMetadata
                + " should have state as " + RemoteLogSegmentState.COPY_SEGMENT_STARTED
                + " but it contains state as: " + remoteLogSegmentMetadata.state());
    }

    RemoteLogSegmentId remoteLogSegmentId = remoteLogSegmentMetadata.remoteLogSegmentId();
    RemoteLogSegmentMetadata existingMetadata = idToSegmentMetadata.get(remoteLogSegmentId);
    checkStateTransition(existingMetadata != null ? existingMetadata.state() : null, remoteLogSegmentMetadata.state());

    for (Integer epoch : remoteLogSegmentMetadata.segmentLeaderEpochs().keySet()) {
        leaderEpochEntries.computeIfAbsent(epoch, leaderEpoch -> new RemoteLogLeaderEpochState())
                .handleSegmentWithCopySegmentStartedState(remoteLogSegmentId);
    }

    idToSegmentMetadata.put(remoteLogSegmentId, remoteLogSegmentMetadata);
}
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemoteLogSegmentMetadataUpdateTransform, method fromApiMessageAndVersion.
public RemoteLogSegmentMetadataUpdate fromApiMessageAndVersion(ApiMessageAndVersion apiMessageAndVersion) {
    RemoteLogSegmentMetadataUpdateRecord record = (RemoteLogSegmentMetadataUpdateRecord) apiMessageAndVersion.message();
    RemoteLogSegmentMetadataUpdateRecord.RemoteLogSegmentIdEntry entry = record.remoteLogSegmentId();
    TopicIdPartition topicIdPartition = new TopicIdPartition(entry.topicIdPartition().id(),
            new TopicPartition(entry.topicIdPartition().name(), entry.topicIdPartition().partition()));
    return new RemoteLogSegmentMetadataUpdate(new RemoteLogSegmentId(topicIdPartition, entry.id()),
            record.eventTimestampMs(),
            RemoteLogSegmentState.forId(record.remoteLogSegmentState()),
            record.brokerId());
}
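The constructor call at the end shows everything an update event carries: the segment id, the event timestamp, the new segment state, and the broker that produced the event. For illustration, an update marking a segment's copy as finished could be built as in the sketch below, reusing the same four-argument constructor visible above (segmentId stands for an existing RemoteLogSegmentId and is an assumption of this sketch):

    RemoteLogSegmentMetadataUpdate update = new RemoteLogSegmentMetadataUpdate(
            segmentId,                                    // which remote segment the event is about
            System.currentTimeMillis(),                   // eventTimestampMs
            RemoteLogSegmentState.COPY_SEGMENT_FINISHED,  // new state of the segment
            0);                                           // brokerId that generated the event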