Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class TopicBasedRemoteLogMetadataManagerTest, method testNewPartitionUpdates.
@Test
public void testNewPartitionUpdates() throws Exception {
// Create topics.
String leaderTopic = "new-leader";
HashMap<Object, Seq<Object>> assignedLeaderTopicReplicas = new HashMap<>();
List<Object> leaderTopicReplicas = new ArrayList<>();
// Set broker id 0 as the first entry which is taken as the leader.
leaderTopicReplicas.add(0);
leaderTopicReplicas.add(1);
leaderTopicReplicas.add(2);
assignedLeaderTopicReplicas.put(0, JavaConverters.asScalaBuffer(leaderTopicReplicas));
remoteLogMetadataManagerHarness.createTopicWithAssignment(leaderTopic, JavaConverters.mapAsScalaMap(assignedLeaderTopicReplicas), remoteLogMetadataManagerHarness.listenerName());
String followerTopic = "new-follower";
HashMap<Object, Seq<Object>> assignedFollowerTopicReplicas = new HashMap<>();
List<Object> followerTopicReplicas = new ArrayList<>();
// Set broker id 1 as the first entry which is taken as the leader.
followerTopicReplicas.add(1);
followerTopicReplicas.add(2);
followerTopicReplicas.add(0);
assignedFollowerTopicReplicas.put(0, JavaConverters.asScalaBuffer(followerTopicReplicas));
remoteLogMetadataManagerHarness.createTopicWithAssignment(followerTopic, JavaConverters.mapAsScalaMap(assignedFollowerTopicReplicas), remoteLogMetadataManagerHarness.listenerName());
final TopicIdPartition newLeaderTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(leaderTopic, 0));
final TopicIdPartition newFollowerTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(followerTopic, 0));
// Add segments for these partitions, but expect an exception because they have not yet been subscribed.
// These messages would have been published to the respective metadata topic partitions, but the ConsumerManager
// has not yet subscribed to them as the partitions are not yet registered.
RemoteLogSegmentMetadata leaderSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(newLeaderTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
Assertions.assertThrows(Exception.class, () -> topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata).get());
RemoteLogSegmentMetadata followerSegmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(newFollowerTopicIdPartition, Uuid.randomUuid()), 0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
Assertions.assertThrows(Exception.class, () -> topicBasedRlmm().addRemoteLogSegmentMetadata(followerSegmentMetadata).get());
// `listRemoteLogSegments` will throw an exception as these topic partitions are not yet registered.
Assertions.assertThrows(RemoteResourceNotFoundException.class, () -> topicBasedRlmm().listRemoteLogSegments(newLeaderTopicIdPartition));
Assertions.assertThrows(RemoteResourceNotFoundException.class, () -> topicBasedRlmm().listRemoteLogSegments(newFollowerTopicIdPartition));
topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(newLeaderTopicIdPartition), Collections.singleton(newFollowerTopicIdPartition));
// RemoteLogSegmentMetadata events have already been published; topicBasedRlmm's consumer manager will start
// fetching those events and building the cache.
waitUntilConsumerCatchesup(newLeaderTopicIdPartition, newFollowerTopicIdPartition, 30_000L);
Assertions.assertTrue(topicBasedRlmm().listRemoteLogSegments(newLeaderTopicIdPartition).hasNext());
Assertions.assertTrue(topicBasedRlmm().listRemoteLogSegments(newFollowerTopicIdPartition).hasNext());
}
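Stripped of the cluster harness above, the core construction is small. Below is a minimal sketch of building a RemoteLogSegmentId and its RemoteLogSegmentMetadata with the same constructor argument order the test uses; the topic name, offsets, broker id, segment size, and timestamps are illustrative values, not taken from the test.

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata;

public class RemoteLogSegmentMetadataSketch {
    public static void main(String[] args) {
        // A remote segment is identified by the user topic partition plus a random Uuid.
        TopicIdPartition partition =
                new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("new-leader", 0));
        RemoteLogSegmentId segmentId = new RemoteLogSegmentId(partition, Uuid.randomUuid());

        // Arguments, in order: id, start offset, end offset, max timestamp, broker id,
        // event timestamp, segment size in bytes, leader-epoch -> start-offset map.
        Map<Integer, Long> leaderEpochs = Collections.singletonMap(0, 0L);
        RemoteLogSegmentMetadata metadata = new RemoteLogSegmentMetadata(
                segmentId, 0L, 100L, -1L, 0, System.currentTimeMillis(), 1048576, leaderEpochs);

        System.out.println(metadata);
    }
}

As the test asserts, adding such metadata through the topic-based manager only succeeds after onPartitionLeadershipChanges has registered the partition, since that is what triggers the consumer manager to subscribe to the corresponding metadata topic partition.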
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemotePartitionMetadataStore, method handleRemoteLogSegmentMetadata.
@Override
public void handleRemoteLogSegmentMetadata(RemoteLogSegmentMetadata remoteLogSegmentMetadata) {
log.debug("Adding remote log segment : [{}]", remoteLogSegmentMetadata);
final RemoteLogSegmentId remoteLogSegmentId = remoteLogSegmentMetadata.remoteLogSegmentId();
TopicIdPartition topicIdPartition = remoteLogSegmentId.topicIdPartition();
// This should already exist, as it is loaded when the partitions are assigned.
RemoteLogMetadataCache remoteLogMetadataCache = idToRemoteLogMetadataCache.get(topicIdPartition);
if (remoteLogMetadataCache != null) {
remoteLogMetadataCache.addCopyInProgressSegment(remoteLogSegmentMetadata);
} else {
throw new IllegalStateException("No partition metadata found for : " + topicIdPartition);
}
}
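The handler assumes the per-partition cache was created earlier, when the partition was assigned. The following hypothetical stand-in, which uses a plain map and list instead of the real idToRemoteLogMetadataCache and RemoteLogMetadataCache, is only meant to illustrate that lookup-or-fail pattern:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata;

// Hypothetical, simplified stand-in for the per-partition store used above.
class PartitionSegmentStoreSketch {
    private final Map<TopicIdPartition, List<RemoteLogSegmentMetadata>> caches = new ConcurrentHashMap<>();

    // Called when a user partition is assigned, mirroring the expectation that the
    // cache entry already exists by the time segment events arrive.
    void onPartitionAssigned(TopicIdPartition partition) {
        caches.putIfAbsent(partition, new ArrayList<>());
    }

    void handle(RemoteLogSegmentMetadata metadata) {
        TopicIdPartition partition = metadata.remoteLogSegmentId().topicIdPartition();
        List<RemoteLogSegmentMetadata> cache = caches.get(partition);
        if (cache == null) {
            // Same failure mode as the real handler: an event for an unassigned partition is a bug.
            throw new IllegalStateException("No partition metadata found for : " + partition);
        }
        // The real cache tracks the segment as copy-in-progress; here we only record it.
        cache.add(metadata);
    }
}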
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemotePartitionMetadataStore, method handleRemoteLogSegmentMetadataUpdate.
@Override
public void handleRemoteLogSegmentMetadataUpdate(RemoteLogSegmentMetadataUpdate rlsmUpdate) {
log.debug("Updating remote log segment: [{}]", rlsmUpdate);
RemoteLogSegmentId remoteLogSegmentId = rlsmUpdate.remoteLogSegmentId();
TopicIdPartition topicIdPartition = remoteLogSegmentId.topicIdPartition();
RemoteLogMetadataCache remoteLogMetadataCache = idToRemoteLogMetadataCache.get(topicIdPartition);
if (remoteLogMetadataCache != null) {
try {
remoteLogMetadataCache.updateRemoteLogSegmentMetadata(rlsmUpdate);
} catch (RemoteResourceNotFoundException e) {
log.warn("Error occurred while updating the remote log segment.", e);
}
} else {
throw new IllegalStateException("No partition metadata found for : " + topicIdPartition);
}
}
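The update events this handler applies are constructed with the four-argument form also used by the lifecycle test below. A minimal sketch; the topic name, broker id, and timestamp are illustrative:

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState;

public class SegmentUpdateSketch {
    public static void main(String[] args) {
        RemoteLogSegmentId segmentId = new RemoteLogSegmentId(
                new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("user-topic", 0)),
                Uuid.randomUuid());
        // Arguments, in order: segment id, event timestamp, target state, broker id.
        RemoteLogSegmentMetadataUpdate update = new RemoteLogSegmentMetadataUpdate(
                segmentId, System.currentTimeMillis(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, 1);
        System.out.println(update);
    }
}

If the referenced segment id is unknown, updateRemoteLogSegmentMetadata raises RemoteResourceNotFoundException, which the handler above logs as a warning rather than propagating.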
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemoteLogSegmentLifecycleTest, method testRemoteLogSegmentLifeCycle.
@ParameterizedTest(name = "remoteLogSegmentLifecycleManager = {0}")
@MethodSource("remoteLogSegmentLifecycleManagers")
public void testRemoteLogSegmentLifeCycle(RemoteLogSegmentLifecycleManager remoteLogSegmentLifecycleManager) throws Exception {
try {
remoteLogSegmentLifecycleManager.initialize(topicIdPartition);
// segment 0
// offsets: [0-100]
// leader epochs (0,0), (1,20), (2,80)
Map<Integer, Long> segment0LeaderEpochs = new HashMap<>();
segment0LeaderEpochs.put(0, 0L);
segment0LeaderEpochs.put(1, 20L);
segment0LeaderEpochs.put(2, 80L);
RemoteLogSegmentId segment0Id = new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid());
RemoteLogSegmentMetadata segment0Metadata = new RemoteLogSegmentMetadata(segment0Id, 0L, 100L, -1L, BROKER_ID_0, time.milliseconds(), SEG_SIZE, segment0LeaderEpochs);
remoteLogSegmentLifecycleManager.addRemoteLogSegmentMetadata(segment0Metadata);
// We should not get this segment back, as it is still being copied and is not considered successful until
// it reaches RemoteLogSegmentState.COPY_SEGMENT_FINISHED.
Assertions.assertFalse(remoteLogSegmentLifecycleManager.remoteLogSegmentMetadata(40, 1).isPresent());
// Check that these leader epochs are not considered by the highestOffsetForEpoch API as the segment is still being copied.
Stream.of(0, 1, 2).forEach(epoch -> {
try {
Assertions.assertFalse(remoteLogSegmentLifecycleManager.highestOffsetForEpoch(epoch).isPresent());
} catch (RemoteStorageException e) {
Assertions.fail(e);
}
});
RemoteLogSegmentMetadataUpdate segment0Update = new RemoteLogSegmentMetadataUpdate(segment0Id, time.milliseconds(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, BROKER_ID_1);
remoteLogSegmentLifecycleManager.updateRemoteLogSegmentMetadata(segment0Update);
RemoteLogSegmentMetadata expectedSegment0Metadata = segment0Metadata.createWithUpdates(segment0Update);
// segment 1
// offsets: [101 - 200]
// no leadership changes within this segment
// leader epochs (2, 101)
Map<Integer, Long> segment1LeaderEpochs = Collections.singletonMap(2, 101L);
RemoteLogSegmentMetadata segment1Metadata = createSegmentUpdateWithState(remoteLogSegmentLifecycleManager, segment1LeaderEpochs, 101L, 200L, RemoteLogSegmentState.COPY_SEGMENT_FINISHED);
// segment 2
// offsets: [201 - 300]
// moved to epoch 3 in between
// leader epochs (2, 201), (3, 240)
Map<Integer, Long> segment2LeaderEpochs = new HashMap<>();
segment2LeaderEpochs.put(2, 201L);
segment2LeaderEpochs.put(3, 240L);
RemoteLogSegmentMetadata segment2Metadata = createSegmentUpdateWithState(remoteLogSegmentLifecycleManager, segment2LeaderEpochs, 201L, 300L, RemoteLogSegmentState.COPY_SEGMENT_FINISHED);
// segment 3
// offsets: [250 - 400]
// leader epochs (3, 250), (4, 370)
Map<Integer, Long> segment3LeaderEpochs = new HashMap<>();
segment3LeaderEpochs.put(3, 250L);
segment3LeaderEpochs.put(4, 370L);
RemoteLogSegmentMetadata segment3Metadata = createSegmentUpdateWithState(remoteLogSegmentLifecycleManager, segment3LeaderEpochs, 250L, 400L, RemoteLogSegmentState.COPY_SEGMENT_FINISHED);
// ////////////////////////////////////////////////////////////////////////////////////////
// Four segments are added with different boundaries and leader epochs.
// Search for cache.remoteLogSegmentMetadata(leaderEpoch, offset) for different
// epochs and offsets
// ////////////////////////////////////////////////////////////////////////////////////////
HashMap<EpochOffset, RemoteLogSegmentMetadata> expectedEpochOffsetToSegmentMetadata = new HashMap<>();
// Existing metadata entries.
expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(1, 40), expectedSegment0Metadata);
expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(2, 110), segment1Metadata);
expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(3, 240), segment2Metadata);
expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(3, 250), segment3Metadata);
expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(4, 375), segment3Metadata);
// Non-existing metadata entries.
// Search for epoch 1 at offset 110; it should not exist.
expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(1, 110), null);
// Search for non-existing offset 401, epoch 4.
expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(4, 401), null);
// Search for non-existing epoch 5.
expectedEpochOffsetToSegmentMetadata.put(new EpochOffset(5, 301), null);
for (Map.Entry<EpochOffset, RemoteLogSegmentMetadata> entry : expectedEpochOffsetToSegmentMetadata.entrySet()) {
EpochOffset epochOffset = entry.getKey();
Optional<RemoteLogSegmentMetadata> segmentMetadata = remoteLogSegmentLifecycleManager.remoteLogSegmentMetadata(epochOffset.epoch, epochOffset.offset);
RemoteLogSegmentMetadata expectedSegmentMetadata = entry.getValue();
log.debug("Searching for {} , result: {}, expected: {} ", epochOffset, segmentMetadata, expectedSegmentMetadata);
if (expectedSegmentMetadata != null) {
Assertions.assertEquals(Optional.of(expectedSegmentMetadata), segmentMetadata);
} else {
Assertions.assertFalse(segmentMetadata.isPresent());
}
}
// Update segment with state as DELETE_SEGMENT_STARTED.
// It should not be available when we search for that segment.
remoteLogSegmentLifecycleManager.updateRemoteLogSegmentMetadata(new RemoteLogSegmentMetadataUpdate(expectedSegment0Metadata.remoteLogSegmentId(), time.milliseconds(), RemoteLogSegmentState.DELETE_SEGMENT_STARTED, BROKER_ID_1));
Assertions.assertFalse(remoteLogSegmentLifecycleManager.remoteLogSegmentMetadata(0, 10).isPresent());
// Update segment with state as DELETE_SEGMENT_FINISHED.
// It should not be available when we search for that segment.
remoteLogSegmentLifecycleManager.updateRemoteLogSegmentMetadata(new RemoteLogSegmentMetadataUpdate(expectedSegment0Metadata.remoteLogSegmentId(), time.milliseconds(), RemoteLogSegmentState.DELETE_SEGMENT_FINISHED, BROKER_ID_1));
Assertions.assertFalse(remoteLogSegmentLifecycleManager.remoteLogSegmentMetadata(0, 10).isPresent());
// ////////////////////////////////////////////////////////////////////////////////////////
// Search for cache.highestOffsetForEpoch(leaderEpoch) for all the leader epochs
// ////////////////////////////////////////////////////////////////////////////////////////
Map<Integer, Long> expectedEpochToHighestOffset = new HashMap<>();
expectedEpochToHighestOffset.put(0, 19L);
expectedEpochToHighestOffset.put(1, 79L);
expectedEpochToHighestOffset.put(2, 239L);
expectedEpochToHighestOffset.put(3, 369L);
expectedEpochToHighestOffset.put(4, 400L);
for (Map.Entry<Integer, Long> entry : expectedEpochToHighestOffset.entrySet()) {
Integer epoch = entry.getKey();
Long expectedOffset = entry.getValue();
Optional<Long> offset = remoteLogSegmentLifecycleManager.highestOffsetForEpoch(epoch);
log.debug("Fetching highest offset for epoch: {} , returned: {} , expected: {}", epoch, offset, expectedOffset);
Assertions.assertEquals(Optional.of(expectedOffset), offset);
}
// Search for a non-existing leader epoch.
Optional<Long> highestOffsetForEpoch5 = remoteLogSegmentLifecycleManager.highestOffsetForEpoch(5);
Assertions.assertFalse(highestOffsetForEpoch5.isPresent());
} finally {
Utils.closeQuietly(remoteLogSegmentLifecycleManager, "RemoteLogSegmentLifecycleManager");
}
}
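The state transitions the test walks through can be summarized without the cache: a segment created through the eight-argument constructor starts as a copy-in-progress segment and is not served from lookups, becomes visible only after a COPY_SEGMENT_FINISHED update is merged via createWithUpdates, and disappears from offset-based lookups again once deletion starts. A compact sketch with illustrative offsets, broker ids, and timestamps:

import java.util.Collections;

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState;

public class SegmentLifecycleSketch {
    public static void main(String[] args) {
        TopicIdPartition partition =
                new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("user-topic", 0));
        RemoteLogSegmentId id = new RemoteLogSegmentId(partition, Uuid.randomUuid());

        // Newly added metadata represents a copy-in-progress segment; lookups do not return it yet.
        RemoteLogSegmentMetadata copyStarted = new RemoteLogSegmentMetadata(
                id, 0L, 100L, -1L, 0, System.currentTimeMillis(), 1048576, Collections.singletonMap(0, 0L));

        // After the copy finishes, the merged metadata becomes eligible for
        // remoteLogSegmentMetadata(epoch, offset) and highestOffsetForEpoch(epoch).
        RemoteLogSegmentMetadata copyFinished = copyStarted.createWithUpdates(new RemoteLogSegmentMetadataUpdate(
                id, System.currentTimeMillis(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, 1));

        // Once deletion starts, the segment is excluded from offset-based lookups again.
        RemoteLogSegmentMetadata deleteStarted = copyFinished.createWithUpdates(new RemoteLogSegmentMetadataUpdate(
                id, System.currentTimeMillis(), RemoteLogSegmentState.DELETE_SEGMENT_STARTED, 1));

        System.out.println(deleteStarted);
    }
}

The expected highestOffsetForEpoch values in the test follow directly from the epoch boundaries of the finished segments: an epoch's highest offset is one less than the next epoch's start offset, or the end offset of the last segment for the newest epoch, hence 0 -> 19, 1 -> 79, 2 -> 239, 3 -> 369, and 4 -> 400.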
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class FileBasedRemoteLogMetadataCache, method flushToFile.
/**
* Flushes the in-memory state to the snapshot file.
*
* @param metadataPartition remote log metadata partition from which the messages have been consumed for the given
* user topic partition.
* @param metadataPartitionOffset remote log metadata partition offset up to which the messages have been consumed.
* @throws IOException if any error occurs while writing the snapshot to the file.
*/
public void flushToFile(int metadataPartition, Long metadataPartitionOffset) throws IOException {
List<RemoteLogSegmentMetadataSnapshot> snapshots = new ArrayList<>(idToSegmentMetadata.size());
for (RemoteLogLeaderEpochState state : leaderEpochEntries.values()) {
// Add unreferenced segments; they are also included in the snapshot used to rebuild the RemoteLogMetadataCache.
for (RemoteLogSegmentId id : state.unreferencedSegmentIds()) {
snapshots.add(RemoteLogSegmentMetadataSnapshot.create(idToSegmentMetadata.get(id)));
}
// Add referenced segments.
for (RemoteLogSegmentId id : state.referencedSegmentIds()) {
snapshots.add(RemoteLogSegmentMetadataSnapshot.create(idToSegmentMetadata.get(id)));
}
}
snapshotFile.write(new RemoteLogMetadataSnapshotFile.Snapshot(metadataPartition, metadataPartitionOffset, snapshots));
}
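A hypothetical caller sketch for flushToFile: after a batch of remote log metadata records has been consumed and applied for one user partition, the cache can be flushed so a restart resumes from the recorded offset instead of replaying the whole metadata partition. The package of FileBasedRemoteLogMetadataCache and the flush-after-batch trigger policy are assumptions for illustration; the partition number and offset are made up.

import java.io.IOException;

import org.apache.kafka.server.log.remote.metadata.storage.FileBasedRemoteLogMetadataCache;

// Hypothetical helper: ties flushToFile to the point where a batch of metadata records
// has been consumed and applied for one user partition.
final class SnapshotFlusherSketch {
    private final FileBasedRemoteLogMetadataCache cache;

    SnapshotFlusherSketch(FileBasedRemoteLogMetadataCache cache) {
        this.cache = cache;
    }

    // metadataPartition: the remote log metadata topic partition the records came from.
    // lastConsumedOffset: the offset up to which records have been applied to the cache.
    void onBatchApplied(int metadataPartition, long lastConsumedOffset) throws IOException {
        // Persists referenced and unreferenced segment snapshots together with the consumed offset.
        cache.flushToFile(metadataPartition, lastConsumedOffset);
    }
}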