Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemoteLogMetadataCacheTest, method testAPIsWithInvalidArgs:
@Test
public void testAPIsWithInvalidArgs() {
    RemoteLogMetadataCache cache = new RemoteLogMetadataCache();
    Assertions.assertThrows(NullPointerException.class, () -> cache.addCopyInProgressSegment(null));
    Assertions.assertThrows(NullPointerException.class, () -> cache.updateRemoteLogSegmentMetadata(null));

    // Check for invalid state updates to the addCopyInProgressSegment method.
    for (RemoteLogSegmentState state : RemoteLogSegmentState.values()) {
        if (state != RemoteLogSegmentState.COPY_SEGMENT_STARTED) {
            RemoteLogSegmentMetadata segmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(TP0, Uuid.randomUuid()),
                    0, 100L, -1L, BROKER_ID_0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
            RemoteLogSegmentMetadata updatedMetadata = segmentMetadata.createWithUpdates(
                    new RemoteLogSegmentMetadataUpdate(segmentMetadata.remoteLogSegmentId(), time.milliseconds(), state, BROKER_ID_1));
            Assertions.assertThrows(IllegalArgumentException.class, () -> cache.addCopyInProgressSegment(updatedMetadata));
        }
    }

    // Check for updating a non-existing segment id.
    Assertions.assertThrows(RemoteResourceNotFoundException.class, () -> {
        RemoteLogSegmentId nonExistingId = new RemoteLogSegmentId(TP0, Uuid.randomUuid());
        cache.updateRemoteLogSegmentMetadata(new RemoteLogSegmentMetadataUpdate(nonExistingId, time.milliseconds(),
                RemoteLogSegmentState.DELETE_SEGMENT_STARTED, BROKER_ID_1));
    });

    // Check for an invalid state transition (COPY_SEGMENT_FINISHED directly to DELETE_SEGMENT_FINISHED,
    // skipping DELETE_SEGMENT_STARTED).
    Assertions.assertThrows(IllegalStateException.class, () -> {
        RemoteLogSegmentMetadata segmentMetadata = createSegmentUpdateWithState(cache, Collections.singletonMap(0, 0L), 0, 100,
                RemoteLogSegmentState.COPY_SEGMENT_FINISHED);
        cache.updateRemoteLogSegmentMetadata(new RemoteLogSegmentMetadataUpdate(segmentMetadata.remoteLogSegmentId(), time.milliseconds(),
                RemoteLogSegmentState.DELETE_SEGMENT_FINISHED, BROKER_ID_1));
    });
}
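Taken together, these assertions pin down the cache's lifecycle contract: a segment may only enter via addCopyInProgressSegment in COPY_SEGMENT_STARTED, and every later state must arrive through updateRemoteLogSegmentMetadata as a valid transition. Below is a minimal sketch of the happy path those negative cases guard; the standalone fixtures (topic name, broker ids, segment size, wall-clock timestamps) are assumptions standing in for the test's TP0, BROKER_ID_0/1, SEG_SIZE, and time fields.

import java.util.Collections;

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.server.log.remote.metadata.storage.RemoteLogMetadataCache;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate;
import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState;
import org.apache.kafka.server.log.remote.storage.RemoteResourceNotFoundException;

public class CopyLifecycleHappyPath {
    public static void main(String[] args) throws RemoteResourceNotFoundException {
        // Hypothetical partition and broker ids replacing the test fixtures.
        TopicIdPartition partition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic", 0));
        RemoteLogMetadataCache cache = new RemoteLogMetadataCache();

        // A brand-new segment is accepted only in COPY_SEGMENT_STARTED.
        RemoteLogSegmentId segmentId = new RemoteLogSegmentId(partition, Uuid.randomUuid());
        cache.addCopyInProgressSegment(new RemoteLogSegmentMetadata(segmentId, 0L, 100L, -1L, 0,
                System.currentTimeMillis(), 1024, Collections.singletonMap(0, 0L)));

        // Later states must arrive as updates along the lifecycle:
        // COPY_SEGMENT_STARTED -> COPY_SEGMENT_FINISHED -> DELETE_SEGMENT_STARTED -> DELETE_SEGMENT_FINISHED.
        cache.updateRemoteLogSegmentMetadata(new RemoteLogSegmentMetadataUpdate(segmentId,
                System.currentTimeMillis(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, 1));
    }
}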
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemoteLogMetadataSerdeTest, method createRemoteLogSegmentMetadata:
private RemoteLogSegmentMetadata createRemoteLogSegmentMetadata() {
    Map<Integer, Long> segLeaderEpochs = new HashMap<>();
    segLeaderEpochs.put(0, 0L);
    segLeaderEpochs.put(1, 20L);
    segLeaderEpochs.put(2, 80L);
    RemoteLogSegmentId remoteLogSegmentId = new RemoteLogSegmentId(TP0, Uuid.randomUuid());
    return new RemoteLogSegmentMetadata(remoteLogSegmentId, 0L, 100L, -1L, 1, time.milliseconds(), 1024, segLeaderEpochs);
}
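This helper builds the metadata that the rest of RemoteLogMetadataSerdeTest pushes through a serialize/deserialize round trip. A hedged sketch of that round trip, assuming RemoteLogMetadataSerde exposes the byte[]-based serialize and deserialize pair used by the topic-based remote log metadata manager:

RemoteLogMetadataSerde serde = new RemoteLogMetadataSerde();
RemoteLogSegmentMetadata segmentMetadata = createRemoteLogSegmentMetadata();

// Serialize to the wire format and back, then compare for value equality.
byte[] serialized = serde.serialize(segmentMetadata);
RemoteLogMetadata deserialized = serde.deserialize(serialized);
Assertions.assertEquals(segmentMetadata, deserialized);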
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemoteLogMetadataTransformTest, method testRemoteLogSegmentMetadataUpdateTransform:
@Test
public void testRemoteLogSegmentMetadataUpdateTransform() {
    RemoteLogSegmentMetadataUpdateTransform metadataUpdateTransform = new RemoteLogSegmentMetadataUpdateTransform();
    RemoteLogSegmentMetadataUpdate metadataUpdate = new RemoteLogSegmentMetadataUpdate(new RemoteLogSegmentId(TP0, Uuid.randomUuid()),
            time.milliseconds(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, 1);
    ApiMessageAndVersion apiMessageAndVersion = metadataUpdateTransform.toApiMessageAndVersion(metadataUpdate);
    RemoteLogSegmentMetadataUpdate metadataUpdateFromRecord = metadataUpdateTransform.fromApiMessageAndVersion(apiMessageAndVersion);
    Assertions.assertEquals(metadataUpdate, metadataUpdateFromRecord);
}
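The same round-trip pattern should hold for full segment metadata, not just updates. A sketch, assuming the sibling RemoteLogSegmentMetadataTransform mirrors the update transform's toApiMessageAndVersion/fromApiMessageAndVersion API and reusing the test's TP0 and time fixtures:

@Test
public void testRemoteLogSegmentMetadataTransform() {
    RemoteLogSegmentMetadataTransform metadataTransform = new RemoteLogSegmentMetadataTransform();
    RemoteLogSegmentMetadata segmentMetadata = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(TP0, Uuid.randomUuid()),
            0L, 100L, -1L, 1, time.milliseconds(), 1024, Collections.singletonMap(0, 0L));

    // Convert to the record form and back; the result should be value-equal.
    ApiMessageAndVersion apiMessageAndVersion = metadataTransform.toApiMessageAndVersion(segmentMetadata);
    Assertions.assertEquals(segmentMetadata, metadataTransform.fromApiMessageAndVersion(apiMessageAndVersion));
}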
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemoteLogSegmentLifecycleTest, method testCacheSegmentWithCopySegmentStartedState:
@ParameterizedTest(name = "remoteLogSegmentLifecycleManager = {0}")
@MethodSource("remoteLogSegmentLifecycleManagers")
public void testCacheSegmentWithCopySegmentStartedState(RemoteLogSegmentLifecycleManager remoteLogSegmentLifecycleManager) throws Exception {
    try {
        remoteLogSegmentLifecycleManager.initialize(topicIdPartition);

        // Create a segment with state COPY_SEGMENT_STARTED, then check both searching for that segment
        // and listing the segments.
        RemoteLogSegmentId segmentId = new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid());
        RemoteLogSegmentMetadata segmentMetadata = new RemoteLogSegmentMetadata(segmentId, 0L, 50L, -1L, BROKER_ID_0,
                time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
        remoteLogSegmentLifecycleManager.addRemoteLogSegmentMetadata(segmentMetadata);

        // This segment should not be available yet, as its state has not reached COPY_SEGMENT_FINISHED.
        Optional<RemoteLogSegmentMetadata> segMetadataForOffset0Epoch0 = remoteLogSegmentLifecycleManager.remoteLogSegmentMetadata(0, 0);
        Assertions.assertFalse(segMetadataForOffset0Epoch0.isPresent());

        // The listRemoteLogSegments APIs should still contain the above segment.
        checkListSegments(remoteLogSegmentLifecycleManager, 0, segmentMetadata);
    } finally {
        Utils.closeQuietly(remoteLogSegmentLifecycleManager, "RemoteLogSegmentLifecycleManager");
    }
}
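As a follow-up to the negative lookup above, promoting the segment to COPY_SEGMENT_FINISHED should make the same offset/epoch query succeed. A sketch of that continuation, which would sit inside the same try block and reuses only the manager calls and fixtures already shown in this test:

// Move the segment to COPY_SEGMENT_FINISHED ...
remoteLogSegmentLifecycleManager.updateRemoteLogSegmentMetadata(
        new RemoteLogSegmentMetadataUpdate(segmentId, time.milliseconds(),
                RemoteLogSegmentState.COPY_SEGMENT_FINISHED, BROKER_ID_1));

// ... after which the lookup for leader epoch 0, offset 0 should return it.
Optional<RemoteLogSegmentMetadata> segMetadataAfterFinish = remoteLogSegmentLifecycleManager.remoteLogSegmentMetadata(0, 0);
Assertions.assertTrue(segMetadataAfterFinish.isPresent());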
Use of org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId in project kafka by apache.
The class RemoteLogSegmentLifecycleTest, method createSegmentUpdateWithState:
private RemoteLogSegmentMetadata createSegmentUpdateWithState(RemoteLogSegmentLifecycleManager remoteLogSegmentLifecycleManager,
                                                              Map<Integer, Long> segmentLeaderEpochs,
                                                              long startOffset,
                                                              long endOffset,
                                                              RemoteLogSegmentState state) throws RemoteStorageException {
    RemoteLogSegmentId segmentId = new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid());
    RemoteLogSegmentMetadata segmentMetadata = new RemoteLogSegmentMetadata(segmentId, startOffset, endOffset, -1L, BROKER_ID_0,
            time.milliseconds(), SEG_SIZE, segmentLeaderEpochs);
    remoteLogSegmentLifecycleManager.addRemoteLogSegmentMetadata(segmentMetadata);

    RemoteLogSegmentMetadataUpdate segMetadataUpdate = new RemoteLogSegmentMetadataUpdate(segmentId, time.milliseconds(), state, BROKER_ID_1);
    remoteLogSegmentLifecycleManager.updateRemoteLogSegmentMetadata(segMetadataUpdate);

    return segmentMetadata.createWithUpdates(segMetadataUpdate);
}
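A typical call site for this helper, sketched under the same fixtures: create a segment that has already reached COPY_SEGMENT_FINISHED and verify it is returned for an offset inside its range.

RemoteLogSegmentMetadata finishedSegment = createSegmentUpdateWithState(remoteLogSegmentLifecycleManager,
        Collections.singletonMap(0, 0L), 0L, 100L, RemoteLogSegmentState.COPY_SEGMENT_FINISHED);

// Offset 50 falls within [0, 100], so the lookup under leader epoch 0 should find the segment.
Assertions.assertEquals(Optional.of(finishedSegment),
        remoteLogSegmentLifecycleManager.remoteLogSegmentMetadata(0, 50));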