Use of io.pravega.client.stream.impl.StreamCutImpl in project pravega by pravega.
From class StreamMetadataStoreTest, the method testReaderGroups:
@Test(timeout = 30000)
public void testReaderGroups() throws Exception {
    final String scopeRGTest = "scopeRGTest";
    final String streamRGTest = "streamRGTest";
    final ScalingPolicy policy = ScalingPolicy.fixed(1);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    Controller.CreateScopeStatus createScopeStatus = store.createScope(scopeRGTest, null, executor).join();
    assertEquals(Controller.CreateScopeStatus.Status.SUCCESS, createScopeStatus.getStatus());
    store.createStream(scopeRGTest, streamRGTest, configuration, start, null, executor).join();
    store.setState(scopeRGTest, streamRGTest, State.ACTIVE, null, executor).join();
    final String rgName = "readerGroupRGTest";
    final UUID rgId = UUID.randomUUID();
    final Segment seg0 = new Segment(scopeRGTest, streamRGTest, 0L);
    final Segment seg1 = new Segment(scopeRGTest, streamRGTest, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 10L, seg1, 10L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(scopeRGTest, streamRGTest),
            new StreamCutImpl(Stream.of(scopeRGTest, streamRGTest), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 200L, seg1, 300L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(scopeRGTest, streamRGTest),
            new StreamCutImpl(Stream.of(scopeRGTest, streamRGTest), endStreamCut));
    ReaderGroupConfig rgConfig = ReaderGroupConfig.builder()
            .automaticCheckpointIntervalMillis(30000L)
            .groupRefreshTimeMillis(20000L)
            .maxOutstandingCheckpointRequest(2)
            .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
            .startingStreamCuts(startSC)
            .endingStreamCuts(endSC)
            .build();
    rgConfig = ReaderGroupConfig.cloneConfig(rgConfig, rgId, 0L);
    final OperationContext rgContext = store.createRGContext(scopeRGTest, rgName, 0L);
    store.addReaderGroupToScope(scopeRGTest, rgName, rgConfig.getReaderGroupId(), rgContext, executor).join();
    store.createReaderGroup(scopeRGTest, rgName, rgConfig, System.currentTimeMillis(), rgContext, executor).join();
    UUID readerGroupId = store.getReaderGroupId(scopeRGTest, rgName, rgContext, executor).get();
    assertEquals(rgId, readerGroupId);
    ReaderGroupConfigRecord cfgRecord = store.getReaderGroupConfigRecord(scopeRGTest, rgName, rgContext, executor).join().getObject();
    assertFalse(cfgRecord.isUpdating());
    assertEquals(rgConfig.getGeneration(), cfgRecord.getGeneration());
    assertEquals(rgConfig.getAutomaticCheckpointIntervalMillis(), cfgRecord.getAutomaticCheckpointIntervalMillis());
    assertEquals(rgConfig.getGroupRefreshTimeMillis(), cfgRecord.getGroupRefreshTimeMillis());
    assertEquals(rgConfig.getStartingStreamCuts().size(), cfgRecord.getStartingStreamCuts().size());
    VersionedMetadata<ReaderGroupState> rgState = store.getVersionedReaderGroupState(scopeRGTest, rgName, true, rgContext, executor).get();
    assertEquals(ReaderGroupState.CREATING, rgState.getObject());
}
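All of these examples hinge on the same construction: a StreamCutImpl is built from a Stream plus a map of Segment to offset, and that cut is then keyed by Stream when handed to the ReaderGroupConfig builder. A minimal sketch of the pattern in isolation (the scope, stream, and offsets below are illustrative placeholders, not values from the test):

Stream stream = Stream.of("someScope", "someStream");
Map<Segment, Long> offsets = ImmutableMap.of(
        new Segment("someScope", "someStream", 0L), 100L,   // segment 0 read up to offset 100
        new Segment("someScope", "someStream", 1L), 150L);  // segment 1 read up to offset 150
StreamCut cut = new StreamCutImpl(stream, offsets);
Map<Stream, StreamCut> startingCuts = ImmutableMap.of(stream, cut);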
Use of io.pravega.client.stream.impl.StreamCutImpl in project pravega by pravega.
From class MetadataScalabilityTest, the method truncation:
void truncation(ControllerImpl controller, List<List<Segment>> listOfEpochs) {
    int numSegments = getStreamConfig().getScalingPolicy().getMinNumSegments();
    int scalesToPerform = getScalesToPerform();
    Stream stream = new StreamImpl(SCOPE, getStreamName());
    // Try SCALES_TO_PERFORM randomly generated stream cuts and truncate the stream
    // at each of them.
    List<AtomicInteger> indexes = new LinkedList<>();
    Random rand = new Random();
    for (int i = 0; i < numSegments; i++) {
        indexes.add(new AtomicInteger(1));
    }
    Futures.loop(() -> indexes.stream().allMatch(x -> x.get() < scalesToPerform - 1), () -> {
        // We randomly generate a stream cut in each iteration of this loop. A valid stream
        // cut in this scenario contains, for each position i in [0, numSegments - 1], a
        // segment from one of the scale epochs of the stream. For each position i, we
        // randomly choose an epoch and pick the segment at position i, incrementing the
        // epoch index accordingly (indexes list) so that the next iteration chooses a
        // later epoch for the same i.
        //
        // Because the segment at position i always covers the key range [d * i, d * (i + 1)],
        // where d = 1 / numSegments, the stream cut is guaranteed to cover the entire
        // key space (see the sketch after this method).
        Map<Segment, Long> map = new HashMap<>();
        for (int i = 0; i < numSegments; i++) {
            AtomicInteger index = indexes.get(i);
            index.set(index.get() + rand.nextInt(scalesToPerform - index.get()));
            map.put(listOfEpochs.get(index.get()).get(i), 0L);
        }
        StreamCut cut = new StreamCutImpl(stream, map);
        log.info("truncating stream at {}", map);
        return controller.truncateStream(SCOPE, streamName, cut).thenCompose(truncated -> {
            log.info("stream truncated successfully at {}", cut);
            assertTrue(truncated);
            // We only validate that a non-empty value is returned.
            return controller.getSuccessors(cut).thenAccept(successors -> {
                assertTrue(successors.getSegments().size() > 0);
                log.info("Successors for streamcut {} are {}", cut, successors);
            });
        });
    }, executorService).join();
}
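The coverage argument in the comment above reduces to a few lines of arithmetic: for a fixed segment count, the per-position key ranges tile [0, 1] exactly. A standalone sketch, independent of the test harness (numSegments = 4 is an arbitrary example):

public static void main(String[] args) {
    int numSegments = 4;
    double d = 1.0 / numSegments;
    for (int i = 0; i < numSegments; i++) {
        // The segment at position i covers [d * i, d * (i + 1)]; consecutive ranges
        // share an endpoint, so their union is exactly the key space [0, 1].
        System.out.printf("position %d covers [%.2f, %.2f]%n", i, d * i, d * (i + 1));
    }
}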
Use of io.pravega.client.stream.impl.StreamCutImpl in project pravega by pravega.
From class StreamMetadataTasksTest, the method consumptionBasedRetentionTimeLimitWithOverlappingMinTest:
@Test(timeout = 30000)
public void consumptionBasedRetentionTimeLimitWithOverlappingMinTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.byTime(Duration.ofMillis(10), Duration.ofMillis(50));
    String stream1 = "consumptionSizeOverlap";
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks)
            .createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    AtomicLong time = new AtomicLong(0L);
    streamMetadataTasks.setRetentionClock(time::get);
    final Segment seg0 = new Segment(SCOPE, stream1, 0L);
    final Segment seg1 = new Segment(SCOPE, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1),
            new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1),
            new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
    ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder()
            .automaticCheckpointIntervalMillis(30000L)
            .groupRefreshTimeMillis(20000L)
            .maxOutstandingCheckpointRequest(2)
            .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
            .startingStreamCuts(startSC)
            .endingStreamCuts(endSC)
            .build();
    consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks)
            .createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    String subscriber1 = "subscriber1";
    CompletableFuture<Controller.CreateReaderGroupResponse> createStatus =
            streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createStatus.join().getStatus());
    // Build up a retention set that will eventually hold 5 stream cuts.
    // s0: 10: seg0/1, seg1/5 ==> time retained if truncated at s0 = 10 <== min
    // s1: 20: seg0/1, seg1/6 ==> time retained if truncated at s1 = 0
    time.set(10L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1,
            new StreamCutRecord(time.get(), 5L, ImmutableMap.of(0L, 1L, 1L, 5L)), null, executor).join();
    time.set(20L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1,
            new StreamCutRecord(time.get(), 6L, ImmutableMap.of(0L, 1L, 1L, 6L)), null, executor).join();
    // subscriber streamcut: 0/0, 1/10
    final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name,
            consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 0L, 1L, 10L), 0L).join();
    // Overlap with min and no clear max: no truncation should happen.
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
    VersionedMetadata<StreamTruncationRecord> truncationRecord =
            streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertFalse(truncationRecord.getObject().isUpdating());
    // Retained times as of time = 50 (see the arithmetic sketch after this method):
    // s0: 10: seg0/1, seg1/5  ==> time retained if truncated at s0 = 40 <== max
    // s1: 20: seg0/1, seg1/6  ==> time retained if truncated at s1 = 30
    // s2: 30: seg0/10, seg1/7 ==> time retained if truncated at s2 = 20
    // s3: 40: seg0/10, seg1/8 ==> time retained if truncated at s3 = 10 <== min
    // s4: 50: seg0/10, seg1/10
    time.set(30L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1,
            new StreamCutRecord(time.get(), 17L, ImmutableMap.of(0L, 10L, 1L, 7L)), null, executor).join();
    time.set(40L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1,
            new StreamCutRecord(time.get(), 18L, ImmutableMap.of(0L, 10L, 1L, 8L)), null, executor).join();
    time.set(50L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1,
            new StreamCutRecord(time.get(), 20L, ImmutableMap.of(0L, 10L, 1L, 10L)), null, executor).join();
    // Subscriber streamcut (slb): seg0/9, seg1/10 ==> overlaps with the min-bound streamcut,
    // so we should actually truncate at the streamcut before slb.
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name,
            consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 9L, 1L, 10L), 0L).join();
    // This should truncate at s1, the first streamcut before slb.
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
    truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 6L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
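The "time retained" figures in the comments above follow one rule: truncating at a retention-set cut recorded at time t retains now - t milliseconds of data. A sketch of that arithmetic for the final state of this test, assuming (as the argument order and the comments suggest) that RetentionPolicy.byTime(Duration.ofMillis(10), Duration.ofMillis(50)) sets min = 10 and max = 50:

long now = 50L;
long[] recordingTimes = {10L, 20L, 30L, 40L, 50L};  // recording times of s0..s4
for (long t : recordingTimes) {
    // s3 (retains 10) sits exactly at the min bound; s0 (retains 40) is the
    // largest candidate that still respects the max bound of 50.
    System.out.println("truncating at cut recorded at " + t + " retains " + (now - t) + " ms");
}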
Use of io.pravega.client.stream.impl.StreamCutImpl in project pravega by pravega.
From class StreamMetadataTasksTest, the method consumptionBasedRetentionSizeLimitTest:
@Test(timeout = 30000)
public void consumptionBasedRetentionSizeLimitTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.bySizeBytes(2L, 10L);
    String stream1 = "consumptionSize";
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    final Segment seg0 = new Segment(SCOPE, stream1, 0L);
    final Segment seg1 = new Segment(SCOPE, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1),
            new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1),
            new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
    ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder()
            .automaticCheckpointIntervalMillis(30000L)
            .groupRefreshTimeMillis(20000L)
            .maxOutstandingCheckpointRequest(2)
            .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
            .startingStreamCuts(startSC)
            .endingStreamCuts(endSC)
            .build();
    consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks)
            .createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    // region case 1: basic retention
    String subscriber1 = "subscriber1";
    CompletableFuture<Controller.CreateReaderGroupResponse> createStatus =
            streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createStatus.join().getStatus());
    String subscriber2 = "subscriber2";
    createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber2, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createStatus.join().getStatus());
    final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name,
            consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 2L, 1L, 1L), 0L).join();
    final String subscriber2Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber2);
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name,
            consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 1L, 1L, 2L), 0L).join();
    Map<Long, Long> map1 = new HashMap<>();
    map1.put(0L, 2L);
    map1.put(1L, 2L);
    long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(1L, size, ImmutableMap.copyOf(map1))))
            .when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // Call retention and verify that the retention policy applies.
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 1L, null, "").join();
    // The retention set now has one stream cut: 0/2, 1/2. The subscriber lowerbound is
    // 0/1, 1/1, so truncation should happen at the lowerbound (the lower-bound
    // combination is sketched after this method).
    VersionedMetadata<StreamTruncationRecord> truncationRecord =
            streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 1L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
    // endregion
    // region case 2: min policy check
    // Add a new streamcut at the same position, 0/2, 1/2.
    map1.put(0L, 2L);
    map1.put(1L, 2L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(20L, size, ImmutableMap.copyOf(map1))))
            .when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // Update both subscribers to make sure they have read till the latest position;
    // the min limit is set to 2 bytes.
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name,
            consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 2L, 1L, 2L), 0L).join();
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name,
            consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 2L, 1L, 2L), 0L).join();
    // No new truncation should happen; verify that the truncation record has not changed.
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 20L, null, "").join();
    // The retention set now has two stream cuts: 0/2, 1/2 ... 0/2, 1/2.
    // The subscriber lowerbound is 0/2, 1/2, which does not meet the min bound criterion,
    // and no max satisfies the limit, so no truncation should happen.
    truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 1L);
    assertFalse(truncationRecord.getObject().isUpdating());
    // endregion
    // region case 3: min criterion not met on the lower bound; truncate at min.
    map1.put(0L, 10L);
    map1.put(1L, 10L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(30L, size, ImmutableMap.copyOf(map1))))
            .when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // Update both subscribers to make sure they have read till the latest position - 1;
    // the min limit is set to 2 bytes.
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name,
            consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 10L, 1L, 9L), 0L).join();
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name,
            consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 10L, 1L, 9L), 0L).join();
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 30L, null, "").join();
    // The retention set now has three stream cuts: 0/2, 1/2 ... 0/2, 1/2 ... 0/10, 1/10.
    // The subscriber lowerbound is 0/10, 1/9, which does not meet the min bound criterion,
    // but a min-bound streamcut is available, so truncation should happen at 0/2, 1/2.
    truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 2L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 2L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
    // endregion
    // region case 4: lowerbound behind max
    // Move the stream further ahead so that the max truncation limit is crossed but the
    // lowerbound stays behind max.
    map1.put(0L, 20L);
    map1.put(1L, 20L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(40L, size, ImmutableMap.copyOf(map1))))
            .when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 40L, null, "").join();
    // The retention set now has four stream cuts: 0/2, 1/2 ... 0/2, 1/2 ... 0/10, 1/10 ... 0/20, 1/20.
    // The subscriber lowerbound is 0/10, 1/9: it meets the min bound criterion but goes beyond
    // the max criterion. No streamcut can be chosen from the retention set without breaking
    // either the min or the max criterion. In this case 0/10, 1/10 is chosen as the bound,
    // compared with the subscriber lowerbound, and whichever purges more data wins;
    // so truncation should happen at 0/10, 1/10.
    truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 10L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 10L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
    // endregion
    // region case 5: lowerbound is beyond max, but no clear max streamcut is available in the retention set
    map1.put(0L, 30L);
    map1.put(1L, 30L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(50L, size, ImmutableMap.copyOf(map1))))
            .when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // Update both subscribers' positions.
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name,
            consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 21L, 1L, 19L), 0L).join();
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name,
            consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 21L, 1L, 19L), 0L).join();
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 50L, null, "").join();
    // The retention set now has five stream cuts: 0/2, 1/2 ... 0/2, 1/2 ... 0/10, 1/10 ... 0/20, 1/20 ... 0/30, 1/30.
    // The subscriber lowerbound is 0/21, 1/19: it meets the min bound criterion and is also
    // greater than the max bound, but a max-bound streamcut cannot be chosen from the
    // retention set, same as the previous case. This time both the min and max bounds are
    // 0/20, 1/20. Truncation should happen at the lowerbound, since the data retained is
    // identical for the lowerbound and for the streamcut from the retention set.
    truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 21L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 19L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
    // endregion
}
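Every case above compares the retention-set candidates against the "subscriber lowerbound", the per-segment minimum over all subscribers' published stream cuts. For the simple case where all cuts reference the same segments, a self-contained sketch of that combination (a hypothetical helper, not the Controller's actual lower-bound computation, which must also reconcile segments across epochs):

Map<Long, Long> sub1 = ImmutableMap.of(0L, 2L, 1L, 1L);  // subscriber1's cut from case 1
Map<Long, Long> sub2 = ImmutableMap.of(0L, 1L, 1L, 2L);  // subscriber2's cut from case 1
Map<Long, Long> lowerBound = new HashMap<>(sub1);
// Take the minimum offset per segment across subscribers.
sub2.forEach((segment, offset) -> lowerBound.merge(segment, offset, Math::min));
// lowerBound is now {0=1, 1=1}, matching the truncation asserted in case 1.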
Use of io.pravega.client.stream.impl.StreamCutImpl in project pravega by pravega.
From class StreamMetadataTasksTest, the method consumptionBasedRetentionWithScale:
@Test(timeout = 30000)
public void consumptionBasedRetentionWithScale() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(3);
    final RetentionPolicy retentionPolicy = RetentionPolicy.bySizeBytes(0L, 1000L);
    String stream1 = "consumptionSize";
    StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    configuration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.startUpdateConfiguration(SCOPE, stream1, configuration, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord =
            streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join();
    streamStorePartialMock.completeUpdateConfiguration(SCOPE, stream1, configRecord, null, executor).join();
    final Segment seg0 = new Segment(SCOPE, stream1, 0L);
    final Segment seg1 = new Segment(SCOPE, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1),
            new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1),
            new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
    ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder()
            .automaticCheckpointIntervalMillis(30000L)
            .groupRefreshTimeMillis(20000L)
            .maxOutstandingCheckpointRequest(2)
            .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
            .startingStreamCuts(startSC)
            .endingStreamCuts(endSC)
            .build();
    consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks)
            .createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    String subscriber1 = "subscriber1";
    CompletableFuture<Controller.CreateReaderGroupResponse> createStatus =
            streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse1 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse1.getStatus());
    assertEquals(0L, createResponse1.getConfig().getGeneration());
    assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse1.getConfig().getReaderGroupId()));
    String subscriber2 = "subscriber2";
    createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber2, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse2 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse2.getStatus());
    assertEquals(0L, createResponse2.getConfig().getGeneration());
    assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse2.getConfig().getReaderGroupId()));
    final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
    final String subscriber2Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber2);
    // Example (segment lineage across epochs: s0 splits into s3 and s4, then s4/s1/s2
    // merge into s5, then s3/s5 merge into s6):
    //     | s0 | s3 |    |
    //     |    | s4 |    | s6
    //     | s1 |    | s5 |
    //     | s2 |    |    |
    // valid stream cuts: { s0/off, s5/-1 }, { s0/off, s2/off, s5/-1 }
    // lower bound = { s0/off, s2/off, s5/-1 }
    // valid stream cuts: { s0/off, s5/-1 }, { s0/off, s2/off, s5/-1 }, { s0/off, s1/off, s2/off }
    // lower bound = { s0/off, s1/off, s2/off }
    // Post-scale segment ids (the encoding is sketched after this method):
    long three = NameUtils.computeSegmentId(3, 1);
    long four = NameUtils.computeSegmentId(4, 1);
    long five = NameUtils.computeSegmentId(5, 2);
    long six = NameUtils.computeSegmentId(6, 3);
    // Scale 1: segment 0 splits into segments 3 and 4.
    scale(SCOPE, stream1, ImmutableMap.of(0L, 1L),
            Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0 / 6), new AbstractMap.SimpleEntry<>(1.0 / 6, 1.0 / 3)));
    // Scale 2: segments 4, 1, and 2 merge into segment 5.
    scale(SCOPE, stream1, ImmutableMap.of(1L, 1L, 2L, 2L, four, 1L),
            Lists.newArrayList(new AbstractMap.SimpleEntry<>(1.0 / 6, 1.0)));
    // Scale 3: segments 3 and 5 merge into segment 6.
    scale(SCOPE, stream1, ImmutableMap.of(three, 1L, five, 2L),
            Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)));
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(),
            createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, five, -1L), 0L).join();
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(),
            createResponse2.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, 2L, 1L, five, -1L), 0L).join();
    Map<Long, Long> map1 = new HashMap<>();
    map1.put(six, 2L);
    long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(1L, size, ImmutableMap.copyOf(map1))))
            .when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // Call retention and verify that the retention policy applies.
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 1L, null, "").join();
    // The retention set now has one stream cut, 6/2. The subscriber lowerbound is
    // 0/1, 2/1, 5/-1, so truncation should happen at the lowerbound.
    VersionedMetadata<StreamTruncationRecord> truncationRecord =
            streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(2L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(five).longValue(), -1L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
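The post-scale segments above are addressed through NameUtils.computeSegmentId(segmentNumber, epoch), which packs the creation epoch and the segment number into one long; that is why s3 through s6 appear as computed longs rather than small literals. A sketch of the encoding, assuming the epoch occupies the high 32 bits and the segment number the low 32 bits:

static long computeSegmentIdSketch(int segmentNumber, int epoch) {
    // Assumed layout: epoch in the high 32 bits, segment number in the low 32 bits.
    return ((long) epoch << 32) | (segmentNumber & 0xFFFFFFFFL);
}
// e.g. computeSegmentIdSketch(3, 1) == (1L << 32) + 3, the value bound to 'three' above.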