Use of io.pravega.controller.store.stream.records.StreamCutRecord in project pravega by pravega.
The class StreamMetadataTasksTest, method consumptionBasedRetentionTimeLimitTest.
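Before the examples, a minimal sketch of constructing the record (the constructor and getter shapes are taken from the usages below; the surrounding class is illustrative only):

import com.google.common.collect.ImmutableMap;
import io.pravega.controller.store.stream.records.StreamCutRecord;

public class StreamCutRecordExample {
    public static void main(String[] args) {
        // A StreamCutRecord couples a recording time and the stream size in bytes up to
        // the cut with an immutable segment-id -> offset map.
        StreamCutRecord record = new StreamCutRecord(1L, 20L, ImmutableMap.of(0L, 10L, 1L, 10L));
        System.out.println(record.getRecordingTime());
    }
}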
@Test(timeout = 30000)
public void consumptionBasedRetentionTimeLimitTest() throws Exception {
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final RetentionPolicy retentionPolicy = RetentionPolicy.byTime(Duration.ofMillis(1L), Duration.ofMillis(10L));
String stream1 = "consumptionTime";
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
streamMetadataTasks.setRequestEventWriter(requestEventWriter);
streamMetadataTasks.setRetentionFrequencyMillis(1L);
AtomicLong time = new AtomicLong(0L);
streamMetadataTasks.setRetentionClock(time::get);
// region case 1: basic retention
final Segment seg0 = new Segment(SCOPE, stream1, 0L);
final Segment seg1 = new Segment(SCOPE, stream1, 1L);
ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
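// Note: reader groups whose retention type is not NONE (here AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
// are registered as subscribers of the stream, and the stream cuts they publish feed the
// subscriber lowerbound that consumption-based retention truncates against.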
doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
String subscriber1 = "subscriber1";
CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
assertTrue(Futures.await(processEvent(requestEventWriter)));
Controller.CreateReaderGroupResponse createResponse1 = createStatus.join();
assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse1.getStatus());
assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse1.getConfig().getReaderGroupId()));
assertEquals(0L, createResponse1.getConfig().getGeneration());
String subscriber2 = "subscriber2";
createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber2, consumpRGConfig, System.currentTimeMillis(), 0L);
assertTrue(Futures.await(processEvent(requestEventWriter)));
Controller.CreateReaderGroupResponse createResponse2 = createStatus.join();
assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse2.getStatus());
assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse2.getConfig().getReaderGroupId()));
assertEquals(0L, createResponse2.getConfig().getGeneration());
final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
final String subscriber2Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber2);
streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 2L, 1L, 1L), 0L).join();
streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(), createResponse2.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, 1L, 2L), 0L).join();
Map<Long, Long> map1 = new HashMap<>();
map1.put(0L, 2L);
map1.put(1L, 2L);
long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
doReturn(CompletableFuture.completedFuture(new StreamCutRecord(time.get(), size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
// call retention and verify that retention policy applies
streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
// now retention set has one stream cut 0/2, 1/2, recording time 1L
// subscriber lowerbound is 0/1, 1/1.. truncation should not happen as this lowerbound is ahead of min retention streamcut.
VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
assertFalse(truncationRecord.getObject().isUpdating());
// endregion
// region case 2: min policy check
// subscriber streamcut > min time streamcut
streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(2L, 4L, ImmutableMap.of(0L, 2L, 1L, 2L)), null, executor).join();
time.set(10L);
streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 20L, ImmutableMap.of(0L, 10L, 1L, 10L)), null, executor).join();
time.set(11L);
streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 20L, ImmutableMap.of(0L, 10L, 1L, 10L)), null, executor).join();
// retentionset: 0L: 0L/2L, 1L/2L... 2L: 0L/2L, 1L/2L... 10L: 0/10, 1/10....11L: 0/10, 1/10.
// update both readers to 0/3, 1/3.
streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 3L, 1L, 3L), 0L).join();
streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(), createResponse2.getConfig().getGeneration(), ImmutableMap.of(0L, 3L, 1L, 3L), 0L).join();
// new truncation should happen at subscriber lowerbound.
streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 3L);
assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 3L);
assertTrue(truncationRecord.getObject().isUpdating());
streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
// endregion
// region case 3: min criteria not met on lower bound. truncate at max.
time.set(20L);
streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 22L, ImmutableMap.of(0L, 11L, 1L, 11L)), null, executor).join();
// update both readers to make sure they have read till the latest position - 1. we have set the min limit to 2.
streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 11L, 1L, 11L), 0L).join();
streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(), createResponse2.getConfig().getGeneration(), ImmutableMap.of(0L, 11L, 1L, 11L), 0L).join();
streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
// retentionset: 0L: 0L/2L, 1L/2L... 2L: 0L/2L, 1L/2L... 10L: 0/10, 1/10....11L: 0/10, 1/10... 20L: 0/11, 1/11
// subscriber lowerbound is 0/11, 1/11
truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
// truncate at limit min
assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 10L);
assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 10L);
assertTrue(truncationRecord.getObject().isUpdating());
streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
// endregion
// region case 4: lowerbound behind max
streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(30L, 40L, ImmutableMap.of(0L, 20L, 1L, 20L)), null, executor).join();
time.set(40L);
streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 42L, ImmutableMap.of(0L, 21L, 1L, 21L)), null, executor).join();
// update both readers to make sure they have read till the latest position - 1. we have set the min limit to 2.
streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 11L, 1L, 11L), 0L).join();
streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 11L, 1L, 11L), 0L).join();
streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
// now retention set has five stream cuts 1: 0/2, 1/2...10: 0/10, 1/10... 20: 0/11, 1/11.. 30: 0/20, 1/20.. 40L: 0/21, 1/21
// subscriber lowerbound is 0/11, 1/11 ..
// maxbound = 30. truncate at max
// maxlimit = 30
// lowerbound is behind maxbound. we will truncate at max
truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 20L);
assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 20L);
assertTrue(truncationRecord.getObject().isUpdating());
streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
// endregion
// region case 5: lowerbound overlaps with maxbound
streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(50L, 43L, ImmutableMap.of(0L, 21L, 1L, 22L)), null, executor).join();
time.set(59L);
streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 60L, ImmutableMap.of(0L, 30L, 1L, 30L)), null, executor).join();
time.set(60L);
streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 60L, ImmutableMap.of(0L, 30L, 1L, 30L)), null, executor).join();
// update both readers to make sure they have read till the latest position - 1. we have set the min limit to 2.
streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 22L, 1L, 21L), 0L).join();
streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 22L, 1L, 21L), 0L).join();
streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
// now retention set has eight stream cuts 1: 0/2, 1/2...10: 0/10, 1/10... 20: 0/11, 1/11.. 30: 0/20, 1/20.. 40L: 0/21, 1/21
// 50: 0/21, 1/22 ... 59: 0/30, 1/30.. 60: 0/30, 1/30
// subscriber lowerbound is 0/22, 1/21
// max: 50, limit: 50
// this overlaps with max. so truncate at max (50: 0/21, 1/22)
truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 21L);
assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 22L);
assertTrue(truncationRecord.getObject().isUpdating());
streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
// endregion
}
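The five cases above all reduce to one decision against the subscriber lowerbound and the min/max time bounds. The following self-contained sketch is illustrative only, not the Controller's implementation; Cut, newestAtOrBefore and decide are hypothetical names, and the retention set is assumed ordered oldest to newest:

import java.util.List;
import java.util.Map;

public class ConsumptionRetentionSketch {

    static final class Cut {
        final long recordingTime;
        final Map<Long, Long> streamCut;

        Cut(long recordingTime, Map<Long, Long> streamCut) {
            this.recordingTime = recordingTime;
            this.streamCut = streamCut;
        }
    }

    // X is strictly ahead of Y when no offset in X is behind Y and at least one is ahead.
    static boolean strictlyAheadOf(Map<Long, Long> x, Map<Long, Long> y) {
        boolean oneAhead = false;
        for (Map.Entry<Long, Long> e : x.entrySet()) {
            long other = y.getOrDefault(e.getKey(), Long.MAX_VALUE);
            if (e.getValue() < other) {
                return false;
            }
            oneAhead |= e.getValue() > other;
        }
        return oneAhead;
    }

    // Newest cut recorded at or before the cutoff time.
    static Cut newestAtOrBefore(List<Cut> retentionSet, long cutoff) {
        Cut result = null;
        for (Cut c : retentionSet) {
            if (c.recordingTime <= cutoff) {
                result = c;
            }
        }
        return result;
    }

    // Returns the stream cut to truncate at, or null when nothing may be truncated.
    static Map<Long, Long> decide(List<Cut> retentionSet, Map<Long, Long> lowerBound,
                                  long now, long minMillis, long maxMillis) {
        Cut min = newestAtOrBefore(retentionSet, now - minMillis);
        Cut max = newestAtOrBefore(retentionSet, now - maxMillis);
        if (min == null) {
            return null;                                   // case 1: nothing old enough yet
        }
        if (strictlyAheadOf(lowerBound, min.streamCut)) {
            return min.streamCut;                          // case 3: respect the min limit
        }
        if (max != null && !strictlyAheadOf(lowerBound, max.streamCut)) {
            return max.streamCut;                          // cases 4 and 5: lagging or overlapping
        }
        return lowerBound;                                 // case 2: truncate at the lowerbound
    }
}

Applied to case 5, lowerbound 0/22, 1/21 is neither strictly ahead of nor behind the max bound 0/21, 1/22, so the sketch, like the test, truncates at the max bound.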
Use of io.pravega.controller.store.stream.records.StreamCutRecord in project pravega by pravega.
The class StreamMetadataStoreTest, method streamCutReferenceRecordBeforeTest.
@Test(timeout = 30000)
public void streamCutReferenceRecordBeforeTest() throws Exception {
final String scope = "ScopeRetain2";
final String stream = "StreamRetain";
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.TIME).retentionParam(Duration.ofDays(2).toMillis()).build();
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
long start = System.currentTimeMillis();
store.createScope(scope, null, executor).get();
store.createStream(scope, stream, configuration, start, null, executor).get();
store.setState(scope, stream, State.ACTIVE, null, executor).get();
Map<Long, Long> map1 = new HashMap<>();
map1.put(0L, 1L);
map1.put(1L, 1L);
long recordingTime = 1;
StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime, Long.MIN_VALUE, ImmutableMap.copyOf(map1));
store.addStreamCutToRetentionSet(scope, stream, streamCut1, null, executor).get();
Map<Long, Long> map2 = new HashMap<>();
map2.put(0L, 10L);
map2.put(1L, 10L);
StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime + 10, Long.MIN_VALUE, ImmutableMap.copyOf(map2));
store.addStreamCutToRetentionSet(scope, stream, streamCut2, null, executor).get();
Map<Long, Long> map3 = new HashMap<>();
map3.put(0L, 20L);
map3.put(1L, 20L);
StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime + 20, Long.MIN_VALUE, ImmutableMap.copyOf(map3));
store.addStreamCutToRetentionSet(scope, stream, streamCut3, null, executor).get();
Map<Long, Long> streamCut = new HashMap<>();
RetentionSet retentionSet = store.getRetentionSet(scope, stream, null, executor).join();
// 0/0, 1/1 ..there should be nothing before it
streamCut.put(0L, 0L);
streamCut.put(1L, 1L);
StreamCutReferenceRecord beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertNull(beforeRef);
// 0/1, 1/1 .. sc1
streamCut.put(0L, 1L);
streamCut.put(1L, 1L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());
// 0/5, 1/5 .. sc1
streamCut.put(0L, 5L);
streamCut.put(1L, 5L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());
// 0/0, 1/5 .. nothing
streamCut.put(0L, 0L);
streamCut.put(1L, 5L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertNull(beforeRef);
// 0/10, 1/10 ... sc2
streamCut.put(0L, 10L);
streamCut.put(1L, 10L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut2.getRecordingTime());
// 0/9, 1/15 ... sc1
streamCut.put(0L, 9L);
streamCut.put(1L, 15L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());
// 0/19, 1/20 ... sc2
streamCut.put(0L, 19L);
streamCut.put(1L, 20L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut2.getRecordingTime());
// 0/20, 1/20 ... sc3
streamCut.put(0L, 20L);
streamCut.put(1L, 20L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut3.getRecordingTime());
// 0/21, 1/21 ... sc3
streamCut.put(0L, 21L);
streamCut.put(1L, 21L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut3.getRecordingTime());
// now add another entry so that we have an even number of records, and repeat the test.
// Here we still use map3 but with a later recording time: the lookup should always pick the latest
// record when subsequent StreamCutRecords have identical stream cuts.
StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime + 30, Long.MIN_VALUE, ImmutableMap.copyOf(map3));
store.addStreamCutToRetentionSet(scope, stream, streamCut4, null, executor).get();
retentionSet = store.getRetentionSet(scope, stream, null, executor).join();
// 0/0, 1/1 ..there should be nothing before it
streamCut.put(0L, 0L);
streamCut.put(1L, 1L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertNull(beforeRef);
// 0/1, 1/1 .. 0/1, 1/1
streamCut.put(0L, 1L);
streamCut.put(1L, 1L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());
// 0/5, 1/5 .. 0/1, 1/1
streamCut.put(0L, 5L);
streamCut.put(1L, 5L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());
// 0/0, 1/5 .. nothing
streamCut.put(0L, 0L);
streamCut.put(1L, 5L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertNull(beforeRef);
// 0/10, 1/10 ... 0/10, 1/10
streamCut.put(0L, 10L);
streamCut.put(1L, 10L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut2.getRecordingTime());
// 0/9, 1/15 ... 0/1, 1/1
streamCut.put(0L, 9L);
streamCut.put(1L, 15L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());
// 0/19, 1/20 ... 0/10, 1/10
streamCut.put(0L, 19L);
streamCut.put(1L, 20L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut2.getRecordingTime());
// 0/20, 1/20 ... 0/20, 1/20
streamCut.put(0L, 20L);
streamCut.put(1L, 20L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut4.getRecordingTime());
// 0/21, 1/21 ... 0/20, 1/20
streamCut.put(0L, 21L);
streamCut.put(1L, 21L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut4.getRecordingTime());
// 0/30, 1/30 ... 0/20, 1/20
streamCut.put(0L, 30L);
streamCut.put(1L, 30L);
beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
assertEquals(beforeRef.getRecordingTime(), streamCut4.getRecordingTime());
}
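Every lookup above follows one rule, sketched below under stated assumptions: RefRecord is a hypothetical stand-in for StreamCutReferenceRecord, and the retention set is assumed ordered oldest to newest. A record qualifies when its cut is entirely at or before the queried cut, and the newest qualifying record wins, which is why streamCut4 is returned once it duplicates streamCut3's cut:

import java.util.List;
import java.util.Map;

public class ReferenceBeforeSketch {

    static final class RefRecord {
        final long recordingTime;
        final Map<Long, Long> cut;

        RefRecord(long recordingTime, Map<Long, Long> cut) {
            this.recordingTime = recordingTime;
            this.cut = cut;
        }
    }

    static RefRecord findBefore(List<RefRecord> retentionSet, Map<Long, Long> query) {
        RefRecord result = null;
        for (RefRecord r : retentionSet) {                 // oldest to newest
            boolean atOrBefore = true;
            for (Map.Entry<Long, Long> e : r.cut.entrySet()) {
                if (e.getValue() > query.getOrDefault(e.getKey(), -1L)) {
                    atOrBefore = false;                    // this segment is past the query
                    break;
                }
            }
            if (atOrBefore) {
                result = r;                                // a later record with an identical cut
            }                                              // wins, as streamCut4 verifies
        }
        return result;
    }
}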
Use of io.pravega.controller.store.stream.records.StreamCutRecord in project pravega by pravega.
The class StreamMetadataStoreTest, method sizeTest.
@Test(timeout = 30000)
public void sizeTest() throws Exception {
final String scope = "ScopeSize";
final String stream = "StreamSize";
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.SIZE).retentionParam(100L).build();
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
long start = System.currentTimeMillis();
store.createScope(scope, null, executor).get();
store.createStream(scope, stream, configuration, start, null, executor).get();
store.setState(scope, stream, State.ACTIVE, null, executor).get();
bucketStore.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope, stream, executor).get();
Set<String> streams = bucketStore.getStreamsForBucket(BucketStore.ServiceType.RetentionService, 0, executor).get();
assertTrue(streams.contains(String.format("%s/%s", scope, stream)));
// region Size Computation on stream cuts on epoch 0
Map<Long, Long> map1 = new HashMap<>();
map1.put(0L, 10L);
map1.put(1L, 10L);
Long size = store.getSizeTillStreamCut(scope, stream, map1, Optional.empty(), null, executor).join();
assertEquals(20L, (long) size);
long recordingTime = System.currentTimeMillis();
StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime, size, ImmutableMap.copyOf(map1));
store.addStreamCutToRetentionSet(scope, stream, streamCut1, null, executor).get();
Map<Long, Long> map2 = new HashMap<>();
map2.put(0L, 20L);
map2.put(1L, 20L);
size = store.getSizeTillStreamCut(scope, stream, map2, Optional.empty(), null, executor).join();
assertEquals(40L, (long) size);
StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime + 10, size, ImmutableMap.copyOf(map2));
store.addStreamCutToRetentionSet(scope, stream, streamCut2, null, executor).get();
Map<Long, Long> map3 = new HashMap<>();
map3.put(0L, 30L);
map3.put(1L, 30L);
size = store.getSizeTillStreamCut(scope, stream, map3, Optional.empty(), null, executor).join();
assertEquals(60L, (long) size);
StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime + 20, 60L, ImmutableMap.copyOf(map3));
store.addStreamCutToRetentionSet(scope, stream, streamCut3, null, executor).get();
// endregion
// region Size Computation on multiple epochs
long scaleTs = System.currentTimeMillis();
SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.0, 0.5);
SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.5, 1.0);
List<Long> scale1SealedSegments = Lists.newArrayList(0L, 1L);
VersionedMetadata<EpochTransitionRecord> versioned = store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment2, segment3), scaleTs, null, null, executor).join();
VersionedMetadata<State> state = store.getVersionedState(scope, stream, null, executor).get();
state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).get();
store.startScale(scope, stream, false, versioned, state, null, executor).join();
store.scaleCreateNewEpochs(scope, stream, versioned, null, executor).join();
store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 40L)), versioned, null, executor).join();
store.completeScale(scope, stream, versioned, null, executor).join();
store.setState(scope, stream, State.ACTIVE, null, executor).get();
// complex stream cut - across two epochs
Map<Long, Long> map4 = new HashMap<>();
map4.put(0L, 40L);
map4.put(computeSegmentId(3, 1), 10L);
size = store.getSizeTillStreamCut(scope, stream, map4, Optional.empty(), null, executor).join();
assertEquals(Long.valueOf(90L), size);
StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime + 30, size, ImmutableMap.copyOf(map4));
store.addStreamCutToRetentionSet(scope, stream, streamCut4, null, executor).get();
// simple stream cut on epoch 2
Map<Long, Long> map5 = new HashMap<>();
map5.put(computeSegmentId(2, 1), 10L);
map5.put(computeSegmentId(3, 1), 10L);
size = store.getSizeTillStreamCut(scope, stream, map5, Optional.empty(), null, executor).join();
assertEquals(100L, (long) size);
StreamCutRecord streamCut5 = new StreamCutRecord(recordingTime + 30, size, ImmutableMap.copyOf(map5));
store.addStreamCutToRetentionSet(scope, stream, streamCut5, null, executor).get();
// endregion
}
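The sizes asserted above can be reproduced with a small sketch. Which sealed predecessor segments lie entirely behind a cut is resolved by the store from the epoch history; here it is simply passed in, and all names are hypothetical:

import com.google.common.collect.ImmutableMap;
import java.util.Map;

public class SizeSketch {

    // Size till a stream cut: the cut's own per-segment offsets plus the full sealed
    // sizes of every predecessor segment lying entirely behind the cut.
    static long sizeTillStreamCut(Map<Long, Long> cut, Map<Long, Long> sealedPredecessorSizes) {
        long size = 0;
        for (long offset : cut.values()) {
            size += offset;
        }
        for (long sealed : sealedPredecessorSizes.values()) {
            size += sealed;
        }
        return size;
    }

    public static void main(String[] args) {
        // Mirrors map4 above (segment ids abbreviated): segment 0 at offset 40 and the
        // epoch-1 segment 3 at offset 10, with segment 1 sealed at size 40 behind the cut.
        System.out.println(sizeTillStreamCut(ImmutableMap.of(0L, 40L, 3L, 10L), ImmutableMap.of(1L, 40L))); // 90
    }
}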
Use of io.pravega.controller.store.stream.records.StreamCutRecord in project pravega by pravega.
The class StreamMetadataTasksTest, method consumptionBasedRetentionWithNoSubscriber.
@Test(timeout = 30000)
public void consumptionBasedRetentionWithNoSubscriber() throws Exception {
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final RetentionPolicy retentionPolicy = RetentionPolicy.byTime(Duration.ofMillis(0L), Duration.ofMillis(Long.MAX_VALUE));
String stream1 = "consumptionSize4";
StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
configuration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).retentionPolicy(retentionPolicy).build();
streamStorePartialMock.startUpdateConfiguration(SCOPE, stream1, configuration, null, executor).join();
VersionedMetadata<StreamConfigurationRecord> configRecord = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join();
streamStorePartialMock.completeUpdateConfiguration(SCOPE, stream1, configRecord, null, executor).join();
// example::
// | s0 | s2 | s7 |
// | | |
// | | |
// | | | s4 | s6 | s8 | s10
// | s1 | s3 | s5 | | s9 |
// valid stream cuts: { s0/off, s9/off, s2/-1, s8/-1}, { s1/off, s2/-1 }
// lower bound = { s0/off, s1/off }
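// Sketch of the lowerbound rule the diagram illustrates: take, per key-space range, the
// earliest position across all subscriber stream cuts; an offset of -1 marks a segment the
// subscriber has not started, so the bound falls back to the offsets of its predecessor
// segments. (Informal description; the store computes this.)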
long two = NameUtils.computeSegmentId(2, 1);
long three = NameUtils.computeSegmentId(3, 1);
long four = NameUtils.computeSegmentId(4, 2);
long five = NameUtils.computeSegmentId(5, 2);
long six = NameUtils.computeSegmentId(6, 3);
long seven = NameUtils.computeSegmentId(7, 4);
long eight = NameUtils.computeSegmentId(8, 4);
long nine = NameUtils.computeSegmentId(9, 4);
long ten = NameUtils.computeSegmentId(10, 5);
// 0, 1 -> 2, 3 with different split
scale(SCOPE, stream1, ImmutableMap.of(0L, 1L, 1L, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
// s3 -> 4, 5
scale(SCOPE, stream1, ImmutableMap.of(three, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 0.8), new AbstractMap.SimpleEntry<>(0.8, 1.0)));
// 4,5 -> 6
scale(SCOPE, stream1, ImmutableMap.of(four, 1L, five, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 1.0)));
// 2, 6 -> 7, 8, 9
scale(SCOPE, stream1, ImmutableMap.of(two, 1L, six, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.3), new AbstractMap.SimpleEntry<>(0.3, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
// 7, 8, 9 -> 10
scale(SCOPE, stream1, ImmutableMap.of(seven, 1L, eight, 1L, nine, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)));
assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
streamMetadataTasks.setRequestEventWriter(requestEventWriter);
streamMetadataTasks.setRetentionFrequencyMillis(1L);
Map<Long, Long> map1 = new HashMap<>();
map1.put(ten, 2L);
long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
doReturn(CompletableFuture.completedFuture(new StreamCutRecord(1L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
// call retention and verify that retention policy applies
streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 1L, null, "").join();
// now retention set has one stream cut 10/2
// with no subscribers there is no subscriber lowerbound, so truncation should happen at 10/2
VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
assertEquals(truncationRecord.getObject().getStreamCut().get(ten).longValue(), 2L);
assertTrue(truncationRecord.getObject().isUpdating());
streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
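The segment ids above come from NameUtils.computeSegmentId(segmentNumber, epoch). A sketch of the packing, inferred from the observed usage (see NameUtils for the authoritative definition):

public class SegmentIdSketch {

    // Assumed layout: creation epoch in the high 32 bits, segment number in the low 32 bits.
    static long computeSegmentId(int segmentNumber, int epoch) {
        return ((long) epoch << 32) + segmentNumber;
    }

    public static void main(String[] args) {
        System.out.println(computeSegmentId(2, 1));  // "two" above: 4294967298
        System.out.println(computeSegmentId(10, 5)); // "ten" above: 21474836490
    }
}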
Use of io.pravega.controller.store.stream.records.StreamCutRecord in project pravega by pravega.
The class StreamMetadataTasksTest, method sizeBasedRetentionStreamTest.
@Test(timeout = 30000)
public void sizeBasedRetentionStreamTest() throws Exception {
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.SIZE).retentionParam(100L).build();
String streamName = "test";
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
streamStorePartialMock.createStream(SCOPE, streamName, configuration, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, streamName, State.ACTIVE, null, executor).get();
assertNotEquals(0, consumer.getCurrentSegments(SCOPE, streamName, 0L).get().size());
WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
streamMetadataTasks.setRequestEventWriter(requestEventWriter);
// region size based retention on stream cuts on epoch 0
// region no previous streamcut
// first retention iteration
// streamcut1: 19 bytes(0/9,1/10)
long recordingTime1 = System.currentTimeMillis();
Map<Long, Long> map1 = new HashMap<>();
map1.put(0L, 9L);
map1.put(1L, 10L);
long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map1, Optional.empty(), null, executor).join();
assertEquals(size, 19);
StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime1, size, ImmutableMap.copyOf(map1));
doReturn(CompletableFuture.completedFuture(streamCut1)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime1, null, "").get();
// verify that one streamCut is generated and added.
List<StreamCutRecord> list = streamStorePartialMock.getRetentionSet(SCOPE, streamName, null, executor).thenCompose(retentionSet -> {
return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
}).join();
assertTrue(list.contains(streamCut1));
// endregion
// region stream cut exists but latest - previous < retention.size
// second retention iteration
// streamcut2: 100 bytes(0/50, 1/50)
Map<Long, Long> map2 = new HashMap<>();
map2.put(0L, 50L);
map2.put(1L, 50L);
long recordingTime2 = recordingTime1 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map2, Optional.empty(), null, executor).join();
assertEquals(size, 100L);
StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime2, size, ImmutableMap.copyOf(map2));
doReturn(CompletableFuture.completedFuture(streamCut2)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), anyString());
streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime2, null, "").get();
list = streamStorePartialMock.getRetentionSet(SCOPE, streamName, null, executor).thenCompose(retentionSet -> {
return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
}).join();
StreamTruncationRecord truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, streamName, null, executor).get().getObject();
// verify that two stream cut is in retention set. streamCut2 is added
// verify that truncation did not happen
assertTrue(list.contains(streamCut1));
assertTrue(list.contains(streamCut2));
assertFalse(truncProp.isUpdating());
// endregion
// region latest - previous > retention.size
// third retention iteration
// streamcut3: 120 bytes(0/60, 1/60)
Map<Long, Long> map3 = new HashMap<>();
map3.put(0L, 60L);
map3.put(1L, 60L);
size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map3, Optional.empty(), null, executor).join();
assertEquals(size, 120L);
long recordingTime3 = recordingTime2 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime3, size, ImmutableMap.copyOf(map3));
doReturn(CompletableFuture.completedFuture(streamCut3)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), anyString());
streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime3, null, "").get();
// verify two stream cuts are in retention set. Cut 2 and 3.
// verify that Truncation has happened.
list = streamStorePartialMock.getRetentionSet(SCOPE, streamName, null, executor).thenCompose(retentionSet -> {
return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
}).join();
truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, streamName, null, executor).get().getObject();
assertFalse(list.contains(streamCut1));
assertTrue(list.contains(streamCut2));
assertTrue(list.contains(streamCut3));
assertTrue(truncProp.isUpdating());
assertTrue(truncProp.getStreamCut().get(0L) == 9L && truncProp.getStreamCut().get(1L) == 10L);
assertTrue(Futures.await(processEvent(requestEventWriter)));
truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, streamName, null, executor).get().getObject();
assertFalse(truncProp.isUpdating());
// endregion
// endregion
// region test retention over multiple epochs
// scale1 --> seal segments 0 and 1 and create 2 and 3. (0/70, 1/70)
List<AbstractMap.SimpleEntry<Double, Double>> newRanges = new ArrayList<>();
newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 0.5));
newRanges.add(new AbstractMap.SimpleEntry<>(0.5, 1.0));
Map<Long, Long> sealedSegmentsWithSize = new HashMap<>();
sealedSegmentsWithSize.put(0L, 70L);
sealedSegmentsWithSize.put(1L, 70L);
scale(SCOPE, streamName, sealedSegmentsWithSize, new ArrayList<>(newRanges));
long two = computeSegmentId(2, 1);
long three = computeSegmentId(3, 1);
// region latest streamcut on new epoch but latest (newepoch) - previous (oldepoch) < retention.size
// 4th retention iteration
// streamcut4: (2/29, 3/30)
Map<Long, Long> map4 = new HashMap<>();
map4.put(two, 29L);
map4.put(three, 30L);
size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map4, Optional.empty(), null, executor).join();
assertEquals(size, 199L);
long recordingTime4 = recordingTime3 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime4, size, ImmutableMap.copyOf(map4));
doReturn(CompletableFuture.completedFuture(streamCut4)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), anyString());
streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime4, null, "").get();
list = streamStorePartialMock.getRetentionSet(SCOPE, streamName, null, executor).thenCompose(retentionSet -> {
return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
}).join();
truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, streamName, null, executor).get().getObject();
assertFalse(list.contains(streamCut1));
assertTrue(list.contains(streamCut2));
assertTrue(list.contains(streamCut3));
assertTrue(list.contains(streamCut4));
assertFalse(truncProp.isUpdating());
// endregion
// region latest streamcut on new epoch but latest (newepoch) - previous (oldepoch) > retention.size
// 5th retention iteration
// streamcut5: 221 bytes(2/41, 3/40)
Map<Long, Long> map5 = new HashMap<>();
map5.put(two, 41L);
map5.put(three, 40L);
size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map5, Optional.empty(), null, executor).join();
assertEquals(size, 221L);
long recordingTime5 = recordingTime4 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
StreamCutRecord streamCut5 = new StreamCutRecord(recordingTime5, size, ImmutableMap.copyOf(map5));
doReturn(CompletableFuture.completedFuture(streamCut5)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), anyString());
streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime5, null, "").get();
list = streamStorePartialMock.getRetentionSet(SCOPE, streamName, null, executor).thenCompose(retentionSet -> {
return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
}).join();
truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, streamName, null, executor).get().getObject();
assertFalse(list.contains(streamCut1));
assertFalse(list.contains(streamCut2));
assertFalse(list.contains(streamCut3));
assertTrue(list.contains(streamCut4));
assertTrue(list.contains(streamCut5));
assertTrue(truncProp.isUpdating());
assertTrue(truncProp.getStreamCut().get(0L) == 60L && truncProp.getStreamCut().get(1L) == 60L);
assertTrue(Futures.await(processEvent(requestEventWriter)));
truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, streamName, null, executor).get().getObject();
assertFalse(truncProp.isUpdating());
// endregion
// region test retention with external manual truncation
// scale2 --> split segment 2 to 4 and 5. Sealed size for segment 2 = 50
newRanges = new ArrayList<>();
newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 0.25));
newRanges.add(new AbstractMap.SimpleEntry<>(0.25, 0.5));
sealedSegmentsWithSize = new HashMap<>();
sealedSegmentsWithSize.put(two, 50L);
scale(SCOPE, streamName, sealedSegmentsWithSize, new ArrayList<>(newRanges));
long four = computeSegmentId(4, 2);
long five = computeSegmentId(5, 2);
// region add streamcut on new epoch such that latest - oldest < retention.size
// streamcut6: 290 bytes (3/40, 4/30, 5/30)
// verify no new truncation happens..
Map<Long, Long> map6 = new HashMap<>();
map6.put(three, 40L);
map6.put(four, 30L);
map6.put(five, 30L);
size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map6, Optional.empty(), null, executor).join();
assertEquals(size, 290L);
long recordingTime6 = recordingTime5 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
StreamCutRecord streamCut6 = new StreamCutRecord(recordingTime6, size, ImmutableMap.copyOf(map6));
doReturn(CompletableFuture.completedFuture(streamCut6)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), anyString());
streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime6, null, "").get();
list = streamStorePartialMock.getRetentionSet(SCOPE, streamName, null, executor).thenCompose(retentionSet -> {
return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
}).join();
truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, streamName, null, executor).get().getObject();
assertFalse(list.contains(streamCut1));
assertFalse(list.contains(streamCut2));
assertFalse(list.contains(streamCut3));
assertTrue(list.contains(streamCut4));
assertTrue(list.contains(streamCut5));
assertTrue(list.contains(streamCut6));
assertFalse(truncProp.isUpdating());
// endregion
// truncate manually at streamCutManual: (1/65, 4/10, 5/10)
Map<Long, Long> streamCutManual = new HashMap<>();
streamCutManual.put(1L, 65L);
streamCutManual.put(four, 10L);
streamCutManual.put(five, 10L);
CompletableFuture<UpdateStreamStatus.Status> future = streamMetadataTasks.truncateStream(SCOPE, streamName, streamCutManual, 0L);
assertTrue(Futures.await(processEvent(requestEventWriter)));
assertTrue(Futures.await(future));
assertEquals(future.join(), UpdateStreamStatus.Status.SUCCESS);
// streamcut7: 340 bytes (3/50, 4/50, 5/50)
Map<Long, Long> map7 = new HashMap<>();
map7.put(three, 50L);
map7.put(four, 50L);
map7.put(five, 50L);
size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map7, Optional.empty(), null, executor).join();
assertEquals(size, 340L);
long recordingTime7 = recordingTime6 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
StreamCutRecord streamCut7 = new StreamCutRecord(recordingTime7, size, ImmutableMap.copyOf(map7));
doReturn(CompletableFuture.completedFuture(streamCut7)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), anyString());
// verify no new truncation.. streamcut5 should be chosen but discarded because it is not strictly-ahead-of-truncationRecord
streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime7, null, "").join();
list = streamStorePartialMock.getRetentionSet(SCOPE, streamName, null, executor).thenCompose(retentionSet -> {
return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
}).join();
truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, streamName, null, executor).get().getObject();
assertFalse(list.contains(streamCut1));
assertFalse(list.contains(streamCut2));
assertFalse(list.contains(streamCut3));
assertTrue(list.contains(streamCut4));
assertTrue(list.contains(streamCut5));
assertTrue(list.contains(streamCut6));
assertTrue(list.contains(streamCut7));
assertFalse(truncProp.isUpdating());
// streamcut8: 400 bytes (3/70, 4/70, 5/70)
Map<Long, Long> map8 = new HashMap<>();
map8.put(three, 70L);
map8.put(four, 70L);
map8.put(five, 70L);
size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map8, Optional.empty(), null, executor).join();
assertEquals(size, 400L);
long recordingTime8 = recordingTime7 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
StreamCutRecord streamCut8 = new StreamCutRecord(recordingTime8, size, ImmutableMap.copyOf(map8));
doReturn(CompletableFuture.completedFuture(streamCut8)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), anyString());
streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime8, null, "").get();
list = streamStorePartialMock.getRetentionSet(SCOPE, streamName, null, executor).thenCompose(retentionSet -> {
return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
}).join();
truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, streamName, null, executor).get().getObject();
// verify truncation happens at streamcut6
assertFalse(list.contains(streamCut1));
assertFalse(list.contains(streamCut2));
assertFalse(list.contains(streamCut3));
assertFalse(list.contains(streamCut4));
assertFalse(list.contains(streamCut5));
assertFalse(list.contains(streamCut6));
assertTrue(list.contains(streamCut7));
assertTrue(truncProp.isUpdating());
assertTrue(truncProp.getStreamCut().get(three) == 40L && truncProp.getStreamCut().get(four) == 30L && truncProp.getStreamCut().get(five) == 30L);
assertTrue(Futures.await(processEvent(requestEventWriter)));
truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, streamName, null, executor).get().getObject();
assertFalse(truncProp.isUpdating());
// endregion
// endregion
doCallRealMethod().when(streamStorePartialMock).listSubscribers(any(), any(), any(), any());
}
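The size-based iterations above follow one rule: truncate at the newest stream cut whose removal still leaves at least retentionParam bytes, i.e. currentSize - cutSize >= retentionParam. A self-contained sketch with hypothetical names, not the Controller's code:

import com.google.common.collect.Lists;
import java.util.List;

public class SizeRetentionSketch {

    static final class Cut {
        final long size;

        Cut(long size) {
            this.size = size;
        }
    }

    // Truncate at the newest cut whose removal still leaves at least retentionParam bytes.
    static Cut chooseTruncationCut(List<Cut> retentionSet, long currentSize, long retentionParam) {
        Cut chosen = null;
        for (Cut c : retentionSet) {                       // oldest to newest
            if (currentSize - c.size >= retentionParam) {
                chosen = c;                                // keep the newest qualifying cut
            }
        }
        return chosen;                                     // null: nothing may be truncated yet
    }

    public static void main(String[] args) {
        // Third iteration above: cuts of 19 and 100 bytes, current size 120, limit 100 bytes.
        List<Cut> set = Lists.newArrayList(new Cut(19L), new Cut(100L));
        System.out.println(chooseTruncationCut(set, 120L, 100L).size); // 19, i.e. streamCut1
    }
}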