Use of io.pravega.controller.store.stream.records.RetentionSet in project pravega by pravega.
From class PersistentStreamBase, the method deleteStreamCutBefore:
@Override
public CompletableFuture<Void> deleteStreamCutBefore(StreamCutReferenceRecord record, OperationContext context) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    return getRetentionSetData(context).thenCompose(data -> {
        RetentionSet retention = data.getObject();
        RetentionSet update = RetentionSet.removeStreamCutBefore(retention, record);
        List<StreamCutReferenceRecord> toRemove = retention.retentionRecordsBefore(record);
        return Futures.allOf(toRemove.stream()
                                     .map(x -> deleteStreamCutRecordData(x.getRecordingTime(), context))
                                     .collect(Collectors.toList()))
                      .thenCompose(x -> Futures.toVoid(updateRetentionSetData(
                              new VersionedMetadata<>(update, data.getVersion()), context)));
    });
}
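deleteStreamCutBefore leans on two RetentionSet operations that also appear elsewhere in this listing: retentionRecordsBefore, which lists the reference records preceding a given record, and the static removeStreamCutBefore, which returns a trimmed copy to persist. Below is a minimal standalone sketch of that interplay, reusing the builder-based construction shown in the serializer test at the end of this page; the demo class itself is hypothetical and the exact boundary semantics are determined by RetentionSet's implementation.

import com.google.common.collect.ImmutableList;
import io.pravega.controller.store.stream.records.RetentionSet;
import io.pravega.controller.store.stream.records.StreamCutReferenceRecord;

public class RetentionSetTrimDemo {
    public static void main(String[] args) {
        StreamCutReferenceRecord r10 = StreamCutReferenceRecord.builder().recordingTime(10L).recordingSize(0L).build();
        StreamCutReferenceRecord r20 = StreamCutReferenceRecord.builder().recordingTime(20L).recordingSize(0L).build();
        StreamCutReferenceRecord r30 = StreamCutReferenceRecord.builder().recordingTime(30L).recordingSize(0L).build();
        RetentionSet set = new RetentionSet(ImmutableList.of(r10, r20, r30));
        // Reference records preceding r20; whether the boundary record itself is
        // included follows RetentionSet's implementation.
        set.retentionRecordsBefore(r20).forEach(r -> System.out.println("to delete: " + r.getRecordingTime()));
        // Trimmed copy, analogous to what the versioned-metadata update persists above.
        RetentionSet trimmed = RetentionSet.removeStreamCutBefore(set, r20);
        trimmed.getRetentionRecords().forEach(r -> System.out.println("kept: " + r.getRecordingTime()));
    }
}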
Use of io.pravega.controller.store.stream.records.RetentionSet in project pravega by pravega.
From class PersistentStreamBase, the method createHistoryRecords:
private CompletionStage<Void> createHistoryRecords(int startingSegmentNumber, CreateStreamResponse createStreamResponse, OperationContext context) {
    Preconditions.checkNotNull(context, "operation context cannot be null");
    final int numSegments = createStreamResponse.getConfiguration().getScalingPolicy().getMinNumSegments();
    // create epoch 0 record
    final double keyRangeChunk = 1.0 / numSegments;
    long creationTime = createStreamResponse.getTimestamp();
    final ImmutableList.Builder<StreamSegmentRecord> builder = ImmutableList.builder();
    IntStream.range(0, numSegments).boxed().forEach(x -> builder.add(newSegmentRecord(
            0, startingSegmentNumber + x, creationTime, x * keyRangeChunk, (x + 1) * keyRangeChunk)));
    EpochRecord epoch0 = new EpochRecord(0, 0, builder.build(), creationTime, 0L, 0L);
    return createEpochRecord(epoch0, context)
            .thenCompose(r -> createHistoryChunk(epoch0, context))
            .thenCompose(r -> createSealedSegmentSizeMapShardIfAbsent(0, context))
            .thenCompose(r -> createRetentionSetDataIfAbsent(new RetentionSet(ImmutableList.of()), context))
            .thenCompose(r -> createCurrentEpochRecordDataIfAbsent(epoch0, context));
}
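The epoch-0 segments split the key space [0.0, 1.0) evenly: segment x covers [x * keyRangeChunk, (x + 1) * keyRangeChunk). Here is a self-contained sketch of just that arithmetic, with no Pravega types; the class name and segment count are invented for illustration.

public class KeyRangeDemo {
    public static void main(String[] args) {
        int numSegments = 4;                      // e.g. a stream created with ScalingPolicy.fixed(4)
        double keyRangeChunk = 1.0 / numSegments; // same formula as createHistoryRecords
        for (int x = 0; x < numSegments; x++) {
            // segment 0 -> [0.00, 0.25), segment 1 -> [0.25, 0.50), ...
            System.out.printf("segment %d -> [%.2f, %.2f)%n", x, x * keyRangeChunk, (x + 1) * keyRangeChunk);
        }
    }
}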
Use of io.pravega.controller.store.stream.records.RetentionSet in project pravega by pravega.
From class StreamMetadataTasks, the method getTruncationStreamCutByTimeLimit:
private CompletableFuture<Map<Long, Long>> getTruncationStreamCutByTimeLimit(String scope, String stream, OperationContext context, RetentionPolicy policy, RetentionSet retentionSet, Map<Long, Long> lowerBound) {
    long currentTime = retentionClock.get().get();
    // We get the stream cuts from the retention set that satisfy the min and max bounds: min points to the most
    // recent stream cut satisfying both bounds, while max refers to the oldest such stream cut in the retention set.
    // limits.getKey() refers to max and limits.getValue() refers to min.
    Map.Entry<StreamCutReferenceRecord, StreamCutReferenceRecord> limits = getBoundStreamCuts(policy, retentionSet, x -> currentTime - x.getRecordingTime());
    // If the subscriber lowerbound is greater than (ahead of/after) the stream cut corresponding to the max time
    // and less than (behind/before) the stream cut for the min time from the retention set, then we can safely
    // truncate at the lowerbound. Else we will truncate at the max time bound, if it exists:
    // 1. if LB is greater than (ahead of/after) min => truncate at min
    // 2. if LB is less than (behind/before) max => truncate at max
    // 3. if LB is less than (behind/before) min && LB is greater than (ahead of/after) max => truncate at LB
    // 4. if LB is less than (behind/before) min && overlaps max => truncate at max
    // 5. if LB overlaps with both min and max => it has both recent and older data;
    //    we truncate at a stream cut less than (behind/before) max in this case.
    CompletableFuture<StreamCutRecord> limitMinFuture = limits.getValue() == null ? CompletableFuture.completedFuture(null) : streamMetadataStore.getStreamCutRecord(scope, stream, limits.getValue(), context, executor);
    // If the lowerbound is empty, simply return min.
    if (lowerBound == null || lowerBound.isEmpty()) {
        return limitMinFuture.thenApply(min -> Optional.ofNullable(min).map(StreamCutRecord::getStreamCut).orElse(null));
    }
    Optional<StreamCutReferenceRecord> maxBoundRef = retentionSet.getRetentionRecords().stream()
            .filter(x -> currentTime - x.getRecordingTime() >= policy.getRetentionMax())
            .max(Comparator.comparingLong(StreamCutReferenceRecord::getRecordingTime));
    CompletableFuture<StreamCutRecord> limitMaxFuture = limits.getKey() == null ? CompletableFuture.completedFuture(null) : streamMetadataStore.getStreamCutRecord(scope, stream, limits.getKey(), context, executor);
    CompletableFuture<StreamCutRecord> maxBoundFuture = maxBoundRef.map(x -> streamMetadataStore.getStreamCutRecord(scope, stream, x, context, executor)).orElse(CompletableFuture.completedFuture(null));
    return CompletableFuture.allOf(limitMaxFuture, limitMinFuture, maxBoundFuture).thenCompose(v -> {
        StreamCutRecord limitMax = limitMaxFuture.join();
        StreamCutRecord limitMin = limitMinFuture.join();
        StreamCutRecord maxBound = maxBoundFuture.join();
        if (limitMin != null) {
            return streamMetadataStore.compareStreamCut(scope, stream, limitMin.getStreamCut(), lowerBound, context, executor).thenCompose(compareWithMin -> {
                switch (compareWithMin) {
                    case EqualOrAfter:
                        // min is at or ahead of the lowerbound, so truncating at the lowerbound is safe;
                        // if the lowerbound overlaps with limitMax, then we truncate at maxBound instead.
                        return truncateAtLowerBoundOrMax(scope, stream, context, lowerBound, limitMax, maxBound);
                    case Overlaps:
                        // min overlaps with the lowerbound, so we cannot truncate at the lowerbound itself;
                        // we truncate at a stream cut before the lowerbound, and we are choosing from the retention set.
                        return getStreamcutBeforeLowerbound(scope, stream, context, retentionSet, lowerBound);
                    case Before:
                        // min is less than (behind/before) the lowerbound. truncate at min.
                        return CompletableFuture.completedFuture(limitMin.getStreamCut());
                    default:
                        throw new IllegalArgumentException("Invalid Compare streamcut response");
                }
            });
        } else {
            return CompletableFuture.completedFuture(null);
        }
    });
}
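The maxBoundRef pipeline above encodes the policy check: a record is eligible as a max bound when its age (currentTime - x.getRecordingTime()) is at least policy.getRetentionMax(), and among eligible records the most recent one wins. Here is a hedged, standalone version of that selection; the demo class, clock value, and literal recording times are invented for illustration, while the filter-then-max pipeline mirrors the snippet above.

import com.google.common.collect.ImmutableList;
import io.pravega.controller.store.stream.records.StreamCutReferenceRecord;

import java.util.Comparator;
import java.util.List;
import java.util.Optional;

public class MaxBoundDemo {
    public static void main(String[] args) {
        long currentTime = 100L;
        long retentionMax = 50L; // stand-in for policy.getRetentionMax()
        List<StreamCutReferenceRecord> records = ImmutableList.of(
                StreamCutReferenceRecord.builder().recordingTime(20L).recordingSize(0L).build(),
                StreamCutReferenceRecord.builder().recordingTime(40L).recordingSize(0L).build(),
                StreamCutReferenceRecord.builder().recordingTime(90L).recordingSize(0L).build());
        // Same filter + max as the maxBoundRef pipeline: old enough, then most recent.
        Optional<StreamCutReferenceRecord> maxBound = records.stream()
                .filter(x -> currentTime - x.getRecordingTime() >= retentionMax)
                .max(Comparator.comparingLong(StreamCutReferenceRecord::getRecordingTime));
        // Ages are 80, 60, and 10; the first two pass the filter and 40 is the most recent.
        maxBound.ifPresent(x -> System.out.println("max bound at time " + x.getRecordingTime()));
    }
}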
Use of io.pravega.controller.store.stream.records.RetentionSet in project pravega by pravega.
From class StreamMetadataStoreTest, the method streamCutReferenceRecordBeforeTest:
@Test(timeout = 30000)
public void streamCutReferenceRecordBeforeTest() throws Exception {
    final String scope = "ScopeRetain2";
    final String stream = "StreamRetain";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.TIME).retentionParam(Duration.ofDays(2).toMillis()).build();
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope, null, executor).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    Map<Long, Long> map1 = new HashMap<>();
    map1.put(0L, 1L);
    map1.put(1L, 1L);
    long recordingTime = 1;
    StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime, Long.MIN_VALUE, ImmutableMap.copyOf(map1));
    store.addStreamCutToRetentionSet(scope, stream, streamCut1, null, executor).get();
    Map<Long, Long> map2 = new HashMap<>();
    map2.put(0L, 10L);
    map2.put(1L, 10L);
    StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime + 10, Long.MIN_VALUE, ImmutableMap.copyOf(map2));
    store.addStreamCutToRetentionSet(scope, stream, streamCut2, null, executor).get();
    Map<Long, Long> map3 = new HashMap<>();
    map3.put(0L, 20L);
    map3.put(1L, 20L);
    StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime + 20, Long.MIN_VALUE, ImmutableMap.copyOf(map3));
    store.addStreamCutToRetentionSet(scope, stream, streamCut3, null, executor).get();
    Map<Long, Long> streamCut = new HashMap<>();
    RetentionSet retentionSet = store.getRetentionSet(scope, stream, null, executor).join();
    // 0/0, 1/1 .. there should be nothing before it
    streamCut.put(0L, 0L);
    streamCut.put(1L, 1L);
    StreamCutReferenceRecord beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertNull(beforeRef);
    // 0/1, 1/1 .. sc1
    streamCut.put(0L, 1L);
    streamCut.put(1L, 1L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());
    // 0/5, 1/5 .. sc1
    streamCut.put(0L, 5L);
    streamCut.put(1L, 5L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());
    // 0/0, 1/5 .. nothing
    streamCut.put(0L, 0L);
    streamCut.put(1L, 5L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertNull(beforeRef);
    // 0/10, 1/10 ... sc2
    streamCut.put(0L, 10L);
    streamCut.put(1L, 10L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut2.getRecordingTime());
    // 0/9, 1/15 ... sc1
    streamCut.put(0L, 9L);
    streamCut.put(1L, 15L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());
    // 0/19, 1/20 ... sc2
    streamCut.put(0L, 19L);
    streamCut.put(1L, 20L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut2.getRecordingTime());
    // 0/20, 1/20 ... sc3
    streamCut.put(0L, 20L);
    streamCut.put(1L, 20L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut3.getRecordingTime());
    // 0/21, 1/21 ... sc3
    streamCut.put(0L, 21L);
    streamCut.put(1L, 21L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut3.getRecordingTime());
    // Now add another entry so that we have an even number of records, and repeat the test.
    // Here we reuse map3 but advance the recording time: we should always pick the latest record
    // if there are subsequent StreamCutRecords with identical stream cuts.
    StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime + 30, Long.MIN_VALUE, ImmutableMap.copyOf(map3));
    store.addStreamCutToRetentionSet(scope, stream, streamCut4, null, executor).get();
    retentionSet = store.getRetentionSet(scope, stream, null, executor).join();
    // 0/0, 1/1 .. there should be nothing before it
    streamCut.put(0L, 0L);
    streamCut.put(1L, 1L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertNull(beforeRef);
    // 0/1, 1/1 .. 0/1, 1/1
    streamCut.put(0L, 1L);
    streamCut.put(1L, 1L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());
    // 0/5, 1/5 .. 0/1, 1/1
    streamCut.put(0L, 5L);
    streamCut.put(1L, 5L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());
    // 0/0, 1/5 .. nothing
    streamCut.put(0L, 0L);
    streamCut.put(1L, 5L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertNull(beforeRef);
    // 0/10, 1/10 ... 0/10, 1/10
    streamCut.put(0L, 10L);
    streamCut.put(1L, 10L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut2.getRecordingTime());
    // 0/9, 1/15 ... 0/1, 1/1
    streamCut.put(0L, 9L);
    streamCut.put(1L, 15L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());
    // 0/19, 1/20 ... 0/10, 1/10
    streamCut.put(0L, 19L);
    streamCut.put(1L, 20L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut2.getRecordingTime());
    // 0/20, 1/20 ... 0/20, 1/20
    streamCut.put(0L, 20L);
    streamCut.put(1L, 20L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut4.getRecordingTime());
    // 0/21, 1/21 ... 0/20, 1/20
    streamCut.put(0L, 21L);
    streamCut.put(1L, 21L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut4.getRecordingTime());
    // 0/30, 1/30 ... 0/20, 1/20
    streamCut.put(0L, 30L);
    streamCut.put(1L, 30L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut4.getRecordingTime());
}
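The assertions above pin down the contract of findStreamCutReferenceRecordBefore: return the most recent retention-set entry whose stream cut is at or before the queried cut on every segment, or null when none qualifies, and prefer the latest of several entries with identical cuts. The following is a simplified, hypothetical model of that lookup for a fixed segment set (real streams must also handle scaling epochs, so this is illustrative only; requires Java 16+ for records).

import java.util.List;
import java.util.Map;

public class FindBeforeDemo {
    record Entry(long recordingTime, Map<Long, Long> cut) { }

    // Latest entry whose cut is <= the query on every segment; null if none qualifies.
    static Entry findBefore(List<Entry> retentionSet, Map<Long, Long> query) {
        Entry result = null;
        for (Entry e : retentionSet) {
            boolean before = e.cut().entrySet().stream()
                    .allMatch(kv -> query.getOrDefault(kv.getKey(), Long.MIN_VALUE) >= kv.getValue());
            if (before && (result == null || e.recordingTime() >= result.recordingTime())) {
                result = e; // >= keeps the latest of identical cuts, as the test requires
            }
        }
        return result;
    }

    public static void main(String[] args) {
        List<Entry> set = List.of(
                new Entry(1, Map.of(0L, 1L, 1L, 1L)),
                new Entry(11, Map.of(0L, 10L, 1L, 10L)),
                new Entry(21, Map.of(0L, 20L, 1L, 20L)),
                new Entry(31, Map.of(0L, 20L, 1L, 20L))); // identical cut, later time
        System.out.println(findBefore(set, Map.of(0L, 20L, 1L, 20L)).recordingTime()); // 31
        System.out.println(findBefore(set, Map.of(0L, 0L, 1L, 5L)));                   // null
    }
}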
Use of io.pravega.controller.store.stream.records.RetentionSet in project pravega by pravega.
From class ControllerMetadataJsonSerializerTest, the method testRetentionSet:
@Test
public void testRetentionSet() {
    StreamCutReferenceRecord refRecord1 = StreamCutReferenceRecord.builder().recordingSize(0L).recordingTime(10L).build();
    StreamCutReferenceRecord refRecord2 = StreamCutReferenceRecord.builder().recordingSize(1L).recordingTime(11L).build();
    RetentionSet record = new RetentionSet(ImmutableList.of(refRecord1, refRecord2));
    testRecordSerialization(record, RetentionSet.class);
}