use of io.pravega.controller.store.stream.records.StreamCutReferenceRecord in project pravega by pravega.
the class StreamMetadataTasks method getBoundStreamCuts.
private Map.Entry<StreamCutReferenceRecord, StreamCutReferenceRecord> getBoundStreamCuts(
        RetentionPolicy policy, RetentionSet retentionSet, Function<StreamCutReferenceRecord, Long> delta) {
    AtomicReference<StreamCutReferenceRecord> max = new AtomicReference<>();
    AtomicReference<StreamCutReferenceRecord> min = new AtomicReference<>();
    // We loop through all the stream cuts in the retention set and find two stream cuts that satisfy the min
    // and max bounds in the policy. The policy can be either size or time based, and the caller passes a delta
    // function that is applied to each stream cut and tells us the size/time worth of data that would be
    // retained if the stream were truncated at that cut.
    // Note that if a stream cut does not satisfy min, it implicitly does not satisfy max either.
    // However, satisfying min is no guarantee that the same stream cut satisfies the max policy as well.
    // So it is possible that every stream cut in the retention set satisfies min while none satisfies max.
    // In that case we choose the most recent stream cut as max (which is also the min).
    AtomicLong maxSoFar = new AtomicLong(Long.MIN_VALUE);
    AtomicLong minSoFar = new AtomicLong(Long.MAX_VALUE);
    retentionSet.getRetentionRecords().forEach(x -> {
        long value = delta.apply(x);
        if (value >= policy.getRetentionParam() && value <= policy.getRetentionMax() && value > maxSoFar.get()) {
            max.set(x);
            maxSoFar.set(value);
        }
        if (value >= policy.getRetentionParam() && value < minSoFar.get()) {
            min.set(x);
            minSoFar.set(value);
        }
    });
    if (max.get() == null) {
        // If we are unable to find a stream cut that satisfies the max policy constraint, but there is
        // a min stream cut bound that is actually beyond the max constraint, we set max to min.
        max.set(min.get());
    }
    return new AbstractMap.SimpleEntry<>(max.get(), min.get());
}
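For context, this is how a caller might supply the delta function. The time-based form below mirrors the call made in getTruncationStreamCutByTimeLimit further down; the size-based variant is a hedged sketch that assumes StreamCutReferenceRecord exposes getRecordingSize() alongside getRecordingTime() and that currentSize holds the stream's current size.

    // Time-based policy: delta is the age of the data retained if we truncate at x.
    long currentTime = retentionClock.get().get();
    Map.Entry<StreamCutReferenceRecord, StreamCutReferenceRecord> bounds =
            getBoundStreamCuts(policy, retentionSet, x -> currentTime - x.getRecordingTime());

    // Size-based policy (assumption: getRecordingSize() and currentSize exist as described above):
    // getBoundStreamCuts(policy, retentionSet, x -> currentSize - x.getRecordingSize());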
use of io.pravega.controller.store.stream.records.StreamCutReferenceRecord in project pravega by pravega.
the class StreamMetadataTasks method getTruncationStreamCutByTimeLimit.
private CompletableFuture<Map<Long, Long>> getTruncationStreamCutByTimeLimit(
        String scope, String stream, OperationContext context, RetentionPolicy policy,
        RetentionSet retentionSet, Map<Long, Long> lowerBound) {
    long currentTime = retentionClock.get().get();
    // We get the stream cuts from the retention set that satisfy the min and max bounds, with min pointing to
    // the most recent stream cut to satisfy both bounds and max referring to the oldest such stream cut in the
    // retention set. limits.key refers to max and limits.value refers to min.
    Map.Entry<StreamCutReferenceRecord, StreamCutReferenceRecord> limits =
            getBoundStreamCuts(policy, retentionSet, x -> currentTime - x.getRecordingTime());
    // If the subscriber lower bound is greater than (ahead of/after) the stream cut corresponding to the max
    // time and less than (behind/before) the stream cut for the min time from the retention set, then we can
    // safely truncate at the lower bound. Otherwise we truncate at the max time bound if it exists:
    // 1. if LB is greater than (ahead of/after) min => truncate at min
    // 2. if LB is less than (behind/before) max => truncate at max
    // 3. if LB is less than (behind/before) min && LB is greater than (ahead of/after) max => truncate at LB
    // 4. if LB is less than (behind/before) min && overlaps max => truncate at max
    // 5. if LB overlaps with both min and max => it holds both recent and older data;
    //    we truncate at a stream cut less than (behind/before) max in this case.
    CompletableFuture<StreamCutRecord> limitMinFuture = limits.getValue() == null
            ? CompletableFuture.completedFuture(null)
            : streamMetadataStore.getStreamCutRecord(scope, stream, limits.getValue(), context, executor);
    // If the lower bound is empty, simply return min.
    if (lowerBound == null || lowerBound.isEmpty()) {
        return limitMinFuture.thenApply(min -> Optional.ofNullable(min).map(StreamCutRecord::getStreamCut).orElse(null));
    }
    Optional<StreamCutReferenceRecord> maxBoundRef = retentionSet.getRetentionRecords().stream()
            .filter(x -> currentTime - x.getRecordingTime() >= policy.getRetentionMax())
            .max(Comparator.comparingLong(StreamCutReferenceRecord::getRecordingTime));
    CompletableFuture<StreamCutRecord> limitMaxFuture = limits.getKey() == null
            ? CompletableFuture.completedFuture(null)
            : streamMetadataStore.getStreamCutRecord(scope, stream, limits.getKey(), context, executor);
    CompletableFuture<StreamCutRecord> maxBoundFuture = maxBoundRef
            .map(x -> streamMetadataStore.getStreamCutRecord(scope, stream, x, context, executor))
            .orElse(CompletableFuture.completedFuture(null));
    return CompletableFuture.allOf(limitMaxFuture, limitMinFuture, maxBoundFuture).thenCompose(v -> {
        StreamCutRecord limitMax = limitMaxFuture.join();
        StreamCutRecord limitMin = limitMinFuture.join();
        StreamCutRecord maxBound = maxBoundFuture.join();
        if (limitMin != null) {
            return streamMetadataStore.compareStreamCut(scope, stream, limitMin.getStreamCut(), lowerBound, context, executor)
                    .thenCompose(compareWithMin -> {
                        switch (compareWithMin) {
                            case EqualOrAfter:
                                // min is at or after LB (cases 2-4): truncate at LB if it is ahead of max;
                                // if LB is behind or overlaps with limitMax, truncate at maxBound instead.
                                return truncateAtLowerBoundOrMax(scope, stream, context, lowerBound, limitMax, maxBound);
                            case Overlaps:
                                // LB overlaps min (case 5): truncate at a stream cut before the lower bound,
                                // and we are choosing it from the retention set.
                                return getStreamcutBeforeLowerbound(scope, stream, context, retentionSet, lowerBound);
                            case Before:
                                // min is less than (behind/before) LB; truncate at min.
                                return CompletableFuture.completedFuture(limitMin.getStreamCut());
                            default:
                                throw new IllegalArgumentException("Invalid compareStreamCut response");
                        }
                    });
        } else {
            return CompletableFuture.completedFuture(null);
        }
    });
}
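A stream cut in these maps is a mapping from segment id to offset. As a hedged illustration of the three comparison outcomes handled in the switch above, assuming compareStreamCut reports how its first argument relates to the second on a segment-by-segment basis (the maps and names here are illustrative, not from the source):

    // Hypothetical two-segment stream cuts (segment id -> offset), shown against a reference cut.
    Map<Long, Long> reference = Map.of(0L, 10L, 1L, 10L);
    Map<Long, Long> after     = Map.of(0L, 15L, 1L, 12L); // EqualOrAfter: at/ahead on every segment
    Map<Long, Long> before    = Map.of(0L, 5L, 1L, 8L);   // Before: behind on every segment
    Map<Long, Long> overlaps  = Map.of(0L, 15L, 1L, 8L);  // Overlaps: ahead on some segments, behind on others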
use of io.pravega.controller.store.stream.records.StreamCutReferenceRecord in project pravega by pravega.
the class PersistentStreamBase method deleteStreamCutBefore.
@Override
public CompletableFuture<Void> deleteStreamCutBefore(StreamCutReferenceRecord record, OperationContext context) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    return getRetentionSetData(context).thenCompose(data -> {
        RetentionSet retention = data.getObject();
        RetentionSet update = RetentionSet.removeStreamCutBefore(retention, record);
        List<StreamCutReferenceRecord> toRemove = retention.retentionRecordsBefore(record);
        // Delete the individual stream cut records first, then persist the trimmed retention set
        // with a versioned write so that concurrent modifications are detected via the version.
        return Futures.allOf(toRemove.stream()
                                     .map(x -> deleteStreamCutRecordData(x.getRecordingTime(), context))
                                     .collect(Collectors.toList()))
                      .thenCompose(x -> Futures.toVoid(updateRetentionSetData(
                              new VersionedMetadata<>(update, data.getVersion()), context)));
    });
}
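A hedged usage sketch of the pairing seen in this section and in the test below: resolve a reference record with findStreamCutReferenceRecordBefore, then trim everything older. The variable persistentStream and the null-handling are illustrative assumptions, not the project's actual call site.

    // Hypothetical caller: trim the retention set up to the cut we just truncated at.
    store.findStreamCutReferenceRecordBefore(scope, stream, truncationStreamCut, retentionSet, null, executor)
         .thenCompose(ref -> ref == null
                 ? CompletableFuture.completedFuture(null)        // nothing recorded before this cut
                 : persistentStream.deleteStreamCutBefore(ref, context));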
use of io.pravega.controller.store.stream.records.StreamCutReferenceRecord in project pravega by pravega.
the class StreamMetadataStoreTest method streamCutReferenceRecordBeforeTest.
@Test(timeout = 30000)
public void streamCutReferenceRecordBeforeTest() throws Exception {
    final String scope = "ScopeRetain2";
    final String stream = "StreamRetain";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.builder()
            .retentionType(RetentionPolicy.RetentionType.TIME)
            .retentionParam(Duration.ofDays(2).toMillis())
            .build();
    final StreamConfiguration configuration = StreamConfiguration.builder()
            .scalingPolicy(policy)
            .retentionPolicy(retentionPolicy)
            .build();
    long start = System.currentTimeMillis();
    store.createScope(scope, null, executor).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();

    Map<Long, Long> map1 = new HashMap<>();
    map1.put(0L, 1L);
    map1.put(1L, 1L);
    long recordingTime = 1;
    StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime, Long.MIN_VALUE, ImmutableMap.copyOf(map1));
    store.addStreamCutToRetentionSet(scope, stream, streamCut1, null, executor).get();

    Map<Long, Long> map2 = new HashMap<>();
    map2.put(0L, 10L);
    map2.put(1L, 10L);
    StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime + 10, Long.MIN_VALUE, ImmutableMap.copyOf(map2));
    store.addStreamCutToRetentionSet(scope, stream, streamCut2, null, executor).get();

    Map<Long, Long> map3 = new HashMap<>();
    map3.put(0L, 20L);
    map3.put(1L, 20L);
    StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime + 20, Long.MIN_VALUE, ImmutableMap.copyOf(map3));
    store.addStreamCutToRetentionSet(scope, stream, streamCut3, null, executor).get();

    Map<Long, Long> streamCut = new HashMap<>();
    RetentionSet retentionSet = store.getRetentionSet(scope, stream, null, executor).join();

    // 0/0, 1/1 .. there should be nothing before it
    streamCut.put(0L, 0L);
    streamCut.put(1L, 1L);
    StreamCutReferenceRecord beforeRef =
            store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertNull(beforeRef);

    // 0/1, 1/1 .. sc1
    streamCut.put(0L, 1L);
    streamCut.put(1L, 1L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());

    // 0/5, 1/5 .. sc1
    streamCut.put(0L, 5L);
    streamCut.put(1L, 5L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());

    // 0/0, 1/5 .. nothing
    streamCut.put(0L, 0L);
    streamCut.put(1L, 5L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertNull(beforeRef);

    // 0/10, 1/10 .. sc2
    streamCut.put(0L, 10L);
    streamCut.put(1L, 10L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut2.getRecordingTime());

    // 0/9, 1/15 .. sc1
    streamCut.put(0L, 9L);
    streamCut.put(1L, 15L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());

    // 0/19, 1/20 .. sc2
    streamCut.put(0L, 19L);
    streamCut.put(1L, 20L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut2.getRecordingTime());

    // 0/20, 1/20 .. sc3
    streamCut.put(0L, 20L);
    streamCut.put(1L, 20L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut3.getRecordingTime());

    // 0/21, 1/21 .. sc3
    streamCut.put(0L, 21L);
    streamCut.put(1L, 21L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut3.getRecordingTime());

    // Now add another entry so that we have an even number of records and repeat the test.
    // Here we reuse map3 but advance the recording time: if there are subsequent stream cut
    // records with identical stream cuts, we should always pick the latest.
    StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime + 30, Long.MIN_VALUE, ImmutableMap.copyOf(map3));
    store.addStreamCutToRetentionSet(scope, stream, streamCut4, null, executor).get();
    retentionSet = store.getRetentionSet(scope, stream, null, executor).join();

    // 0/0, 1/1 .. there should be nothing before it
    streamCut.put(0L, 0L);
    streamCut.put(1L, 1L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertNull(beforeRef);

    // 0/1, 1/1 .. 0/1, 1/1
    streamCut.put(0L, 1L);
    streamCut.put(1L, 1L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());

    // 0/5, 1/5 .. 0/1, 1/1
    streamCut.put(0L, 5L);
    streamCut.put(1L, 5L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());

    // 0/0, 1/5 .. nothing
    streamCut.put(0L, 0L);
    streamCut.put(1L, 5L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertNull(beforeRef);

    // 0/10, 1/10 .. 0/10, 1/10
    streamCut.put(0L, 10L);
    streamCut.put(1L, 10L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut2.getRecordingTime());

    // 0/9, 1/15 .. 0/1, 1/1
    streamCut.put(0L, 9L);
    streamCut.put(1L, 15L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut1.getRecordingTime());

    // 0/19, 1/20 .. 0/10, 1/10
    streamCut.put(0L, 19L);
    streamCut.put(1L, 20L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut2.getRecordingTime());

    // 0/20, 1/20 .. 0/20, 1/20 (streamCut4, the latest of the two identical cuts)
    streamCut.put(0L, 20L);
    streamCut.put(1L, 20L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut4.getRecordingTime());

    // 0/21, 1/21 .. 0/20, 1/20
    streamCut.put(0L, 21L);
    streamCut.put(1L, 21L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut4.getRecordingTime());

    // 0/30, 1/30 .. 0/20, 1/20
    streamCut.put(0L, 30L);
    streamCut.put(1L, 30L);
    beforeRef = store.findStreamCutReferenceRecordBefore(scope, stream, streamCut, retentionSet, null, executor).join();
    assertEquals(beforeRef.getRecordingTime(), streamCut4.getRecordingTime());
}
use of io.pravega.controller.store.stream.records.StreamCutReferenceRecord in project pravega by pravega.
the class StreamMetadataTasks method retention.
/**
 * Method to check the retention policy and generate new periodic stream cuts and/or truncate the stream at an
 * existing stream cut.
 *
 * @param scope           scope
 * @param stream          stream
 * @param policy          retention policy
 * @param recordingTime   time of recording
 * @param contextOpt      operation context
 * @param delegationToken token to be sent to the segment store to authorize this operation
 * @return a future that completes when the retention opportunity has been processed
 */
public CompletableFuture<Void> retention(final String scope, final String stream, final RetentionPolicy policy,
                                         final long recordingTime, final OperationContext contextOpt,
                                         final String delegationToken) {
    Preconditions.checkNotNull(policy);
    final OperationContext context = contextOpt != null
            ? contextOpt
            : streamMetadataStore.createStreamContext(scope, stream, ControllerService.nextRequestId());
    return streamMetadataStore.getRetentionSet(scope, stream, context, executor)
            .thenCompose(retentionSet -> {
                StreamCutReferenceRecord latestCut = retentionSet.getLatest();
                return generateStreamCutIfRequired(scope, stream, latestCut, recordingTime, context, delegationToken)
                        .thenCompose(newRecord -> truncate(scope, stream, policy, context, retentionSet, newRecord));
            })
            .thenAccept(x -> StreamMetrics.reportRetentionEvent(scope, stream));
}
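A hedged sketch of how a periodic job might drive this method; in Pravega, retention is invoked by the controller's background machinery, so the scheduler, field names, and cadence below are purely illustrative assumptions.

    // Hypothetical periodic driver (streamMetadataTasks, retentionPolicy and delegationToken
    // are assumed to be in scope; the 30-minute interval is an arbitrary example):
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    scheduler.scheduleAtFixedRate(
            () -> streamMetadataTasks.retention("myScope", "myStream", retentionPolicy,
                            System.currentTimeMillis(), null, delegationToken)
                    .exceptionally(e -> null),  // swallow failures; the next run retries
            0, 30, TimeUnit.MINUTES);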