use of io.pravega.client.stream.RetentionPolicy in project pravega by pravega.
the class StreamMetadataTasksTest method timeBasedRetentionStreamTest.
@Test(timeout = 30000)
public void timeBasedRetentionStreamTest() throws Exception {
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.TIME).retentionParam(Duration.ofMinutes(60).toMillis()).build();
final StreamConfiguration configuration = StreamConfiguration.builder().scope(SCOPE).streamName("test").scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
streamStorePartialMock.createStream(SCOPE, "test", configuration, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
assertNotEquals(0, consumer.getCurrentSegments(SCOPE, "test").get().size());
WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
streamMetadataTasks.setRequestEventWriter(requestEventWriter);
long recordingTime1 = System.currentTimeMillis();
Map<Integer, Long> map1 = new HashMap<>();
map1.put(0, 1L);
map1.put(1, 1L);
StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime1, Long.MIN_VALUE, map1);
// mock only isTransactionOngoing call.
doReturn(CompletableFuture.completedFuture(streamCut1)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any());
streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime1, null, "").get();
// verify that one streamCut is generated and added.
List<StreamCutRecord> list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, "test", null, executor).get();
assertTrue(list.contains(streamCut1));
Map<Integer, Long> map2 = new HashMap<>();
map2.put(0, 10L);
map2.put(1, 10L);
long recordingTime2 = recordingTime1 + Duration.ofMinutes(5).toMillis();
StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime2, Long.MIN_VALUE, map2);
// mock only isTransactionOngoing call.
doReturn(CompletableFuture.completedFuture(streamCut2)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any());
streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime2, null, "").get();
list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, "test", null, executor).get();
StreamProperty<StreamTruncationRecord> truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, "test", true, null, executor).get();
// verify that only one stream cut is in retention set. streamCut2 is not added
// verify that truncation did not happen
assertTrue(list.contains(streamCut1));
assertTrue(!list.contains(streamCut2));
assertTrue(!truncProp.isUpdating());
Map<Integer, Long> map3 = new HashMap<>();
map3.put(0, 20L);
map3.put(1, 20L);
long recordingTime3 = recordingTime1 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime3, Long.MIN_VALUE, map3);
// mock only isTransactionOngoing call.
doReturn(CompletableFuture.completedFuture(streamCut3)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any());
streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime3, null, "").get();
// verify that two stream cuts are in the retention set: cut 1 and cut 3.
// verify that truncation has not happened.
list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, "test", null, executor).get();
truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, "test", true, null, executor).get();
assertTrue(list.contains(streamCut1));
assertTrue(!list.contains(streamCut2));
assertTrue(list.contains(streamCut3));
assertTrue(!truncProp.isUpdating());
Map<Integer, Long> map4 = new HashMap<>();
map4.put(0, 20L);
map4.put(1, 20L);
long recordingTime4 = recordingTime1 + retentionPolicy.getRetentionParam() + 2;
StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime4, Long.MIN_VALUE, map4);
doReturn(CompletableFuture.completedFuture(streamCut4)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any());
streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime4, null, "").get();
// verify that only two stream cuts are in the retention set: streamCut 3 and streamCut 4.
// verify that truncation has started, and that streamCut1 is removed from the retention set since it was used for truncation.
list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, "test", null, executor).get();
truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, "test", true, null, executor).get();
assertTrue(!list.contains(streamCut1));
assertTrue(!list.contains(streamCut2));
assertTrue(list.contains(streamCut3));
assertTrue(list.contains(streamCut4));
assertTrue(truncProp.isUpdating());
assertTrue(truncProp.getProperty().getStreamCut().get(0) == 1L && truncProp.getProperty().getStreamCut().get(1) == 1L);
}
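For reference, the test above always builds a time-based policy through the full builder. Below is a minimal standalone sketch of the two retention variants the builder exposes, time-based and size-based (the size parameter is assumed to be interpreted as bytes), and of attaching a policy to a stream configuration; the scope and stream names are placeholders.
import io.pravega.client.stream.RetentionPolicy;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;
import java.time.Duration;

public class RetentionPolicyExamples {
    public static void main(String[] args) {
        // Time-based policy: retain roughly the last 60 minutes of data.
        RetentionPolicy byTime = RetentionPolicy.builder()
                .retentionType(RetentionPolicy.RetentionType.TIME)
                .retentionParam(Duration.ofMinutes(60).toMillis())
                .build();
        // Size-based policy: retain roughly the last 100 MB of data (parameter assumed to be bytes).
        RetentionPolicy bySize = RetentionPolicy.builder()
                .retentionType(RetentionPolicy.RetentionType.SIZE)
                .retentionParam(100L * 1024 * 1024)
                .build();
        // Attach a policy to a stream configuration, mirroring the tests on this page.
        StreamConfiguration config = StreamConfiguration.builder()
                .scope("myScope")
                .streamName("myStream")
                .scalingPolicy(ScalingPolicy.fixed(2))
                .retentionPolicy(byTime)
                .build();
        System.out.println(config + " / size-based variant: " + bySize);
    }
}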
use of io.pravega.client.stream.RetentionPolicy in project pravega by pravega.
the class StreamMetadataTasksTest method retentionPolicyUpdateTest.
@Test(timeout = 30000)
public void retentionPolicyUpdateTest() throws Exception {
final ScalingPolicy policy = ScalingPolicy.fixed(2);
String stream = "test";
final StreamConfiguration noRetentionConfig = StreamConfiguration.builder().scope(SCOPE).streamName(stream).scalingPolicy(policy).build();
// add stream without retention policy
streamMetadataTasks.createStreamBody(SCOPE, stream, noRetentionConfig, System.currentTimeMillis()).join();
String scopedStreamName = String.format("%s/%s", SCOPE, stream);
// verify that stream is not added to bucket
assertTrue(!streamStorePartialMock.getStreamsForBucket(0, executor).join().contains(scopedStreamName));
UpdateStreamTask task = new UpdateStreamTask(streamMetadataTasks, streamStorePartialMock, executor);
final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.TIME).retentionParam(Duration.ofMinutes(60).toMillis()).build();
final StreamConfiguration withRetentionConfig = StreamConfiguration.builder().scope(SCOPE).streamName(stream).scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
// now update stream with a retention policy
streamStorePartialMock.startUpdateConfiguration(SCOPE, stream, withRetentionConfig, null, executor).join();
UpdateStreamEvent update = new UpdateStreamEvent(SCOPE, stream);
task.execute(update).join();
// verify that bucket has the stream.
assertTrue(streamStorePartialMock.getStreamsForBucket(0, executor).join().contains(scopedStreamName));
// update stream such that stream is updated with null retention policy
streamStorePartialMock.startUpdateConfiguration(SCOPE, stream, noRetentionConfig, null, executor).join();
task.execute(update).join();
// verify that the stream is no longer present in the bucket
assertTrue(!streamStorePartialMock.getStreamsForBucket(0, executor).join().contains(scopedStreamName));
}
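The test drives the configuration change through the controller's internal UpdateStreamTask. A minimal sketch of how the same retention-policy update could be issued from the client side, assuming StreamManager.create(URI) and updateStream(scope, stream, config) behave as in the client admin API of this Pravega version; the controller endpoint is a placeholder.
import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.RetentionPolicy;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;
import java.net.URI;
import java.time.Duration;

public class UpdateRetentionExample {
    public static void main(String[] args) {
        // Placeholder controller endpoint; adjust for the target deployment.
        URI controller = URI.create("tcp://localhost:9090");
        try (StreamManager streamManager = StreamManager.create(controller)) {
            RetentionPolicy retention = RetentionPolicy.builder()
                    .retentionType(RetentionPolicy.RetentionType.TIME)
                    .retentionParam(Duration.ofMinutes(60).toMillis())
                    .build();
            StreamConfiguration withRetention = StreamConfiguration.builder()
                    .scope("myScope")
                    .streamName("test")
                    .scalingPolicy(ScalingPolicy.fixed(2))
                    .retentionPolicy(retention)
                    .build();
            // Updating an existing stream with a retention policy is what causes the
            // controller to register it for periodic automatic stream cuts, as the test verifies.
            boolean updated = streamManager.updateStream("myScope", "test", withRetention);
            System.out.println("update accepted: " + updated);
        }
    }
}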
use of io.pravega.client.stream.RetentionPolicy in project pravega by pravega.
the class StreamMetadataStoreTest method retentionSetTest.
@Test
public void retentionSetTest() throws Exception {
final String scope = "ScopeRetain";
final String stream = "StreamRetain";
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.TIME).retentionParam(Duration.ofDays(2).toMillis()).build();
final StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
long start = System.currentTimeMillis();
store.createScope(scope).get();
store.createStream(scope, stream, configuration, start, null, executor).get();
store.setState(scope, stream, State.ACTIVE, null, executor).get();
AtomicReference<BucketChangeListener.StreamNotification> notificationRef = new AtomicReference<>();
store.registerBucketChangeListener(0, notificationRef::set);
store.unregisterBucketListener(0);
store.addUpdateStreamForAutoStreamCut(scope, stream, retentionPolicy, null, executor).get();
List<String> streams = store.getStreamsForBucket(0, executor).get();
assertTrue(streams.contains(String.format("%s/%s", scope, stream)));
Map<Integer, Long> map1 = new HashMap<>();
map1.put(0, 0L);
map1.put(1, 0L);
long recordingTime = System.currentTimeMillis();
StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime, Long.MIN_VALUE, map1);
store.addStreamCutToRetentionSet(scope, stream, streamCut1, null, executor).get();
Map<Integer, Long> map2 = new HashMap<>();
map2.put(0, 10L);
map2.put(1, 10L);
StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime + 10, Long.MIN_VALUE, map2);
store.addStreamCutToRetentionSet(scope, stream, streamCut2, null, executor).get();
Map<Integer, Long> map3 = new HashMap<>();
map3.put(0, 20L);
map3.put(1, 20L);
StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime + 20, Long.MIN_VALUE, map3);
store.addStreamCutToRetentionSet(scope, stream, streamCut3, null, executor).get();
List<StreamCutRecord> list = store.getStreamCutsFromRetentionSet(scope, stream, null, executor).get();
assertTrue(list.contains(streamCut1));
assertTrue(list.contains(streamCut2));
assertTrue(list.contains(streamCut3));
store.deleteStreamCutBefore(scope, stream, streamCut2, null, executor).get();
list = store.getStreamCutsFromRetentionSet(scope, stream, null, executor).get();
assertTrue(!list.contains(streamCut1));
assertTrue(!list.contains(streamCut2));
assertTrue(list.contains(streamCut3));
store.removeStreamFromAutoStreamCut(scope, stream, null, executor).get();
streams = store.getStreamsForBucket(0, executor).get();
assertTrue(!streams.contains(String.format("%s/%s", scope, stream)));
}
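The retention set populated above is what a time-based policy consults when choosing a truncation point. A simplified, self-contained sketch of that selection using plain recording times rather than StreamCutRecord objects (a paraphrase of the behavior the tests exercise, not the controller's actual implementation): pick the most recent cut that is already older than the retention period.
import java.time.Duration;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;

public class TruncationCandidateSketch {
    /**
     * Returns the recording time of the newest stream cut that has fallen outside the
     * retention window. Everything before that cut is eligible for truncation.
     */
    static Optional<Long> truncationCandidate(List<Long> cutRecordingTimes, long retentionPeriodMillis, long now) {
        return cutRecordingTimes.stream()
                .filter(t -> now - t > retentionPeriodMillis)
                .max(Long::compareTo);
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        long oneHour = Duration.ofMinutes(60).toMillis();
        // Cuts recorded 90, 70 and 10 minutes ago; with a 60-minute policy the
        // 70-minute-old cut is the newest one outside the window.
        List<Long> cuts = Arrays.asList(now - 90 * 60_000L, now - 70 * 60_000L, now - 10 * 60_000L);
        System.out.println(truncationCandidate(cuts, oneHour, now));
    }
}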
use of io.pravega.client.stream.RetentionPolicy in project pravega by pravega.
the class ZKStreamMetadataStore method addUpdateStreamForAutoStreamCut.
@Override
public CompletableFuture<Void> addUpdateStreamForAutoStreamCut(final String scope, final String stream, final RetentionPolicy retentionPolicy, final OperationContext context, final Executor executor) {
Preconditions.checkNotNull(retentionPolicy);
int bucket = getBucket(scope, stream);
String retentionPath = String.format(ZKStoreHelper.RETENTION_PATH, bucket, encodedScopedStreamName(scope, stream));
byte[] serialize = SerializationUtils.serialize(retentionPolicy);
return storeHelper.getData(retentionPath).exceptionally(e -> {
    if (e instanceof StoreException.DataNotFoundException) {
        return null;
    } else {
        throw new CompletionException(e);
    }
}).thenCompose(data -> {
    if (data == null) {
        return storeHelper.createZNodeIfNotExist(retentionPath, serialize);
    } else {
        return storeHelper.setData(retentionPath, new Data<>(serialize, data.getVersion()));
    }
});
}
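The znode created or updated here stores the policy as a Java-serialized blob. A minimal sketch of the matching encode/decode pair, assuming org.apache.commons.lang3.SerializationUtils is the class used on the write path above; the decode helper is hypothetical, since only serialization is shown in this method.
import io.pravega.client.stream.RetentionPolicy;
import java.time.Duration;
import org.apache.commons.lang3.SerializationUtils;

public class RetentionZNodeCodec {
    // Serializes the policy exactly as addUpdateStreamForAutoStreamCut does above.
    static byte[] encode(RetentionPolicy policy) {
        return SerializationUtils.serialize(policy);
    }

    // Hypothetical counterpart: turns the znode payload back into a policy object.
    static RetentionPolicy decode(byte[] znodeBytes) {
        return (RetentionPolicy) SerializationUtils.deserialize(znodeBytes);
    }

    public static void main(String[] args) {
        RetentionPolicy policy = RetentionPolicy.builder()
                .retentionType(RetentionPolicy.RetentionType.TIME)
                .retentionParam(Duration.ofDays(2).toMillis())
                .build();
        // Round-trip check of the serialized form.
        System.out.println(decode(encode(policy)));
    }
}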
use of io.pravega.client.stream.RetentionPolicy in project pravega by pravega.
the class StreamMetadataTasks method retention.
/**
* Method to check retention policy and generate new periodic cuts and/or truncate stream at an existing stream cut.
*
* @param scope scope
* @param stream stream
* @param policy retention policy
* @param recordingTime time of recording
* @param contextOpt operation context
* @param delegationToken token to be sent to segmentstore to authorize this operation.
* @return future.
*/
public CompletableFuture<Void> retention(final String scope, final String stream, final RetentionPolicy policy, final long recordingTime, final OperationContext contextOpt, final String delegationToken) {
Preconditions.checkNotNull(policy);
final OperationContext context = contextOpt == null ? streamMetadataStore.createContext(scope, stream) : contextOpt;
return streamMetadataStore.getStreamCutsFromRetentionSet(scope, stream, context, executor).thenCompose(retentionSet -> {
    StreamCutRecord latestCut = retentionSet.stream().max(Comparator.comparingLong(StreamCutRecord::getRecordingTime)).orElse(null);
    return checkGenerateStreamCut(scope, stream, context, latestCut, recordingTime, delegationToken)
            .thenCompose(newRecord -> truncate(scope, stream, policy, context, retentionSet, newRecord, recordingTime));
}).thenAccept(x -> DYNAMIC_LOGGER.recordMeterEvents(nameFromStream(RETENTION_FREQUENCY, scope, stream), 1));
}
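The tests at the top of this page exercise checkGenerateStreamCut only indirectly: a cut offered five minutes after the previous one is dropped, while one offered more than Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES later is recorded. A minimal sketch of that gating decision, paraphrased from the test behavior rather than taken from the controller source; the threshold value below is an assumed example.
import java.time.Duration;

public class StreamCutFrequencyGate {
    // Assumed example value; the real threshold comes from Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES.
    static final long MINIMUM_RETENTION_FREQUENCY_MILLIS = Duration.ofMinutes(30).toMillis();

    /**
     * Returns true when enough time has passed since the latest recorded stream cut
     * that a new periodic cut should be generated. With no previous cut, always generate.
     */
    static boolean shouldGenerateStreamCut(Long latestCutRecordingTime, long recordingTime) {
        return latestCutRecordingTime == null
                || recordingTime - latestCutRecordingTime > MINIMUM_RETENTION_FREQUENCY_MILLIS;
    }

    public static void main(String[] args) {
        long firstCut = System.currentTimeMillis();
        // Five minutes later: too soon, mirroring streamCut2 in the first test.
        System.out.println(shouldGenerateStreamCut(firstCut, firstCut + Duration.ofMinutes(5).toMillis()));
        // Past the minimum frequency: accepted, mirroring streamCut3.
        System.out.println(shouldGenerateStreamCut(firstCut, firstCut + MINIMUM_RETENTION_FREQUENCY_MILLIS + 1));
    }
}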