Example 1 with RetentionPolicy

Use of io.pravega.client.stream.RetentionPolicy in project pravega, from the class ModelHelper, method getUpdateStreamConfig:

/**
 * Translates the REST request object UpdateStreamRequest into the internal StreamConfiguration object.
 *
 * @param updateStreamRequest An object conforming to the updateStreamConfig REST API JSON
 * @param scope               The scope of the stream
 * @param stream              The name of the stream
 * @return The internal StreamConfiguration object
 */
public static final StreamConfiguration getUpdateStreamConfig(final UpdateStreamRequest updateStreamRequest, final String scope, final String stream) {
    ScalingPolicy scalingPolicy;
    if (updateStreamRequest.getScalingPolicy().getType() == ScalingConfig.TypeEnum.FIXED_NUM_SEGMENTS) {
        scalingPolicy = ScalingPolicy.fixed(updateStreamRequest.getScalingPolicy().getMinSegments());
    } else if (updateStreamRequest.getScalingPolicy().getType() == ScalingConfig.TypeEnum.BY_RATE_IN_EVENTS_PER_SEC) {
        scalingPolicy = ScalingPolicy.byEventRate(updateStreamRequest.getScalingPolicy().getTargetRate(), updateStreamRequest.getScalingPolicy().getScaleFactor(), updateStreamRequest.getScalingPolicy().getMinSegments());
    } else {
        scalingPolicy = ScalingPolicy.byDataRate(updateStreamRequest.getScalingPolicy().getTargetRate(), updateStreamRequest.getScalingPolicy().getScaleFactor(), updateStreamRequest.getScalingPolicy().getMinSegments());
    }
    RetentionPolicy retentionPolicy = null;
    if (updateStreamRequest.getRetentionPolicy() != null) {
        switch (updateStreamRequest.getRetentionPolicy().getType()) {
            case LIMITED_SIZE_MB:
                // The REST value is expressed in MB; the internal policy expects bytes.
                retentionPolicy = RetentionPolicy.bySizeBytes(updateStreamRequest.getRetentionPolicy().getValue() * 1024 * 1024);
                break;
            case LIMITED_DAYS:
                // The REST value is a day count; the internal policy expects a Duration.
                retentionPolicy = RetentionPolicy.byTime(Duration.ofDays(updateStreamRequest.getRetentionPolicy().getValue()));
                break;
        }
    }
    return StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(scalingPolicy).retentionPolicy(retentionPolicy).build();
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) RetentionPolicy(io.pravega.client.stream.RetentionPolicy)
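
The two retention variants produced by the switch above can also be constructed directly. A minimal sketch; the 100 MB and 5 day values are illustrative, not taken from the project:

import io.pravega.client.stream.RetentionPolicy;
import java.time.Duration;

// Size-based retention: the REST layer takes MB, so the helper scales to bytes.
RetentionPolicy bySize = RetentionPolicy.bySizeBytes(100L * 1024 * 1024);
// Time-based retention: the REST layer takes a day count, mapped to a Duration.
RetentionPolicy byTime = RetentionPolicy.byTime(Duration.ofDays(5));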

Example 2 with RetentionPolicy

Use of io.pravega.client.stream.RetentionPolicy in project pravega, from the class ModelHelper, method getCreateStreamConfig:

/**
 * Translates the REST request object CreateStreamRequest into the internal StreamConfiguration object.
 *
 * @param createStreamRequest An object conforming to the createStream REST API JSON
 * @param scope               The scope of the stream
 * @return The internal StreamConfiguration object
 */
public static final StreamConfiguration getCreateStreamConfig(final CreateStreamRequest createStreamRequest, final String scope) {
    ScalingPolicy scalingPolicy;
    if (createStreamRequest.getScalingPolicy().getType() == ScalingConfig.TypeEnum.FIXED_NUM_SEGMENTS) {
        scalingPolicy = ScalingPolicy.fixed(createStreamRequest.getScalingPolicy().getMinSegments());
    } else if (createStreamRequest.getScalingPolicy().getType() == ScalingConfig.TypeEnum.BY_RATE_IN_EVENTS_PER_SEC) {
        scalingPolicy = ScalingPolicy.byEventRate(createStreamRequest.getScalingPolicy().getTargetRate(), createStreamRequest.getScalingPolicy().getScaleFactor(), createStreamRequest.getScalingPolicy().getMinSegments());
    } else {
        scalingPolicy = ScalingPolicy.byDataRate(createStreamRequest.getScalingPolicy().getTargetRate(), createStreamRequest.getScalingPolicy().getScaleFactor(), createStreamRequest.getScalingPolicy().getMinSegments());
    }
    RetentionPolicy retentionPolicy = null;
    if (createStreamRequest.getRetentionPolicy() != null) {
        switch(createStreamRequest.getRetentionPolicy().getType()) {
            case LIMITED_SIZE_MB:
                retentionPolicy = RetentionPolicy.bySizeBytes(createStreamRequest.getRetentionPolicy().getValue() * 1024 * 1024);
                break;
            case LIMITED_DAYS:
                retentionPolicy = RetentionPolicy.byTime(Duration.ofDays(createStreamRequest.getRetentionPolicy().getValue()));
                break;
        }
    }
    return StreamConfiguration.builder().scope(scope).streamName(createStreamRequest.getStreamName()).scalingPolicy(scalingPolicy).retentionPolicy(retentionPolicy).build();
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) RetentionPolicy(io.pravega.client.stream.RetentionPolicy)
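
For comparison, these are the three scaling variants the if/else ladder in both helpers can produce, sketched with illustrative parameter values (target rate, scale factor, minimum segment count):

import io.pravega.client.stream.ScalingPolicy;

ScalingPolicy fixed = ScalingPolicy.fixed(3);                  // FIXED_NUM_SEGMENTS: constant segment count
ScalingPolicy byEvents = ScalingPolicy.byEventRate(100, 2, 3); // BY_RATE_IN_EVENTS_PER_SEC
ScalingPolicy byData = ScalingPolicy.byDataRate(1024, 2, 3);   // the else branch: the remaining data-rate type

Note that the helpers treat the data-rate type as the fallback: any request type other than the two checked explicitly lands in the byDataRate branch.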

Example 3 with RetentionPolicy

Use of io.pravega.client.stream.RetentionPolicy in project pravega, from the class StreamMetadataTasksTest, method sizeBasedRetentionStreamTest:

@Test(timeout = 30000)
public void sizeBasedRetentionStreamTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.SIZE).retentionParam(100L).build();
    String streamName = "test";
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(SCOPE).streamName(streamName).scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, streamName, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, streamName, State.ACTIVE, null, executor).get();
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, streamName).get().size());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    // region size based retention on stream cuts on epoch 0
    // region no previous streamcut
    // first retention iteration
    // streamcut1: 19 bytes (0/9, 1/10)
    long recordingTime1 = System.currentTimeMillis();
    Map<Integer, Long> map1 = new HashMap<>();
    map1.put(0, 9L);
    map1.put(1, 10L);
    long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map1, null, executor).join();
    assertEquals(size, 19);
    StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime1, size, map1);
    doReturn(CompletableFuture.completedFuture(streamCut1)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any());
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime1, null, "").get();
    // verify that one streamCut is generated and added.
    List<StreamCutRecord> list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    assertTrue(list.contains(streamCut1));
    // endregion
    // region stream cut exists but latest - previous < retention.size
    // second retention iteration
    // streamcut2: 100 bytes (0/50, 1/50)
    Map<Integer, Long> map2 = new HashMap<>();
    map2.put(0, 50L);
    map2.put(1, 50L);
    long recordingTime2 = recordingTime1 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map2, null, executor).join();
    assertEquals(size, 100L);
    StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime2, size, map2);
    doReturn(CompletableFuture.completedFuture(streamCut2)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), anyString());
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime2, null, "").get();
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    StreamProperty<StreamTruncationRecord> truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    // verify that two stream cuts are now in the retention set; streamCut2 has been added
    // verify that truncation did not happen
    assertTrue(list.contains(streamCut1));
    assertTrue(list.contains(streamCut2));
    assertFalse(truncProp.isUpdating());
    // endregion
    // region latest - previous > retention.size
    // third retention iteration
    // streamcut3: 120 bytes (0/60, 1/60)
    Map<Integer, Long> map3 = new HashMap<>();
    map3.put(0, 60L);
    map3.put(1, 60L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map3, null, executor).join();
    assertEquals(size, 120L);
    long recordingTime3 = recordingTime2 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime3, size, map3);
    doReturn(CompletableFuture.completedFuture(streamCut3)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), anyString());
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime3, null, "").get();
    // verify that two stream cuts are in the retention set: cut 2 and cut 3.
    // verify that truncation has happened.
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(list.contains(streamCut1));
    assertTrue(list.contains(streamCut2));
    assertTrue(list.contains(streamCut3));
    assertTrue(truncProp.isUpdating());
    assertTrue(truncProp.getProperty().getStreamCut().get(0) == 9L && truncProp.getProperty().getStreamCut().get(1) == 10L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(truncProp.isUpdating());
    // endregion
    // endregion
    // region test retention over multiple epochs
    // scale1 --> seal segments 0 and 1 and create 2 and 3. (0/70, 1/70)
    List<AbstractMap.SimpleEntry<Double, Double>> newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 0.5));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.5, 1.0));
    Map<Integer, Long> sealedSegmentsWithSize = new HashMap<>();
    sealedSegmentsWithSize.put(0, 70L);
    sealedSegmentsWithSize.put(1, 70L);
    scale(SCOPE, streamName, sealedSegmentsWithSize, newRanges);
    // region latest streamcut on new epoch but latest (newepoch) - previous (oldepoch) < retention.size
    // 4th retention iteration
    // streamcut4: 199 bytes (2/29, 3/30)
    Map<Integer, Long> map4 = new HashMap<>();
    map4.put(2, 29L);
    map4.put(3, 30L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map4, null, executor).join();
    assertEquals(size, 199L);
    long recordingTime4 = recordingTime3 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime4, size, map4);
    doReturn(CompletableFuture.completedFuture(streamCut4)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), anyString());
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime4, null, "").get();
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(list.contains(streamCut1));
    assertTrue(list.contains(streamCut2));
    assertTrue(list.contains(streamCut3));
    assertTrue(list.contains(streamCut4));
    assertFalse(truncProp.isUpdating());
    // endregion
    // region latest streamcut on new epoch but latest (newepoch) - previous (oldepoch) > retention.size
    // 5th retention iteration
    // streamcut5: 221 bytes (2/41, 3/40)
    Map<Integer, Long> map5 = new HashMap<>();
    map5.put(2, 41L);
    map5.put(3, 40L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map5, null, executor).join();
    assertEquals(size, 221L);
    long recordingTime5 = recordingTime4 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut5 = new StreamCutRecord(recordingTime5, size, map5);
    doReturn(CompletableFuture.completedFuture(streamCut5)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), anyString());
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime5, null, "").get();
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(list.contains(streamCut1));
    assertFalse(list.contains(streamCut2));
    assertFalse(list.contains(streamCut3));
    assertTrue(list.contains(streamCut4));
    assertTrue(list.contains(streamCut5));
    assertTrue(truncProp.isUpdating());
    assertTrue(truncProp.getProperty().getStreamCut().get(0) == 60L && truncProp.getProperty().getStreamCut().get(1) == 60L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(truncProp.isUpdating());
    // endregion
    // region test retention with external manual truncation
    // scale2 --> split segment 2 into segments 4 and 5. Sealed size for segment 2 = 50
    newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 0.25));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.25, 0.5));
    sealedSegmentsWithSize = new HashMap<>();
    sealedSegmentsWithSize.put(2, 50L);
    scale(SCOPE, streamName, sealedSegmentsWithSize, newRanges);
    // region add streamcut on new epoch such that latest - oldest < retention.size
    // streamcut6: 290 bytes (3/40, 4/30, 5/30)
    // verify no new truncation happens.
    Map<Integer, Long> map6 = new HashMap<>();
    map6.put(3, 40L);
    map6.put(4, 30L);
    map6.put(5, 30L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map6, null, executor).join();
    assertEquals(size, 290L);
    long recordingTime6 = recordingTime5 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut6 = new StreamCutRecord(recordingTime6, size, map6);
    doReturn(CompletableFuture.completedFuture(streamCut6)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), anyString());
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime6, null, "").get();
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(list.contains(streamCut1));
    assertFalse(list.contains(streamCut2));
    assertFalse(list.contains(streamCut3));
    assertTrue(list.contains(streamCut4));
    assertTrue(list.contains(streamCut5));
    assertTrue(list.contains(streamCut6));
    assertFalse(truncProp.isUpdating());
    // endregion
    // truncate manually at streamCutManual: (1/65, 4/10, 5/10)
    Map<Integer, Long> streamCutManual = new HashMap<>();
    streamCutManual.put(1, 65L);
    streamCutManual.put(4, 10L);
    streamCutManual.put(5, 10L);
    CompletableFuture<UpdateStreamStatus.Status> future = streamMetadataTasks.truncateStream(SCOPE, streamName, streamCutManual, null);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertTrue(Futures.await(future));
    assertEquals(future.join(), UpdateStreamStatus.Status.SUCCESS);
    // streamcut7: 340 bytes (3/50, 4/50, 5/50)
    Map<Integer, Long> map7 = new HashMap<>();
    map7.put(3, 50L);
    map7.put(4, 50L);
    map7.put(5, 50L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map7, null, executor).join();
    assertEquals(size, 340L);
    long recordingTime7 = recordingTime6 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut7 = new StreamCutRecord(recordingTime7, size, map7);
    doReturn(CompletableFuture.completedFuture(streamCut7)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), anyString());
    // verify no new truncation: streamCut5 would be chosen, but it is discarded because it is not strictly ahead of the truncation record
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime7, null, "").join();
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(list.contains(streamCut1));
    assertFalse(list.contains(streamCut2));
    assertFalse(list.contains(streamCut3));
    assertTrue(list.contains(streamCut4));
    assertTrue(list.contains(streamCut5));
    assertTrue(list.contains(streamCut6));
    assertTrue(list.contains(streamCut7));
    assertFalse(truncProp.isUpdating());
    // streamcut8: 400 bytes (3/70, 4/70, 5/70)
    Map<Integer, Long> map8 = new HashMap<>();
    map8.put(3, 70L);
    map8.put(4, 70L);
    map8.put(5, 70L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, streamName, map8, null, executor).join();
    assertEquals(size, 400L);
    long recordingTime8 = recordingTime7 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut8 = new StreamCutRecord(recordingTime8, size, map8);
    doReturn(CompletableFuture.completedFuture(streamCut8)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), anyString());
    streamMetadataTasks.retention(SCOPE, streamName, retentionPolicy, recordingTime8, null, "").get();
    list = streamStorePartialMock.getStreamCutsFromRetentionSet(SCOPE, streamName, null, executor).get();
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    // verify truncation happens at streamcut6
    assertFalse(list.contains(streamCut1));
    assertFalse(list.contains(streamCut2));
    assertFalse(list.contains(streamCut3));
    assertFalse(list.contains(streamCut4));
    assertFalse(list.contains(streamCut5));
    assertFalse(list.contains(streamCut6));
    assertTrue(list.contains(streamCut7));
    assertTrue(truncProp.isUpdating());
    assertTrue(truncProp.getProperty().getStreamCut().get(3) == 40L && truncProp.getProperty().getStreamCut().get(4) == 30L && truncProp.getProperty().getStreamCut().get(5) == 30L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    truncProp = streamStorePartialMock.getTruncationProperty(SCOPE, streamName, true, null, executor).get();
    assertFalse(truncProp.isUpdating());
// endregion
// endregion
}
Also used : ScaleStreamStatus(io.pravega.controller.stream.api.grpc.v1.Controller.ScaleResponse.ScaleStreamStatus) UpdateStreamStatus(io.pravega.controller.stream.api.grpc.v1.Controller.UpdateStreamStatus) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamTruncationRecord(io.pravega.controller.store.stream.tables.StreamTruncationRecord) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) AbstractMap(java.util.AbstractMap) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) StreamCutRecord(io.pravega.controller.store.stream.StreamCutRecord) Test(org.junit.Test)
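
The truncation points asserted above follow a simple rule: on each retention iteration, pick the newest stream cut in the retention set whose recorded size trails the latest cut by more than retentionParam bytes, and truncate there. A simplified sketch of that selection (illustrative only; the helper name chooseTruncationCut is hypothetical, the accessor getRecordingSize() is assumed to be StreamCutRecord's generated getter, and the real logic lives in StreamMetadataTasks):

import io.pravega.controller.store.stream.StreamCutRecord;
import java.util.List;

// Given retention-set cuts ordered oldest to newest, return the cut to truncate at,
// or null when no cut is far enough behind the head of the stream yet.
static StreamCutRecord chooseTruncationCut(List<StreamCutRecord> cuts, long retentionParamBytes) {
    StreamCutRecord latest = cuts.get(cuts.size() - 1);
    StreamCutRecord candidate = null;
    for (StreamCutRecord cut : cuts) {
        if (latest.getRecordingSize() - cut.getRecordingSize() > retentionParamBytes) {
            candidate = cut; // keep the newest cut that still satisfies the size gap
        }
    }
    return candidate;
}

With retentionParam = 100 this reproduces the test's expectations: at streamCut3 (120 bytes) the gap to streamCut1 (19 bytes) is 101 > 100, so truncation lands on streamCut1 (0/9, 1/10); at streamCut5 (221 bytes) the newest qualifying cut is streamCut3 (gap 101), matching the asserted offsets (0/60, 1/60).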

Example 4 with RetentionPolicy

Use of io.pravega.client.stream.RetentionPolicy in project pravega, from the class StreamMetadataStoreTest, method sizeTest:

@Test
public void sizeTest() throws Exception {
    final String scope = "ScopeSize";
    final String stream = "StreamSize";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.SIZE).retentionParam(100L).build();
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    store.addUpdateStreamForAutoStreamCut(scope, stream, retentionPolicy, null, executor).get();
    List<String> streams = store.getStreamsForBucket(0, executor).get();
    assertTrue(streams.contains(String.format("%s/%s", scope, stream)));
    // region Size Computation on stream cuts on epoch 0
    Map<Integer, Long> map1 = new HashMap<>();
    map1.put(0, 10L);
    map1.put(1, 10L);
    Long size = store.getSizeTillStreamCut(scope, stream, map1, null, executor).join();
    assertTrue(size == 20L);
    long recordingTime = System.currentTimeMillis();
    StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime, size, map1);
    store.addStreamCutToRetentionSet(scope, stream, streamCut1, null, executor).get();
    Map<Integer, Long> map2 = new HashMap<>();
    map2.put(0, 20L);
    map2.put(1, 20L);
    size = store.getSizeTillStreamCut(scope, stream, map2, null, executor).join();
    assertTrue(size == 40L);
    StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime + 10, size, map2);
    store.addStreamCutToRetentionSet(scope, stream, streamCut2, null, executor).get();
    Map<Integer, Long> map3 = new HashMap<>();
    map3.put(0, 30L);
    map3.put(1, 30L);
    size = store.getSizeTillStreamCut(scope, stream, map3, null, executor).join();
    assertTrue(size == 60L);
    StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime + 20, 60L, map3);
    store.addStreamCutToRetentionSet(scope, stream, streamCut3, null, executor).get();
    // endregion
    // region Size Computation on multiple epochs
    long scaleTs = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.0, 0.5);
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.5, 1.0);
    List<Integer> scale1SealedSegments = Lists.newArrayList(0, 1);
    StartScaleResponse response = store.startScale(scope, stream, scale1SealedSegments, Arrays.asList(segment2, segment3), scaleTs, false, null, executor).join();
    final List<Segment> scale1SegmentsCreated = response.getSegmentsCreated();
    store.setState(scope, stream, State.SCALING, null, executor).get();
    store.scaleNewSegmentsCreated(scope, stream, scale1SealedSegments, scale1SegmentsCreated, response.getActiveEpoch(), scaleTs, null, executor).join();
    store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 40L)), scale1SegmentsCreated, response.getActiveEpoch(), scaleTs, null, executor).join();
    // complex stream cut - across two epochs
    Map<Integer, Long> map4 = new HashMap<>();
    map4.put(0, 40L);
    map4.put(3, 10L);
    size = store.getSizeTillStreamCut(scope, stream, map4, null, executor).join();
    assertTrue(size == 90L);
    StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime + 30, size, map4);
    store.addStreamCutToRetentionSet(scope, stream, streamCut4, null, executor).get();
    // simple stream cut on epoch 2
    Map<Integer, Long> map5 = new HashMap<>();
    map5.put(2, 10L);
    map5.put(3, 10L);
    size = store.getSizeTillStreamCut(scope, stream, map5, null, executor).join();
    assertTrue(size == 100L);
    StreamCutRecord streamCut5 = new StreamCutRecord(recordingTime + 30, size, map5);
    store.addStreamCutToRetentionSet(scope, stream, streamCut5, null, executor).get();
// endregion
}
Also used : Arrays(java.util.Arrays) AssertExtensions(io.pravega.test.common.AssertExtensions) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) Exceptions(io.pravega.common.Exceptions) HashMap(java.util.HashMap) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) AtomicReference(java.util.concurrent.atomic.AtomicReference) Lists(com.google.common.collect.Lists) After(org.junit.After) Duration(java.time.Duration) Map(java.util.Map) Timeout(org.junit.rules.Timeout) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) SimpleEntry(java.util.AbstractMap.SimpleEntry) TaskExceptions(io.pravega.controller.server.eventProcessor.requesthandlers.TaskExceptions) BucketChangeListener(io.pravega.controller.server.retention.BucketChangeListener) DeleteScopeStatus(io.pravega.controller.stream.api.grpc.v1.Controller.DeleteScopeStatus) Before(org.junit.Before) Assert.assertNotNull(org.junit.Assert.assertNotNull) StreamTruncationRecord(io.pravega.controller.store.stream.tables.StreamTruncationRecord) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) IOException(java.io.IOException) CompletionException(java.util.concurrent.CompletionException) UUID(java.util.UUID) State(io.pravega.controller.store.stream.tables.State) Collectors(java.util.stream.Collectors) TxnResource(io.pravega.controller.store.task.TxnResource) Executors(java.util.concurrent.Executors) Assert.assertNotEquals(org.junit.Assert.assertNotEquals) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) Rule(org.junit.Rule) Assert.assertNull(org.junit.Assert.assertNull) Assert.assertFalse(org.junit.Assert.assertFalse) Optional(java.util.Optional) Assert(org.junit.Assert) Collections(java.util.Collections) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Futures(io.pravega.common.concurrent.Futures) Assert.assertEquals(org.junit.Assert.assertEquals) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) HashMap(java.util.HashMap) SimpleEntry(java.util.AbstractMap.SimpleEntry) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) Test(org.junit.Test)
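
The arithmetic behind getSizeTillStreamCut is worth making explicit: segments wholly behind the cut contribute their full sealed size, and segments named in the cut contribute only their offsets. A worked check of the complex cut map4 above (a sketch of the accounting, not the store's implementation):

// map4 = {0: 40, 3: 10}, taken after scale1 sealed segments 0 and 1 at 40 bytes each.
// Segment 0 appears in the cut at offset 40; segment 1 lies wholly behind the cut
// because its successor, segment 3, is in the cut; segment 3 contributes its offset.
long sizeTillMap4 = 40L   // cut offset in segment 0
        + 40L             // full sealed size of segment 1
        + 10L;            // cut offset in segment 3
// sizeTillMap4 == 90L, matching assertTrue(size == 90L) in the test.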

Example 5 with RetentionPolicy

Use of io.pravega.client.stream.RetentionPolicy in project pravega, from the class ModelHelperTest, method encodeStreamConfig:

@Test
public void encodeStreamConfig() {
    StreamConfiguration config = ModelHelper.encode(ModelHelper.decode(StreamConfiguration.builder()
            .scope("scope")
            .streamName("test")
            .scalingPolicy(ScalingPolicy.byEventRate(100, 2, 3))
            .retentionPolicy(RetentionPolicy.bySizeBytes(1000L))
            .build()));
    assertEquals("test", config.getStreamName());
    ScalingPolicy policy = config.getScalingPolicy();
    assertEquals(ScalingPolicy.ScaleType.BY_RATE_IN_EVENTS_PER_SEC, policy.getScaleType());
    assertEquals(100L, policy.getTargetRate());
    assertEquals(2, policy.getScaleFactor());
    assertEquals(3, policy.getMinNumSegments());
    RetentionPolicy retentionPolicy = config.getRetentionPolicy();
    assertEquals(RetentionPolicy.RetentionType.SIZE, retentionPolicy.getRetentionType());
    assertEquals(1000L, (long) retentionPolicy.getRetentionParam());
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) Test(org.junit.Test)
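
The same round trip should hold for a time-based policy. A hedged sketch mirroring the test above (same imports plus java.time.Duration; it assumes ModelHelper's decode/encode pair preserves whole-day TIME policies the way it preserves SIZE policies, and that a TIME policy stores its retention parameter in milliseconds):

StreamConfiguration timeConfig = ModelHelper.encode(ModelHelper.decode(StreamConfiguration.builder()
        .scope("scope")
        .streamName("test")
        .scalingPolicy(ScalingPolicy.fixed(1))
        .retentionPolicy(RetentionPolicy.byTime(Duration.ofDays(2)))
        .build()));
assertEquals(RetentionPolicy.RetentionType.TIME, timeConfig.getRetentionPolicy().getRetentionType());
// Assumed: byTime stores the duration as millis, so a whole-day value survives the round trip.
assertEquals(Duration.ofDays(2).toMillis(), (long) timeConfig.getRetentionPolicy().getRetentionParam());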

Aggregations

RetentionPolicy (io.pravega.client.stream.RetentionPolicy): 11
ScalingPolicy (io.pravega.client.stream.ScalingPolicy): 9
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 7
Test (org.junit.Test): 7
HashMap (java.util.HashMap): 4
AtomicReference (java.util.concurrent.atomic.AtomicReference): 4
StreamCutRecord (io.pravega.controller.store.stream.StreamCutRecord): 3
List (java.util.List): 3
CompletionException (java.util.concurrent.CompletionException): 3
Collectors (java.util.stream.Collectors): 3
Preconditions (com.google.common.base.Preconditions): 2
Exceptions (io.pravega.common.Exceptions): 2
Futures (io.pravega.common.concurrent.Futures): 2
ControllerEventStreamWriterMock (io.pravega.controller.mocks.ControllerEventStreamWriterMock): 2
TaskExceptions (io.pravega.controller.server.eventProcessor.requesthandlers.TaskExceptions): 2
BucketChangeListener (io.pravega.controller.server.retention.BucketChangeListener): 2
StreamTruncationRecord (io.pravega.controller.store.stream.tables.StreamTruncationRecord): 2
Config (io.pravega.controller.util.Config): 2
UpdateStreamEvent (io.pravega.shared.controller.event.UpdateStreamEvent): 2
CompletableFuture (java.util.concurrent.CompletableFuture): 2