Example 6 with StreamTruncationRecord

Use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.

From class StreamMetadataTasksTest, method consumptionBasedRetentionTimeLimitWithOverlappingMinTest.

@Test(timeout = 30000)
public void consumptionBasedRetentionTimeLimitWithOverlappingMinTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.byTime(Duration.ofMillis(10), Duration.ofMillis(50));
    String stream1 = "consumptionSizeOverlap";
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    AtomicLong time = new AtomicLong(0L);
    streamMetadataTasks.setRetentionClock(time::get);
    final Segment seg0 = new Segment(SCOPE, stream1, 0L);
    final Segment seg1 = new Segment(SCOPE, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
    ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
    consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    String subscriber1 = "subscriber1";
    CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createStatus.join().getStatus());
    // build a retention set that will eventually hold 5 values; the first two cuts are added here
    // s0: 10: seg0/1, seg1/5 ==> time retained if truncated at = 10 <= min
    // s1: 20: seg0/1, seg1/6 ==> time retained if truncated at = 0
    time.set(10L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 5L, ImmutableMap.of(0L, 1L, 1L, 5L)), null, executor).join();
    time.set(20L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 6L, ImmutableMap.of(0L, 1L, 1L, 6L)), null, executor).join();
    // subscriber streamcut : 0/0, 1/10
    final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 0L, 1L, 10L), 0L).join();
    // lowerbound overlaps with the min bound and there is no clear max bound, so no truncation happens.
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
    VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertFalse(truncationRecord.getObject().isUpdating());
    // s0: 10: seg0/1, seg1/5 ==> time retained if truncated at = 40 <== max
    // s1: 20: seg0/1, seg1/6 ==> time retained if truncated at = 30
    // s2: 30: seg0/10, seg1/7 ==> time retained if truncated at = 20
    // s3: 40: seg0/10, seg1/8 ==> time retained if truncated at = 10  <== min
    // s4: 50: seg0/10, seg1/10
    time.set(30L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 17L, ImmutableMap.of(0L, 10L, 1L, 7L)), null, executor).join();
    time.set(40L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 18L, ImmutableMap.of(0L, 10L, 1L, 8L)), null, executor).join();
    time.set(50L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 20L, ImmutableMap.of(0L, 10L, 1L, 10L)), null, executor).join();
    // subscriber streamcut: slb: seg0/9, seg1/10 ==> overlaps with min bound streamcut.
    // so we should actually truncate at streamcut before slb.
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 9L, 1L, 10L), 0L).join();
    // this should truncate at s1, the first streamcut before the slb.
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
    truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 6L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
Also used : ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamCut(io.pravega.client.stream.StreamCut) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) Segment(io.pravega.client.segment.impl.Segment) AtomicLong(java.util.concurrent.atomic.AtomicLong) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) Stream(io.pravega.client.stream.Stream) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) Test(org.junit.Test)
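
The comments in this test sketch how the controller picks a truncation point for a time-based consumption policy. Below is a minimal, hypothetical illustration of that bound-selection logic; Entry, chooseTruncationCut, and isAtOrBehind are illustrative names, not Pravega's API, and the real implementation additionally handles streamcuts that overlap a bound, which is exactly what this test exercises.

import java.util.List;
import java.util.Map;

// Illustrative sketch only. Retention-set entries are assumed ordered oldest-first.
final class TimeRetentionSketch {

    record Entry(long recordingTime, Map<Long, Long> streamCut) { }

    static Map<Long, Long> chooseTruncationCut(List<Entry> retentionSet, Map<Long, Long> subscriberLowerBound,
                                               long now, long minMillis, long maxMillis) {
        Entry minBound = null;
        Entry maxBound = null;
        for (Entry e : retentionSet) {
            long retained = now - e.recordingTime();
            if (retained >= minMillis) {
                // keep advancing: the latest cut that still retains at least minMillis
                minBound = e;
            }
            if (retained <= maxMillis && maxBound == null) {
                // the oldest cut that retains no more than maxMillis
                maxBound = e;
            }
        }
        // clamp the subscriber lowerbound between the max bound and the min bound
        if (maxBound != null && isAtOrBehind(subscriberLowerBound, maxBound.streamCut())) {
            return maxBound.streamCut();
        }
        if (minBound != null && isAtOrBehind(minBound.streamCut(), subscriberLowerBound)) {
            return minBound.streamCut();
        }
        return subscriberLowerBound;
    }

    // "a is at or behind b": every offset in a is no greater than the matching offset in b.
    // Real streamcuts span scaled segments, so the store performs an epoch-aware comparison.
    static boolean isAtOrBehind(Map<Long, Long> a, Map<Long, Long> b) {
        return a.entrySet().stream().allMatch(e -> b.getOrDefault(e.getKey(), Long.MAX_VALUE) >= e.getValue());
    }
}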

Example 7 with StreamTruncationRecord

Use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.

From class StreamMetadataTasksTest, method consumptionBasedRetentionSizeLimitTest.

@Test(timeout = 30000)
public void consumptionBasedRetentionSizeLimitTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.bySizeBytes(2L, 10L);
    String stream1 = "consumptionSize";
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    final Segment seg0 = new Segment(SCOPE, stream1, 0L);
    final Segment seg1 = new Segment(SCOPE, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
    ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
    consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    // region case 1: basic retention
    String subscriber1 = "subscriber1";
    CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createStatus.join().getStatus());
    String subscriber2 = "subscriber2";
    createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber2, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createStatus.join().getStatus());
    final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 2L, 1L, 1L), 0L).join();
    final String subscriber2Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber2);
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 1L, 1L, 2L), 0L).join();
    Map<Long, Long> map1 = new HashMap<>();
    map1.put(0L, 2L);
    map1.put(1L, 2L);
    long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(1L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // call retention and verify that retention policy applies
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 1L, null, "").join();
    // now retention set has one stream cut 0/2, 1/2
    // subscriber lowerbound is 0/1, 1/1.. truncation should happen at the lowerbound
    VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 1L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
    // endregion
    // region case 2: min policy check
    // generate the next streamcut at the same position, 0/2, 1/2
    map1.put(0L, 2L);
    map1.put(1L, 2L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(20L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // update both readers to make sure they have read till the latest position. we have set the min limit to 2.
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 2L, 1L, 2L), 0L).join();
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 2L, 1L, 2L), 0L).join();
    // no new truncation should happen.
    // verify that truncation record has not changed.
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 20L, null, "").join();
    // now retention set has two stream cuts 0/2, 1/2...0/2, 1/2
    // subscriber lowerbound is 0/2, 1/2.. does not meet min bound criteria. we also do not have a max that satisfies the limit. no truncation should happen.
    // no change:
    truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 1L);
    assertFalse(truncationRecord.getObject().isUpdating());
    // endregion
    // region case 3: min criteria not met on lower bound. truncate at min.
    map1.put(0L, 10L);
    map1.put(1L, 10L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(30L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // update both readers to make sure they have read till the latest position - 1. we have set the min limit to 2.
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 10L, 1L, 9L), 0L).join();
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 10L, 1L, 9L), 0L).join();
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 30L, null, "").join();
    // now retention set has three stream cuts 0/2, 1/2...0/2, 1/2... 0/10, 1/10
    // subscriber lowerbound is 0/10, 1/9.. does not meet min bound criteria. but we have min bound on truncation record
    // truncation should happen at 0/2, 1/2
    truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 2L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 2L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
    // endregion
    // region case 4: lowerbound behind max
    // now move the stream further ahead so that max truncation limit is crossed but lowerbound is behind max.
    map1.put(0L, 20L);
    map1.put(1L, 20L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(40L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 40L, null, "").join();
    // now retention set has four stream cuts 0/2, 1/2...0/2, 1/2... 0/10, 1/10.. 0/20, 1/20
    // subscriber lowerbound is 0/10, 1/9.. it meets the min bound criteria but violates the max criteria.
    // no streamcut in the retention set can be chosen without breaking either the min or the max criteria.
    // in this case the max bound falls back to the min bound cut 0/10, 1/10. this is compared with the
    // subscriber lowerbound, and whichever purges more data is chosen.
    // so truncation should happen at 0/10, 1/10
    truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 10L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 10L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
    // endregion
    // region case 5: lowerbound is beyond max but there is no clear max streamcut available in the retention set
    map1.put(0L, 30L);
    map1.put(1L, 30L);
    size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(50L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // update both readers to make sure they have read till the latest position - 1. we have set the min limit to 2.
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 21L, 1L, 19L), 0L).join();
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 21L, 1L, 19L), 0L).join();
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 50L, null, "").join();
    // now retention set has five stream cuts 0/2, 1/2...0/2, 1/2... 0/10, 1/10.. 0/20, 1/20.. 0/30, 1/30
    // subscriber lowerbound is 0/21, 1/19.. it meets the min bound criteria and is also greater than the max bound.
    // but, as in the previous case, no max bound streamcut can be chosen from the retention set.
    // this time both the min bound and the max bound resolve to 0/20, 1/20.
    // truncation should happen at the lowerbound, since the data retained is identical for the lowerbound and the retention set streamcut.
    truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 21L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 19L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
// endregion
}
Also used : ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamCut(io.pravega.client.stream.StreamCut) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) HashMap(java.util.HashMap) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) Segment(io.pravega.client.segment.impl.Segment) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) Stream(io.pravega.client.stream.Stream) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) Test(org.junit.Test)
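
The size-based policy in this test applies the same clamping, but the bounds come from retained bytes rather than retained time. A short illustrative helper (hypothetical names; each retention-set entry is assumed to carry the stream size up to its cut, which is what the second argument of StreamCutRecord records in the test above):

// Illustrative only: bytes that would remain on the stream if we truncated at a
// retention-set entry, given the current stream size (the size recorded by the
// newest entry) and the size recorded up to that entry's cut.
static long retainedBytes(long currentSize, long sizeTillCut) {
    return currentSize - sizeTillCut;
}

// Under RetentionPolicy.bySizeBytes(2L, 10L) as used above, a cut qualifies as a
// min bound while retainedBytes(...) >= 2 and as a max bound once
// retainedBytes(...) <= 10; the subscriber lowerbound is then clamped between
// the two bounds, as in the time-based sketch earlier.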

Example 8 with StreamTruncationRecord

Use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.

From class StreamMetadataTasksTest, method consumptionBasedRetentionWithScale.

@Test(timeout = 30000)
public void consumptionBasedRetentionWithScale() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(3);
    final RetentionPolicy retentionPolicy = RetentionPolicy.bySizeBytes(0L, 1000L);
    String stream1 = "consumptionSize";
    StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    configuration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.startUpdateConfiguration(SCOPE, stream1, configuration, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join();
    streamStorePartialMock.completeUpdateConfiguration(SCOPE, stream1, configRecord, null, executor).join();
    final Segment seg0 = new Segment(SCOPE, stream1, 0L);
    final Segment seg1 = new Segment(SCOPE, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
    ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
    consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    String subscriber1 = "subscriber1";
    CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse1 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse1.getStatus());
    assertEquals(0L, createResponse1.getConfig().getGeneration());
    assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse1.getConfig().getReaderGroupId()));
    String subscriber2 = "subscriber2";
    createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber2, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse2 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse2.getStatus());
    assertEquals(0L, createResponse2.getConfig().getGeneration());
    assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse2.getConfig().getReaderGroupId()));
    final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
    final String subscriber2Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber2);
    // example::
    // | s0 | s3      |
    // |    | s4 |    | s6
    // | s1      | s5 |
    // | s2      |    |
    // valid stream cuts: { s0/off, s5/-1 }, { s0/off, s2/off, s5/-1 }
    // lower bound = { s0/off, s2/off, s5/-1 }
    // valid stream cuts: { s0/off, s5/-1 }, { s0/off, s2/off, s5/-1 }, { s0/off, s1/off, s2/off }
    // lower bound = { s0/off, s1/off, s2/off }
    long three = NameUtils.computeSegmentId(3, 1);
    long four = NameUtils.computeSegmentId(4, 1);
    long five = NameUtils.computeSegmentId(5, 2);
    long six = NameUtils.computeSegmentId(6, 3);
    // 0 split to 3 and 4
    scale(SCOPE, stream1, ImmutableMap.of(0L, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0 / 6), new AbstractMap.SimpleEntry<>(1.0 / 6, 1.0 / 3)));
    // 4, 1, 2 merged to 5
    scale(SCOPE, stream1, ImmutableMap.of(1L, 1L, 2L, 2L, four, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(1.0 / 6, 1.0)));
    // merge 3, 5 to 6
    scale(SCOPE, stream1, ImmutableMap.of(three, 1L, five, 2L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)));
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, five, -1L), 0L).join();
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(), createResponse2.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, 2L, 1L, five, -1L), 0L).join();
    Map<Long, Long> map1 = new HashMap<>();
    map1.put(six, 2L);
    long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(1L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // call retention and verify that retention policy applies
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 1L, null, "").join();
    // now retention set has one stream cut 6/2
    // subscriber lowerbound is 0/1, 2/1, 5/-1.. truncation should happen at the lowerbound
    VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(2L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(five).longValue(), -1L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
Also used : StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) HashMap(java.util.HashMap) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) Segment(io.pravega.client.segment.impl.Segment) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) Stream(io.pravega.client.stream.Stream) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamCut(io.pravega.client.stream.StreamCut) Controller(io.pravega.controller.stream.api.grpc.v1.Controller) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) Test(org.junit.Test)
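
The segment ids in this test are built with NameUtils.computeSegmentId(segmentNumber, epoch). A sketch of that packing, assuming the conventional layout where the creation epoch occupies the high 32 bits of the id and the segment number the low 32 bits (treat this as an assumption, not a contract):

// Sketch of the assumed id layout: under it, computeSegmentId(5, 2) from the
// test above yields (2L << 32) | 5.
static long computeSegmentId(int segmentNumber, int epoch) {
    return ((long) epoch << 32) | (segmentNumber & 0xFFFFFFFFL);
}

static int getSegmentNumber(long segmentId) {
    return (int) segmentId;          // low 32 bits
}

static int getEpoch(long segmentId) {
    return (int) (segmentId >>> 32); // high 32 bits
}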

Example 9 with StreamTruncationRecord

Use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.

From class StreamMetadataTasksTest, method consumptionBasedRetentionTimeLimitTest.

@Test(timeout = 30000)
public void consumptionBasedRetentionTimeLimitTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.byTime(Duration.ofMillis(1L), Duration.ofMillis(10L));
    String stream1 = "consumptionTime";
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    AtomicLong time = new AtomicLong(0L);
    streamMetadataTasks.setRetentionClock(time::get);
    // region case 1: basic retention
    final Segment seg0 = new Segment(SCOPE, stream1, 0L);
    final Segment seg1 = new Segment(SCOPE, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
    ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
    consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    String subscriber1 = "subscriber1";
    CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse1 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse1.getStatus());
    assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse1.getConfig().getReaderGroupId()));
    assertEquals(0L, createResponse1.getConfig().getGeneration());
    String subscriber2 = "subscriber2";
    createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber2, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse2 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse2.getStatus());
    assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse2.getConfig().getReaderGroupId()));
    assertEquals(0L, createResponse2.getConfig().getGeneration());
    final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
    final String subscriber2Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber2);
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 2L, 1L, 1L), 0L).join();
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(), createResponse2.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, 1L, 2L), 0L).join();
    Map<Long, Long> map1 = new HashMap<>();
    map1.put(0L, 2L);
    map1.put(1L, 2L);
    long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(time.get(), size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // call retention and verify that retention policy applies
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
    // now retention set has one stream cut 0/2, 1/2, recording time 1L
    // subscriber lowerbound is 0/1, 1/1.. truncation should not happen, as this lowerbound is ahead of the min retention streamcut.
    VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertFalse(truncationRecord.getObject().isUpdating());
    // endregion
    // region case 2 min policy check
    // subscriber streamcut > min time streamcut
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(2L, 4L, ImmutableMap.of(0L, 2L, 1L, 2L)), null, executor).join();
    time.set(10L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 20L, ImmutableMap.of(0L, 10L, 1L, 10L)), null, executor).join();
    time.set(11L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 20L, ImmutableMap.of(0L, 10L, 1L, 10L)), null, executor).join();
    // retentionset: 0L: 0L/2L, 1L/2L... 2L: 0L/2L, 1L/2L... 10L: 0/10, 1/10....11L: 0/10, 1/10.
    // update both readers to 0/3, 1/3.
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 3L, 1L, 3L), 0L).join();
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(), createResponse2.getConfig().getGeneration(), ImmutableMap.of(0L, 3L, 1L, 3L), 0L).join();
    // new truncation should happen at subscriber lowerbound.
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
    truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 3L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 3L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
    // endregion
    // region case 3: min criteria not met on lower bound. truncate at min.
    time.set(20L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 22L, ImmutableMap.of(0L, 11L, 1L, 11L)), null, executor).join();
    // update both readers to make sure they have read till the latest position - 1. we have set the min limit to 2.
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 11L, 1L, 11L), 0L).join();
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(), createResponse2.getConfig().getGeneration(), ImmutableMap.of(0L, 11L, 1L, 11L), 0L).join();
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
    // retentionset: 0L: 0L/2L, 1L/2L... 2L: 0L/2L, 1L/2L... 10L: 0/10, 1/10....11L: 0/10, 1/10... 20L: 0/11, 1/11
    // subscriber lowerbound is 0/11, 1/11
    truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    // truncate at limit min
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 10L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 10L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
    // endregion
    // region case 4: lowerbound behind max
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(30L, 40L, ImmutableMap.of(0L, 20L, 1L, 20L)), null, executor).join();
    time.set(40L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 42L, ImmutableMap.of(0L, 21L, 1L, 21L)), null, executor).join();
    // update both readers to make sure they have read till the latest position - 1. we have set the min limit to 2.
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 11L, 1L, 11L), 0L).join();
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 11L, 1L, 11L), 0L).join();
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
    // now retention set has five stream cuts 1: 0/2, 1/2...10: 0/10, 1/10... 20: 0/11, 1/11.. 30: 0/20, 1/20.. 40L: 0/21, 1/21
    // subscriber lowerbound is 0/11, 1/11 ..
    // max bound = streamcut recorded at time 30 (0/20, 1/20); max limit = 30
    // lowerbound is behind the max bound, so we truncate at the max bound
    truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 20L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 20L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
    // endregion
    // region case 5: lowerbound overlaps with maxbound
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(50L, 43L, ImmutableMap.of(0L, 21L, 1L, 22L)), null, executor).join();
    time.set(59L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 60L, ImmutableMap.of(0L, 30L, 1L, 30L)), null, executor).join();
    time.set(60L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 60L, ImmutableMap.of(0L, 30L, 1L, 30L)), null, executor).join();
    // update both readers to make sure they have read till the latest position - 1. we have set the min limit to 2.
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 22L, 1L, 21L), 0L).join();
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 22L, 1L, 21L), 0L).join();
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
    // now retention set has eight stream cuts 1: 0/2, 1/2...10: 0/10, 1/10... 20: 0/11, 1/11.. 30: 0/20, 1/20.. 40L: 0/21, 1/21
    // 50: 0/21, 1/22 ... 59: 0/30, 1/30.. 60: 0/30, 1/30
    // subscriber lowerbound is 0/22, 1/21
    // max: 50, limit: 50
    // this overlaps with max. so truncate at max (50: 0/21, 1/22)
    truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 21L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 22L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
// endregion
}
Also used : StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) HashMap(java.util.HashMap) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) Segment(io.pravega.client.segment.impl.Segment) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) Stream(io.pravega.client.stream.Stream) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamCut(io.pravega.client.stream.StreamCut) Controller(io.pravega.controller.stream.api.grpc.v1.Controller) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) Test(org.junit.Test)
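
These tests pin the retention clock with an AtomicLong and setRetentionClock(time::get), which makes time-based retention fully deterministic. A minimal sketch of the pattern, with RetentionTask standing in for the component under test (the class and its methods are hypothetical):

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

// Hypothetical component that consults an injectable clock instead of calling
// System.currentTimeMillis() directly, so tests can advance time explicitly.
class RetentionTask {
    private Supplier<Long> clock = System::currentTimeMillis;

    void setRetentionClock(Supplier<Long> clock) {
        this.clock = clock;
    }

    boolean isRetentionDue(long lastRunMillis, long frequencyMillis) {
        return clock.get() - lastRunMillis >= frequencyMillis;
    }
}

// Usage mirroring the tests above:
//   AtomicLong time = new AtomicLong(0L);
//   task.setRetentionClock(time::get);
//   time.set(40L); // "advance" the clock before the next retention call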

Example 10 with StreamTruncationRecord

Use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.

From class StreamTestBase, method truncationTest.

@Test(timeout = 30000L)
public void truncationTest() {
    OperationContext context = getContext();
    int startingSegmentNumber = new Random().nextInt(2000);
    // epoch 0 --> 0, 1
    long timestamp = System.currentTimeMillis();
    PersistentStreamBase stream = createStream("scope", "stream" + startingSegmentNumber, timestamp, 2, startingSegmentNumber);
    List<StreamSegmentRecord> activeSegments = stream.getActiveSegments(context).join();
    // epoch 1 --> 0, 2, 3
    List<Map.Entry<Double, Double>> newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<>(0.5, 0.75));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.75, 1.0));
    Map<Long, Long> map = new HashMap<>();
    map.put(startingSegmentNumber + 1L, 100L);
    scaleStream(stream, ++timestamp, Lists.newArrayList(startingSegmentNumber + 1L), newRanges, map);
    long twoSegmentId = computeSegmentId(startingSegmentNumber + 2, 1);
    long threeSegmentId = computeSegmentId(startingSegmentNumber + 3, 1);
    // epoch 2 --> 0, 2, 4, 5
    newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<Double, Double>(0.75, (0.75 + 1.0) / 2));
    newRanges.add(new AbstractMap.SimpleEntry<Double, Double>((0.75 + 1.0) / 2, 1.0));
    map = new HashMap<>();
    map.put(threeSegmentId, 100L);
    scaleStream(stream, ++timestamp, Lists.newArrayList(threeSegmentId), newRanges, map);
    long fourSegmentId = computeSegmentId(startingSegmentNumber + 4, 2);
    long fiveSegmentId = computeSegmentId(startingSegmentNumber + 5, 2);
    // epoch 3 --> 0, 4, 5, 6, 7
    newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<Double, Double>(0.5, (0.75 + 0.5) / 2));
    newRanges.add(new AbstractMap.SimpleEntry<Double, Double>((0.75 + 0.5) / 2, 0.75));
    map = new HashMap<>();
    map.put(twoSegmentId, 100L);
    scaleStream(stream, ++timestamp, Lists.newArrayList(twoSegmentId), newRanges, map);
    long sixSegmentId = computeSegmentId(startingSegmentNumber + 6, 3);
    long sevenSegmentId = computeSegmentId(startingSegmentNumber + 7, 3);
    // epoch 4 --> 4, 5, 6, 7, 8, 9
    newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<Double, Double>(0.0, (0.0 + 0.5) / 2));
    newRanges.add(new AbstractMap.SimpleEntry<Double, Double>((0.0 + 0.5) / 2, 0.5));
    map = new HashMap<>();
    map.put(startingSegmentNumber + 0L, 100L);
    scaleStream(stream, ++timestamp, Lists.newArrayList(startingSegmentNumber + 0L), newRanges, map);
    long eightSegmentId = computeSegmentId(startingSegmentNumber + 8, 4);
    long nineSegmentId = computeSegmentId(startingSegmentNumber + 9, 4);
    // first stream cut
    Map<Long, Long> streamCut1 = new HashMap<>();
    streamCut1.put(startingSegmentNumber + 0L, 1L);
    streamCut1.put(startingSegmentNumber + 1L, 1L);
    stream.startTruncation(streamCut1, context).join();
    VersionedMetadata<StreamTruncationRecord> versionedTruncationRecord = stream.getTruncationRecord(context).join();
    StreamTruncationRecord truncationRecord = versionedTruncationRecord.getObject();
    assertTrue(truncationRecord.getToDelete().isEmpty());
    assertEquals(truncationRecord.getStreamCut(), streamCut1);
    Map<Long, Integer> transform = transform(truncationRecord.getSpan());
    assertTrue(transform.get(startingSegmentNumber + 0L) == 0 && transform.get(startingSegmentNumber + 1L) == 0);
    stream.completeTruncation(versionedTruncationRecord, context).join();
    // getActiveSegments wrt first truncation record which is on epoch 0
    Map<Long, Long> activeSegmentsWithOffset;
    // 1. truncationRecord = 0/1, 1/1
    // expected active segments with offset = 0/1, 1/1
    activeSegmentsWithOffset = stream.getSegmentsAtHead(context).join().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(activeSegmentsWithOffset.size() == 2 && activeSegmentsWithOffset.containsKey(startingSegmentNumber + 0L) && activeSegmentsWithOffset.containsKey(startingSegmentNumber + 1L) && activeSegmentsWithOffset.get(startingSegmentNumber + 0L) == 1L && activeSegmentsWithOffset.get(startingSegmentNumber + 1L) == 1L);
    // second stream cut
    Map<Long, Long> streamCut2 = new HashMap<>();
    streamCut2.put(startingSegmentNumber + 0L, 1L);
    streamCut2.put(twoSegmentId, 1L);
    streamCut2.put(fourSegmentId, 1L);
    streamCut2.put(fiveSegmentId, 1L);
    stream.startTruncation(streamCut2, context).join();
    versionedTruncationRecord = stream.getTruncationRecord(context).join();
    truncationRecord = versionedTruncationRecord.getObject();
    assertEquals(truncationRecord.getStreamCut(), streamCut2);
    assertTrue(truncationRecord.getToDelete().size() == 2 && truncationRecord.getToDelete().contains(startingSegmentNumber + 1L) && truncationRecord.getToDelete().contains(threeSegmentId));
    assertTrue(truncationRecord.getStreamCut().equals(streamCut2));
    transform = transform(truncationRecord.getSpan());
    assertTrue(transform.get(startingSegmentNumber + 0L) == 2 && transform.get(twoSegmentId) == 2 && transform.get(fourSegmentId) == 2 && transform.get(fiveSegmentId) == 2);
    stream.completeTruncation(versionedTruncationRecord, context).join();
    // 2. truncationRecord = 0/1, 2/1, 4/1, 5/1.
    // expected active segments = 0/1, 2/1, 4/1, 5/1
    activeSegmentsWithOffset = stream.getSegmentsAtHead(context).join().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(activeSegmentsWithOffset.size() == 4 && activeSegmentsWithOffset.containsKey(startingSegmentNumber + 0L) && activeSegmentsWithOffset.containsKey(twoSegmentId) && activeSegmentsWithOffset.containsKey(fourSegmentId) && activeSegmentsWithOffset.containsKey(fiveSegmentId) && activeSegmentsWithOffset.get(startingSegmentNumber + 0L) == 1L && activeSegmentsWithOffset.get(twoSegmentId) == 1L && activeSegmentsWithOffset.get(fourSegmentId) == 1L && activeSegmentsWithOffset.get(fiveSegmentId) == 1L);
    // third stream cut
    Map<Long, Long> streamCut3 = new HashMap<>();
    streamCut3.put(twoSegmentId, 10L);
    streamCut3.put(fourSegmentId, 10L);
    streamCut3.put(fiveSegmentId, 10L);
    streamCut3.put(eightSegmentId, 10L);
    streamCut3.put(nineSegmentId, 10L);
    stream.startTruncation(streamCut3, context).join();
    versionedTruncationRecord = stream.getTruncationRecord(context).join();
    truncationRecord = versionedTruncationRecord.getObject();
    assertEquals(truncationRecord.getStreamCut(), streamCut3);
    assertTrue(truncationRecord.getToDelete().size() == 1 && truncationRecord.getToDelete().contains(startingSegmentNumber + 0L));
    assertTrue(truncationRecord.getStreamCut().equals(streamCut3));
    transform = transform(truncationRecord.getSpan());
    assertTrue(transform.get(twoSegmentId) == 2 && transform.get(fourSegmentId) == 4 && transform.get(fiveSegmentId) == 4 && transform.get(eightSegmentId) == 4 && transform.get(nineSegmentId) == 4);
    stream.completeTruncation(versionedTruncationRecord, context).join();
    // 3. truncation record 2/10, 4/10, 5/10, 8/10, 9/10
    // getActiveSegments wrt the third truncation record, which spans epoch 2 to 4
    // expected active segments = 2/10, 4/10, 5/10, 8/10, 9/10
    activeSegmentsWithOffset = stream.getSegmentsAtHead(context).join().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(activeSegmentsWithOffset.size() == 5 && activeSegmentsWithOffset.containsKey(twoSegmentId) && activeSegmentsWithOffset.containsKey(fourSegmentId) && activeSegmentsWithOffset.containsKey(fiveSegmentId) && activeSegmentsWithOffset.containsKey(eightSegmentId) && activeSegmentsWithOffset.containsKey(nineSegmentId) && activeSegmentsWithOffset.get(twoSegmentId) == 10L && activeSegmentsWithOffset.get(fourSegmentId) == 10L && activeSegmentsWithOffset.get(fiveSegmentId) == 10L && activeSegmentsWithOffset.get(eightSegmentId) == 10L && activeSegmentsWithOffset.get(nineSegmentId) == 10L);
    // a streamcut behind the previous truncation point must be rejected
    Map<Long, Long> streamCut4 = new HashMap<>();
    streamCut4.put(twoSegmentId, 1L);
    streamCut4.put(fourSegmentId, 1L);
    streamCut4.put(fiveSegmentId, 1L);
    streamCut4.put(eightSegmentId, 1L);
    streamCut4.put(nineSegmentId, 1L);
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.startTruncation(streamCut4, context), e -> e instanceof IllegalArgumentException);
    Map<Long, Long> streamCut5 = new HashMap<>();
    streamCut5.put(twoSegmentId, 10L);
    streamCut5.put(fourSegmentId, 10L);
    streamCut5.put(fiveSegmentId, 10L);
    streamCut5.put(startingSegmentNumber + 0L, 10L);
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.startTruncation(streamCut5, context), e -> e instanceof IllegalArgumentException);
}
Also used : TestOperationContext(io.pravega.controller.store.TestOperationContext) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) AbstractMap(java.util.AbstractMap) StreamSegmentRecord(io.pravega.controller.store.stream.records.StreamSegmentRecord) Random(java.util.Random) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) AtomicLong(java.util.concurrent.atomic.AtomicLong) Test(org.junit.Test)
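
The two failing startTruncation calls at the end show that a streamcut must never fall behind the current truncation point. A simplified sketch of that validation (hypothetical method; the real store compares cuts with an epoch-aware span overlap, which is also how it rejects streamCut5 for referencing segment 0 from before the current head):

import java.util.Map;

// Illustrative only: reject a proposed cut if, for any segment present in both
// cuts, the proposed offset is lower than the already-truncated offset.
static void validateNewStreamCut(Map<Long, Long> current, Map<Long, Long> proposed) {
    boolean behind = proposed.entrySet().stream()
            .anyMatch(e -> current.containsKey(e.getKey())
                    && e.getValue() < current.get(e.getKey()));
    if (behind) {
        throw new IllegalArgumentException("streamcut is behind the current truncation point");
    }
}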

Aggregations

StreamTruncationRecord (io.pravega.controller.store.stream.records.StreamTruncationRecord): 24 uses
Test (org.junit.Test): 19 uses
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 17 uses
ArgumentMatchers.anyLong (org.mockito.ArgumentMatchers.anyLong): 16 uses
HashMap (java.util.HashMap): 15 uses
AtomicLong (java.util.concurrent.atomic.AtomicLong): 15 uses
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString): 15 uses
ScalingPolicy (io.pravega.client.stream.ScalingPolicy): 13 uses
ControllerEventStreamWriterMock (io.pravega.controller.mocks.ControllerEventStreamWriterMock): 12 uses
EventStreamWriterMock (io.pravega.controller.mocks.EventStreamWriterMock): 12 uses
StreamCutRecord (io.pravega.controller.store.stream.records.StreamCutRecord): 12 uses
RetentionPolicy (io.pravega.client.stream.RetentionPolicy): 11 uses
RGStreamCutRecord (io.pravega.shared.controller.event.RGStreamCutRecord): 11 uses
Segment (io.pravega.client.segment.impl.Segment): 10 uses
ReaderGroupConfig (io.pravega.client.stream.ReaderGroupConfig): 10 uses
Stream (io.pravega.client.stream.Stream): 10 uses
StreamCut (io.pravega.client.stream.StreamCut): 10 uses
StreamCutImpl (io.pravega.client.stream.impl.StreamCutImpl): 10 uses
StreamConfigurationRecord (io.pravega.controller.store.stream.records.StreamConfigurationRecord): 9 uses
VersionedMetadata (io.pravega.controller.store.VersionedMetadata): 7 uses