
Example 16 with StreamTruncationRecord

use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.

the class TruncateStreamTask method processTruncate.

private CompletableFuture<Void> processTruncate(String scope, String stream, VersionedMetadata<StreamTruncationRecord> versionedTruncationRecord, VersionedMetadata<State> versionedState, OperationContext context, long requestId) {
    String delegationToken = this.streamMetadataTasks.retrieveDelegationToken();
    StreamTruncationRecord truncationRecord = versionedTruncationRecord.getObject();
    log.info(requestId, "Truncating stream {}/{} at stream cut: {}", scope, stream, truncationRecord.getStreamCut());
    return Futures.toVoid(streamMetadataStore.updateVersionedState(scope, stream, State.TRUNCATING, versionedState, context, executor)
            .thenCompose(update -> notifyTruncateSegments(scope, stream, truncationRecord.getStreamCut(), delegationToken, requestId)
                    .thenCompose(x -> notifyDeleteSegments(scope, stream, truncationRecord.getToDelete(), delegationToken, requestId))
                    .thenAccept(x -> DYNAMIC_LOGGER.reportGaugeValue(TRUNCATED_SIZE, versionedTruncationRecord.getObject().getSizeTill(), streamTags(scope, stream)))
                    .thenCompose(deleted -> streamMetadataStore.completeTruncation(scope, stream, versionedTruncationRecord, context, executor))
                    .thenCompose(x -> streamMetadataStore.updateVersionedState(scope, stream, State.ACTIVE, update, context, executor))));
}
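
The method above is essentially one chain of CompletableFuture stages: move the stream to TRUNCATING, ask the segment store to truncate and then delete segments, report the truncated size, persist the completed truncation record, and finally move the stream back to ACTIVE. Below is a minimal, self-contained sketch of that chaining pattern using only the JDK; the setState and notifySegmentStore methods are placeholders for illustration, not Pravega APIs.

import java.util.concurrent.CompletableFuture;

// Sketch of the future-chaining pattern used by processTruncate above.
// The step methods stand in for the controller/store calls; they are not Pravega APIs.
public class TruncateChainSketch {

    static CompletableFuture<String> setState(String state) {
        System.out.println("state -> " + state);
        return CompletableFuture.completedFuture(state);
    }

    static CompletableFuture<Void> notifySegmentStore(String action) {
        System.out.println("segment store: " + action);
        return CompletableFuture.completedFuture(null);
    }

    public static void main(String[] args) {
        CompletableFuture<Void> chain =
                setState("TRUNCATING")                                                       // 1. mark the stream as truncating
                        .thenCompose(s -> notifySegmentStore("truncate segments at stream cut"))
                        .thenCompose(v -> notifySegmentStore("delete segments below stream cut"))
                        .thenAccept(v -> System.out.println("report TRUNCATED_SIZE gauge"))  // 2. metrics
                        .thenCompose(v -> notifySegmentStore("persist completed truncation record"))
                        .thenCompose(v -> setState("ACTIVE"))                                // 3. back to ACTIVE
                        .thenAccept(s -> { });
        chain.join();
    }
}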
Also used : OperationContext(io.pravega.controller.store.stream.OperationContext) NameUtils(io.pravega.shared.NameUtils) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) TruncateStreamEvent(io.pravega.shared.controller.event.TruncateStreamEvent) LoggerFactory(org.slf4j.LoggerFactory) Set(java.util.Set) CompletableFuture(java.util.concurrent.CompletableFuture) Collectors(java.util.stream.Collectors) MetricsTags.streamTags(io.pravega.shared.MetricsTags.streamTags) MetricsProvider(io.pravega.shared.metrics.MetricsProvider) TagLogger(io.pravega.common.tracing.TagLogger) VersionedMetadata(io.pravega.controller.store.VersionedMetadata) DynamicLogger(io.pravega.shared.metrics.DynamicLogger) TRUNCATED_SIZE(io.pravega.shared.MetricsNames.TRUNCATED_SIZE) Map(java.util.Map) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) Preconditions(com.google.common.base.Preconditions) State(io.pravega.controller.store.stream.State) StreamMetadataStore(io.pravega.controller.store.stream.StreamMetadataStore) Futures(io.pravega.common.concurrent.Futures)

Example 17 with StreamTruncationRecord

use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.

the class StreamMetadataTasksTest method consumptionBasedRetentionWithNoBounds.

@Test(timeout = 30000)
public void consumptionBasedRetentionWithNoBounds() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.byTime(Duration.ofMillis(0L), Duration.ofMillis(Long.MAX_VALUE));
    String stream1 = "consumptionSize3";
    StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    configuration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.startUpdateConfiguration(SCOPE, stream1, configuration, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join();
    streamStorePartialMock.completeUpdateConfiguration(SCOPE, stream1, configRecord, null, executor).join();
    final Segment seg0 = new Segment(SCOPE, stream1, 0L);
    final Segment seg1 = new Segment(SCOPE, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
    ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
    consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    String subscriber1 = "subscriber1";
    CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse1 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse1.getStatus());
    assertEquals(0L, createResponse1.getConfig().getGeneration());
    assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse1.getConfig().getReaderGroupId()));
    String subscriber2 = "subscriber2";
    createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber2, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse2 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse2.getStatus());
    assertEquals(0L, createResponse2.getConfig().getGeneration());
    assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse2.getConfig().getReaderGroupId()));
    final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
    final String subscriber2Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber2);
    // example::
    // | s0 | s2           | s7 |
    // |    |              |
    // |    |              |
    // |    |    | s4 | s6 | s8 | s10
    // | s1 | s3 | s5 |    | s9 |
    // valid stream cuts: { s0/off, s9/off, s2/-1, s8/-1}, { s1/off, s2/-1 }
    // lower bound = { s0/off, s1/off }
    long two = NameUtils.computeSegmentId(2, 1);
    long three = NameUtils.computeSegmentId(3, 1);
    long four = NameUtils.computeSegmentId(4, 2);
    long five = NameUtils.computeSegmentId(5, 2);
    long six = NameUtils.computeSegmentId(6, 3);
    long seven = NameUtils.computeSegmentId(7, 4);
    long eight = NameUtils.computeSegmentId(8, 4);
    long nine = NameUtils.computeSegmentId(9, 4);
    long ten = NameUtils.computeSegmentId(10, 5);
    // 0, 1 -> 2, 3 with different split
    scale(SCOPE, stream1, ImmutableMap.of(0L, 1L, 1L, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // s3 -> 4, 5
    scale(SCOPE, stream1, ImmutableMap.of(three, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 0.8), new AbstractMap.SimpleEntry<>(0.8, 1.0)));
    // 4,5 -> 6
    scale(SCOPE, stream1, ImmutableMap.of(four, 1L, five, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // 2, 6 -> 7, 8, 9
    scale(SCOPE, stream1, ImmutableMap.of(two, 1L, six, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.3), new AbstractMap.SimpleEntry<>(0.3, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // 7, 8, 9 -> 10
    scale(SCOPE, stream1, ImmutableMap.of(seven, 1L, eight, 1L, nine, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)));
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    // invalid streamcut should be rejected
    UpdateSubscriberStatus.Status status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, three, 1L), 0L).join();
    assertEquals(status, UpdateSubscriberStatus.Status.STREAM_CUT_NOT_VALID);
    status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, two, -1L, eight, -1L, nine, 1L), 0L).join();
    status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(), createResponse2.getConfig().getGeneration(), ImmutableMap.of(1L, 1L, two, -1L), 0L).join();
    Map<Long, Long> map1 = new HashMap<>();
    map1.put(ten, 2L);
    long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(1L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // call retention and verify that retention policy applies
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 1L, null, "").join();
    // now retention set has one stream cut 10/2
    // subscriber lowerbound is 0/1, 1/1.. truncation should happen at lowerbound
    VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 1L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
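
The segment ids used in the test above (two, three, ... ten) are built with NameUtils.computeSegmentId(segmentNumber, epoch), which combines a segment number with the epoch in which that segment was created. As a rough mental model, such an id can be thought of as the creation epoch packed into the high 32 bits and the segment number in the low 32 bits; the exact bit layout in the sketch below is an assumption for illustration, not a quote of the Pravega source.

// Illustrative sketch only: packs an epoch and a segment number into one long,
// mirroring the kind of id NameUtils.computeSegmentId produces.
public class SegmentIdSketch {

    static long computeSegmentId(int segmentNumber, int epoch) {
        return ((long) epoch << 32) | (segmentNumber & 0xFFFFFFFFL);
    }

    static int segmentNumber(long segmentId) {
        return (int) segmentId;               // low 32 bits
    }

    static int epoch(long segmentId) {
        return (int) (segmentId >>> 32);      // high 32 bits
    }

    public static void main(String[] args) {
        long ten = computeSegmentId(10, 5);   // "segment 10 created in epoch 5", as in the test
        System.out.println(ten + " -> number=" + segmentNumber(ten) + ", epoch=" + epoch(ten));
    }
}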
Also used : StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) HashMap(java.util.HashMap) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) Segment(io.pravega.client.segment.impl.Segment) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) Stream(io.pravega.client.stream.Stream) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamCut(io.pravega.client.stream.StreamCut) Controller(io.pravega.controller.stream.api.grpc.v1.Controller) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) UpdateSubscriberStatus(io.pravega.controller.stream.api.grpc.v1.Controller.UpdateSubscriberStatus) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) Test(org.junit.Test)

Example 18 with StreamTruncationRecord

use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.

the class StreamMetadataTasksTest method consumptionBasedRetentionWithScale2.

@Test(timeout = 30000)
public void consumptionBasedRetentionWithScale2() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.bySizeBytes(0L, 1000L);
    String stream1 = "consumptionSize2";
    StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    configuration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.startUpdateConfiguration(SCOPE, stream1, configuration, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join();
    streamStorePartialMock.completeUpdateConfiguration(SCOPE, stream1, configRecord, null, executor).join();
    final Segment seg0 = new Segment(SCOPE, stream1, 0L);
    final Segment seg1 = new Segment(SCOPE, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
    ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    String subscriber1 = "subscriber1";
    CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse1 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse1.getStatus());
    String subscriber2 = "subscriber2";
    createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber2, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse2 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse2.getStatus());
    final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
    final String subscriber2Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber2);
    // example::
    // | s0 | s2           | s7 |
    // |    |              |
    // |    |              |
    // |    |    | s4 | s6 | s8 | s10
    // | s1 | s3 | s5 |    | s9 |
    // valid stream cuts: { s0/off, s9/off, s2/-1, s8/-1}, { s1/off, s2/-1 }
    // lower bound = { s0/off, s1/off }
    long two = NameUtils.computeSegmentId(2, 1);
    long three = NameUtils.computeSegmentId(3, 1);
    long four = NameUtils.computeSegmentId(4, 2);
    long five = NameUtils.computeSegmentId(5, 2);
    long six = NameUtils.computeSegmentId(6, 3);
    long seven = NameUtils.computeSegmentId(7, 4);
    long eight = NameUtils.computeSegmentId(8, 4);
    long nine = NameUtils.computeSegmentId(9, 4);
    long ten = NameUtils.computeSegmentId(10, 5);
    // 0, 1 -> 2, 3 with different split
    scale(SCOPE, stream1, ImmutableMap.of(0L, 1L, 1L, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // s3 -> 4, 5
    scale(SCOPE, stream1, ImmutableMap.of(three, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 0.8), new AbstractMap.SimpleEntry<>(0.8, 1.0)));
    // 4,5 -> 6
    scale(SCOPE, stream1, ImmutableMap.of(four, 1L, five, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // 2, 6 -> 7, 8, 9
    scale(SCOPE, stream1, ImmutableMap.of(two, 1L, six, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.3), new AbstractMap.SimpleEntry<>(0.3, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // 7, 8, 9 -> 10
    scale(SCOPE, stream1, ImmutableMap.of(seven, 1L, eight, 1L, nine, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)));
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    // invalid streamcut should be rejected
    UpdateSubscriberStatus.Status status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, three, 1L), 0L).join();
    assertEquals(status, UpdateSubscriberStatus.Status.STREAM_CUT_NOT_VALID);
    status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, two, -1L, eight, -1L, nine, 1L), 0L).join();
    status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(), createResponse2.getConfig().getGeneration(), ImmutableMap.of(1L, 1L, two, -1L), 0L).join();
    Map<Long, Long> map1 = new HashMap<>();
    map1.put(ten, 2L);
    long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(1L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // call retention and verify that retention policy applies
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 1L, null, "").join();
    // now retention set has one stream cut 10/2
    // subscriber lowerbound is 0/1, 1/1.. truncation should happen at lowerbound
    VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 1L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
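
Both retention tests above first exercise rejection of an invalid stream cut (ImmutableMap.of(0L, 1L, three, 1L)). Conceptually, a stream cut is valid only if the segments it references tile the key space [0.0, 1.0) with no gaps or overlaps; segment 0 covers [0.0, 0.5) while segment 3 only covers [0.6, 1.0) after the first scale, leaving a hole. The sketch below illustrates that coverage check with key ranges written out explicitly; it is a conceptual illustration, not the controller's validation code.

import java.util.Arrays;

public class StreamCutValiditySketch {

    // Each row is a segment's key range {low, high}; a valid cut must tile [0.0, 1.0).
    static boolean coversKeySpace(double[][] ranges) {
        double[][] sorted = ranges.clone();
        Arrays.sort(sorted, (a, b) -> Double.compare(a[0], b[0]));
        double cursor = 0.0;
        for (double[] r : sorted) {
            if (r[0] != cursor) {
                return false;          // gap or overlap in the key space
            }
            cursor = r[1];
        }
        return cursor == 1.0;          // must end exactly at 1.0
    }

    public static void main(String[] args) {
        // segment 0 over [0.0, 0.5) plus segment 3 over [0.6, 1.0): gap, so invalid.
        System.out.println(coversKeySpace(new double[][]{{0.0, 0.5}, {0.6, 1.0}}));  // false
        // two segments tiling [0.0, 1.0): valid.
        System.out.println(coversKeySpace(new double[][]{{0.0, 0.5}, {0.5, 1.0}}));  // true
    }
}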
Also used : StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) HashMap(java.util.HashMap) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) Segment(io.pravega.client.segment.impl.Segment) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) Stream(io.pravega.client.stream.Stream) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamCut(io.pravega.client.stream.StreamCut) Controller(io.pravega.controller.stream.api.grpc.v1.Controller) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) UpdateSubscriberStatus(io.pravega.controller.stream.api.grpc.v1.Controller.UpdateSubscriberStatus) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) Test(org.junit.Test)

Example 19 with StreamTruncationRecord

use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.

the class StreamMetadataTasksTest method consumptionBasedRetentionSizeLimitWithOverlappingMinTest.

@Test(timeout = 30000)
public void consumptionBasedRetentionSizeLimitWithOverlappingMinTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.bySizeBytes(2L, 20L);
    String stream1 = "consumptionSizeOverlap";
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    AtomicLong time = new AtomicLong(0L);
    streamMetadataTasks.setRetentionClock(time::get);
    final Segment seg0 = new Segment(SCOPE, stream1, 0L);
    final Segment seg1 = new Segment(SCOPE, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
    ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
    consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    String subscriber1 = "subscriber1";
    CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createStatus.join().getStatus());
    // create a retention set that has 5 values
    // retention policy where min = 2, max = 10.
    // s0: seg0/10, seg1/10 ==> size retained if truncated at = 0
    // s1: seg0/10, seg1/8 ==> size retained if truncated at = 2  <== min
    // s2: seg0/10, seg1/7 ==> size retained if truncated at = 3
    // s3: seg0/0, seg1/6 ==> size retained if truncated at = 14
    // s4: seg0/0, seg1/5 ==> size retained if truncated at = 15  <== max
    time.set(10L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 5L, ImmutableMap.of(0L, 0L, 1L, 5L)), null, executor).join();
    time.set(20L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 6L, ImmutableMap.of(0L, 0L, 1L, 6L)), null, executor).join();
    time.set(30L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 17L, ImmutableMap.of(0L, 10L, 1L, 7L)), null, executor).join();
    time.set(40L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 18L, ImmutableMap.of(0L, 10L, 1L, 8L)), null, executor).join();
    time.set(50L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 20L, ImmutableMap.of(0L, 10L, 1L, 10L)), null, executor).join();
    // subscriber streamcut: slb: seg0/9, seg1/10 ==> size retained if truncated at = 1.
    // this is less than min. so we should truncate at min. but min overlaps with slb.
    // so we should actually truncate at s3 which is the streamcut just before slb.
    final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 9L, 1L, 10L), 0L).join();
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
    VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 0L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 6L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
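
The comments in the test describe the selection rule informally: the subscriber lower bound would retain less than the minimum limit, the min-limit candidate overlaps that lower bound, so truncation falls back to the newest retention-set cut that does not go past the lower bound (s3). The sketch below replays that reasoning with the retained sizes from the comment block; it is an illustration of the rule as described there, not the controller's actual retention algorithm.

import java.util.ArrayList;
import java.util.List;

// Sketch of the reasoning in the comments above: never truncate past the subscriber
// lower bound, and among the remaining candidates pick the newest one that still
// satisfies the minimum retention limit.
public class RetentionBoundSketch {

    static class Candidate {
        final String name;
        final long retainedBytes;          // bytes kept if we truncate at this cut
        final boolean aheadOfSubscriber;   // true if the cut lies beyond the subscriber lower bound
        Candidate(String name, long retainedBytes, boolean aheadOfSubscriber) {
            this.name = name;
            this.retainedBytes = retainedBytes;
            this.aheadOfSubscriber = aheadOfSubscriber;
        }
    }

    static String choose(List<Candidate> newestFirst, long slbRetained, long minBytes) {
        if (slbRetained >= minBytes) {
            return "subscriber lower bound";   // lower bound itself already satisfies the min limit
        }
        for (Candidate c : newestFirst) {
            if (c.retainedBytes >= minBytes && !c.aheadOfSubscriber) {
                return c.name;                 // newest cut meeting the min limit without passing the lower bound
            }
        }
        return "no truncation";
    }

    public static void main(String[] args) {
        // Retained sizes from the comment block, newest cut first; s0..s2 overlap the
        // subscriber lower bound (seg0/9, seg1/10) because their seg0 offset is 10.
        List<Candidate> candidates = new ArrayList<>();
        candidates.add(new Candidate("s0", 0, true));
        candidates.add(new Candidate("s1", 2, true));
        candidates.add(new Candidate("s2", 3, true));
        candidates.add(new Candidate("s3", 14, false));
        candidates.add(new Candidate("s4", 15, false));
        System.out.println(choose(candidates, 1L, 2L));   // prints "s3", matching the assertions above
    }
}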
Also used : ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamCut(io.pravega.client.stream.StreamCut) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) Segment(io.pravega.client.segment.impl.Segment) AtomicLong(java.util.concurrent.atomic.AtomicLong) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) Stream(io.pravega.client.stream.Stream) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) Test(org.junit.Test)

Example 20 with StreamTruncationRecord

use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.

the class StreamMetadataTasksTest method timeBasedRetentionStreamTest.

@Test(timeout = 30000)
public void timeBasedRetentionStreamTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.TIME).retentionParam(Duration.ofMinutes(60).toMillis()).build();
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    doAnswer(x -> CompletableFuture.completedFuture(Collections.emptyList())).when(streamStorePartialMock).listSubscribers(any(), any(), any(), any());
    streamStorePartialMock.createStream(SCOPE, "test", configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, "test", 0L).get().size());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    AtomicLong time = new AtomicLong(System.currentTimeMillis());
    streamMetadataTasks.setRetentionClock(time::get);
    long recordingTime1 = time.get();
    Map<Long, Long> map1 = new HashMap<>();
    map1.put(0L, 1L);
    map1.put(1L, 1L);
    StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime1, Long.MIN_VALUE, ImmutableMap.copyOf(map1));
    doReturn(CompletableFuture.completedFuture(streamCut1)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime1, null, "").get();
    // verify that one streamCut is generated and added.
    List<StreamCutRecord> list = streamStorePartialMock.getRetentionSet(SCOPE, "test", null, executor).thenCompose(retentionSet -> {
        return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
    }).join();
    assertTrue(list.contains(streamCut1));
    Map<Long, Long> map2 = new HashMap<>();
    map2.put(0L, 10L);
    map2.put(1L, 10L);
    long recordingTime2 = recordingTime1 + Duration.ofMinutes(5).toMillis();
    StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime2, Long.MIN_VALUE, ImmutableMap.copyOf(map2));
    doReturn(CompletableFuture.completedFuture(streamCut2)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any()); // mock only isTransactionOngoing call.
    time.set(recordingTime2);
    streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime2, null, "").get();
    list = streamStorePartialMock.getRetentionSet(SCOPE, "test", null, executor).thenCompose(retentionSet -> {
        return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
    }).join();
    StreamTruncationRecord truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).get().getObject();
    // verify that only one stream cut is in retention set. streamCut2 is not added
    // verify that truncation did not happen
    assertTrue(list.contains(streamCut1));
    assertTrue(!list.contains(streamCut2));
    assertTrue(!truncProp.isUpdating());
    Map<Long, Long> map3 = new HashMap<>();
    map3.put(0L, 20L);
    map3.put(1L, 20L);
    long recordingTime3 = recordingTime1 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime3, Long.MIN_VALUE, ImmutableMap.copyOf(map3));
    doReturn(CompletableFuture.completedFuture(streamCut3)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any()); // mock only isTransactionOngoing call.
    time.set(recordingTime3);
    streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime3, null, "").get();
    // verify two stream cuts are in retention set. Cut 1 and 3.
    // verify that truncation has not happened.
    list = streamStorePartialMock.getRetentionSet(SCOPE, "test", null, executor).thenCompose(retentionSet -> {
        return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
    }).join();
    truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).get().getObject();
    assertTrue(list.contains(streamCut1));
    assertTrue(!list.contains(streamCut2));
    assertTrue(list.contains(streamCut3));
    assertTrue(!truncProp.isUpdating());
    Map<Long, Long> map4 = new HashMap<>();
    map4.put(0L, 20L);
    map4.put(1L, 20L);
    long recordingTime4 = recordingTime1 + retentionPolicy.getRetentionParam() + 2;
    StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime4, Long.MIN_VALUE, ImmutableMap.copyOf(map4));
    doReturn(CompletableFuture.completedFuture(streamCut4)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    time.set(recordingTime4);
    streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime4, null, "").get();
    // verify that only two stream cut are in retention set. streamcut 3 and 4
    // verify that truncation has started and that streamCut1 has been removed from the retention set, since it was used for truncation
    list = streamStorePartialMock.getRetentionSet(SCOPE, "test", null, executor).thenCompose(retentionSet -> {
        return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
    }).join();
    truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).get().getObject();
    assertTrue(!list.contains(streamCut1));
    assertTrue(!list.contains(streamCut2));
    assertTrue(list.contains(streamCut3));
    assertTrue(list.contains(streamCut4));
    assertTrue(truncProp.isUpdating());
    assertTrue(truncProp.getStreamCut().get(0L) == 1L && truncProp.getStreamCut().get(1L) == 1L);
    doCallRealMethod().when(streamStorePartialMock).listSubscribers(any(), any(), any(), any());
}
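
The time-based test drives retention with a controllable clock: new stream cuts are only added once enough time has passed since the previous one, and truncation only kicks in when a recorded cut is older than the retention period, at which point the stream is truncated at that cut and it is removed from the retention set. The sketch below illustrates the "newest cut that has already aged out" selection with made-up timestamps; it is not the controller's implementation and does not model the minimum-frequency constant.

import java.time.Duration;
import java.util.Arrays;
import java.util.List;

// Sketch of time-based truncation-cut selection: walk the retention set oldest to
// newest and remember the last cut whose age exceeds the retention period.
public class TimeRetentionSketch {

    static class Cut {
        final String name;
        final long recordingTime;
        Cut(String name, long recordingTime) {
            this.name = name;
            this.recordingTime = recordingTime;
        }
    }

    static String pickTruncationCut(List<Cut> oldestFirst, long now, long retentionMillis) {
        Cut chosen = null;
        for (Cut c : oldestFirst) {
            if (now - c.recordingTime > retentionMillis) {
                chosen = c;            // newest cut that has already aged out
            }
        }
        return chosen == null ? "none (nothing old enough yet)" : chosen.name;
    }

    public static void main(String[] args) {
        long retention = Duration.ofMinutes(60).toMillis();
        long t1 = 0L;
        long t3 = Duration.ofMinutes(30).toMillis();   // a later cut, mirroring streamCut3 arriving after streamCut1
        List<Cut> retentionSet = Arrays.asList(new Cut("streamCut1", t1), new Cut("streamCut3", t3));

        // 61 minutes after t1 only streamCut1 has aged out, so truncation uses it,
        // just as the last retention() call in the test truncates at streamCut1's offsets.
        long now = t1 + retention + Duration.ofMinutes(1).toMillis();
        System.out.println(pickTruncationCut(retentionSet, now, retention));  // streamCut1
    }
}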
Also used : UpdateStreamEvent(io.pravega.shared.controller.event.UpdateStreamEvent) Arrays(java.util.Arrays) StreamCut(io.pravega.client.stream.StreamCut) EventStreamWriter(io.pravega.client.stream.EventStreamWriter) ArgumentMatchers.eq(org.mockito.ArgumentMatchers.eq) Cleanup(lombok.Cleanup) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) AbstractStreamMetadataStore(io.pravega.controller.store.stream.AbstractStreamMetadataStore) StoreException(io.pravega.controller.store.stream.StoreException) AutoScaleTask(io.pravega.controller.server.eventProcessor.requesthandlers.AutoScaleTask) TaskMetadataStore(io.pravega.controller.store.task.TaskMetadataStore) Duration(java.time.Duration) Map(java.util.Map) Mockito.doAnswer(org.mockito.Mockito.doAnswer) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) LockFailedException(io.pravega.controller.store.task.LockFailedException) Mockito.doReturn(org.mockito.Mockito.doReturn) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) StreamMetadataStoreTestHelper(io.pravega.controller.store.stream.StreamMetadataStoreTestHelper) DeleteStreamEvent(io.pravega.shared.controller.event.DeleteStreamEvent) RequestTracker(io.pravega.common.tracing.RequestTracker) ActiveTxnRecord(io.pravega.controller.store.stream.records.ActiveTxnRecord) ControllerEvent(io.pravega.shared.controller.event.ControllerEvent) KVTableMetadataStore(io.pravega.controller.store.kvtable.KVTableMetadataStore) Assert.assertFalse(org.junit.Assert.assertFalse) CreateReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.CreateReaderGroupTask) StreamMetadataStore(io.pravega.controller.store.stream.StreamMetadataStore) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) Futures(io.pravega.common.concurrent.Futures) GrpcAuthHelper(io.pravega.controller.server.security.auth.GrpcAuthHelper) Mockito.mock(org.mockito.Mockito.mock) CuratorFrameworkFactory(org.apache.curator.framework.CuratorFrameworkFactory) Mockito.doCallRealMethod(org.mockito.Mockito.doCallRealMethod) StreamMetrics(io.pravega.controller.metrics.StreamMetrics) StreamStoreFactory(io.pravega.controller.store.stream.StreamStoreFactory) TransactionMetrics(io.pravega.controller.metrics.TransactionMetrics) NameUtils.computeSegmentId(io.pravega.shared.NameUtils.computeSegmentId) ConnectionFactory(io.pravega.client.connection.impl.ConnectionFactory) Mock(org.mockito.Mock) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) Exceptions(io.pravega.common.Exceptions) TruncateStreamEvent(io.pravega.shared.controller.event.TruncateStreamEvent) Mockito.spy(org.mockito.Mockito.spy) Supplier(java.util.function.Supplier) UpdateSubscriberStatus(io.pravega.controller.stream.api.grpc.v1.Controller.UpdateSubscriberStatus) ArrayList(java.util.ArrayList) Lists(com.google.common.collect.Lists) TestingServerStarter(io.pravega.test.common.TestingServerStarter) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) TestingServer(org.apache.curator.test.TestingServer) EventHelper(io.pravega.controller.task.EventHelper) UpdateReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.UpdateReaderGroupTask) CreateReaderGroupEvent(io.pravega.shared.controller.event.CreateReaderGroupEvent) Before(org.junit.Before) 
SegmentHelperMock(io.pravega.controller.mocks.SegmentHelperMock) EventHelperMock(io.pravega.controller.mocks.EventHelperMock) Assert.assertTrue(org.junit.Assert.assertTrue) CreateStreamResponse(io.pravega.controller.store.stream.CreateStreamResponse) Test(org.junit.Test) TableMetadataTasks(io.pravega.controller.task.KeyValueTable.TableMetadataTasks) Assert.assertNotEquals(org.junit.Assert.assertNotEquals) ExecutionException(java.util.concurrent.ExecutionException) AtomicLong(java.util.concurrent.atomic.AtomicLong) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) TaskStoreFactory(io.pravega.controller.store.task.TaskStoreFactory) Assert.assertNull(org.junit.Assert.assertNull) DeleteStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteStreamTask) StreamRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.StreamRequestHandler) DeleteReaderGroupStatus(io.pravega.controller.stream.api.grpc.v1.Controller.DeleteReaderGroupStatus) Mockito.reset(org.mockito.Mockito.reset) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Assert.assertEquals(org.junit.Assert.assertEquals) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) CommitEvent(io.pravega.shared.controller.event.CommitEvent) AssertExtensions(io.pravega.test.common.AssertExtensions) DeleteReaderGroupEvent(io.pravega.shared.controller.event.DeleteReaderGroupEvent) TimeoutException(java.util.concurrent.TimeoutException) SealStreamEvent(io.pravega.shared.controller.event.SealStreamEvent) VersionedMetadata(io.pravega.controller.store.VersionedMetadata) Stream(io.pravega.client.stream.Stream) After(org.junit.After) SubscribersResponse(io.pravega.controller.stream.api.grpc.v1.Controller.SubscribersResponse) Controller(io.pravega.controller.stream.api.grpc.v1.Controller) TaskExceptions(io.pravega.controller.server.eventProcessor.requesthandlers.TaskExceptions) EpochTransitionRecord(io.pravega.controller.store.stream.records.EpochTransitionRecord) ImmutableSet(com.google.common.collect.ImmutableSet) SealStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.SealStreamTask) ImmutableMap(com.google.common.collect.ImmutableMap) AssertExtensions.assertFutureThrows(io.pravega.test.common.AssertExtensions.assertFutureThrows) DeleteScopeEvent(io.pravega.shared.controller.event.DeleteScopeEvent) CompletionException(java.util.concurrent.CompletionException) ScaleStreamStatus(io.pravega.controller.stream.api.grpc.v1.Controller.ScaleResponse.ScaleStreamStatus) UUID(java.util.UUID) ScaleOperationTask(io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Collectors(java.util.stream.Collectors) AbstractClientFactoryImpl(io.pravega.client.stream.impl.AbstractClientFactoryImpl) List(java.util.List) CuratorFramework(org.apache.curator.framework.CuratorFramework) Config(io.pravega.controller.util.Config) UpdateStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.UpdateStreamTask) TxnStatus(io.pravega.controller.store.stream.TxnStatus) VersionedTransactionData(io.pravega.controller.store.stream.VersionedTransactionData) Optional(java.util.Optional) DeleteScopeTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteScopeTask) TruncateStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.TruncateStreamTask) NotImplementedException(org.apache.commons.lang3.NotImplementedException) 
ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) OperationContext(io.pravega.controller.store.stream.OperationContext) Segment(io.pravega.client.segment.impl.Segment) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) Getter(lombok.Getter) SegmentHelper(io.pravega.controller.server.SegmentHelper) ModelHelper(io.pravega.client.control.impl.ModelHelper) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) BucketStore(io.pravega.controller.store.stream.BucketStore) AbortEvent(io.pravega.shared.controller.event.AbortEvent) ExponentialBackoffRetry(org.apache.curator.retry.ExponentialBackoffRetry) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) LinkedList(java.util.LinkedList) ArgumentMatchers.anyInt(org.mockito.ArgumentMatchers.anyInt) SocketConnectionFactoryImpl(io.pravega.client.connection.impl.SocketConnectionFactoryImpl) ScaleResponse(io.pravega.controller.stream.api.grpc.v1.Controller.ScaleResponse) EventWriterConfig(io.pravega.client.stream.EventWriterConfig) ControllerService(io.pravega.controller.server.ControllerService) NameUtils(io.pravega.shared.NameUtils) Assert.assertNotNull(org.junit.Assert.assertNotNull) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) AbstractMap(java.util.AbstractMap) EpochRecord(io.pravega.controller.store.stream.records.EpochRecord) UpdateStreamStatus(io.pravega.controller.stream.api.grpc.v1.Controller.UpdateStreamStatus) Data(lombok.Data) State(io.pravega.controller.store.stream.State) ExecutorServiceHelpers(io.pravega.common.concurrent.ExecutorServiceHelpers) UpdateReaderGroupEvent(io.pravega.shared.controller.event.UpdateReaderGroupEvent) DeleteReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteReaderGroupTask) ReaderGroupConfigResponse(io.pravega.controller.stream.api.grpc.v1.Controller.ReaderGroupConfigResponse) ScaleOpEvent(io.pravega.shared.controller.event.ScaleOpEvent) Collections(java.util.Collections) ClientConfig(io.pravega.client.ClientConfig)

Aggregations

StreamTruncationRecord (io.pravega.controller.store.stream.records.StreamTruncationRecord): 24
Test (org.junit.Test): 19
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 17
ArgumentMatchers.anyLong (org.mockito.ArgumentMatchers.anyLong): 16
HashMap (java.util.HashMap): 15
AtomicLong (java.util.concurrent.atomic.AtomicLong): 15
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString): 15
ScalingPolicy (io.pravega.client.stream.ScalingPolicy): 13
ControllerEventStreamWriterMock (io.pravega.controller.mocks.ControllerEventStreamWriterMock): 12
EventStreamWriterMock (io.pravega.controller.mocks.EventStreamWriterMock): 12
StreamCutRecord (io.pravega.controller.store.stream.records.StreamCutRecord): 12
RetentionPolicy (io.pravega.client.stream.RetentionPolicy): 11
RGStreamCutRecord (io.pravega.shared.controller.event.RGStreamCutRecord): 11
Segment (io.pravega.client.segment.impl.Segment): 10
ReaderGroupConfig (io.pravega.client.stream.ReaderGroupConfig): 10
Stream (io.pravega.client.stream.Stream): 10
StreamCut (io.pravega.client.stream.StreamCut): 10
StreamCutImpl (io.pravega.client.stream.impl.StreamCutImpl): 10
StreamConfigurationRecord (io.pravega.controller.store.stream.records.StreamConfigurationRecord): 9
VersionedMetadata (io.pravega.controller.store.VersionedMetadata): 7