
Example 61 with StreamCut

use of io.pravega.client.stream.StreamCut in project pravega by pravega.

the class EndToEndCBRTest method testReaderGroupManualRetention.

@Test(timeout = 60000)
public void testReaderGroupManualRetention() throws Exception {
    String scope = "test";
    String streamName = "testReaderGroupManualRetention";
    String groupName = "testReaderGroupManualRetention-group";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).retentionPolicy(RetentionPolicy.bySizeBytes(10, Long.MAX_VALUE)).build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope(scope).get();
    controller.createStream(scope, streamName, config).get();
    Stream stream = Stream.of(scope, streamName);
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    // write events
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    writer.writeEvent("1", "e1").join();
    writer.writeEvent("2", "e2").join();
    // Create a ReaderGroup
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory);
    groupManager.createReaderGroup(groupName, ReaderGroupConfig.builder().disableAutomaticCheckpoints().retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT).stream(stream).build());
    // Create a Reader
    AtomicLong clock = new AtomicLong();
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("reader1", groupName, serializer, ReaderConfig.builder().build(), clock::get, clock::get);
    clock.addAndGet(CLOCK_ADVANCE_INTERVAL);
    EventRead<String> read = reader.readNextEvent(60000);
    assertEquals("e1", read.getEvent());
    clock.addAndGet(CLOCK_ADVANCE_INTERVAL);
    read = reader.readNextEvent(60000);
    assertEquals("e2", read.getEvent());
    ReaderGroup readerGroup = groupManager.getReaderGroup(groupName);
    Map<Segment, Long> segmentMap = new HashMap<>();
    segmentMap.put(new Segment(scope, streamName, 0), 17L);
    Map<Stream, StreamCut> scResult2 = new HashMap<>();
    scResult2.put(stream, new StreamCutImpl(stream, segmentMap));
    readerGroup.updateRetentionStreamCut(scResult2);
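    // Truncation is asynchronous: wait (up to 30s) for the controller to move
    // segment 0's head offset past 0 at the retention stream cut.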
    AssertExtensions.assertEventuallyEquals(true, () -> controller.getSegmentsAtTime(stream, 0L).join().values().stream().anyMatch(off -> off > 0), 30 * 1000L);
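    // A reader group created after truncation starts at the truncated position,
    // so the first event it sees should be e2, not e1.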
    String group2 = groupName + "2";
    groupManager.createReaderGroup(group2, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(NameUtils.getScopedStreamName(scope, streamName)).build());
    EventStreamReader<String> reader2 = clientFactory.createReader("reader2", group2, serializer, ReaderConfig.builder().build());
    EventRead<String> eventRead2 = reader2.readNextEvent(10000);
    assertEquals("e2", eventRead2.getEvent());
}
Also used : Segment(io.pravega.client.segment.impl.Segment) StreamCut(io.pravega.client.stream.StreamCut) EventStreamWriter(io.pravega.client.stream.EventStreamWriter) ConnectionFactory(io.pravega.client.connection.impl.ConnectionFactory) StreamImpl(io.pravega.client.stream.impl.StreamImpl) AssertExtensions(io.pravega.test.common.AssertExtensions) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) ReaderGroupManagerImpl(io.pravega.client.admin.impl.ReaderGroupManagerImpl) Cleanup(lombok.Cleanup) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) ReaderGroup(io.pravega.client.stream.ReaderGroup) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) EventRead(io.pravega.client.stream.EventRead) LocalController(io.pravega.controller.server.eventProcessor.LocalController) ClientFactoryImpl(io.pravega.client.stream.impl.ClientFactoryImpl) ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) Stream(io.pravega.client.stream.Stream) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) Map(java.util.Map) Checkpoint(io.pravega.client.stream.Checkpoint) SocketConnectionFactoryImpl(io.pravega.client.connection.impl.SocketConnectionFactoryImpl) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) EventWriterConfig(io.pravega.client.stream.EventWriterConfig) NameUtils(io.pravega.shared.NameUtils) Assert.assertTrue(org.junit.Assert.assertTrue) EventStreamReader(io.pravega.client.stream.EventStreamReader) Test(org.junit.Test) TimeUnit(java.util.concurrent.TimeUnit) AtomicLong(java.util.concurrent.atomic.AtomicLong) Assert.assertNull(org.junit.Assert.assertNull) Assert.assertFalse(org.junit.Assert.assertFalse) InlineExecutor(io.pravega.test.common.InlineExecutor) ReaderConfig(io.pravega.client.stream.ReaderConfig) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Assert.assertEquals(org.junit.Assert.assertEquals) ClientConfig(io.pravega.client.ClientConfig)
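
The pattern above, reduced to its core: a StreamCut is a map from Segment to byte offset, and a reader group created with StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT only lets the controller truncate once the application publishes such a cut. A minimal sketch of that hand-off, assuming a reader group already configured as in the test; the scope, stream, and offset values here are illustrative, not taken from a real deployment:

import io.pravega.client.segment.impl.Segment;
import io.pravega.client.stream.ReaderGroup;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamCut;
import io.pravega.client.stream.impl.StreamCutImpl;
import java.util.Map;

public final class ManualRetentionSketch {
    // Publish a retention stream cut releasing everything before `offset`
    // in segment 0 of test/myStream (a fixed(1) stream has only segment 0).
    public static void releaseUpTo(ReaderGroup readerGroup, long offset) {
        Stream stream = Stream.of("test", "myStream");
        Map<Segment, Long> positions = Map.of(new Segment("test", "myStream", 0), offset);
        StreamCut cut = new StreamCutImpl(stream, positions);
        // The controller may now truncate the stream at (or before) this cut,
        // subject to the stream's RetentionPolicy bounds.
        readerGroup.updateRetentionStreamCut(Map.of(stream, cut));
    }
}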

Example 62 with StreamCut

use of io.pravega.client.stream.StreamCut in project pravega by pravega.

the class EndToEndReaderGroupTest method testGenerateStreamCuts.

@Test(timeout = 30000)
public void testGenerateStreamCuts() throws Exception {
    String streamName = "testGenerateStreamCuts";
    final Stream stream = Stream.of(SCOPE, streamName);
    final String group = "testGenerateStreamCuts-group";
    createScope(SCOPE);
    createStream(SCOPE, streamName, ScalingPolicy.fixed(1));
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    // Prep the stream with data.
    // 1. Write events with an event size of 30 bytes.
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(1)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(2)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(3)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(4)).join();
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, PRAVEGA.getControllerURI());
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(1000).stream(stream).build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(group);
    // Create a reader
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", group, serializer, ReaderConfig.builder().build());
    readAndVerify(reader, 1);
    @Cleanup("shutdown") InlineExecutor backgroundExecutor = new InlineExecutor();
    CompletableFuture<Map<Stream, StreamCut>> sc = readerGroup.generateStreamCuts(backgroundExecutor);
    // The reader group state will be updated after 1 second.
    TimeUnit.SECONDS.sleep(1);
    EventRead<String> data = reader.readNextEvent(15000);
    // wait until the streamCut is obtained.
    assertTrue(Futures.await(sc));
    // expected segment 0 offset is 30L.
    Map<Segment, Long> expectedOffsetMap = ImmutableMap.of(getSegment(streamName, 0, 0), 30L);
    Map<Stream, StreamCut> scMap = sc.join();
    assertEquals("StreamCut for a single stream expected", 1, scMap.size());
    assertEquals("StreamCut pointing ot offset 30L expected", new StreamCutImpl(stream, expectedOffsetMap), scMap.get(stream));
}
Also used : ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) StreamCut(io.pravega.client.stream.StreamCut) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) ReaderGroup(io.pravega.client.stream.ReaderGroup) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) Cleanup(lombok.Cleanup) Segment(io.pravega.client.segment.impl.Segment) InlineExecutor(io.pravega.test.common.InlineExecutor) Stream(io.pravega.client.stream.Stream) HashMap(java.util.HashMap) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) Test(org.junit.Test)
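
The choreography worth noting: generateStreamCuts returns a future that completes only after the readers have seen the updated group state, which is why the test sleeps past groupRefreshTimeMillis and issues one more read before joining the future. A minimal sketch of the same sequence, assuming (as the test's InlineExecutor suggests) that generateStreamCuts accepts a ScheduledExecutorService; the read timeout here is illustrative:

import io.pravega.client.stream.EventStreamReader;
import io.pravega.client.stream.ReaderGroup;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamCut;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

public final class GenerateStreamCutsSketch {
    public static Map<Stream, StreamCut> currentCuts(ReaderGroup group,
                                                     EventStreamReader<String> reader) throws Exception {
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        try {
            // Completes once every reader has reported a position against
            // the refreshed group state.
            CompletableFuture<Map<Stream, StreamCut>> cuts = group.generateStreamCuts(executor);
            // Keep the reader turning over so it picks up the new state;
            // the test instead sleeps for groupRefreshTimeMillis first.
            reader.readNextEvent(1000);
            return cuts.join();
        } finally {
            executor.shutdown();
        }
    }
}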

Example 63 with StreamCut

use of io.pravega.client.stream.StreamCut in project pravega by pravega.

the class EndToEndReaderGroupTest method testReaderOfflineWithSilentCheckpoint.

@Test(timeout = 30000)
public void testReaderOfflineWithSilentCheckpoint() throws Exception {
    String streamName = "testReaderOfflineWithSilentCheckpoint";
    final Stream stream = Stream.of(SCOPE, streamName);
    final String group = "testReaderOfflineWithSilentCheckpoint-group";
    @Cleanup("shutdown") InlineExecutor backgroundExecutor = new InlineExecutor();
    createScope(SCOPE);
    createStream(SCOPE, streamName, ScalingPolicy.fixed(1));
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    // Prep the stream with data.
    // 1. Write events with an event size of 30 bytes.
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(1)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(2)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(3)).join();
    writer.writeEvent(randomKeyGenerator.get(), getEventData.apply(4)).join();
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, PRAVEGA.getControllerURI());
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(1000).stream(stream).build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(group);
    // Create a reader
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", group, serializer, ReaderConfig.builder().build());
    // 2. Read an event.
    readAndVerify(reader, 1);
    // 3. Trigger a checkpoint and verify it is completed.
    CompletableFuture<Checkpoint> checkpoint = readerGroup.initiateCheckpoint("chk1", backgroundExecutor);
    // The reader group state will be updated after 1 second.
    TimeUnit.SECONDS.sleep(1);
    EventRead<String> data = reader.readNextEvent(15000);
    assertTrue(data.isCheckpoint());
    readAndVerify(reader, 2);
    assertTrue("Checkpointing should complete successfully", Futures.await(checkpoint));
    // 4. GenerateStreamCuts and validate the offset of stream cut.
    CompletableFuture<Map<Stream, StreamCut>> sc = readerGroup.generateStreamCuts(backgroundExecutor);
    // The reader group state will be updated after 1 second.
    TimeUnit.SECONDS.sleep(1);
    data = reader.readNextEvent(15000);
    assertTrue("StreamCut generation should complete successfully", Futures.await(sc));
    // expected segment 0 offset is 60L, since 2 events are read.
    Map<Segment, Long> expectedOffsetMap = ImmutableMap.of(getSegment(streamName, 0, 0), 60L);
    Map<Stream, StreamCut> scMap = sc.join();
    assertEquals("StreamCut for a single stream expected", 1, scMap.size());
    assertEquals("StreamCut pointing ot offset 30L expected", new StreamCutImpl(stream, expectedOffsetMap), scMap.get(stream));
    // 5. Invoke readerOffline with last position as null. The newer readers should start reading
    // from the last checkpointed position
    readerGroup.readerOffline("readerId", null);
    @Cleanup EventStreamReader<String> reader1 = clientFactory.createReader("readerId", group, serializer, ReaderConfig.builder().build());
    readAndVerify(reader1, 2);
}
Also used : ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) StreamCut(io.pravega.client.stream.StreamCut) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) ReaderGroup(io.pravega.client.stream.ReaderGroup) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) Cleanup(lombok.Cleanup) Segment(io.pravega.client.segment.impl.Segment) Checkpoint(io.pravega.client.stream.Checkpoint) InlineExecutor(io.pravega.test.common.InlineExecutor) Stream(io.pravega.client.stream.Stream) HashMap(java.util.HashMap) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) Test(org.junit.Test)
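
Step 5 is the reusable part: readerOffline with a null position tells the group to fall back to the reader's last checkpointed position, so a replacement reader resumes from the checkpoint instead of the dead reader's in-flight position. A minimal sketch, assuming the replacement reuses the same reader id; the JavaSerializer and default ReaderConfig are illustrative stand-ins for the test's own serializer and config:

import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.stream.EventStreamReader;
import io.pravega.client.stream.ReaderConfig;
import io.pravega.client.stream.ReaderGroup;
import io.pravega.client.stream.impl.JavaSerializer;

public final class ReaderOfflineSketch {
    public static EventStreamReader<String> replaceReader(ReaderGroup group,
            EventStreamClientFactory factory, String groupName, String readerId) {
        // A null lastPosition means "resume from the last checkpointed position".
        group.readerOffline(readerId, null);
        // The replacement may reuse the id; it starts from the checkpoint.
        return factory.createReader(readerId, groupName,
                new JavaSerializer<>(), ReaderConfig.builder().build());
    }
}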

Example 64 with StreamCut

use of io.pravega.client.stream.StreamCut in project pravega by pravega.

the class StreamMetadataTasksTest method consumptionBasedRetentionWithNoBounds.

@Test(timeout = 30000)
public void consumptionBasedRetentionWithNoBounds() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.byTime(Duration.ofMillis(0L), Duration.ofMillis(Long.MAX_VALUE));
    String stream1 = "consumptionSize3";
    StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    configuration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.startUpdateConfiguration(SCOPE, stream1, configuration, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join();
    streamStorePartialMock.completeUpdateConfiguration(SCOPE, stream1, configRecord, null, executor).join();
    final Segment seg0 = new Segment(SCOPE, stream1, 0L);
    final Segment seg1 = new Segment(SCOPE, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
    ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
    consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    String subscriber1 = "subscriber1";
    CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse1 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse1.getStatus());
    assertEquals(0L, createResponse1.getConfig().getGeneration());
    assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse1.getConfig().getReaderGroupId()));
    String subscriber2 = "subscriber2";
    createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber2, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse2 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse2.getStatus());
    assertEquals(0L, createResponse2.getConfig().getGeneration());
    assertFalse(ReaderGroupConfig.DEFAULT_UUID.toString().equals(createResponse2.getConfig().getReaderGroupId()));
    final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
    final String subscriber2Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber2);
    // example::
    // | s0 | s2           | s7 |
    // |    |              |
    // |    |              |
    // |    |    | s4 | s6 | s8 | s10
    // | s1 | s3 | s5 |    | s9 |
    // valid stream cuts: { s0/off, s9/off, s2/-1, s8/-1}, { s1/off, s2/-1 }
    // lower bound = { s0/off, s1/off }
    long two = NameUtils.computeSegmentId(2, 1);
    long three = NameUtils.computeSegmentId(3, 1);
    long four = NameUtils.computeSegmentId(4, 2);
    long five = NameUtils.computeSegmentId(5, 2);
    long six = NameUtils.computeSegmentId(6, 3);
    long seven = NameUtils.computeSegmentId(7, 4);
    long eight = NameUtils.computeSegmentId(8, 4);
    long nine = NameUtils.computeSegmentId(9, 4);
    long ten = NameUtils.computeSegmentId(10, 5);
    // 0, 1 -> 2, 3 with different split
    scale(SCOPE, stream1, ImmutableMap.of(0L, 1L, 1L, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // s3 -> 4, 5
    scale(SCOPE, stream1, ImmutableMap.of(three, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 0.8), new AbstractMap.SimpleEntry<>(0.8, 1.0)));
    // 4,5 -> 6
    scale(SCOPE, stream1, ImmutableMap.of(four, 1L, five, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // 2, 6 -> 7, 8, 9
    scale(SCOPE, stream1, ImmutableMap.of(two, 1L, six, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.3), new AbstractMap.SimpleEntry<>(0.3, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // 7, 8, 9 -> 10
    scale(SCOPE, stream1, ImmutableMap.of(seven, 1L, eight, 1L, nine, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)));
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    // invalid streamcut should be rejected
    UpdateSubscriberStatus.Status status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, three, 1L), 0L).join();
    assertEquals(status, UpdateSubscriberStatus.Status.STREAM_CUT_NOT_VALID);
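    // Publish the two valid stream cuts sketched in the diagram above,
    // one for each subscriber.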
    status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, two, -1L, eight, -1L, nine, 1L), 0L).join();
    status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(), createResponse2.getConfig().getGeneration(), ImmutableMap.of(1L, 1L, two, -1L), 0L).join();
    Map<Long, Long> map1 = new HashMap<>();
    map1.put(ten, 2L);
    long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(1L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // call retention and verify that retention policy applies
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 1L, null, "").join();
    // now the retention set has one stream cut: 10/2
    // subscriber lower bound is 0/1, 1/1; truncation should happen at the lower bound
    VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 1L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
Also used : StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) HashMap(java.util.HashMap) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) Segment(io.pravega.client.segment.impl.Segment) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) Stream(io.pravega.client.stream.Stream) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamCut(io.pravega.client.stream.StreamCut) Controller(io.pravega.controller.stream.api.grpc.v1.Controller) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) UpdateSubscriberStatus(io.pravega.controller.stream.api.grpc.v1.Controller.UpdateSubscriberStatus) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) Test(org.junit.Test)
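
The long segment ids above come from NameUtils.computeSegmentId(segmentNumber, epoch). A quick worked check of what the test's locals hold, under the assumption (consistent with the test using bare 0L and 1L for the two epoch-0 segments) that the creation epoch is packed into the high 32 bits of the id and the segment number into the low 32 bits:

import io.pravega.shared.NameUtils;

public final class SegmentIdSketch {
    public static void main(String[] args) {
        // "two" in the test: segment number 2, created by the first scale (epoch 1).
        long two = NameUtils.computeSegmentId(2, 1);
        // Under the high-bits-epoch assumption, two == (1L << 32) + 2.
        System.out.println("two = " + two + ", matches packing: " + (two == ((1L << 32) + 2)));
        // Segments 0 and 1 belong to epoch 0, so their ids are plain 0L and 1L,
        // which is why the stream cuts above use bare 0L and 1L keys.
    }
}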

Example 65 with StreamCut

use of io.pravega.client.stream.StreamCut in project pravega by pravega.

the class StreamMetadataTasksTest method consumptionBasedRetentionWithScale2.

@Test(timeout = 30000)
public void consumptionBasedRetentionWithScale2() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.bySizeBytes(0L, 1000L);
    String stream1 = "consumptionSize2";
    StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    configuration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.startUpdateConfiguration(SCOPE, stream1, configuration, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join();
    streamStorePartialMock.completeUpdateConfiguration(SCOPE, stream1, configRecord, null, executor).join();
    final Segment seg0 = new Segment(SCOPE, stream1, 0L);
    final Segment seg1 = new Segment(SCOPE, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
    ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    String subscriber1 = "subscriber1";
    CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse1 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse1.getStatus());
    String subscriber2 = "subscriber2";
    createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber2, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    Controller.CreateReaderGroupResponse createResponse2 = createStatus.join();
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createResponse2.getStatus());
    final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
    final String subscriber2Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber2);
    // example::
    // | s0 | s2           | s7 |
    // |    |              |
    // |    |              |
    // |    |    | s4 | s6 | s8 | s10
    // | s1 | s3 | s5 |    | s9 |
    // valid stream cuts: { s0/off, s9/off, s2/-1, s8/-1}, { s1/off, s2/-1 }
    // lower bound = { s0/off, s1/off }
    long two = NameUtils.computeSegmentId(2, 1);
    long three = NameUtils.computeSegmentId(3, 1);
    long four = NameUtils.computeSegmentId(4, 2);
    long five = NameUtils.computeSegmentId(5, 2);
    long six = NameUtils.computeSegmentId(6, 3);
    long seven = NameUtils.computeSegmentId(7, 4);
    long eight = NameUtils.computeSegmentId(8, 4);
    long nine = NameUtils.computeSegmentId(9, 4);
    long ten = NameUtils.computeSegmentId(10, 5);
    // 0, 1 -> 2, 3 with different split
    scale(SCOPE, stream1, ImmutableMap.of(0L, 1L, 1L, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // s3 -> 4, 5
    scale(SCOPE, stream1, ImmutableMap.of(three, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 0.8), new AbstractMap.SimpleEntry<>(0.8, 1.0)));
    // 4,5 -> 6
    scale(SCOPE, stream1, ImmutableMap.of(four, 1L, five, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // 2, 6 -> 7, 8, 9
    scale(SCOPE, stream1, ImmutableMap.of(two, 1L, six, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.3), new AbstractMap.SimpleEntry<>(0.3, 0.6), new AbstractMap.SimpleEntry<>(0.6, 1.0)));
    // 7, 8, 9 -> 10
    scale(SCOPE, stream1, ImmutableMap.of(seven, 1L, eight, 1L, nine, 1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)));
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    // invalid streamcut should be rejected
    UpdateSubscriberStatus.Status status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, three, 1L), 0L).join();
    assertEquals(status, UpdateSubscriberStatus.Status.STREAM_CUT_NOT_VALID);
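    // Publish the two valid stream cuts sketched in the diagram above,
    // one for each subscriber.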
    status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, createResponse1.getConfig().getReaderGroupId(), createResponse1.getConfig().getGeneration(), ImmutableMap.of(0L, 1L, two, -1L, eight, -1L, nine, 1L), 0L).join();
    status = streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber2Name, createResponse2.getConfig().getReaderGroupId(), createResponse2.getConfig().getGeneration(), ImmutableMap.of(1L, 1L, two, -1L), 0L).join();
    Map<Long, Long> map1 = new HashMap<>();
    map1.put(ten, 2L);
    long size = streamStorePartialMock.getSizeTillStreamCut(SCOPE, stream1, map1, Optional.empty(), null, executor).join();
    doReturn(CompletableFuture.completedFuture(new StreamCutRecord(1L, size, ImmutableMap.copyOf(map1)))).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    // call retention and verify that retention policy applies
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, 1L, null, "").join();
    // now the retention set has one stream cut: 10/2
    // subscriber lower bound is 0/1, 1/1; truncation should happen at the lower bound
    VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(truncationRecord.getObject().getStreamCut().get(0L).longValue(), 1L);
    assertEquals(truncationRecord.getObject().getStreamCut().get(1L).longValue(), 1L);
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
Also used : StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) HashMap(java.util.HashMap) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) Segment(io.pravega.client.segment.impl.Segment) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) Stream(io.pravega.client.stream.Stream) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamCut(io.pravega.client.stream.StreamCut) Controller(io.pravega.controller.stream.api.grpc.v1.Controller) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) UpdateSubscriberStatus(io.pravega.controller.stream.api.grpc.v1.Controller.UpdateSubscriberStatus) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) Test(org.junit.Test)
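
Examples 64 and 65 run the same scale-and-truncate scenario; the only material difference is the retention policy that bounds consumption-based truncation. A side-by-side sketch of the two constructions, with values copied from the tests and the (min bound, max bound) reading inferred from the test name consumptionBasedRetentionWithNoBounds:

import io.pravega.client.stream.RetentionPolicy;
import java.time.Duration;

public final class RetentionPolicySketch {
    public static void main(String[] args) {
        // Example 64: time bounds of (0 ms, Long.MAX_VALUE ms) are effectively
        // no bounds, so truncation tracks the subscriber lower bound alone.
        RetentionPolicy noBounds =
                RetentionPolicy.byTime(Duration.ofMillis(0L), Duration.ofMillis(Long.MAX_VALUE));
        // Example 65: size bounds of (0, 1000) bytes; the subscriber lower
        // bound is honored only within these limits.
        RetentionPolicy sizeBounded = RetentionPolicy.bySizeBytes(0L, 1000L);
        System.out.println(noBounds);
        System.out.println(sizeBounded);
    }
}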

Aggregations

StreamCut (io.pravega.client.stream.StreamCut): 79
Stream (io.pravega.client.stream.Stream): 65
Test (org.junit.Test): 65
Segment (io.pravega.client.segment.impl.Segment): 48
ReaderGroupConfig (io.pravega.client.stream.ReaderGroupConfig): 47
HashMap (java.util.HashMap): 39
StreamCutImpl (io.pravega.client.stream.impl.StreamCutImpl): 37
Cleanup (lombok.Cleanup): 30
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 26
Map (java.util.Map): 25
ReaderGroupManager (io.pravega.client.admin.ReaderGroupManager): 21
ScalingPolicy (io.pravega.client.stream.ScalingPolicy): 21
ArgumentMatchers.anyLong (org.mockito.ArgumentMatchers.anyLong): 20
CompletableFuture (java.util.concurrent.CompletableFuture): 19
ReaderGroup (io.pravega.client.stream.ReaderGroup): 18
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString): 18
EventStreamClientFactory (io.pravega.client.EventStreamClientFactory): 17
ClientConfig (io.pravega.client.ClientConfig): 16
Futures (io.pravega.common.concurrent.Futures): 15
AtomicLong (java.util.concurrent.atomic.AtomicLong): 15