
Example 66 with StreamCut

use of io.pravega.client.stream.StreamCut in project pravega by pravega.

From class StreamMetadataTasksTest, the method consumptionBasedRetentionSizeLimitWithOverlappingMinTest.

@Test(timeout = 30000)
public void consumptionBasedRetentionSizeLimitWithOverlappingMinTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.bySizeBytes(2L, 20L);
    String stream1 = "consumptionSizeOverlap";
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    streamStorePartialMock.createStream(SCOPE, stream1, configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    streamMetadataTasks.setRetentionFrequencyMillis(1L);
    AtomicLong time = new AtomicLong(0L);
    streamMetadataTasks.setRetentionClock(time::get);
    final Segment seg0 = new Segment(SCOPE, stream1, 0L);
    final Segment seg1 = new Segment(SCOPE, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 0L, seg1, 0L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE, stream1), new StreamCutImpl(Stream.of(SCOPE, stream1), endStreamCut));
    ReaderGroupConfig consumpRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
    consumpRGConfig = ReaderGroupConfig.cloneConfig(consumpRGConfig, UUID.randomUUID(), 0L);
    doReturn(CompletableFuture.completedFuture(Controller.CreateStreamStatus.Status.SUCCESS)).when(streamMetadataTasks).createRGStream(anyString(), anyString(), any(), anyLong(), anyInt(), anyLong());
    String subscriber1 = "subscriber1";
    CompletableFuture<Controller.CreateReaderGroupResponse> createStatus = streamMetadataTasks.createReaderGroup(SCOPE, subscriber1, consumpRGConfig, System.currentTimeMillis(), 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(Controller.CreateReaderGroupResponse.Status.SUCCESS, createStatus.join().getStatus());
    // create a retention set that has 5 values
    // retention policy where min = 2, max = 20.
    // s0: seg0/10, seg1/10 ==> size retained if truncated at s0 = 0
    // s1: seg0/10, seg1/8  ==> size retained if truncated at s1 = 2   <== min
    // s2: seg0/10, seg1/7  ==> size retained if truncated at s2 = 3
    // s3: seg0/0,  seg1/6  ==> size retained if truncated at s3 = 14
    // s4: seg0/0,  seg1/5  ==> size retained if truncated at s4 = 15  <== max
    time.set(10L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 5L, ImmutableMap.of(0L, 0L, 1L, 5L)), null, executor).join();
    time.set(20L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 6L, ImmutableMap.of(0L, 0L, 1L, 6L)), null, executor).join();
    time.set(30L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 17L, ImmutableMap.of(0L, 10L, 1L, 7L)), null, executor).join();
    time.set(40L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 18L, ImmutableMap.of(0L, 10L, 1L, 8L)), null, executor).join();
    time.set(50L);
    streamStorePartialMock.addStreamCutToRetentionSet(SCOPE, stream1, new StreamCutRecord(time.get(), 20L, ImmutableMap.of(0L, 10L, 1L, 10L)), null, executor).join();
    // subscriber streamcut (slb, the subscriber lower bound): seg0/9, seg1/10 ==> size retained if truncated at slb = 1.
    // this is less than min, so we should truncate at the min cut. but the min cut (s1) overlaps with slb,
    // so we should actually truncate at s3, the streamcut just before slb.
    final String subscriber1Name = NameUtils.getScopedReaderGroupName(SCOPE, subscriber1);
    streamMetadataTasks.updateSubscriberStreamCut(SCOPE, stream1, subscriber1Name, consumpRGConfig.getReaderGroupId().toString(), 0L, ImmutableMap.of(0L, 9L, 1L, 10L), 0L).join();
    streamMetadataTasks.retention(SCOPE, stream1, retentionPolicy, time.get(), null, "").join();
    VersionedMetadata<StreamTruncationRecord> truncationRecord = streamStorePartialMock.getTruncationRecord(SCOPE, stream1, null, executor).join();
    assertEquals(0L, truncationRecord.getObject().getStreamCut().get(0L).longValue());
    assertEquals(6L, truncationRecord.getObject().getStreamCut().get(1L).longValue());
    assertTrue(truncationRecord.getObject().isUpdating());
    streamStorePartialMock.completeTruncation(SCOPE, stream1, truncationRecord, null, executor).join();
}
Also used : ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamCut(io.pravega.client.stream.StreamCut) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) Segment(io.pravega.client.segment.impl.Segment) AtomicLong(java.util.concurrent.atomic.AtomicLong) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) Stream(io.pravega.client.stream.Stream) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) Test(org.junit.Test)
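
The comment block inside this test encodes the consumption-based retention rule: the size retained by truncating at a cut equals the stream's total written size minus the size recorded at that cut, and when the subscriber's lower bound sits below the min-bound cut, truncation falls back to the newest retention-set cut that does not overlap the subscriber's position. Below is a minimal, self-contained sketch of that rule; the Cut type and method names are hypothetical simplifications for illustration, not Pravega's controller internals.

import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Optional;

class ConsumptionRetentionSketch {
    // A retention-set entry: per-segment offsets plus the total stream size recorded at the cut.
    static final class Cut {
        final Map<Long, Long> offsets;
        final long recordedSize;
        Cut(Map<Long, Long> offsets, long recordedSize) {
            this.offsets = offsets;
            this.recordedSize = recordedSize;
        }
    }

    // Size retained if truncated at this cut = total written size - size recorded at the cut.
    static long sizeRetainedAt(long totalStreamSize, Cut cut) {
        return totalStreamSize - cut.recordedSize;
    }

    // A cut does not overlap the subscriber bound only if every segment offset
    // stays at or below the subscriber's offset for that segment.
    static boolean isBeforeSubscriberBound(Cut cut, Map<Long, Long> subscriberBound) {
        return cut.offsets.entrySet().stream()
                .allMatch(e -> e.getValue() <= subscriberBound.getOrDefault(e.getKey(), 0L));
    }

    // Choose the newest cut that retains at least minBytes and does not overlap
    // the subscriber bound. For the data in this test (total size 20, min 2,
    // subscriber bound seg0/9, seg1/10) this picks s3: seg0/0, seg1/6.
    static Optional<Cut> chooseTruncationCut(List<Cut> retentionSet, long totalStreamSize,
                                             long minBytes, Map<Long, Long> subscriberBound) {
        return retentionSet.stream()
                .filter(c -> sizeRetainedAt(totalStreamSize, c) >= minBytes)
                .filter(c -> isBeforeSubscriberBound(c, subscriberBound))
                .max(Comparator.comparingLong(c -> c.recordedSize));
    }
}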

Example 67 with StreamCut

use of io.pravega.client.stream.StreamCut in project pravega by pravega.

From class StreamMetadataTasksTest, the method timeBasedRetentionStreamTest.

@Test(timeout = 30000)
public void timeBasedRetentionStreamTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.TIME).retentionParam(Duration.ofMinutes(60).toMillis()).build();
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    doAnswer(x -> CompletableFuture.completedFuture(Collections.emptyList())).when(streamStorePartialMock).listSubscribers(any(), any(), any(), any());
    streamStorePartialMock.createStream(SCOPE, "test", configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, "test", 0L).get().size());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    AtomicLong time = new AtomicLong(System.currentTimeMillis());
    streamMetadataTasks.setRetentionClock(time::get);
    long recordingTime1 = time.get();
    Map<Long, Long> map1 = new HashMap<>();
    map1.put(0L, 1L);
    map1.put(1L, 1L);
    StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime1, Long.MIN_VALUE, ImmutableMap.copyOf(map1));
    doReturn(CompletableFuture.completedFuture(streamCut1)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime1, null, "").get();
    // verify that one streamCut is generated and added.
    List<StreamCutRecord> list = streamStorePartialMock.getRetentionSet(SCOPE, "test", null, executor).thenCompose(retentionSet -> {
        return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
    }).join();
    assertTrue(list.contains(streamCut1));
    Map<Long, Long> map2 = new HashMap<>();
    map2.put(0L, 10L);
    map2.put(1L, 10L);
    long recordingTime2 = recordingTime1 + Duration.ofMinutes(5).toMillis();
    StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime2, Long.MIN_VALUE, ImmutableMap.copyOf(map2));
    // mock only isTransactionOngoing call.
    doReturn(CompletableFuture.completedFuture(streamCut2)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    time.set(recordingTime2);
    streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime2, null, "").get();
    list = streamStorePartialMock.getRetentionSet(SCOPE, "test", null, executor).thenCompose(retentionSet -> {
        return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
    }).join();
    StreamTruncationRecord truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).get().getObject();
    // verify that only one stream cut is in the retention set; streamCut2 is not added
    // verify that truncation did not happen
    assertTrue(list.contains(streamCut1));
    assertFalse(list.contains(streamCut2));
    assertFalse(truncProp.isUpdating());
    Map<Long, Long> map3 = new HashMap<>();
    map3.put(0L, 20L);
    map3.put(1L, 20L);
    long recordingTime3 = recordingTime1 + Duration.ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis() + 1;
    StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime3, Long.MIN_VALUE, ImmutableMap.copyOf(map3));
    // mock only isTransactionOngoing call.
    doReturn(CompletableFuture.completedFuture(streamCut3)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    time.set(recordingTime3);
    streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime3, null, "").get();
    // verify that two stream cuts are in the retention set: cut 1 and cut 3.
    // verify that truncation has not happened.
    list = streamStorePartialMock.getRetentionSet(SCOPE, "test", null, executor).thenCompose(retentionSet -> {
        return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
    }).join();
    truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).get().getObject();
    assertTrue(list.contains(streamCut1));
    assertFalse(list.contains(streamCut2));
    assertTrue(list.contains(streamCut3));
    assertFalse(truncProp.isUpdating());
    Map<Long, Long> map4 = new HashMap<>();
    map4.put(0L, 20L);
    map4.put(1L, 20L);
    long recordingTime4 = recordingTime1 + retentionPolicy.getRetentionParam() + 2;
    StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime4, Long.MIN_VALUE, ImmutableMap.copyOf(map4));
    doReturn(CompletableFuture.completedFuture(streamCut4)).when(streamMetadataTasks).generateStreamCut(anyString(), anyString(), any(), any(), any());
    time.set(recordingTime4);
    streamMetadataTasks.retention(SCOPE, "test", retentionPolicy, recordingTime4, null, "").get();
    // verify that only two stream cuts are in the retention set: streamcut 3 and 4.
    // verify that truncation has started, and that streamCut1 is removed from the retention set as it has been used for truncation.
    list = streamStorePartialMock.getRetentionSet(SCOPE, "test", null, executor).thenCompose(retentionSet -> {
        return Futures.allOfWithResults(retentionSet.getRetentionRecords().stream().map(x -> streamStorePartialMock.getStreamCutRecord(SCOPE, "test", x, null, executor)).collect(Collectors.toList()));
    }).join();
    truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).get().getObject();
    assertFalse(list.contains(streamCut1));
    assertFalse(list.contains(streamCut2));
    assertTrue(list.contains(streamCut3));
    assertTrue(list.contains(streamCut4));
    assertTrue(truncProp.isUpdating());
    assertTrue(truncProp.getStreamCut().get(0L) == 1L && truncProp.getStreamCut().get(1L) == 1L);
    doCallRealMethod().when(streamStorePartialMock).listSubscribers(any(), any(), any(), any());
}
Also used : UpdateStreamEvent(io.pravega.shared.controller.event.UpdateStreamEvent) Arrays(java.util.Arrays) StreamCut(io.pravega.client.stream.StreamCut) EventStreamWriter(io.pravega.client.stream.EventStreamWriter) ArgumentMatchers.eq(org.mockito.ArgumentMatchers.eq) Cleanup(lombok.Cleanup) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) AbstractStreamMetadataStore(io.pravega.controller.store.stream.AbstractStreamMetadataStore) StoreException(io.pravega.controller.store.stream.StoreException) AutoScaleTask(io.pravega.controller.server.eventProcessor.requesthandlers.AutoScaleTask) TaskMetadataStore(io.pravega.controller.store.task.TaskMetadataStore) Duration(java.time.Duration) Map(java.util.Map) Mockito.doAnswer(org.mockito.Mockito.doAnswer) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) LockFailedException(io.pravega.controller.store.task.LockFailedException) Mockito.doReturn(org.mockito.Mockito.doReturn) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) StreamMetadataStoreTestHelper(io.pravega.controller.store.stream.StreamMetadataStoreTestHelper) DeleteStreamEvent(io.pravega.shared.controller.event.DeleteStreamEvent) RequestTracker(io.pravega.common.tracing.RequestTracker) ActiveTxnRecord(io.pravega.controller.store.stream.records.ActiveTxnRecord) ControllerEvent(io.pravega.shared.controller.event.ControllerEvent) KVTableMetadataStore(io.pravega.controller.store.kvtable.KVTableMetadataStore) Assert.assertFalse(org.junit.Assert.assertFalse) CreateReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.CreateReaderGroupTask) StreamMetadataStore(io.pravega.controller.store.stream.StreamMetadataStore) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) Futures(io.pravega.common.concurrent.Futures) GrpcAuthHelper(io.pravega.controller.server.security.auth.GrpcAuthHelper) Mockito.mock(org.mockito.Mockito.mock) CuratorFrameworkFactory(org.apache.curator.framework.CuratorFrameworkFactory) Mockito.doCallRealMethod(org.mockito.Mockito.doCallRealMethod) StreamMetrics(io.pravega.controller.metrics.StreamMetrics) StreamStoreFactory(io.pravega.controller.store.stream.StreamStoreFactory) TransactionMetrics(io.pravega.controller.metrics.TransactionMetrics) NameUtils.computeSegmentId(io.pravega.shared.NameUtils.computeSegmentId) ConnectionFactory(io.pravega.client.connection.impl.ConnectionFactory) Mock(org.mockito.Mock) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) Exceptions(io.pravega.common.Exceptions) TruncateStreamEvent(io.pravega.shared.controller.event.TruncateStreamEvent) Mockito.spy(org.mockito.Mockito.spy) Supplier(java.util.function.Supplier) UpdateSubscriberStatus(io.pravega.controller.stream.api.grpc.v1.Controller.UpdateSubscriberStatus) ArrayList(java.util.ArrayList) Lists(com.google.common.collect.Lists) TestingServerStarter(io.pravega.test.common.TestingServerStarter) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) TestingServer(org.apache.curator.test.TestingServer) EventHelper(io.pravega.controller.task.EventHelper) UpdateReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.UpdateReaderGroupTask) CreateReaderGroupEvent(io.pravega.shared.controller.event.CreateReaderGroupEvent) Before(org.junit.Before) 
SegmentHelperMock(io.pravega.controller.mocks.SegmentHelperMock) EventHelperMock(io.pravega.controller.mocks.EventHelperMock) Assert.assertTrue(org.junit.Assert.assertTrue) CreateStreamResponse(io.pravega.controller.store.stream.CreateStreamResponse) Test(org.junit.Test) TableMetadataTasks(io.pravega.controller.task.KeyValueTable.TableMetadataTasks) Assert.assertNotEquals(org.junit.Assert.assertNotEquals) ExecutionException(java.util.concurrent.ExecutionException) AtomicLong(java.util.concurrent.atomic.AtomicLong) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) TaskStoreFactory(io.pravega.controller.store.task.TaskStoreFactory) Assert.assertNull(org.junit.Assert.assertNull) DeleteStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteStreamTask) StreamRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.StreamRequestHandler) DeleteReaderGroupStatus(io.pravega.controller.stream.api.grpc.v1.Controller.DeleteReaderGroupStatus) Mockito.reset(org.mockito.Mockito.reset) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Assert.assertEquals(org.junit.Assert.assertEquals) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) CommitEvent(io.pravega.shared.controller.event.CommitEvent) AssertExtensions(io.pravega.test.common.AssertExtensions) DeleteReaderGroupEvent(io.pravega.shared.controller.event.DeleteReaderGroupEvent) TimeoutException(java.util.concurrent.TimeoutException) SealStreamEvent(io.pravega.shared.controller.event.SealStreamEvent) VersionedMetadata(io.pravega.controller.store.VersionedMetadata) Stream(io.pravega.client.stream.Stream) After(org.junit.After) SubscribersResponse(io.pravega.controller.stream.api.grpc.v1.Controller.SubscribersResponse) Controller(io.pravega.controller.stream.api.grpc.v1.Controller) TaskExceptions(io.pravega.controller.server.eventProcessor.requesthandlers.TaskExceptions) EpochTransitionRecord(io.pravega.controller.store.stream.records.EpochTransitionRecord) ImmutableSet(com.google.common.collect.ImmutableSet) SealStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.SealStreamTask) ImmutableMap(com.google.common.collect.ImmutableMap) AssertExtensions.assertFutureThrows(io.pravega.test.common.AssertExtensions.assertFutureThrows) DeleteScopeEvent(io.pravega.shared.controller.event.DeleteScopeEvent) CompletionException(java.util.concurrent.CompletionException) ScaleStreamStatus(io.pravega.controller.stream.api.grpc.v1.Controller.ScaleResponse.ScaleStreamStatus) UUID(java.util.UUID) ScaleOperationTask(io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Collectors(java.util.stream.Collectors) AbstractClientFactoryImpl(io.pravega.client.stream.impl.AbstractClientFactoryImpl) List(java.util.List) CuratorFramework(org.apache.curator.framework.CuratorFramework) Config(io.pravega.controller.util.Config) UpdateStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.UpdateStreamTask) TxnStatus(io.pravega.controller.store.stream.TxnStatus) VersionedTransactionData(io.pravega.controller.store.stream.VersionedTransactionData) Optional(java.util.Optional) DeleteScopeTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteScopeTask) TruncateStreamTask(io.pravega.controller.server.eventProcessor.requesthandlers.TruncateStreamTask) NotImplementedException(org.apache.commons.lang3.NotImplementedException) 
ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) OperationContext(io.pravega.controller.store.stream.OperationContext) Segment(io.pravega.client.segment.impl.Segment) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) Getter(lombok.Getter) SegmentHelper(io.pravega.controller.server.SegmentHelper) ModelHelper(io.pravega.client.control.impl.ModelHelper) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) BucketStore(io.pravega.controller.store.stream.BucketStore) AbortEvent(io.pravega.shared.controller.event.AbortEvent) ExponentialBackoffRetry(org.apache.curator.retry.ExponentialBackoffRetry) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) LinkedList(java.util.LinkedList) ArgumentMatchers.anyInt(org.mockito.ArgumentMatchers.anyInt) SocketConnectionFactoryImpl(io.pravega.client.connection.impl.SocketConnectionFactoryImpl) ScaleResponse(io.pravega.controller.stream.api.grpc.v1.Controller.ScaleResponse) EventWriterConfig(io.pravega.client.stream.EventWriterConfig) ControllerService(io.pravega.controller.server.ControllerService) NameUtils(io.pravega.shared.NameUtils) Assert.assertNotNull(org.junit.Assert.assertNotNull) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) AbstractMap(java.util.AbstractMap) EpochRecord(io.pravega.controller.store.stream.records.EpochRecord) UpdateStreamStatus(io.pravega.controller.stream.api.grpc.v1.Controller.UpdateStreamStatus) Data(lombok.Data) State(io.pravega.controller.store.stream.State) ExecutorServiceHelpers(io.pravega.common.concurrent.ExecutorServiceHelpers) UpdateReaderGroupEvent(io.pravega.shared.controller.event.UpdateReaderGroupEvent) DeleteReaderGroupTask(io.pravega.controller.server.eventProcessor.requesthandlers.DeleteReaderGroupTask) ReaderGroupConfigResponse(io.pravega.controller.stream.api.grpc.v1.Controller.ReaderGroupConfigResponse) ScaleOpEvent(io.pravega.shared.controller.event.ScaleOpEvent) Collections(java.util.Collections) ClientConfig(io.pravega.client.ClientConfig) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamTruncationRecord(io.pravega.controller.store.stream.records.StreamTruncationRecord) HashMap(java.util.HashMap) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) AtomicLong(java.util.concurrent.atomic.AtomicLong) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) ControllerEventStreamWriterMock(io.pravega.controller.mocks.ControllerEventStreamWriterMock) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) RGStreamCutRecord(io.pravega.shared.controller.event.RGStreamCutRecord) StreamCutRecord(io.pravega.controller.store.stream.records.StreamCutRecord) Test(org.junit.Test)
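
Condensed, the time-based policy this test exercises has two parts: a new stream cut is recorded only if the minimum retention frequency has elapsed since the latest cut in the retention set (which is why streamCut2, taken only five minutes after streamCut1, is never added), and the stream is truncated at the newest cut older than the retention period (which is why streamCut1 becomes the truncation point at recordingTime4). A minimal sketch of those two checks, with hypothetical names rather than the controller's actual code:

import java.util.Comparator;
import java.util.List;
import java.util.Optional;

class TimeRetentionSketch {
    static final class CutRecord {
        final long recordingTime;
        CutRecord(long recordingTime) {
            this.recordingTime = recordingTime;
        }
    }

    // Add a new cut to the retention set only if the minimum retention
    // frequency has elapsed since the latest recorded cut.
    static boolean shouldRecord(long now, Optional<CutRecord> latest, long minFrequencyMillis) {
        return latest.map(l -> now - l.recordingTime > minFrequencyMillis).orElse(true);
    }

    // Truncate at the newest cut whose recording time falls outside the
    // retention period; everything recorded before it may be discarded.
    static Optional<CutRecord> truncationPoint(long now, List<CutRecord> retentionSet, long retentionMillis) {
        return retentionSet.stream()
                .filter(c -> now - c.recordingTime > retentionMillis)
                .max(Comparator.comparingLong(c -> c.recordingTime));
    }
}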

Example 68 with StreamCut

use of io.pravega.client.stream.StreamCut in project pravega by pravega.

From class ReaderCheckpointTest, the method generateStreamCutsTest.

@Test
public void generateStreamCutsTest() {
    controllerURI = fetchControllerURI();
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    StreamManager streamManager = StreamManager.create(clientConfig);
    assertTrue("Creating Scope", streamManager.createScope(SCOPE_2));
    assertTrue("Creating stream", streamManager.createStream(SCOPE_2, STREAM, streamConfig));
    @Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE_2, clientConfig);
    readerGroupManager.createReaderGroup(READER_GROUP_NAME, ReaderGroupConfig.builder().stream(io.pravega.client.stream.Stream.of(SCOPE_2, STREAM)).groupRefreshTimeMillis(GROUP_REFRESH_TIME_MILLIS).build());
    @Cleanup ReaderGroup readerGroup = readerGroupManager.getReaderGroup(READER_GROUP_NAME);
    int startInclusive = 1;
    int endExclusive = 100;
    log.info("Write events with range [{},{})", startInclusive, endExclusive);
    writeEvents(SCOPE_2, IntStream.range(startInclusive, endExclusive).boxed().collect(Collectors.toList()));
    readEventsAndVerify(SCOPE_2, startInclusive, endExclusive);
    // Obtain StreamCuts at 100th event.
    Map<Stream, StreamCut> cutAt100 = generateStreamCuts(readerGroup);
    // Write and read events 100 to 200
    startInclusive = 100;
    endExclusive = 200;
    log.info("Write events with range [{},{})", startInclusive, endExclusive);
    writeEvents(SCOPE_2, IntStream.range(startInclusive, endExclusive).boxed().collect(Collectors.toList()));
    readEventsAndVerify(SCOPE_2, startInclusive, endExclusive);
    // Reset to stream cut pointing to 100th event.
    readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(cutAt100).build());
    readEventsAndVerify(SCOPE_2, 100, endExclusive);
    // Obtain stream cut at 200th event.
    Map<Stream, StreamCut> cutAt200 = generateStreamCuts(readerGroup);
    // Write and read events 200 to 300.
    startInclusive = 200;
    endExclusive = 300;
    log.info("Write events with range [{},{})", startInclusive, endExclusive);
    writeEvents(SCOPE_2, IntStream.range(startInclusive, endExclusive).boxed().collect(Collectors.toList()));
    readEventsAndVerify(SCOPE_2, startInclusive, endExclusive);
    // Reset back to stream cut pointing to 200th event.
    readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(cutAt200).build());
    readEventsAndVerify(SCOPE_2, 200, endExclusive);
    // Reset back to stream cut pointing to 100th event.
    readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(cutAt100).build());
    readEventsAndVerify(SCOPE_2, 100, endExclusive);
    // clean up
    readerGroupManager.deleteReaderGroup(READER_GROUP_NAME);
}
Also used : ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) StreamCut(io.pravega.client.stream.StreamCut) StreamManager(io.pravega.client.admin.StreamManager) ReaderGroup(io.pravega.client.stream.ReaderGroup) Stream(io.pravega.client.stream.Stream) IntStream(java.util.stream.IntStream) ClientConfig(io.pravega.client.ClientConfig) Cleanup(lombok.Cleanup) Checkpoint(io.pravega.client.stream.Checkpoint) Test(org.junit.Test)
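
The pattern this test repeats, condensed: capture the group's position with generateStreamCuts (which runs a silent checkpoint, so readers must keep calling readNextEvent for it to complete), then rewind every reader by resetting the group with startFromStreamCuts. A minimal sketch, assuming the scope, stream, and a reader group named groupName already exist:

import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.stream.ReaderGroup;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamCut;
import java.util.Map;
import java.util.concurrent.ScheduledExecutorService;

class StreamCutRewindSketch {
    // Capture current positions, then later rewind the whole group to them.
    static void captureAndRewind(ReaderGroupManager manager, String groupName,
                                 ScheduledExecutorService executor) {
        ReaderGroup group = manager.getReaderGroup(groupName);
        // Blocks until active readers have processed the silent checkpoint.
        Map<Stream, StreamCut> cuts = group.generateStreamCuts(executor).join();
        // ... read further events here ...
        // Every reader in the group resumes from the captured positions.
        group.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(cuts).build());
    }
}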

Example 69 with StreamCut

use of io.pravega.client.stream.StreamCut in project pravega by pravega.

From class SingleSubscriberUpdateRetentionStreamCutTest, the method singleSubscriberCBRTest.

@Test
public void singleSubscriberCBRTest() throws Exception {
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM, new JavaSerializer<>(), EventWriterConfig.builder().build());
    // Write a single event.
    log.info("Writing event e1 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e1", SIZE_30_EVENT).join();
    @Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
    readerGroupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT).disableAutomaticCheckpoints().stream(Stream.of(SCOPE, STREAM)).build());
    ReaderGroup readerGroup = readerGroupManager.getReaderGroup(READER_GROUP);
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader(READER_GROUP + "-" + 1, READER_GROUP, new JavaSerializer<>(), readerConfig);
    // Read one event.
    log.info("Reading event e1 from {}/{}", SCOPE, STREAM);
    EventRead<String> read = reader.readNextEvent(READ_TIMEOUT);
    assertFalse(read.isCheckpoint());
    assertEquals("data of size 30", read.getEvent());
    // Update the retention stream-cut.
    log.info("{} generating stream-cuts for {}/{}", READER_GROUP, SCOPE, STREAM);
    CompletableFuture<Map<Stream, StreamCut>> futureCuts = readerGroup.generateStreamCuts(streamCutExecutor);
    // Wait for 5 seconds to force reader group state update. This will allow for the silent
    // checkpoint event generated as part of generateStreamCuts to be picked and processed.
    Exceptions.handleInterrupted(() -> TimeUnit.SECONDS.sleep(5));
    EventRead<String> emptyEvent = reader.readNextEvent(READ_TIMEOUT);
    assertTrue("Stream-cut generation did not complete", Futures.await(futureCuts, 10_000));
    Map<Stream, StreamCut> streamCuts = futureCuts.join();
    log.info("{} updating its retention stream-cut to {}", READER_GROUP, streamCuts);
    readerGroup.updateRetentionStreamCut(streamCuts);
    // Write two more events.
    log.info("Writing event e2 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e2", SIZE_30_EVENT).join();
    log.info("Writing event e3 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e3", SIZE_30_EVENT).join();
    // Check to make sure truncation happened after the first event.
    // The timeout is 5 minutes as the retention period is set to 2 minutes. We allow for 2 cycles to fully complete
    // and a little longer in order to confirm that the retention has taken place.
    AssertExtensions.assertEventuallyEquals("Truncation did not take place at offset 30.", true, () -> controller.getSegmentsAtTime(new StreamImpl(SCOPE, STREAM), 0L).join().values().stream().anyMatch(off -> off >= 30), 1000, 5 * 60 * 1000L);
    // Read next event.
    log.info("Reading event e2 from {}/{}", SCOPE, STREAM);
    read = reader.readNextEvent(READ_TIMEOUT);
    assertFalse(read.isCheckpoint());
    assertEquals("data of size 30", read.getEvent());
    // Update the retention stream-cut.
    log.info("{} generating stream-cuts for {}/{}", READER_GROUP, SCOPE, STREAM);
    CompletableFuture<Map<Stream, StreamCut>> futureCuts2 = readerGroup.generateStreamCuts(streamCutExecutor);
    // Wait for 5 seconds to force reader group state update. This will allow for the silent
    // checkpoint event generated as part of generateStreamCuts to be picked and processed.
    Exceptions.handleInterrupted(() -> TimeUnit.SECONDS.sleep(5));
    EventRead<String> emptyEvent2 = reader.readNextEvent(READ_TIMEOUT);
    assertTrue("Stream-cut generation did not complete", Futures.await(futureCuts2, 10_000));
    Map<Stream, StreamCut> streamCuts2 = futureCuts2.join();
    log.info("{} updating its retention stream-cut to {}", READER_GROUP, streamCuts2);
    readerGroup.updateRetentionStreamCut(streamCuts2);
    // Check to make sure truncation happened after the second event.
    // The timeout is 5 minutes as the retention period is set to 2 minutes. We allow for 2 cycles to fully complete
    // and a little longer in order to confirm that the retention has taken place.
    AssertExtensions.assertEventuallyEquals("Truncation did not take place at offset 60", true, () -> controller.getSegmentsAtTime(new StreamImpl(SCOPE, STREAM), 0L).join().values().stream().anyMatch(off -> off >= 60), 1000, 5 * 60 * 1000L);
}
Also used : StreamCut(io.pravega.client.stream.StreamCut) EventStreamWriter(io.pravega.client.stream.EventStreamWriter) MarathonException(mesosphere.marathon.client.MarathonException) StreamImpl(io.pravega.client.stream.impl.StreamImpl) AssertExtensions(io.pravega.test.common.AssertExtensions) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) StreamManager(io.pravega.client.admin.StreamManager) Exceptions(io.pravega.common.Exceptions) RunWith(org.junit.runner.RunWith) Cleanup(lombok.Cleanup) CompletableFuture(java.util.concurrent.CompletableFuture) ReaderGroup(io.pravega.client.stream.ReaderGroup) JavaSerializer(io.pravega.client.stream.impl.JavaSerializer) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) EventRead(io.pravega.client.stream.EventRead) Service(io.pravega.test.system.framework.services.Service) ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) Stream(io.pravega.client.stream.Stream) After(org.junit.After) Map(java.util.Map) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) URI(java.net.URI) Utils(io.pravega.test.system.framework.Utils) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) Before(org.junit.Before) EventWriterConfig(io.pravega.client.stream.EventWriterConfig) Environment(io.pravega.test.system.framework.Environment) Assert.assertTrue(org.junit.Assert.assertTrue) EventStreamReader(io.pravega.client.stream.EventStreamReader) Test(org.junit.Test) TimeUnit(java.util.concurrent.TimeUnit) ControllerImplConfig(io.pravega.client.control.impl.ControllerImplConfig) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) Assert.assertFalse(org.junit.Assert.assertFalse) ControllerImpl(io.pravega.client.control.impl.ControllerImpl) ReaderConfig(io.pravega.client.stream.ReaderConfig) ExecutorServiceHelpers(io.pravega.common.concurrent.ExecutorServiceHelpers) RandomFactory(io.pravega.common.hash.RandomFactory) Controller(io.pravega.client.control.impl.Controller) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Futures(io.pravega.common.concurrent.Futures) SystemTestRunner(io.pravega.test.system.framework.SystemTestRunner) Assert.assertEquals(org.junit.Assert.assertEquals) ClientConfig(io.pravega.client.ClientConfig) ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) StreamCut(io.pravega.client.stream.StreamCut) ReaderGroup(io.pravega.client.stream.ReaderGroup) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) Cleanup(lombok.Cleanup) StreamImpl(io.pravega.client.stream.impl.StreamImpl) Stream(io.pravega.client.stream.Stream) ClientConfig(io.pravega.client.ClientConfig) Map(java.util.Map) Test(org.junit.Test)
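
The consumption-based retention handshake in this test, reduced to its two client calls: the subscriber group publishes its processed position with generateStreamCuts followed by updateRetentionStreamCut, and the controller is then free to truncate everything before that cut on its next retention cycle. A minimal sketch, assuming the group was created with StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT as above:

import io.pravega.client.stream.ReaderGroup;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamCut;
import java.util.Map;
import java.util.concurrent.ScheduledExecutorService;

class CbrSubscriberSketch {
    // Call after a batch of events has been durably processed; readers must
    // keep reading so the silent checkpoint behind generateStreamCuts completes.
    static void releaseProcessedData(ReaderGroup subscriberGroup, ScheduledExecutorService executor) {
        Map<Stream, StreamCut> cuts = subscriberGroup.generateStreamCuts(executor).join();
        // Tells the controller this subscriber no longer needs data before the cut.
        subscriberGroup.updateRetentionStreamCut(cuts);
    }
}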

Example 70 with StreamCut

use of io.pravega.client.stream.StreamCut in project pravega by pravega.

From class BatchClientSimpleTest, the method batchClientSimpleTest.

/**
 * This test verifies the basic functionality of {@link BatchClientFactory}, including stream metadata checks, segment
 * counts, parallel segment reads and reads with offsets using stream cuts.
 */
@Test
@SuppressWarnings("deprecation")
public void batchClientSimpleTest() {
    final int totalEvents = RG_PARALLELISM * 100;
    final int offsetEvents = RG_PARALLELISM * 20;
    final int batchIterations = 4;
    final Stream stream = Stream.of(SCOPE, STREAM);
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
    ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), connectionFactory.getInternalExecutor());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionFactory);
    @Cleanup BatchClientFactory batchClient = BatchClientFactory.withScope(SCOPE, clientConfig);
    log.info("Invoking batchClientSimpleTest test with Controller URI: {}", controllerURI);
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
    groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(SCOPE + "/" + STREAM).build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(READER_GROUP);
    log.info("Writing events to stream");
    // Write events to the Stream.
    writeEvents(clientFactory, STREAM, totalEvents);
    // Instantiate readers to consume from the Stream up to offsetEvents.
    List<CompletableFuture<Integer>> futures = readEventFutures(clientFactory, READER_GROUP, RG_PARALLELISM, offsetEvents);
    Futures.allOf(futures).join();
    // Create a stream cut on the specified offset position.
    Checkpoint cp = readerGroup.initiateCheckpoint("batchClientCheckpoint", executor).join();
    StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();
    // Instantiate the batch client and assert it provides correct stream info.
    log.debug("Creating batch client.");
    StreamInfo streamInfo = streamManager.getStreamInfo(SCOPE, stream.getStreamName());
    log.debug("Validating stream metadata fields.");
    assertEquals("Expected Stream name: ", STREAM, streamInfo.getStreamName());
    assertEquals("Expected Scope name: ", SCOPE, streamInfo.getScope());
    // Test that we can read events from parallel segments from an offset onwards.
    log.debug("Reading events from stream cut onwards in parallel.");
    List<SegmentRange> ranges = Lists.newArrayList(batchClient.getSegments(stream, streamCut, StreamCut.UNBOUNDED).getIterator());
    assertEquals("Expected events read: ", totalEvents - offsetEvents, readFromRanges(ranges, batchClient));
    // Emulate the behavior of Hadoop client: i) Get tail of Stream, ii) Read from current point until tail, iii) repeat.
    log.debug("Reading in batch iterations.");
    StreamCut currentTailStreamCut = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getTailStreamCut();
    int readEvents = 0;
    for (int i = 0; i < batchIterations; i++) {
        writeEvents(clientFactory, STREAM, totalEvents);
        // Read all the existing events in parallel segments from the previous tail to the current one.
        ranges = Lists.newArrayList(batchClient.getSegments(stream, currentTailStreamCut, StreamCut.UNBOUNDED).getIterator());
        assertEquals("Expected number of segments: ", RG_PARALLELISM, ranges.size());
        readEvents += readFromRanges(ranges, batchClient);
        log.debug("Events read in parallel so far: {}.", readEvents);
        currentTailStreamCut = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getTailStreamCut();
    }
    assertEquals("Expected events read: .", totalEvents * batchIterations, readEvents);
    // Truncate the stream at the stream cut taken earlier.
    log.debug("Truncating stream at event {}.", offsetEvents);
    assertTrue(controller.truncateStream(SCOPE, STREAM, streamCut).join());
    // Test the batch client when we select to start reading a Stream from a truncation point.
    StreamCut initialPosition = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getHeadStreamCut();
    List<SegmentRange> newRanges = Lists.newArrayList(batchClient.getSegments(stream, initialPosition, StreamCut.UNBOUNDED).getIterator());
    assertEquals("Expected events read: ", (totalEvents - offsetEvents) + totalEvents * batchIterations, readFromRanges(newRanges, batchClient));
    log.debug("Events correctly read from Stream: simple batch client test passed.");
}
Also used : SegmentRange(io.pravega.client.batch.SegmentRange) ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) StreamCut(io.pravega.client.stream.StreamCut) ReaderGroup(io.pravega.client.stream.ReaderGroup) ControllerImpl(io.pravega.client.control.impl.ControllerImpl) SocketConnectionFactoryImpl(io.pravega.client.connection.impl.SocketConnectionFactoryImpl) Cleanup(lombok.Cleanup) Checkpoint(io.pravega.client.stream.Checkpoint) ConnectionFactory(io.pravega.client.connection.impl.ConnectionFactory) ClientFactoryImpl(io.pravega.client.stream.impl.ClientFactoryImpl) CompletableFuture(java.util.concurrent.CompletableFuture) Checkpoint(io.pravega.client.stream.Checkpoint) BatchClientFactory(io.pravega.client.BatchClientFactory) StreamInfo(io.pravega.client.admin.StreamInfo) Stream(io.pravega.client.stream.Stream) ClientConfig(io.pravega.client.ClientConfig) Test(org.junit.Test)
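
The readFromRanges helper this test calls is not shown in the example, so the sketch below is only a plausible shape for it: it counts events across all segments between two cuts. getSegments and readSegment returning a SegmentIterator are the real BatchClientFactory API; the serializer choice and the counting logic are assumptions. Each SegmentRange could equally be handed to a separate worker, which is what "parallel segment reads" means here.

import com.google.common.collect.Lists;
import io.pravega.client.BatchClientFactory;
import io.pravega.client.batch.SegmentIterator;
import io.pravega.client.batch.SegmentRange;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamCut;
import io.pravega.client.stream.impl.JavaSerializer;
import java.util.List;

class BatchReadSketch {
    // Sequential for clarity: iterate every segment range between the two cuts
    // and count the events it yields.
    static int countEvents(BatchClientFactory batchClient, Stream stream,
                           StreamCut from, StreamCut to) {
        List<SegmentRange> ranges = Lists.newArrayList(batchClient.getSegments(stream, from, to).getIterator());
        int count = 0;
        for (SegmentRange range : ranges) {
            // Assumes events were written as Java-serialized Strings, as is
            // common in these system tests.
            try (SegmentIterator<String> events = batchClient.readSegment(range, new JavaSerializer<String>())) {
                while (events.hasNext()) {
                    events.next();
                    count++;
                }
            }
        }
        return count;
    }
}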

Aggregations

StreamCut (io.pravega.client.stream.StreamCut) 79
Stream (io.pravega.client.stream.Stream) 65
Test (org.junit.Test) 65
Segment (io.pravega.client.segment.impl.Segment) 48
ReaderGroupConfig (io.pravega.client.stream.ReaderGroupConfig) 47
HashMap (java.util.HashMap) 39
StreamCutImpl (io.pravega.client.stream.impl.StreamCutImpl) 37
Cleanup (lombok.Cleanup) 30
StreamConfiguration (io.pravega.client.stream.StreamConfiguration) 26
Map (java.util.Map) 25
ReaderGroupManager (io.pravega.client.admin.ReaderGroupManager) 21
ScalingPolicy (io.pravega.client.stream.ScalingPolicy) 21
ArgumentMatchers.anyLong (org.mockito.ArgumentMatchers.anyLong) 20
CompletableFuture (java.util.concurrent.CompletableFuture) 19
ReaderGroup (io.pravega.client.stream.ReaderGroup) 18
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString) 18
EventStreamClientFactory (io.pravega.client.EventStreamClientFactory) 17
ClientConfig (io.pravega.client.ClientConfig) 16
Futures (io.pravega.common.concurrent.Futures) 15
AtomicLong (java.util.concurrent.atomic.AtomicLong) 15