Example 36 with StreamCut

Use of io.pravega.client.stream.StreamCut in project pravega by pravega.

From the class WatermarkingTest, method watermarkTxnTest.

@Test(timeout = 120000)
public void watermarkTxnTest() throws Exception {
    Controller controller = PRAVEGA.getLocalController();
    String scope = "scopeTx";
    String stream = "watermarkTxnTest";
    ClientConfig clientConfig = ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build();
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(5)).build();
    @Cleanup StreamManager streamManager = StreamManager.create(clientConfig);
    streamManager.createScope(scope);
    streamManager.createStream(scope, stream, config);
    Stream streamObj = Stream.of(scope, stream);
    // create 2 writers
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
    JavaSerializer<Long> javaSerializer = new JavaSerializer<>();
    @Cleanup TransactionalEventStreamWriter<Long> writer1 = clientFactory.createTransactionalEventWriter("writer1", stream, new JavaSerializer<>(), EventWriterConfig.builder().transactionTimeoutTime(10000).build());
    @Cleanup TransactionalEventStreamWriter<Long> writer2 = clientFactory.createTransactionalEventWriter("writer2", stream, new JavaSerializer<>(), EventWriterConfig.builder().transactionTimeoutTime(10000).build());
    AtomicBoolean stopFlag = new AtomicBoolean(false);
    // write events
    CompletableFuture<Void> writer1Future = writeTxEvents(writer1, stopFlag);
    CompletableFuture<Void> writer2Future = writeTxEvents(writer2, stopFlag);
    // scale the stream several times so that we get complex positions
    scale(controller, streamObj, config);
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
    @Cleanup ClientFactoryImpl syncClientFactory = new ClientFactoryImpl(scope, new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), connectionFactory.getInternalExecutor()), connectionFactory);
    String markStream = NameUtils.getMarkStreamForStream(stream);
    @Cleanup RevisionedStreamClient<Watermark> watermarkReader = syncClientFactory.createRevisionedStreamClient(markStream, new WatermarkSerializer(), SynchronizerConfig.builder().build());
    LinkedBlockingQueue<Watermark> watermarks = new LinkedBlockingQueue<>();
    fetchWatermarks(watermarkReader, watermarks, stopFlag);
    AssertExtensions.assertEventuallyEquals(true, () -> watermarks.size() >= 2, 100000);
    stopFlag.set(true);
    writer1Future.join();
    writer2Future.join();
    // read events from the stream
    @Cleanup ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, syncClientFactory);
    Watermark watermark0 = watermarks.take();
    Watermark watermark1 = watermarks.take();
    assertTrue(watermark0.getLowerTimeBound() <= watermark0.getUpperTimeBound());
    assertTrue(watermark1.getLowerTimeBound() <= watermark1.getUpperTimeBound());
    assertTrue(watermark0.getLowerTimeBound() < watermark1.getLowerTimeBound());
    Map<Segment, Long> positionMap0 = watermark0.getStreamCut().entrySet().stream().collect(Collectors.toMap(x -> new Segment(scope, stream, x.getKey().getSegmentId()), Map.Entry::getValue));
    Map<Segment, Long> positionMap1 = watermark1.getStreamCut().entrySet().stream().collect(Collectors.toMap(x -> new Segment(scope, stream, x.getKey().getSegmentId()), Map.Entry::getValue));
    StreamCut streamCutFirst = new StreamCutImpl(streamObj, positionMap0);
    StreamCut streamCutSecond = new StreamCutImpl(streamObj, positionMap1);
    Map<Stream, StreamCut> firstMarkStreamCut = Collections.singletonMap(streamObj, streamCutFirst);
    Map<Stream, StreamCut> secondMarkStreamCut = Collections.singletonMap(streamObj, streamCutSecond);
    // Read between the stream cuts of the first and second watermarks.
    String readerGroup = "watermarkTxnTest-group";
    readerGroupManager.createReaderGroup(readerGroup, ReaderGroupConfig.builder().stream(streamObj).startingStreamCuts(firstMarkStreamCut).endingStreamCuts(secondMarkStreamCut).disableAutomaticCheckpoints().build());
    @Cleanup final EventStreamReader<Long> reader = clientFactory.createReader("myreaderTx", readerGroup, javaSerializer, ReaderConfig.builder().build());
    EventRead<Long> event = reader.readNextEvent(10000L);
    TimeWindow currentTimeWindow = reader.getCurrentTimeWindow(streamObj);
    while (event.getEvent() != null && currentTimeWindow.getLowerTimeBound() == null && currentTimeWindow.getUpperTimeBound() == null) {
        event = reader.readNextEvent(10000L);
        currentTimeWindow = reader.getCurrentTimeWindow(streamObj);
    }
    assertNotNull(currentTimeWindow.getUpperTimeBound());
    // Read all events and verify that each event's time falls within the reported time window bounds.
    while (event.getEvent() != null) {
        Long time = event.getEvent();
        log.info("timewindow = {} event = {}", currentTimeWindow, time);
        assertTrue(currentTimeWindow.getLowerTimeBound() == null || time >= currentTimeWindow.getLowerTimeBound());
        assertTrue(currentTimeWindow.getUpperTimeBound() == null || time <= currentTimeWindow.getUpperTimeBound());
        TimeWindow nextTimeWindow = reader.getCurrentTimeWindow(streamObj);
        assertTrue(currentTimeWindow.getLowerTimeBound() == null || nextTimeWindow.getLowerTimeBound() >= currentTimeWindow.getLowerTimeBound());
        assertTrue(currentTimeWindow.getUpperTimeBound() == null || nextTimeWindow.getUpperTimeBound() >= currentTimeWindow.getUpperTimeBound());
        currentTimeWindow = nextTimeWindow;
        event = reader.readNextEvent(10000L);
        if (event.isCheckpoint()) {
            event = reader.readNextEvent(10000L);
        }
    }
    assertNotNull(currentTimeWindow.getLowerTimeBound());
}
Also used : StreamCut(io.pravega.client.stream.StreamCut) EventStreamWriter(io.pravega.client.stream.EventStreamWriter) AssertExtensions(io.pravega.test.common.AssertExtensions) ReaderGroupManagerImpl(io.pravega.client.admin.impl.ReaderGroupManagerImpl) Cleanup(lombok.Cleanup) JavaSerializer(io.pravega.client.stream.impl.JavaSerializer) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) EventRead(io.pravega.client.stream.EventRead) ClientFactoryImpl(io.pravega.client.stream.impl.ClientFactoryImpl) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) Stream(io.pravega.client.stream.Stream) Duration(java.time.Duration) Map(java.util.Map) TimeWindow(io.pravega.client.stream.TimeWindow) ClassRule(org.junit.ClassRule) Transaction(io.pravega.client.stream.Transaction) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) WatermarkSerializer(io.pravega.client.watermark.WatermarkSerializer) CompletionException(java.util.concurrent.CompletionException) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Collectors(java.util.stream.Collectors) ControllerImplConfig(io.pravega.client.control.impl.ControllerImplConfig) Slf4j(lombok.extern.slf4j.Slf4j) ThreadPooledTestSuite(io.pravega.test.common.ThreadPooledTestSuite) Controller(io.pravega.client.control.impl.Controller) Futures(io.pravega.common.concurrent.Futures) Segment(io.pravega.client.segment.impl.Segment) ConnectionFactory(io.pravega.client.connection.impl.ConnectionFactory) StreamManager(io.pravega.client.admin.StreamManager) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) AtomicReference(java.util.concurrent.atomic.AtomicReference) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) RevisionedStreamClient(io.pravega.client.state.RevisionedStreamClient) SynchronizerConfig(io.pravega.client.state.SynchronizerConfig) SocketConnectionFactoryImpl(io.pravega.client.connection.impl.SocketConnectionFactoryImpl) TransactionalEventStreamWriter(io.pravega.client.stream.TransactionalEventStreamWriter) EventWriterConfig(io.pravega.client.stream.EventWriterConfig) NameUtils(io.pravega.shared.NameUtils) Iterator(java.util.Iterator) Assert.assertNotNull(org.junit.Assert.assertNotNull) Assert.assertTrue(org.junit.Assert.assertTrue) EventStreamReader(io.pravega.client.stream.EventStreamReader) Test(org.junit.Test) Watermark(io.pravega.shared.watermarks.Watermark) TimeUnit(java.util.concurrent.TimeUnit) AtomicLong(java.util.concurrent.atomic.AtomicLong) Assert.assertNull(org.junit.Assert.assertNull) SynchronizerClientFactory(io.pravega.client.SynchronizerClientFactory) ControllerImpl(io.pravega.client.control.impl.ControllerImpl) Revision(io.pravega.client.state.Revision) ReaderConfig(io.pravega.client.stream.ReaderConfig) TxnFailedException(io.pravega.client.stream.TxnFailedException) Collections(java.util.Collections) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Assert.assertEquals(org.junit.Assert.assertEquals) ClientConfig(io.pravega.client.ClientConfig)
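
Distilled from the test above, a minimal sketch of the StreamCut-specific steps: converting a Watermark's segment-to-offset map into a client-side StreamCut and using two such cuts to bound a reader group. The helper class and method names below are illustrative, not part of the Pravega test; the API calls mirror the ones used above.

import io.pravega.client.segment.impl.Segment;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamCut;
import io.pravega.client.stream.impl.StreamCutImpl;
import io.pravega.shared.watermarks.Watermark;
import java.util.Collections;
import java.util.Map;
import java.util.stream.Collectors;

// Illustrative helper, not part of WatermarkingTest.
final class WatermarkStreamCuts {

    // Convert a Watermark's per-segment offsets into a client-side StreamCut for the given stream.
    static StreamCut toStreamCut(Stream stream, Watermark watermark) {
        Map<Segment, Long> positions = watermark.getStreamCut().entrySet().stream()
                .collect(Collectors.toMap(
                        e -> new Segment(stream.getScope(), stream.getStreamName(), e.getKey().getSegmentId()),
                        Map.Entry::getValue));
        return new StreamCutImpl(stream, positions);
    }

    // Bound a reader group between the stream cuts of two watermarks, as the test does.
    static ReaderGroupConfig boundedBetween(Stream stream, Watermark from, Watermark to) {
        return ReaderGroupConfig.builder()
                .stream(stream)
                .startingStreamCuts(Collections.singletonMap(stream, toStreamCut(stream, from)))
                .endingStreamCuts(Collections.singletonMap(stream, toStreamCut(stream, to)))
                .disableAutomaticCheckpoints()
                .build();
    }
}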

Example 37 with StreamCut

Use of io.pravega.client.stream.StreamCut in project pravega by pravega.

From the class ControllerServiceTest, method readerGroupsTest.

private static void readerGroupsTest(Controller controller, final String scope, final String stream1, final String stream2, final String stream3) throws InterruptedException, ExecutionException {
    final String scopedStreamName1 = NameUtils.getScopedStreamName(scope, stream1);
    final String scopedStreamName2 = NameUtils.getScopedStreamName(scope, stream2);
    final String scopedStreamName3 = NameUtils.getScopedStreamName(scope, stream3);
    final Segment seg0 = new Segment(scope, stream1, 0L);
    final Segment seg1 = new Segment(scope, stream1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 10L, seg1, 10L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(scope, stream1), new StreamCutImpl(Stream.of(scope, stream1), startStreamCut), Stream.of(scope, stream2), new StreamCutImpl(Stream.of(scope, stream2), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 200L, seg1, 300L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(scope, stream1), new StreamCutImpl(Stream.of(scope, stream1), endStreamCut), Stream.of(scope, stream2), new StreamCutImpl(Stream.of(scope, stream2), endStreamCut));
    ReaderGroupConfig rgConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(30000L).groupRefreshTimeMillis(20000L).maxOutstandingCheckpointRequest(2).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSC).endingStreamCuts(endSC).build();
    final ReaderGroupConfig rgConfig1 = ReaderGroupConfig.cloneConfig(rgConfig, UUID.randomUUID(), 0L);
    // Create Reader Group rg1
    ReaderGroupConfig createRGResult = controller.createReaderGroup(scope, "rg1", rgConfig1).get();
    assertEquals(rgConfig1.getReaderGroupId(), createRGResult.getReaderGroupId());
    assertThrows(IllegalArgumentException.class, () -> controller.createReaderGroup(scope, "bad_rg_name", rgConfig1).get());
    assertThrows(IllegalArgumentException.class, () -> controller.createReaderGroup("badscope", "rg3", rgConfig1).get());
    ReaderGroupConfig rgConfig2 = ReaderGroupConfig.builder().retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT).stream(scopedStreamName1).stream(scopedStreamName2).automaticCheckpointIntervalMillis(7000L).build();
    rgConfig2 = ReaderGroupConfig.cloneConfig(rgConfig2, UUID.randomUUID(), 0L);
    // Create Reader Group rg2
    createRGResult = controller.createReaderGroup(scope, "rg2", rgConfig2).get();
    assertEquals(rgConfig2.getReaderGroupId(), createRGResult.getReaderGroupId());
    List<String> subscribers = controller.listSubscribers(scope, stream1).get();
    assertTrue(subscribers.size() == 2);
    assertTrue(controller.deleteReaderGroup(scope, "rg2", rgConfig2.getReaderGroupId()).get());
    assertThrows(IllegalArgumentException.class, () -> controller.getReaderGroupConfig(scope, "rg2").get());
    subscribers = controller.listSubscribers(scope, stream1).get();
    assertTrue(subscribers.size() == 1);
    ReaderGroupConfig config = controller.getReaderGroupConfig(scope, "rg1").get();
    assertEquals(rgConfig1.getGroupRefreshTimeMillis(), config.getGroupRefreshTimeMillis());
    assertEquals(rgConfig1.getGeneration(), config.getGeneration());
    assertEquals(rgConfig1.getMaxOutstandingCheckpointRequest(), config.getMaxOutstandingCheckpointRequest());
    assertEquals(rgConfig1.getRetentionType(), config.getRetentionType());
    assertEquals(rgConfig1.getReaderGroupId(), config.getReaderGroupId());
    assertEquals(rgConfig1.getStartingStreamCuts().keySet().size(), config.getStartingStreamCuts().keySet().size());
    assertEquals(rgConfig1.getEndingStreamCuts().keySet().size(), config.getEndingStreamCuts().keySet().size());
    assertTrue(config.getStartingStreamCuts().keySet().contains(Stream.of(scope, stream1)));
    assertTrue(config.getStartingStreamCuts().keySet().contains(Stream.of(scope, stream2)));
    Map<Stream, StreamCut> startSCNew = ImmutableMap.of(Stream.of(scope, stream2), new StreamCutImpl(Stream.of(scope, stream2), startStreamCut), Stream.of(scope, stream3), new StreamCutImpl(Stream.of(scope, stream3), startStreamCut));
    Map<Stream, StreamCut> endSCNew = ImmutableMap.of(Stream.of(scope, stream2), new StreamCutImpl(Stream.of(scope, stream2), endStreamCut), Stream.of(scope, stream3), new StreamCutImpl(Stream.of(scope, stream3), endStreamCut));
    ReaderGroupConfig newRGConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(1000L).groupRefreshTimeMillis(5000L).maxOutstandingCheckpointRequest(7).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).startingStreamCuts(startSCNew).endingStreamCuts(endSCNew).build();
    newRGConfig = ReaderGroupConfig.cloneConfig(newRGConfig, rgConfig1.getReaderGroupId(), rgConfig1.getGeneration());
    // Update Reader Group rg1
    assertNotNull(controller.updateReaderGroup(scope, "rg1", newRGConfig).get());
    ReaderGroupConfig updatedConfig = controller.getReaderGroupConfig(scope, "rg1").get();
    assertEquals(newRGConfig.getGroupRefreshTimeMillis(), updatedConfig.getGroupRefreshTimeMillis());
    assertEquals(newRGConfig.getGeneration() + 1, updatedConfig.getGeneration());
    assertEquals(newRGConfig.getMaxOutstandingCheckpointRequest(), updatedConfig.getMaxOutstandingCheckpointRequest());
    assertEquals(newRGConfig.getRetentionType(), updatedConfig.getRetentionType());
    assertEquals(newRGConfig.getReaderGroupId(), updatedConfig.getReaderGroupId());
    assertEquals(newRGConfig.getStartingStreamCuts().keySet().size(), updatedConfig.getStartingStreamCuts().keySet().size());
    assertEquals(newRGConfig.getEndingStreamCuts().keySet().size(), updatedConfig.getEndingStreamCuts().keySet().size());
    assertTrue(updatedConfig.getStartingStreamCuts().keySet().contains(Stream.of(scope, stream3)));
    assertTrue(updatedConfig.getStartingStreamCuts().keySet().contains(Stream.of(scope, stream2)));
    // re-create ReaderGroup "rg2"
    ReaderGroupConfig rgConfigRecreate = ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(scopedStreamName3).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).build();
    rgConfigRecreate = ReaderGroupConfig.cloneConfig(rgConfigRecreate, UUID.randomUUID(), 0L);
    ReaderGroupConfig recreateRGResponse = controller.createReaderGroup(scope, "rg2", rgConfigRecreate).get();
    assertEquals(rgConfigRecreate.getReaderGroupId(), recreateRGResponse.getReaderGroupId());
    assertEquals(rgConfigRecreate.getRetentionType(), recreateRGResponse.getRetentionType());
    // Update a ReaderGroup from Subscriber to Non-subscriber
    final String readerGroupName = "rg3";
    ReaderGroupConfig rgConfigSubscriber = ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(scopedStreamName1).retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT).build();
    rgConfigSubscriber = ReaderGroupConfig.cloneConfig(rgConfigSubscriber, UUID.randomUUID(), 0L);
    ReaderGroupConfig subscriberRG = controller.createReaderGroup(scope, readerGroupName, rgConfigSubscriber).join();
    assertEquals(rgConfigSubscriber.getReaderGroupId(), subscriberRG.getReaderGroupId());
    subscribers = controller.listSubscribers(scope, stream1).get();
    assertEquals(1, subscribers.size());
    ReaderGroupConfig rgConfigNonSubscriber = ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(scopedStreamName1).build();
    rgConfigNonSubscriber = ReaderGroupConfig.cloneConfig(rgConfigNonSubscriber, rgConfigSubscriber.getReaderGroupId(), rgConfigSubscriber.getGeneration());
    long updatedGen = controller.updateReaderGroup(scope, readerGroupName, rgConfigNonSubscriber).join();
    assertEquals(rgConfigNonSubscriber.getGeneration() + 1, updatedGen);
    updatedConfig = controller.getReaderGroupConfig(scope, readerGroupName).join();
    assertEquals(rgConfigNonSubscriber.getReaderGroupId(), updatedConfig.getReaderGroupId());
    assertEquals(rgConfigNonSubscriber.getRetentionType(), updatedConfig.getRetentionType());
    assertEquals(rgConfigNonSubscriber.getGeneration() + 1, updatedConfig.getGeneration());
    subscribers = controller.listSubscribers(scope, stream1).get();
    assertEquals(0, subscribers.size());
    // Update ReaderGroup from Non-Subscriber to Subscriber
    ReaderGroupConfig subscriberConfig = ReaderGroupConfig.builder().stream(scopedStreamName1).retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT).build();
    subscriberConfig = ReaderGroupConfig.cloneConfig(subscriberConfig, updatedConfig.getReaderGroupId(), updatedConfig.getGeneration());
    long gen = controller.updateReaderGroup(scope, readerGroupName, subscriberConfig).join();
    assertEquals(subscriberConfig.getGeneration() + 1, gen);
    ReaderGroupConfig newUpdatedConfig = controller.getReaderGroupConfig(scope, readerGroupName).join();
    assertEquals(subscriberConfig.getReaderGroupId(), newUpdatedConfig.getReaderGroupId());
    assertEquals(subscriberConfig.getRetentionType(), newUpdatedConfig.getRetentionType());
    assertEquals(gen, newUpdatedConfig.getGeneration());
    subscribers = controller.listSubscribers(scope, stream1).get();
    assertEquals(1, subscribers.size());
}
Also used : ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) StreamCut(io.pravega.client.stream.StreamCut) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) Stream(io.pravega.client.stream.Stream) Segment(io.pravega.client.segment.impl.Segment)
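
A minimal sketch of the create-then-update cycle exercised above, focusing on how ReaderGroupConfig.cloneConfig stamps a reader group id and generation onto a config and how updateReaderGroup returns the incremented generation (per the assertions in the test). The class and method names are illustrative; the Controller, scope, and stream are assumed to be set up as in the test.

import io.pravega.client.control.impl.Controller;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.shared.NameUtils;
import java.util.UUID;

// Illustrative helper, not part of ControllerServiceTest.
final class ReaderGroupLifecycleSketch {

    static long createThenUpdate(Controller controller, String scope, String streamName) {
        // Create a subscriber reader group; cloneConfig assigns the reader group id and generation 0.
        ReaderGroupConfig initial = ReaderGroupConfig.cloneConfig(
                ReaderGroupConfig.builder()
                        .stream(NameUtils.getScopedStreamName(scope, streamName))
                        .retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT)
                        .build(),
                UUID.randomUUID(), 0L);
        controller.createReaderGroup(scope, "rg", initial).join();

        // Updates must carry the same id and the current generation; the controller returns the
        // new generation, which the test asserts to be the previous generation plus one.
        ReaderGroupConfig update = ReaderGroupConfig.cloneConfig(
                ReaderGroupConfig.builder().stream(NameUtils.getScopedStreamName(scope, streamName)).build(),
                initial.getReaderGroupId(), initial.getGeneration());
        return controller.updateReaderGroup(scope, "rg", update).join();
    }
}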

Example 38 with StreamCut

Use of io.pravega.client.stream.StreamCut in project pravega by pravega.

From the class BoundedStreamReaderTest, method testBoundedStreamWithTruncationTest.

@Test(timeout = 60000)
public void testBoundedStreamWithTruncationTest() throws Exception {
    createScope(SCOPE);
    createStream(STREAM3);
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, ClientConfig.builder().controllerURI(controllerUri).build());
    @Cleanup EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM3, serializer, EventWriterConfig.builder().build());
    // Prep the stream with data.
    // 1. Write events with an event size of 30.
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(1)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(2)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(3)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(4)).get();
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    // StreamCuts pointing to offset 30 (the start of event 2) and offset 60 (the end of event 2).
    StreamCut offset30SC = getStreamCut(STREAM3, 30L, 0);
    StreamCut offset60SC = getStreamCut(STREAM3, 60L, 0);
    // startStreamCut points to the second event in the stream; endStreamCut points to the
    // offset after two events (i.e. 2 * 30 (event size) = 60).
    groupManager.createReaderGroup("group", ReaderGroupConfig.builder().disableAutomaticCheckpoints()
            .stream(Stream.of(SCOPE, STREAM3), offset30SC, offset60SC).build());
    final ReaderGroup rg = groupManager.getReaderGroup("group");
    // Create a reader
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", "group", serializer, ReaderConfig.builder().build());
    // 2. Verify if endStreamCut configuration is enforced.
    readAndVerify(reader, 2);
    // The following read should not return events 3, 4 due to the endStreamCut configuration.
    Assert.assertNull("Null is expected", reader.readNextEvent(2000).getEvent());
    truncateStream(STREAM3, offset60SC);
    // Truncation should not affect the reader as it is already post the truncation point.
    Assert.assertNull("Null is expected", reader.readNextEvent(2000).getEvent());
    // Reset RG with startStreamCut which is already truncated.
    rg.resetReaderGroup(ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(SCOPE, STREAM3), offset30SC, StreamCut.UNBOUNDED).build());
    verifyReinitializationRequiredException(reader);
    // Create a reader
    @Cleanup EventStreamReader<String> reader2 = clientFactory.createReader("readerId2", "group", serializer, ReaderConfig.builder().build());
    assertThrows(TruncatedDataException.class, () -> reader2.readNextEvent(10000));
    // A subsequent read should return the data present after the truncation point; event 3 is returned here
    // since the stream was truncated at offset 60 (2 * 30).
    readAndVerify(reader2, 3);
}
Also used : ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) StreamCut(io.pravega.client.stream.StreamCut) ReaderGroup(io.pravega.client.stream.ReaderGroup) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) Cleanup(lombok.Cleanup) Test(org.junit.Test)
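
The truncateStream(...) call above is a test-local helper; at the client API level the same operation goes through StreamManager.truncateStream, which Example 40 below also uses. A minimal, self-contained sketch, assuming the scope and stream already exist and the StreamCut was obtained as in the test:

import io.pravega.client.ClientConfig;
import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.StreamCut;
import java.net.URI;

// Illustrative helper, not part of BoundedStreamReaderTest.
final class TruncateAtStreamCut {

    // Truncate the stream at the given StreamCut. Reads positioned before the cut will hit
    // TruncatedDataException, while readers already past the cut are unaffected (see the assertions above).
    static boolean truncate(URI controllerUri, String scope, String streamName, StreamCut streamCut) {
        try (StreamManager streamManager = StreamManager.create(
                ClientConfig.builder().controllerURI(controllerUri).build())) {
            return streamManager.truncateStream(scope, streamName, streamCut);
        }
    }
}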

Example 39 with StreamCut

Use of io.pravega.client.stream.StreamCut in project pravega by pravega.

From the class BoundedStreamReaderTest, method testReaderGroupWithSameBounds.

@Test(timeout = 60000)
public void testReaderGroupWithSameBounds() throws Exception {
    createScope(SCOPE);
    createStream(STREAM1);
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, ClientConfig.builder().controllerURI(controllerUri).build());
    @Cleanup EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM1, serializer, EventWriterConfig.builder().build());
    // 1. Prep the stream with data.
    // Write events with event size of 30
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(1)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(2)).get();
    // 2. Create a StreamCut pointing to offset 30L.
    StreamCut streamCut = getStreamCut(STREAM1, 30L, 0);
    // 3. Create a ReaderGroup where the lower and upper bound are the same.
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    groupManager.createReaderGroup("group", ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(SCOPE, STREAM1), streamCut, streamCut).build());
    // 4. Create a reader
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", "group", serializer, ReaderConfig.builder().build());
    // 5. Verify if configuration is enforced.
    Assert.assertNull("Null is expected", reader.readNextEvent(1000).getEvent());
}
Also used : ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) StreamCut(io.pravega.client.stream.StreamCut) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) Cleanup(lombok.Cleanup) Test(org.junit.Test)

Example 40 with StreamCut

Use of io.pravega.client.stream.StreamCut in project pravega by pravega.

From the class ControllerMetricsTest, method streamMetricsTest.

/**
 * This test verifies that the appropriate metrics for Stream operations are updated correctly (counters, latency
 * histograms). Note that this test performs "at least" assertions on metrics because, in an environment where
 * concurrent tests are running, metrics may also be updated by other tests.
 */
@Test(timeout = 300000)
public void streamMetricsTest() throws Exception {
    // Use a unique scope to improve test isolation.
    final String scope = "controllerMetricsTestScope" + RandomFactory.getSeed();
    final String streamName = "controllerMetricsTestStream";
    final String readerGroupName = "RGControllerMetricsTestStream";
    final int parallelism = 4;
    final int eventsWritten = 10;
    int iterations = 3;
    // At this point, we have at least 6 internal streams.
    StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(parallelism)).build();
    @Cleanup StreamManager streamManager = StreamManager.create(controllerURI);
    streamManager.createScope(scope);
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, ClientConfig.builder().controllerURI(controllerURI).build());
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, controllerURI);
    for (int i = 0; i < iterations; i++) {
        final String iterationStreamName = streamName + i;
        final String iterationReaderGroupName = readerGroupName + RandomFactory.getSeed();
        // Check that the number of streams in metrics has been incremented.
        streamManager.createStream(scope, iterationStreamName, streamConfiguration);
        Counter createdStreamsCounter = MetricRegistryUtils.getCounter(CREATE_STREAM);
        AssertExtensions.assertGreaterThanOrEqual("The counter of created streams", i, (long) createdStreamsCounter.count());
        groupManager.createReaderGroup(iterationReaderGroupName, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(scope + "/" + iterationStreamName).build());
        for (long j = 1; j < iterations + 1; j++) {
            @Cleanup ReaderGroup readerGroup = groupManager.getReaderGroup(iterationReaderGroupName);
            // Update the Stream and check that the number of updated streams and per-stream updates is incremented.
            streamManager.updateStream(scope, iterationStreamName, streamConfiguration);
            Counter updatedStreamsCounter = MetricRegistryUtils.getCounter(globalMetricName(UPDATE_STREAM));
            Counter streamUpdatesCounter = MetricRegistryUtils.getCounter(UPDATE_STREAM, streamTags(scope, iterationStreamName));
            Assert.assertTrue(iterations * i + j <= updatedStreamsCounter.count());
            Assert.assertTrue(j == streamUpdatesCounter.count());
            // Read and write some events.
            writeEvents(clientFactory, iterationStreamName, eventsWritten);
            Futures.allOf(readEvents(clientFactory, iterationReaderGroupName, parallelism));
            // Get a StreamCut for truncating the Stream.
            StreamCut streamCut = readerGroup.generateStreamCuts(executorService()).join().get(Stream.of(scope, iterationStreamName));
            // Truncate the Stream and check that the number of truncated Streams and per-Stream truncations is incremented.
            streamManager.truncateStream(scope, iterationStreamName, streamCut);
            Counter streamTruncationCounter = MetricRegistryUtils.getCounter(globalMetricName(TRUNCATE_STREAM));
            Counter perStreamTruncationCounter = MetricRegistryUtils.getCounter(TRUNCATE_STREAM, streamTags(scope, iterationStreamName));
            Assert.assertTrue(iterations * i + j <= streamTruncationCounter.count());
            Assert.assertTrue(j == perStreamTruncationCounter.count());
        }
        // Check metrics accounting for sealed and deleted streams.
        streamManager.sealStream(scope, iterationStreamName);
        Counter streamSealCounter = MetricRegistryUtils.getCounter(SEAL_STREAM);
        Assert.assertTrue(i + 1 <= streamSealCounter.count());
        streamManager.deleteStream(scope, iterationStreamName);
        Counter streamDeleteCounter = MetricRegistryUtils.getCounter(DELETE_STREAM);
        Assert.assertTrue(i + 1 <= streamDeleteCounter.count());
    }
    // Keep each assertion on a separate line so a failure reports more information.
    Timer latencyValues1 = MetricRegistryUtils.getTimer(CREATE_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues1);
    AssertExtensions.assertGreaterThanOrEqual("Number of iterations and latency count do not match.", iterations, latencyValues1.count());
    Timer latencyValues2 = MetricRegistryUtils.getTimer(SEAL_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues2);
    Assert.assertEquals(iterations, latencyValues2.count());
    Timer latencyValues3 = MetricRegistryUtils.getTimer(DELETE_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues3);
    Assert.assertEquals(iterations, latencyValues3.count());
    Timer latencyValues4 = MetricRegistryUtils.getTimer(UPDATE_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues4);
    Assert.assertEquals(iterations * iterations, latencyValues4.count());
    Timer latencyValues5 = MetricRegistryUtils.getTimer(TRUNCATE_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues5);
    Assert.assertEquals(iterations * iterations, latencyValues5.count());
}
Also used : ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) Counter(io.micrometer.core.instrument.Counter) StreamCut(io.pravega.client.stream.StreamCut) Timer(io.micrometer.core.instrument.Timer) StreamManager(io.pravega.client.admin.StreamManager) ReaderGroup(io.pravega.client.stream.ReaderGroup) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) Cleanup(lombok.Cleanup) Test(org.junit.Test)
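
Distilled from the loop body above, a minimal sketch of generating StreamCuts from a ReaderGroup's current positions and truncating each stream at its cut. The helper name and executor parameter are illustrative; generateStreamCuts and truncateStream are the same client APIs the test calls.

import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.ReaderGroup;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamCut;
import java.util.Map;
import java.util.concurrent.ScheduledExecutorService;

// Illustrative helper, not part of ControllerMetricsTest.
final class TruncateAtReaderPositions {

    // Ask the reader group for its current positions as StreamCuts, then truncate each stream
    // at the corresponding cut, mirroring what the test does once per iteration.
    static void truncateAtCurrentPositions(ReaderGroup readerGroup, StreamManager streamManager,
                                           ScheduledExecutorService executor) {
        Map<Stream, StreamCut> cuts = readerGroup.generateStreamCuts(executor).join();
        cuts.forEach((stream, cut) ->
                streamManager.truncateStream(stream.getScope(), stream.getStreamName(), cut));
    }
}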

Aggregations

StreamCut (io.pravega.client.stream.StreamCut): 79
Stream (io.pravega.client.stream.Stream): 65
Test (org.junit.Test): 65
Segment (io.pravega.client.segment.impl.Segment): 48
ReaderGroupConfig (io.pravega.client.stream.ReaderGroupConfig): 47
HashMap (java.util.HashMap): 39
StreamCutImpl (io.pravega.client.stream.impl.StreamCutImpl): 37
Cleanup (lombok.Cleanup): 30
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 26
Map (java.util.Map): 25
ReaderGroupManager (io.pravega.client.admin.ReaderGroupManager): 21
ScalingPolicy (io.pravega.client.stream.ScalingPolicy): 21
ArgumentMatchers.anyLong (org.mockito.ArgumentMatchers.anyLong): 20
CompletableFuture (java.util.concurrent.CompletableFuture): 19
ReaderGroup (io.pravega.client.stream.ReaderGroup): 18
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString): 18
EventStreamClientFactory (io.pravega.client.EventStreamClientFactory): 17
ClientConfig (io.pravega.client.ClientConfig): 16
Futures (io.pravega.common.concurrent.Futures): 15
AtomicLong (java.util.concurrent.atomic.AtomicLong): 15