
Example 16 with StreamImpl

Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.

From the class WatermarkWorkflowTest, method testWatermarkClient.

@Test(timeout = 10000L)
public void testWatermarkClient() {
    Stream stream = new StreamImpl("scope", "stream");
    SynchronizerClientFactory clientFactory = spy(SynchronizerClientFactory.class);
    @Cleanup MockRevisionedStreamClient revisionedClient = new MockRevisionedStreamClient();
    doAnswer(x -> revisionedClient).when(clientFactory).createRevisionedStreamClient(anyString(), any(), any());
    @Cleanup PeriodicWatermarking.WatermarkClient client = new PeriodicWatermarking.WatermarkClient(stream, clientFactory);
    // iteration 1 ==> null -> w1
    client.reinitialize();
    // There is no watermark in the stream. All values should be null and all writers active and participating.
    assertEquals(revisionedClient.getMark(), MockRevision.EMPTY);
    assertTrue(revisionedClient.watermarks.isEmpty());
    assertEquals(client.getPreviousWatermark(), Watermark.EMPTY);
    Map.Entry<String, WriterMark> entry0 = new AbstractMap.SimpleEntry<>("writerId", new WriterMark(0L, ImmutableMap.of()));
    Map.Entry<String, WriterMark> entry1 = new AbstractMap.SimpleEntry<>("writerId", new WriterMark(1L, ImmutableMap.of()));
    Map.Entry<String, WriterMark> entry2 = new AbstractMap.SimpleEntry<>("writerId", new WriterMark(2L, ImmutableMap.of()));
    Map.Entry<String, WriterMark> entry3 = new AbstractMap.SimpleEntry<>("writerId", new WriterMark(3L, ImmutableMap.of()));
    Map.Entry<String, WriterMark> entry4 = new AbstractMap.SimpleEntry<>("writerId", new WriterMark(4L, ImmutableMap.of()));
    Map.Entry<String, WriterMark> entry5 = new AbstractMap.SimpleEntry<>("writerId", new WriterMark(5L, ImmutableMap.of()));
    assertTrue(client.isWriterActive(entry0, 0L));
    assertTrue(client.isWriterParticipating(0L));
    Watermark first = new Watermark(1L, 2L, ImmutableMap.of());
    client.completeIteration(first);
    // iteration 2 : do not emit ==> w1 -> w1
    client.reinitialize();
    // There is one watermark. All writers should be active, and writers ahead of the last watermark should be participating.
    assertEquals(revisionedClient.getMark(), MockRevision.EMPTY);
    assertEquals(revisionedClient.watermarks.size(), 1);
    assertEquals(client.getPreviousWatermark(), first);
    assertTrue(client.isWriterActive(entry2, 0L));
    assertFalse(client.isWriterActive(entry1, 0L));
    assertTrue(client.isWriterTracked(entry1.getKey()));
    assertFalse(client.isWriterParticipating(1L));
    assertTrue(client.isWriterParticipating(2L));
    // don't emit a watermark. Everything stays the same as before.
    client.completeIteration(null);
    // iteration 3 : emit ==> w1 -> w1 w2
    client.reinitialize();
    // There is one watermark. All writers should be active, and writers ahead of the last watermark should be participating.
    assertEquals(revisionedClient.getMark(), MockRevision.EMPTY);
    assertEquals(revisionedClient.watermarks.size(), 1);
    assertEquals(client.getPreviousWatermark(), first);
    assertTrue(client.isWriterActive(entry2, 0L));
    assertFalse(client.isWriterParticipating(1L));
    assertTrue(client.isWriterParticipating(2L));
    // emit second watermark
    Watermark second = new Watermark(2L, 3L, ImmutableMap.of());
    client.completeIteration(second);
    // iteration 4: do not emit ==> w1 w2 -> w1 w2
    client.reinitialize();
    assertEquals(revisionedClient.getMark(), revisionedClient.watermarks.get(0).getKey());
    assertEquals(2, revisionedClient.watermarks.size());
    assertEquals(client.getPreviousWatermark(), second);
    assertFalse(client.isWriterActive(entry2, 0L));
    assertTrue(client.isWriterTracked(entry2.getKey()));
    assertTrue(client.isWriterActive(entry3, 0L));
    assertFalse(client.isWriterParticipating(2L));
    assertTrue(client.isWriterParticipating(3L));
    assertTrue(client.isWriterActive(entry0, 1000L));
    assertTrue(client.isWriterTracked(entry0.getKey()));
    // don't emit a watermark but complete this iteration.
    client.completeIteration(null);
    // iteration 6: emit ==> w1 w2 -> w1 w2 w3
    client.reinitialize();
    assertEquals(revisionedClient.getMark(), revisionedClient.watermarks.get(0).getKey());
    assertEquals(2, revisionedClient.watermarks.size());
    assertEquals(client.getPreviousWatermark(), second);
    assertTrue(client.isWriterActive(entry3, 0L));
    assertFalse(client.isWriterTracked(entry3.getKey()));
    assertFalse(client.isWriterParticipating(2L));
    assertTrue(client.isWriterParticipating(3L));
    // emit third watermark
    Watermark third = new Watermark(3L, 4L, ImmutableMap.of());
    client.completeIteration(third);
    // iteration 7: do not emit ==> w1 w2 w3 -> w1 w2 w3
    client.reinitialize();
    // active writers should be ahead of the first watermark; participating writers should be ahead of the second watermark
    assertEquals(revisionedClient.getMark(), revisionedClient.watermarks.get(1).getKey());
    assertEquals(3, revisionedClient.watermarks.size());
    assertEquals(client.getPreviousWatermark(), third);
    assertFalse(client.isWriterActive(entry3, 0L));
    assertTrue(client.isWriterActive(entry4, 0L));
    assertFalse(client.isWriterParticipating(3L));
    assertTrue(client.isWriterParticipating(4L));
    client.completeIteration(null);
    // iteration 8 : emit ==> w2 w3 -> w2 w3 w4
    client.reinitialize();
    assertEquals(revisionedClient.getMark(), revisionedClient.watermarks.get(1).getKey());
    // window = w2 w3
    assertEquals(revisionedClient.watermarks.size(), 3);
    assertEquals(client.getPreviousWatermark(), third);
    assertFalse(client.isWriterActive(entry3, 0L));
    assertTrue(client.isWriterActive(entry4, 0L));
    assertFalse(client.isWriterParticipating(3L));
    assertTrue(client.isWriterParticipating(4L));
    // emit fourth watermark
    Watermark fourth = new Watermark(4L, 5L, ImmutableMap.of());
    client.completeIteration(fourth);
    // iteration 9: do not emit ==> w1 w2 w3 w4 -> w1 w2 w3 w4; check writer timeout
    client.reinitialize();
    assertEquals(revisionedClient.getMark(), revisionedClient.watermarks.get(2).getKey());
    assertEquals(revisionedClient.watermarks.size(), 4);
    assertEquals(client.getPreviousWatermark(), fourth);
    assertFalse(client.isWriterActive(entry3, 0L));
    assertTrue(client.isWriterTracked(entry4.getKey()));
    assertFalse(client.isWriterParticipating(4L));
    assertTrue(client.isWriterParticipating(5L));
    // verify that writer is active if we specify a higher timeout
    assertTrue(client.isWriterActive(entry1, 1000L));
    assertTrue(client.isWriterTracked(entry1.getKey()));
    // now that the writer is being tracked, it is reported inactive once its timeout elapses
    assertFalse(Futures.delayedTask(() -> client.isWriterActive(entry1, 1L), Duration.ofSeconds(1), executor).join());
    assertTrue(client.isWriterTracked(entry1.getKey()));
    // don't emit a watermark but complete this iteration. This should shrink the window again.
    client.completeIteration(null);
    // iteration 10
    client.reinitialize();
    assertEquals(revisionedClient.getMark(), revisionedClient.watermarks.get(2).getKey());
    assertEquals(revisionedClient.watermarks.size(), 4);
    assertEquals(client.getPreviousWatermark(), fourth);
    assertFalse(client.isWriterActive(entry4, 0L));
    assertTrue(client.isWriterActive(entry5, 0L));
    assertFalse(client.isWriterParticipating(4L));
    assertTrue(client.isWriterParticipating(5L));
}
Also used: WriterMark(io.pravega.controller.store.stream.records.WriterMark) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) Cleanup(lombok.Cleanup) SynchronizerClientFactory(io.pravega.client.SynchronizerClientFactory) StreamImpl(io.pravega.client.stream.impl.StreamImpl) Stream(io.pravega.client.stream.Stream) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) AbstractMap(java.util.AbstractMap) Watermark(io.pravega.shared.watermarks.Watermark) Test(org.junit.Test)
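
For context, StreamImpl is the plain value object behind the Stream interface: it pairs a scope with a stream name. A minimal sketch, assuming the standard Pravega client API (getScope, getStreamName, getScopedName):

import io.pravega.client.stream.Stream;
import io.pravega.client.stream.impl.StreamImpl;

public class StreamImplSketch {
    public static void main(String[] args) {
        Stream stream = new StreamImpl("scope", "stream");
        System.out.println(stream.getScope());      // scope
        System.out.println(stream.getStreamName()); // stream
        // The scoped name joins both parts and uniquely identifies the stream.
        System.out.println(stream.getScopedName()); // scope/stream
    }
}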

Example 17 with StreamImpl

Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.

From the class WatermarkWorkflowTest, method testWatermarkingWorkflow.

@Test(timeout = 30000L)
public void testWatermarkingWorkflow() {
    SynchronizerClientFactory clientFactory = spy(SynchronizerClientFactory.class);
    ConcurrentHashMap<String, MockRevisionedStreamClient> revisionedStreamClientMap = new ConcurrentHashMap<>();
    doAnswer(x -> {
        String streamName = x.getArgument(0);
        // Lazily create and cache one MockRevisionedStreamClient per stream name.
        return revisionedStreamClientMap.computeIfAbsent(streamName, s -> new MockRevisionedStreamClient());
    }).when(clientFactory).createRevisionedStreamClient(anyString(), any(), any());
    @Cleanup PeriodicWatermarking periodicWatermarking = new PeriodicWatermarking(streamMetadataStore, bucketStore, sp -> clientFactory, executor, new RequestTracker(false));
    String streamName = "stream";
    String scope = "scope";
    streamMetadataStore.createScope(scope, null, executor).join();
    streamMetadataStore.createStream(scope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(3)).timestampAggregationTimeout(10000L).build(), System.currentTimeMillis(), null, executor).join();
    streamMetadataStore.setState(scope, streamName, State.ACTIVE, null, executor).join();
    // set minimum number of segments to 1
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).timestampAggregationTimeout(10000L).build();
    streamMetadataStore.startUpdateConfiguration(scope, streamName, config, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = streamMetadataStore.getConfigurationRecord(scope, streamName, null, executor).join();
    streamMetadataStore.completeUpdateConfiguration(scope, streamName, configRecord, null, executor).join();
    // 2. note writer1, writer2, writer3 marks
    // writer 1 reports segments 0, 1.
    // writer 2 reports segments 1, 2.
    // writer 3 reports segments 0, 2.
    String writer1 = "writer1";
    Map<Long, Long> map1 = ImmutableMap.of(0L, 100L, 1L, 200L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 100L, map1, null, executor).join();
    String writer2 = "writer2";
    Map<Long, Long> map2 = ImmutableMap.of(1L, 100L, 2L, 200L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 101L, map2, null, executor).join();
    String writer3 = "writer3";
    Map<Long, Long> map3 = ImmutableMap.of(2L, 100L, 0L, 200L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer3, 102L, map3, null, executor).join();
    // 3. run watermarking workflow.
    StreamImpl stream = new StreamImpl(scope, streamName);
    periodicWatermarking.watermark(stream).join();
    // verify that a watermark has been emitted.
    // this should emit a watermark that contains all three segments with offsets = 200L
    // and timestamp = 100L
    MockRevisionedStreamClient revisionedClient = revisionedStreamClientMap.get(NameUtils.getMarkStreamForStream(streamName));
    assertEquals(revisionedClient.watermarks.size(), 1);
    Watermark watermark = revisionedClient.watermarks.get(0).getValue();
    assertEquals(watermark.getLowerTimeBound(), 100L);
    assertEquals(watermark.getStreamCut().size(), 3);
    assertEquals(getSegmentOffset(watermark, 0L), 200L);
    assertEquals(getSegmentOffset(watermark, 1L), 200L);
    assertEquals(getSegmentOffset(watermark, 2L), 200L);
    // send positions only on segments 1 and 2; nothing on segment 0.
    map1 = ImmutableMap.of(1L, 300L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 200L, map1, null, executor).join();
    map2 = ImmutableMap.of(1L, 100L, 2L, 300L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 201L, map2, null, executor).join();
    map3 = ImmutableMap.of(2L, 300L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer3, 202L, map3, null, executor).join();
    // run watermark workflow. this will emit a watermark with time = 200L and streamcut = 0 -> 200L, 1 -> 300L, 2 -> 300L
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 2);
    watermark = revisionedClient.watermarks.get(1).getValue();
    assertEquals(watermark.getLowerTimeBound(), 200L);
    assertEquals(watermark.getStreamCut().size(), 3);
    assertEquals(getSegmentOffset(watermark, 0L), 200L);
    assertEquals(getSegmentOffset(watermark, 1L), 300L);
    assertEquals(getSegmentOffset(watermark, 2L), 300L);
    // scale stream 0, 1, 2 -> 3, 4
    scaleStream(streamName, scope);
    // writer 1 reports segments 0, 1.
    // writer 2 reports segments 1, 2.
    // writer 3 reports segment 3.
    map1 = ImmutableMap.of(0L, 300L, 1L, 400L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 302L, map1, null, executor).join();
    map2 = ImmutableMap.of(1L, 100L, 2L, 400L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 301L, map2, null, executor).join();
    long segment3 = NameUtils.computeSegmentId(3, 1);
    long segment4 = NameUtils.computeSegmentId(4, 1);
    map3 = ImmutableMap.of(segment3, 100L);
    // writer 3 has lowest reported time.
    streamMetadataStore.noteWriterMark(scope, streamName, writer3, 300L, map3, null, executor).join();
    // run watermark workflow. this will emit a watermark with time = 300L and streamcut = 3 -> 100L, 4 -> 0L
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 3);
    watermark = revisionedClient.watermarks.get(2).getValue();
    assertEquals(watermark.getLowerTimeBound(), 300L);
    assertEquals(watermark.getStreamCut().size(), 2);
    assertEquals(getSegmentOffset(watermark, segment3), 100L);
    assertEquals(getSegmentOffset(watermark, segment4), 0L);
    // report complete positions from writers.
    // writer 1 reports 0, 1, 2
    // writer 2 reports 0, 1, 2
    // writer 3 doesn't report.
    map1 = ImmutableMap.of(0L, 400L, 1L, 400L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 400L, map1, null, executor).join();
    map2 = ImmutableMap.of(1L, 100L, 2L, 400L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 401L, map2, null, executor).join();
    // run watermark workflow. there shouldn't be a watermark emitted because writer 3 is active and has not reported a time.
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 3);
    // even though writer3 is excluded from computation, its mark is still not removed because it is still active
    WriterMark writer3Mark = streamMetadataStore.getWriterMark(scope, streamName, writer3, null, executor).join();
    assertTrue(writer3Mark.isAlive());
    assertEquals(writer3Mark.getTimestamp(), 300L);
    // report shutdown of writer 3
    streamMetadataStore.shutdownWriter(scope, streamName, writer3, null, executor).join();
    writer3Mark = streamMetadataStore.getWriterMark(scope, streamName, writer3, null, executor).join();
    assertFalse(writer3Mark.isAlive());
    assertEquals(writer3Mark.getTimestamp(), 300L);
    // now a watermark should be generated. Time should be advanced. But watermark's stream cut is already ahead of writer's
    // positions so stream cut should not advance.
    // Also writer 3 being inactive and shutdown, should be removed.
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 4);
    watermark = revisionedClient.watermarks.get(3).getValue();
    assertEquals(watermark.getLowerTimeBound(), 400L);
    assertEquals(watermark.getStreamCut().size(), 2);
    assertEquals(getSegmentOffset(watermark, segment3), 100L);
    assertEquals(getSegmentOffset(watermark, segment4), 0L);
    AssertExtensions.assertFutureThrows("Writer 3 should have been removed from store", streamMetadataStore.getWriterMark(scope, streamName, writer3, null, executor), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
    // writer 1, 2 and 3 report marks. With writer 3 reporting mark on segment 4. Writer3 will get added again
    map1 = ImmutableMap.of(0L, 500L, 1L, 500L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer1, 500L, map1, null, executor).join();
    map2 = ImmutableMap.of(1L, 100L, 2L, 500L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer2, 501L, map2, null, executor).join();
    map3 = ImmutableMap.of(segment4, 500L);
    streamMetadataStore.noteWriterMark(scope, streamName, writer3, 502L, map3, null, executor).join();
    // run watermarking workflow. It should generate watermark that includes segments 3 -> 100L and 4 -> 500L with time 500L
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 5);
    watermark = revisionedClient.watermarks.get(4).getValue();
    assertEquals(watermark.getLowerTimeBound(), 500L);
    assertEquals(watermark.getStreamCut().size(), 2);
    assertEquals(getSegmentOffset(watermark, segment3), 100L);
    assertEquals(getSegmentOffset(watermark, segment4), 500L);
}
Also used: WriterMark(io.pravega.controller.store.stream.records.WriterMark) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) RequestTracker(io.pravega.common.tracing.RequestTracker) Cleanup(lombok.Cleanup) StoreException(io.pravega.controller.store.stream.StoreException) SynchronizerClientFactory(io.pravega.client.SynchronizerClientFactory) StreamImpl(io.pravega.client.stream.impl.StreamImpl) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) StreamConfigurationRecord(io.pravega.controller.store.stream.records.StreamConfigurationRecord) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Watermark(io.pravega.shared.watermarks.Watermark) Test(org.junit.Test)
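
The watermark values asserted above follow a simple rule: the lower time bound is the minimum time reported across participating writers, and the stream cut takes, per segment, the maximum offset any writer reported. A simplified, illustrative sketch of that arithmetic (WatermarkMath and its methods are hypothetical names; the real PeriodicWatermarking additionally resolves positions across scales and evicts inactive writers):

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

class WatermarkMath {
    // Lower time bound: the minimum timestamp across the writers' reported marks.
    static long lowerTimeBound(Collection<Long> writerTimes) {
        return writerTimes.stream().mapToLong(Long::longValue).min().orElseThrow();
    }

    // Stream cut: for each segment, the maximum offset any writer reported.
    static Map<Long, Long> streamCut(Collection<Map<Long, Long>> writerPositions) {
        Map<Long, Long> cut = new HashMap<>();
        for (Map<Long, Long> positions : writerPositions) {
            positions.forEach((segment, offset) -> cut.merge(segment, offset, Math::max));
        }
        return cut;
    }
}

With the first round of marks (times 100L, 101L, 102L and the three position maps), this yields time 100L and cut {0 -> 200L, 1 -> 200L, 2 -> 200L}, which is exactly what the test asserts for the first watermark.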

Example 18 with StreamImpl

Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.

From the class ControllerServiceTest, method getSegmentsAtTime.

private static void getSegmentsAtTime(Controller controller, final String scope, final String streamName) throws InterruptedException, ExecutionException {
    CompletableFuture<Map<Segment, Long>> segments = controller.getSegmentsAtTime(new StreamImpl(scope, streamName), System.currentTimeMillis());
    assertFalse("FAILURE: Fetching positions at given timestamp failed", segments.get().isEmpty());
}
Also used: StreamImpl(io.pravega.client.stream.impl.StreamImpl) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap)

Example 19 with StreamImpl

Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.

From the class ControllerServiceTest, method getSegmentsBeforeCreation.

private static void getSegmentsBeforeCreation(Controller controller, final String scope, final String streamName) throws InterruptedException, ExecutionException {
    CompletableFuture<Map<Segment, Long>> segments = controller.getSegmentsAtTime(new StreamImpl(scope, streamName), System.currentTimeMillis() - 36000);
    assertFalse("FAILURE: Fetching positions at given time before stream creation failed", segments.get().size() == 1);
}
Also used: StreamImpl(io.pravega.client.stream.impl.StreamImpl) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap)
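
Both helpers above wrap Controller.getSegmentsAtTime, which resolves a stream plus timestamp to a map from Segment to read offset. A minimal usage sketch under the same assumptions (an already-connected Controller instance); printSegmentsAtTime is a hypothetical name:

private static void printSegmentsAtTime(Controller controller, String scope, String streamName)
        throws InterruptedException, ExecutionException {
    Map<Segment, Long> positions =
            controller.getSegmentsAtTime(new StreamImpl(scope, streamName), System.currentTimeMillis()).get();
    // Each entry maps a segment to the offset a reader positioned at that timestamp would start from.
    positions.forEach((segment, offset) -> System.out.println(segment.getScopedName() + " -> " + offset));
}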

Example 20 with StreamImpl

Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.

From the class LargeEventTest, method testReadWriteScaleStreamSeal.

/**
 * Tests that if a Stream is sealed, no data will continue to be produced by the {@link io.pravega.client.stream.impl.LargeEventWriter}.
 */
@Test(timeout = 60000)
public void testReadWriteScaleStreamSeal() throws ExecutionException, InterruptedException, TimeoutException {
    String readerGroupName = "testLargeEventScaleStreamSealReaderGroup";
    String streamName = "ScaleStreamSeal";
    StreamConfiguration config = StreamConfiguration.builder().retentionPolicy(RetentionPolicy.bySizeBytes(Long.MAX_VALUE)).build();
    createScopeStream(SCOPE_NAME, streamName, config);
    AtomicInteger generation = new AtomicInteger(0);
    // Creates some data to write.
    int events = 1;
    Map<Integer, List<ByteBuffer>> data = generateEventData(NUM_WRITERS, events * generation.getAndIncrement(), events, LARGE_EVENT_SIZE);
    merge(eventsWrittenToPravega, data);
    // Perform a basic read-write cycle.
    Queue<ByteBuffer> reads = readWriteCycle(streamName, readerGroupName, eventsWrittenToPravega);
    validateEventReads(reads, eventsWrittenToPravega);
    // Define the scale operation.
    Runnable scale = () -> {
        Stream stream = new StreamImpl(SCOPE_NAME, streamName);
        try {
            StreamSegments segments = controller.getCurrentSegments(SCOPE_NAME, streamName).get();
            List<Long> ids = segments.getSegments().stream().map(Segment::getSegmentId).collect(Collectors.toList());
            controller.startScale(stream, ids, Map.of(0.0, 0.5, 0.5, 1.0)).get();
        } catch (InterruptedException | ExecutionException e) {
            throw new RuntimeException(e);
        }
    };
    // Reset reads.
    eventReadCount.set(0);
    // Try to scale the segment after two `send` calls.
    AtomicInteger sendCount = new AtomicInteger(0);
    Supplier<Boolean> predicate = () -> sendCount.getAndIncrement() == CLOSE_WRITE_COUNT;
    AtomicReference<Boolean> latch = new AtomicReference<>(true);
    try (ConnectionExporter connectionFactory = new ConnectionExporter(ClientConfig.builder().build(), latch, scale, predicate);
        ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE_NAME, controller, connectionFactory);
        ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(SCOPE_NAME, controller, clientFactory)) {
        // Next set of writes.
        data = generateEventData(NUM_WRITERS, events * generation.getAndIncrement(), events, LARGE_EVENT_SIZE);
        merge(eventsWrittenToPravega, data);
        // Start writing events to the stream.
        val writers = createEventWriters(streamName, NUM_WRITERS, clientFactory, data);
        Futures.allOf(writers).get();
        // Wait for the scale event.
        TestUtils.await(() -> !latch.get(), 200, 2000);
        // Create a ReaderGroup.
        createReaderGroup(readerGroupName, readerGroupManager, streamName);
        // Create Readers.
        val readers = createEventReaders(NUM_READERS, clientFactory, readerGroupName, eventsReadFromPravega);
        stopReadFlag.set(true);
        Futures.allOf(readers).get();
        log.info("Deleting ReaderGroup: {}", readerGroupName);
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    StreamSegments segments = controller.getCurrentSegments(SCOPE_NAME, streamName).get();
    // Make sure that the scale event has happened.
    Assert.assertEquals("Expected 2 StreamSegments.", 2, segments.getSegments().size());
    // This time there are successor segments, so the data should have been accepted.
    validateEventReads(eventsReadFromPravega, eventsWrittenToPravega);
}
Also used: Segment(io.pravega.client.segment.impl.Segment) ClientFactoryImpl(io.pravega.client.stream.impl.ClientFactoryImpl) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) List(java.util.List) ArrayList(java.util.ArrayList) Stream(io.pravega.client.stream.Stream) ExecutionException(java.util.concurrent.ExecutionException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ReaderGroupManagerImpl(io.pravega.client.admin.impl.ReaderGroupManagerImpl) lombok.val(lombok.val) ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) AtomicReference(java.util.concurrent.atomic.AtomicReference) ByteBuffer(java.nio.ByteBuffer) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StreamImpl(io.pravega.client.stream.impl.StreamImpl) StreamSegments(io.pravega.client.stream.impl.StreamSegments) Test(org.junit.Test)
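
A condensed sketch of just the scale step used above, under the same assumptions as the test (the existing controller, SCOPE_NAME, and streamName); the new key ranges passed to startScale are expected to cover the key space of the sealed segments:

    Stream stream = new StreamImpl(SCOPE_NAME, streamName);
    StreamSegments current = controller.getCurrentSegments(SCOPE_NAME, streamName).get();
    // Seal every currently open segment and replace them with two successors,
    // one per half of the routing-key space.
    List<Long> toSeal = current.getSegments().stream().map(Segment::getSegmentId).collect(Collectors.toList());
    controller.startScale(stream, toSeal, Map.of(0.0, 0.5, 0.5, 1.0)).get();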

Aggregations

StreamImpl (io.pravega.client.stream.impl.StreamImpl): 74 uses
Test (org.junit.Test): 50 uses
Stream (io.pravega.client.stream.Stream): 47 uses
Cleanup (lombok.Cleanup): 36 uses
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 32 uses
HashMap (java.util.HashMap): 32 uses
ClientFactoryImpl (io.pravega.client.stream.impl.ClientFactoryImpl): 22 uses
Map (java.util.Map): 22 uses
ReaderGroupManager (io.pravega.client.admin.ReaderGroupManager): 21 uses
SocketConnectionFactoryImpl (io.pravega.client.connection.impl.SocketConnectionFactoryImpl): 21 uses
Controller (io.pravega.client.control.impl.Controller): 21 uses
ClientConfig (io.pravega.client.ClientConfig): 20 uses
ReaderGroupManagerImpl (io.pravega.client.admin.impl.ReaderGroupManagerImpl): 18 uses
Segment (io.pravega.client.segment.impl.Segment): 18 uses
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 18 uses
ConnectionFactory (io.pravega.client.connection.impl.ConnectionFactory): 16 uses
Slf4j (lombok.extern.slf4j.Slf4j): 14 uses
ScalingPolicy (io.pravega.client.stream.ScalingPolicy): 13 uses
CompletableFuture (java.util.concurrent.CompletableFuture): 12 uses
Before (org.junit.Before): 12 uses