
Example 16 with StreamManager

use of io.pravega.client.admin.StreamManager in project pravega by pravega.

the class ReadWriteTest method readWriteTest.

@Test(timeout = 60000)
public void readWriteTest() throws InterruptedException, ExecutionException {
    String scope = "testMultiReaderWriterScope";
    String readerGroupName = "testMultiReaderWriterReaderGroup";
    // 20 readers -> 20 stream segments (to get maximum read parallelism)
    ScalingPolicy scalingPolicy = ScalingPolicy.fixed(20);
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
    eventsReadFromPravega = new ConcurrentLinkedQueue<>();
    // data used by each of the writers.
    eventData = new AtomicLong();
    // used by readers to maintain a count of events.
    eventReadCount = new AtomicLong();
    stopReadFlag = new AtomicBoolean(false);
    ClientConfig clientConfig = ClientConfig.builder().build();
    try (ConnectionPool cp = new ConnectionPoolImpl(clientConfig, new SocketConnectionFactoryImpl(clientConfig));
        StreamManager streamManager = new StreamManagerImpl(controller, cp)) {
        // create a scope
        Boolean createScopeStatus = streamManager.createScope(scope);
        log.info("Create scope status {}", createScopeStatus);
        // create a stream
        Boolean createStreamStatus = streamManager.createStream(scope, STREAM_NAME, config);
        log.info("Create stream status {}", createStreamStatus);
    }
    try (ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
        ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
        ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory)) {
        // start writing events to the stream
        log.info("Creating {} writers", NUM_WRITERS);
        List<CompletableFuture<Void>> writerList = new ArrayList<>();
        for (int i = 0; i < NUM_WRITERS; i++) {
            log.info("Starting writer{}", i);
            writerList.add(startNewWriter(eventData, clientFactory));
        }
        // create a reader group
        log.info("Creating Reader group : {}", readerGroupName);
        readerGroupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).build());
        log.info("Reader group name {} ", readerGroupManager.getReaderGroup(readerGroupName).getGroupName());
        log.info("Reader group scope {}", readerGroupManager.getReaderGroup(readerGroupName).getScope());
        // create readers
        log.info("Creating {} readers", NUM_READERS);
        List<CompletableFuture<Void>> readerList = new ArrayList<>();
        String readerName = "reader" + RandomFactory.create().nextInt(Integer.MAX_VALUE);
        // start reading events
        for (int i = 0; i < NUM_READERS; i++) {
            log.info("Starting reader{}", i);
            readerList.add(startNewReader(readerName + i, clientFactory, readerGroupName, eventsReadFromPravega, eventData, eventReadCount, stopReadFlag));
        }
        // wait for writers completion
        Futures.allOf(writerList).get();
        ExecutorServiceHelpers.shutdown(writerPool);
        // set stop read flag to true
        stopReadFlag.set(true);
        // wait for readers completion
        Futures.allOf(readerList).get();
        ExecutorServiceHelpers.shutdown(readerPool);
        // delete readergroup
        log.info("Deleting readergroup {}", readerGroupName);
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    log.info("All writers have stopped. Setting Stop_Read_Flag. Event Written Count:{}, Event Read " + "Count: {}", eventData.get(), eventsReadFromPravega.size());
    assertEquals(TOTAL_NUM_EVENTS, eventsReadFromPravega.size());
    // check unique events.
    assertEquals(TOTAL_NUM_EVENTS, new TreeSet<>(eventsReadFromPravega).size());
    // seal the stream
    CompletableFuture<Boolean> sealStreamStatus = controller.sealStream(scope, STREAM_NAME);
    log.info("Sealing stream {}", STREAM_NAME);
    assertTrue(sealStreamStatus.get());
    // delete the stream
    CompletableFuture<Boolean> deleteStreamStatus = controller.deleteStream(scope, STREAM_NAME);
    log.info("Deleting stream {}", STREAM_NAME);
    assertTrue(deleteStreamStatus.get());
    // delete the scope
    CompletableFuture<Boolean> deleteScopeStatus = controller.deleteScope(scope);
    log.info("Deleting scope {}", scope);
    assertTrue(deleteScopeStatus.get());
    log.info("Read write test succeeds");
}
Also used : ConnectionPool(io.pravega.client.connection.impl.ConnectionPool) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) ConnectionPoolImpl(io.pravega.client.connection.impl.ConnectionPoolImpl) ArrayList(java.util.ArrayList) StreamManagerImpl(io.pravega.client.admin.impl.StreamManagerImpl) SocketConnectionFactoryImpl(io.pravega.client.connection.impl.SocketConnectionFactoryImpl) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicLong(java.util.concurrent.atomic.AtomicLong) ConnectionFactory(io.pravega.client.connection.impl.ConnectionFactory) ClientFactoryImpl(io.pravega.client.stream.impl.ClientFactoryImpl) CompletableFuture(java.util.concurrent.CompletableFuture) StreamManager(io.pravega.client.admin.StreamManager) TreeSet(java.util.TreeSet) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) ClientConfig(io.pravega.client.ClientConfig) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ReaderGroupManagerImpl(io.pravega.client.admin.impl.ReaderGroupManagerImpl) Test(org.junit.Test)
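The example above wires the StreamManager together with an explicit Controller and ConnectionPool, as integration tests typically do. For regular client code the StreamManager.create(URI) factory is sufficient. The following is a minimal, hedged sketch assuming a controller reachable at tcp://localhost:9090; the endpoint, scope, and stream names are placeholders rather than values from the test above.

import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;
import java.net.URI;

public class StreamManagerSketch {
    public static void main(String[] args) {
        // Assumed controller endpoint; adjust to the actual deployment.
        URI controllerURI = URI.create("tcp://localhost:9090");
        try (StreamManager streamManager = StreamManager.create(controllerURI)) {
            // Both calls return true only when the object did not exist before.
            boolean scopeCreated = streamManager.createScope("demoScope");
            boolean streamCreated = streamManager.createStream("demoScope", "demoStream",
                    StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build());
            System.out.println("scope created: " + scopeCreated + ", stream created: " + streamCreated);
        }
    }
}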

Example 17 with StreamManager

use of io.pravega.client.admin.StreamManager in project pravega by pravega.

the class ControllerMetricsTest method streamMetricsTest.

/**
 * This test verifies that the appropriate metrics for Stream operations (counters, latency histograms) are updated
 * correctly. Note that this test performs "at least" assertions on the metrics because, when other tests run
 * concurrently, those tests may also update the same metrics.
 */
@Test(timeout = 300000)
public void streamMetricsTest() throws Exception {
    // Use a unique scope to improve test isolation.
    final String scope = "controllerMetricsTestScope" + RandomFactory.getSeed();
    final String streamName = "controllerMetricsTestStream";
    final String readerGroupName = "RGControllerMetricsTestStream";
    final int parallelism = 4;
    final int eventsWritten = 10;
    int iterations = 3;
    // At this point, we have at least 6 internal streams.
    StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(parallelism)).build();
    @Cleanup StreamManager streamManager = StreamManager.create(controllerURI);
    streamManager.createScope(scope);
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, ClientConfig.builder().controllerURI(controllerURI).build());
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, controllerURI);
    for (int i = 0; i < iterations; i++) {
        final String iterationStreamName = streamName + i;
        final String iterationReaderGroupName = readerGroupName + RandomFactory.getSeed();
        // Check that the number of streams in metrics has been incremented.
        streamManager.createStream(scope, iterationStreamName, streamConfiguration);
        Counter createdStreamsCounter = MetricRegistryUtils.getCounter(CREATE_STREAM);
        AssertExtensions.assertGreaterThanOrEqual("The counter of created streams", i, (long) createdStreamsCounter.count());
        groupManager.createReaderGroup(iterationReaderGroupName, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(scope + "/" + iterationStreamName).build());
        for (long j = 1; j < iterations + 1; j++) {
            @Cleanup ReaderGroup readerGroup = groupManager.getReaderGroup(iterationReaderGroupName);
            // Update the Stream and check that the number of updated streams and per-stream updates is incremented.
            streamManager.updateStream(scope, iterationStreamName, streamConfiguration);
            Counter updatedStreamsCounter = MetricRegistryUtils.getCounter(globalMetricName(UPDATE_STREAM));
            Counter streamUpdatesCounter = MetricRegistryUtils.getCounter(UPDATE_STREAM, streamTags(scope, iterationStreamName));
            Assert.assertTrue(iterations * i + j <= updatedStreamsCounter.count());
            Assert.assertTrue(j == streamUpdatesCounter.count());
            // Read and write some events.
            writeEvents(clientFactory, iterationStreamName, eventsWritten);
            Futures.allOf(readEvents(clientFactory, iterationReaderGroupName, parallelism));
            // Get a StreamCut for truncating the Stream.
            StreamCut streamCut = readerGroup.generateStreamCuts(executorService()).join().get(Stream.of(scope, iterationStreamName));
            // Truncate the Stream and check that the number of truncated Streams and per-Stream truncations is incremented.
            streamManager.truncateStream(scope, iterationStreamName, streamCut);
            Counter streamTruncationCounter = MetricRegistryUtils.getCounter(globalMetricName(TRUNCATE_STREAM));
            Counter perStreamTruncationCounter = MetricRegistryUtils.getCounter(TRUNCATE_STREAM, streamTags(scope, iterationStreamName));
            Assert.assertTrue(iterations * i + j <= streamTruncationCounter.count());
            Assert.assertTrue(j == perStreamTruncationCounter.count());
        }
        // Check metrics accounting for sealed and deleted streams.
        streamManager.sealStream(scope, iterationStreamName);
        Counter streamSealCounter = MetricRegistryUtils.getCounter(SEAL_STREAM);
        Assert.assertTrue(i + 1 <= streamSealCounter.count());
        streamManager.deleteStream(scope, iterationStreamName);
        Counter streamDeleteCounter = MetricRegistryUtils.getCounter(DELETE_STREAM);
        Assert.assertTrue(i + 1 <= streamDeleteCounter.count());
    }
    // Put each assertion on its own line so that a failure reports more information.
    Timer latencyValues1 = MetricRegistryUtils.getTimer(CREATE_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues1);
    AssertExtensions.assertGreaterThanOrEqual("Number of iterations and latency count do not match.", iterations, latencyValues1.count());
    Timer latencyValues2 = MetricRegistryUtils.getTimer(SEAL_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues2);
    Assert.assertEquals(iterations, latencyValues2.count());
    Timer latencyValues3 = MetricRegistryUtils.getTimer(DELETE_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues3);
    Assert.assertEquals(iterations, latencyValues3.count());
    Timer latencyValues4 = MetricRegistryUtils.getTimer(UPDATE_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues4);
    Assert.assertEquals(iterations * iterations, latencyValues4.count());
    Timer latencyValues5 = MetricRegistryUtils.getTimer(TRUNCATE_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues5);
    Assert.assertEquals(iterations * iterations, latencyValues5.count());
}
Also used : ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) Counter(io.micrometer.core.instrument.Counter) StreamCut(io.pravega.client.stream.StreamCut) Timer(io.micrometer.core.instrument.Timer) StreamManager(io.pravega.client.admin.StreamManager) ReaderGroup(io.pravega.client.stream.ReaderGroup) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) Cleanup(lombok.Cleanup) Test(org.junit.Test)
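The writeEvents(...) and readEvents(...) helpers invoked in this test are not part of the snippet. As a hedged sketch only, a writeEvents-style helper could be built on the standard EventStreamWriter API roughly as follows; the String payload and the JavaSerializer are assumptions, not details taken from the actual test utility.

// Assumed sketch of a writeEvents-style helper; not the actual Pravega test utility.
// Requires: io.pravega.client.EventStreamClientFactory, io.pravega.client.stream.EventStreamWriter,
//           io.pravega.client.stream.EventWriterConfig, io.pravega.client.stream.impl.JavaSerializer
private static void writeEvents(EventStreamClientFactory clientFactory, String streamName, int eventCount) {
    try (EventStreamWriter<String> writer = clientFactory.createEventWriter(
            streamName, new JavaSerializer<String>(), EventWriterConfig.builder().build())) {
        for (int i = 0; i < eventCount; i++) {
            // writeEvent returns a CompletableFuture; join() waits for the acknowledgement.
            writer.writeEvent("event-" + i).join();
        }
        writer.flush();
    }
}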

Example 18 with StreamManager

use of io.pravega.client.admin.StreamManager in project pravega by pravega.

the class TestUtils method createScopeAndStreams.

/**
 * Creates the specified {@code scope} and {@code streams}, using the specified {@code clientConfig}.
 *
 * Note: This method creates the streams using a scaling policy with a fixed number of segments (one each).
 *
 * @param clientConfig the {@link ClientConfig} to use for connecting to the server
 * @param scope the scope
 * @param streams the streams
 * @return whether all the objects (scope and each of the streams) were newly created. Returns {@code false}, if
 *         any of those objects were already present.
 */
public static boolean createScopeAndStreams(ClientConfig clientConfig, String scope, List<String> streams) {
    @Cleanup StreamManager streamManager = StreamManager.create(clientConfig);
    assertNotNull(streamManager);
    boolean result = streamManager.createScope(scope);
    for (String stream : streams) {
        boolean isStreamCreated = streamManager.createStream(scope, stream, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build());
        result = result && !isStreamCreated ? false : true;
    }
    return result;
}
Also used : StreamManager(io.pravega.client.admin.StreamManager) Cleanup(lombok.Cleanup)
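A hedged usage sketch of the helper above; the controller endpoint, scope, and stream names are placeholder assumptions.

// Requires: io.pravega.client.ClientConfig, java.net.URI, java.util.Arrays
ClientConfig clientConfig = ClientConfig.builder()
        .controllerURI(URI.create("tcp://localhost:9090")) // assumed endpoint
        .build();
boolean allNewlyCreated = TestUtils.createScopeAndStreams(clientConfig, "demoScope",
        Arrays.asList("streamA", "streamB"));
// allNewlyCreated is true only if the scope and every stream were created for the first time.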

Example 19 with StreamManager

use of io.pravega.client.admin.StreamManager in project pravega by pravega.

the class BatchClientTest method testBatchClientWithStreamTruncation.

@Test(timeout = 50000)
@SuppressWarnings("deprecation")
public void testBatchClientWithStreamTruncation() throws InterruptedException, ExecutionException {
    @Cleanup StreamManager streamManager = StreamManager.create(clientConfig);
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    createTestStreamWithEvents(clientFactory);
    log.info("Done creating a test stream with test events");
    @Cleanup BatchClientFactory batchClient = BatchClientFactory.withScope(SCOPE, clientConfig);
    log.info("Done creating batch client factory");
    // 1. Create a StreamCut after 2 events (offset = 2 * 30 = 60).
    StreamCut streamCut60L = new StreamCutImpl(Stream.of(SCOPE, STREAM), ImmutableMap.of(new Segment(SCOPE, STREAM, 0), 60L));
    // 2. Truncate stream.
    assertTrue("truncate stream", controllerWrapper.getController().truncateStream(SCOPE, STREAM, streamCut60L).join());
    // 3a. Fetch segments using StreamCut.UNBOUNDED.
    ArrayList<SegmentRange> segmentsPostTruncation1 = Lists.newArrayList(batchClient.getSegments(Stream.of(SCOPE, STREAM), StreamCut.UNBOUNDED, StreamCut.UNBOUNDED).getIterator());
    // 3b. Fetch segments using the getStreamInfo() API.
    StreamInfo streamInfo = streamManager.getStreamInfo(SCOPE, STREAM);
    ArrayList<SegmentRange> segmentsPostTruncation2 = Lists.newArrayList(batchClient.getSegments(Stream.of(SCOPE, STREAM), streamInfo.getHeadStreamCut(), streamInfo.getTailStreamCut()).getIterator());
    // Validate results.
    validateSegmentCountAndEventCount(batchClient, segmentsPostTruncation1);
    validateSegmentCountAndEventCount(batchClient, segmentsPostTruncation2);
}
Also used : SegmentRange(io.pravega.client.batch.SegmentRange) StreamCut(io.pravega.client.stream.StreamCut) StreamCutImpl(io.pravega.client.stream.impl.StreamCutImpl) StreamManager(io.pravega.client.admin.StreamManager) BatchClientFactory(io.pravega.client.BatchClientFactory) StreamInfo(io.pravega.client.admin.StreamInfo) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) Cleanup(lombok.Cleanup) Segment(io.pravega.client.segment.impl.Segment) Test(org.junit.Test)
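After the segment ranges have been fetched, the batch client can iterate the events within each segment. A minimal sketch follows, assuming the test events were written with a JavaSerializer<String>; the serializer is an assumption, since the snippet does not show how createTestStreamWithEvents serializes its events.

// Assumed sketch: iterate the events of the previously fetched segment ranges.
// Requires: io.pravega.client.batch.SegmentIterator, io.pravega.client.stream.impl.JavaSerializer
for (SegmentRange range : segmentsPostTruncation1) {
    try (SegmentIterator<String> events = batchClient.readSegment(range, new JavaSerializer<String>())) {
        while (events.hasNext()) {
            log.info("Read event: {}", events.next());
        }
    }
}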

Example 20 with StreamManager

use of io.pravega.client.admin.StreamManager in project pravega by pravega.

the class ByteStreamTest method testBlockingRead.

@Test(timeout = 30000)
public void testBlockingRead() throws IOException {
    String scope = "ByteStreamTest";
    String stream = "testBlockingRead";
    StreamConfiguration config = StreamConfiguration.builder().build();
    @Cleanup StreamManager streamManager = new StreamManagerImpl(PRAVEGA.getLocalController(), Mockito.mock(ConnectionPool.class));
    // create a scope
    Boolean createScopeStatus = streamManager.createScope(scope);
    log.info("Create scope status {}", createScopeStatus);
    // create a stream
    Boolean createStreamStatus = streamManager.createStream(scope, stream, config);
    log.info("Create stream status {}", createStreamStatus);
    @Cleanup ByteStreamClientFactory client = createClientFactory(scope);
    byte[] payload = new byte[100];
    Arrays.fill(payload, (byte) 1);
    byte[] readBuffer = new byte[200];
    Arrays.fill(readBuffer, (byte) 0);
    @Cleanup ByteStreamWriter writer = client.createByteStreamWriter(stream);
    @Cleanup ByteStreamReader reader = client.createByteStreamReader(stream);
    AssertExtensions.assertBlocks(() -> {
        assertEquals(100, reader.read(readBuffer));
    }, () -> writer.write(payload));
    assertEquals(1, readBuffer[99]);
    assertEquals(0, readBuffer[100]);
    Arrays.fill(readBuffer, (byte) 0);
    writer.write(payload);
    assertEquals(100, reader.read(readBuffer));
    assertEquals(1, readBuffer[99]);
    assertEquals(0, readBuffer[100]);
    writer.write(payload);
    writer.write(payload);
    assertEquals(200, StreamHelpers.readAll(reader, readBuffer, 0, readBuffer.length));
    AssertExtensions.assertBlocks(() -> {
        assertEquals(100, reader.read(readBuffer));
    }, () -> writer.write(payload));
    writer.closeAndSeal();
    assertEquals(-1, reader.read());
}
Also used : ConnectionPool(io.pravega.client.connection.impl.ConnectionPool) StreamManager(io.pravega.client.admin.StreamManager) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) ByteStreamReader(io.pravega.client.byteStream.ByteStreamReader) StreamManagerImpl(io.pravega.client.admin.impl.StreamManagerImpl) Cleanup(lombok.Cleanup) ByteStreamClientFactory(io.pravega.client.ByteStreamClientFactory) ByteStreamWriter(io.pravega.client.byteStream.ByteStreamWriter) Test(org.junit.Test)
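The createClientFactory(scope) helper referenced above is not shown in the snippet. A plausible stand-in built on the public factory method would look like the sketch below; the real test helper may wire the factory differently (for example, through the embedded controller), and the endpoint here is an assumption.

// Assumed stand-in for the createClientFactory(scope) helper; not the actual test implementation.
// Requires: io.pravega.client.ByteStreamClientFactory, io.pravega.client.ClientConfig, java.net.URI
private ByteStreamClientFactory createClientFactory(String scope) {
    ClientConfig clientConfig = ClientConfig.builder()
            .controllerURI(URI.create("tcp://localhost:9090")) // assumed endpoint
            .build();
    return ByteStreamClientFactory.withScope(scope, clientConfig);
}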

Aggregations

StreamManager (io.pravega.client.admin.StreamManager)59 Cleanup (lombok.Cleanup)54 Test (org.junit.Test)44 ClientConfig (io.pravega.client.ClientConfig)32 StreamConfiguration (io.pravega.client.stream.StreamConfiguration)31 ReaderGroupManager (io.pravega.client.admin.ReaderGroupManager)24 StreamManagerImpl (io.pravega.client.admin.impl.StreamManagerImpl)22 ConnectionPoolImpl (io.pravega.client.connection.impl.ConnectionPoolImpl)20 SocketConnectionFactoryImpl (io.pravega.client.connection.impl.SocketConnectionFactoryImpl)19 ConnectionPool (io.pravega.client.connection.impl.ConnectionPool)18 Stream (io.pravega.client.stream.Stream)17 EventStreamClientFactory (io.pravega.client.EventStreamClientFactory)15 ReaderGroupConfig (io.pravega.client.stream.ReaderGroupConfig)15 ReaderGroupManagerImpl (io.pravega.client.admin.impl.ReaderGroupManagerImpl)12 ConnectionFactory (io.pravega.client.connection.impl.ConnectionFactory)12 ScalingPolicy (io.pravega.client.stream.ScalingPolicy)12 URI (java.net.URI)12 ClientFactoryImpl (io.pravega.client.stream.impl.ClientFactoryImpl)11 Controller (io.pravega.client.control.impl.Controller)10 StreamImpl (io.pravega.client.stream.impl.StreamImpl)10