
Example 16 with EventStreamClientFactory

use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.

the class BatchClientTest method listAndReadSegmentsUsingBatchClient.

protected void listAndReadSegmentsUsingBatchClient(String scopeName, String streamName, ClientConfig config) throws InterruptedException, ExecutionException {
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scopeName, config);
    createTestStreamWithEvents(clientFactory);
    log.info("Done creating test event stream with test events");
    @Cleanup BatchClientFactory batchClient = BatchClientFactory.withScope(scopeName, config);
    // List out all the segments in the stream.
    ArrayList<SegmentRange> segments = Lists.newArrayList(batchClient.getSegments(Stream.of(scopeName, streamName), null, null).getIterator());
    assertEquals("Expected number of segments", 6, segments.size());
    // Batch read all events from stream.
    List<String> batchEventList = new ArrayList<>();
    segments.forEach(segInfo -> {
        @Cleanup SegmentIterator<String> segmentIterator = batchClient.readSegment(segInfo, serializer);
        batchEventList.addAll(Lists.newArrayList(segmentIterator));
    });
    assertEquals("Event count", 9, batchEventList.size());
    // Read from a given offset.
    Segment seg0 = new Segment(scopeName, streamName, 0);
    SegmentRange seg0Info = SegmentRangeImpl.builder().segment(seg0).startOffset(60).endOffset(90).build();
    @Cleanup SegmentIterator<String> seg0Iterator = batchClient.readSegment(seg0Info, serializer);
    ArrayList<String> dataAtOffset = Lists.newArrayList(seg0Iterator);
    assertEquals(1, dataAtOffset.size());
    assertEquals(DATA_OF_SIZE_30, dataAtOffset.get(0));
}
Also used : SegmentRange(io.pravega.client.batch.SegmentRange) BatchClientFactory(io.pravega.client.BatchClientFactory) ArrayList(java.util.ArrayList) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) Cleanup(lombok.Cleanup) Segment(io.pravega.client.segment.impl.Segment)
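
listAndReadSegmentsUsingBatchClient is a protected helper, so a concrete test supplies the scope, stream, and ClientConfig. A minimal sketch of such a caller, assuming a locally running controller and a scope/stream created elsewhere with six segments (the endpoint, scope, and stream names below are placeholders, not from the Pravega sources):

@Test(timeout = 300000)
public void readSegmentsWithBatchClientTest() throws InterruptedException, ExecutionException {
    // Hypothetical controller endpoint; the real test harness supplies its own ClientConfig.
    ClientConfig config = ClientConfig.builder()
            .controllerURI(URI.create("tcp://localhost:9090"))
            .build();
    // Scope and stream are assumed to already exist with six segments,
    // as the assertions in the helper above expect.
    listAndReadSegmentsUsingBatchClient("batchClientScope", "batchClientStream", config);
}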

Example 17 with EventStreamClientFactory

use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.

the class ControllerMetricsTest method streamMetricsTest.

/**
 * This test verifies that the appropriate metrics for Stream operations are updated correctly (counters, latency
 * histograms). Note that this test performs "at least" assertions on metrics because, in an environment where
 * concurrent tests are running, metrics may also be updated by other tests.
 */
@Test(timeout = 300000)
public void streamMetricsTest() throws Exception {
    // Use a unique scope to improve test isolation.
    final String scope = "controllerMetricsTestScope" + RandomFactory.getSeed();
    final String streamName = "controllerMetricsTestStream";
    final String readerGroupName = "RGControllerMetricsTestStream";
    final int parallelism = 4;
    final int eventsWritten = 10;
    int iterations = 3;
    // At this point, we have at least 6 internal streams.
    StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(parallelism)).build();
    @Cleanup StreamManager streamManager = StreamManager.create(controllerURI);
    streamManager.createScope(scope);
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, ClientConfig.builder().controllerURI(controllerURI).build());
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, controllerURI);
    for (int i = 0; i < iterations; i++) {
        final String iterationStreamName = streamName + i;
        final String iterationReaderGroupName = readerGroupName + RandomFactory.getSeed();
        // Check that the number of streams in metrics has been incremented.
        streamManager.createStream(scope, iterationStreamName, streamConfiguration);
        Counter createdStreamsCounter = MetricRegistryUtils.getCounter(CREATE_STREAM);
        AssertExtensions.assertGreaterThanOrEqual("The counter of created streams", i, (long) createdStreamsCounter.count());
        groupManager.createReaderGroup(iterationReaderGroupName, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(scope + "/" + iterationStreamName).build());
        for (long j = 1; j < iterations + 1; j++) {
            @Cleanup ReaderGroup readerGroup = groupManager.getReaderGroup(iterationReaderGroupName);
            // Update the Stream and check that the counters for updated streams and per-stream updates are incremented.
            streamManager.updateStream(scope, iterationStreamName, streamConfiguration);
            Counter updatedStreamsCounter = MetricRegistryUtils.getCounter(globalMetricName(UPDATE_STREAM));
            Counter streamUpdatesCounter = MetricRegistryUtils.getCounter(UPDATE_STREAM, streamTags(scope, iterationStreamName));
            Assert.assertTrue(iterations * i + j <= updatedStreamsCounter.count());
            Assert.assertTrue(j == streamUpdatesCounter.count());
            // Read and write some events.
            writeEvents(clientFactory, iterationStreamName, eventsWritten);
            Futures.allOf(readEvents(clientFactory, iterationReaderGroupName, parallelism));
            // Get a StreamCut for truncating the Stream.
            StreamCut streamCut = readerGroup.generateStreamCuts(executorService()).join().get(Stream.of(scope, iterationStreamName));
            // Truncate the Stream and check that the counters for truncated Streams and per-Stream truncations are incremented.
            streamManager.truncateStream(scope, iterationStreamName, streamCut);
            Counter streamTruncationCounter = MetricRegistryUtils.getCounter(globalMetricName(TRUNCATE_STREAM));
            Counter perStreamTruncationCounter = MetricRegistryUtils.getCounter(TRUNCATE_STREAM, streamTags(scope, iterationStreamName));
            Assert.assertTrue(iterations * i + j <= streamTruncationCounter.count());
            Assert.assertTrue(j == perStreamTruncationCounter.count());
        }
        // Check metrics accounting for sealed and deleted streams.
        streamManager.sealStream(scope, iterationStreamName);
        Counter streamSealCounter = MetricRegistryUtils.getCounter(SEAL_STREAM);
        Assert.assertTrue(i + 1 <= streamSealCounter.count());
        streamManager.deleteStream(scope, iterationStreamName);
        Counter streamDeleteCounter = MetricRegistryUtils.getCounter(DELETE_STREAM);
        Assert.assertTrue(i + 1 <= streamDeleteCounter.count());
    }
    // Put assertions on separate lines so failures give more information.
    Timer latencyValues1 = MetricRegistryUtils.getTimer(CREATE_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues1);
    AssertExtensions.assertGreaterThanOrEqual("Number of iterations and latency count do not match.", iterations, latencyValues1.count());
    Timer latencyValues2 = MetricRegistryUtils.getTimer(SEAL_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues2);
    Assert.assertEquals(iterations, latencyValues2.count());
    Timer latencyValues3 = MetricRegistryUtils.getTimer(DELETE_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues3);
    Assert.assertEquals(iterations, latencyValues3.count());
    Timer latencyValues4 = MetricRegistryUtils.getTimer(UPDATE_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues4);
    Assert.assertEquals(iterations * iterations, latencyValues4.count());
    Timer latencyValues5 = MetricRegistryUtils.getTimer(TRUNCATE_STREAM_LATENCY);
    Assert.assertNotNull(latencyValues5);
    Assert.assertEquals(iterations * iterations, latencyValues5.count());
}
Also used : ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) Counter(io.micrometer.core.instrument.Counter) StreamCut(io.pravega.client.stream.StreamCut) Timer(io.micrometer.core.instrument.Timer) StreamManager(io.pravega.client.admin.StreamManager) ReaderGroup(io.pravega.client.stream.ReaderGroup) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) Cleanup(lombok.Cleanup) Test(org.junit.Test)
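
The test above calls writeEvents and readEvents helpers that are not shown in this excerpt. A minimal sketch of what a writeEvents-style helper could look like, assuming String events and the JavaSerializer used in the other examples on this page (the actual helper in ControllerMetricsTest may differ):

private void writeEvents(EventStreamClientFactory clientFactory, String streamName, int totalEvents) {
    // Create a writer for the stream; EventWriterConfig defaults are sufficient for a test.
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(
            streamName, new JavaSerializer<>(), EventWriterConfig.builder().build());
    for (int i = 0; i < totalEvents; i++) {
        // Wait for each write to be acknowledged before continuing.
        writer.writeEvent(String.valueOf(i)).join();
    }
    writer.flush();
}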

Example 18 with EventStreamClientFactory

use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.

the class DelegationTokenTest method testDelegationTokenGetsRenewedAfterExpiry.

/**
 * This test verifies that an event stream reader continues to read events as a result of automatic delegation token
 * renewal, after the initial delegation token it uses expires.
 *
 * We use an extraordinarily high test timeout and read timeouts to account for any inordinate delays that may be
 * encountered in testing environments.
 */
@Test(timeout = 50000)
public void testDelegationTokenGetsRenewedAfterExpiry() throws InterruptedException {
    // The delegation token renewal threshold is 5 seconds, so we use 6 seconds as the token TTL so that the token
    // doesn't get renewed before each use.
    ClusterWrapper pravegaCluster = ClusterWrapper.builder().authEnabled(true).tokenTtlInSeconds(6).build();
    try {
        pravegaCluster.start();
        final String scope = "testscope";
        final String streamName = "teststream";
        final int numSegments = 1;
        final ClientConfig clientConfig = ClientConfig.builder().controllerURI(URI.create(pravegaCluster.controllerUri())).credentials(new DefaultCredentials("1111_aaaa", "admin")).build();
        log.debug("Done creating client config.");
        createScopeStream(scope, streamName, numSegments, clientConfig);
        @Cleanup final EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
        // Perform writes on a separate thread.
        Runnable runnable = () -> {
            @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<String>(), EventWriterConfig.builder().build());
            for (int i = 0; i < 10; i++) {
                String msg = "message: " + i;
                writer.writeEvent(msg).join();
                log.debug("Done writing message '{}' to stream '{} / {}'", msg, scope, streamName);
            }
        };
        @Cleanup("interrupt") Thread writerThread = new Thread(runnable);
        writerThread.start();
        // Now, read the events from the stream.
        String readerGroup = UUID.randomUUID().toString().replace("-", "");
        ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).disableAutomaticCheckpoints().build();
        @Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, clientConfig);
        readerGroupManager.createReaderGroup(readerGroup, readerGroupConfig);
        @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", readerGroup, new JavaSerializer<String>(), ReaderConfig.builder().build());
        int j = 0;
        EventRead<String> event = null;
        do {
            event = reader.readNextEvent(2000);
            if (event.getEvent() != null) {
                log.info("Done reading event: {}", event.getEvent());
                j++;
            }
            // We keep the sleep time relatively large to make sure that the delegation token expires
            // midway through the reads.
            Thread.sleep(500);
        } while (event.getEvent() != null);
        // Assert that we end up reading 10 events even though the delegation token must have expired midway.
        // 
        // To look for evidence of delegation token renewal check the logs for the following message:
        // - "Token is nearing expiry, so refreshing it"
        assertSame(10, j);
    } finally {
        pravegaCluster.close();
    }
}
Also used : ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) ClusterWrapper(io.pravega.test.integration.demo.ClusterWrapper) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) Cleanup(lombok.Cleanup) JavaSerializer(io.pravega.client.stream.impl.JavaSerializer) DefaultCredentials(io.pravega.shared.security.auth.DefaultCredentials) EventStreamWriter(io.pravega.client.stream.EventStreamWriter) ClientConfig(io.pravega.client.ClientConfig) Test(org.junit.Test)
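
The createScopeStream helper called above is not part of this excerpt. A minimal sketch of what it could look like, assuming it uses StreamManager and a fixed scaling policy as in Example 17 (the actual helper in DelegationTokenTest may differ):

private void createScopeStream(String scope, String streamName, int numSegments, ClientConfig clientConfig) {
    // StreamManager can be created directly from the ClientConfig used by the test.
    @Cleanup
    StreamManager streamManager = StreamManager.create(clientConfig);
    streamManager.createScope(scope);
    streamManager.createStream(scope, streamName, StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(numSegments))
            .build());
}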

Example 19 with EventStreamClientFactory

use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.

the class DelegationTokenTest method writeTenEvents.

private void writeTenEvents(String scope, String streamName, ClientConfig clientConfig) {
    @Cleanup final EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
    // Write the events synchronously on the calling thread; flush() below ensures they are durable.
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<>(), EventWriterConfig.builder().build());
    for (int i = 0; i < 10; i++) {
        String msg = "message: " + i;
        writer.writeEvent(msg);
        log.debug("Done writing message '{}' to stream '{} / {}'", msg, scope, streamName);
    }
    writer.flush();
}
Also used : EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) Cleanup(lombok.Cleanup)

Example 20 with EventStreamClientFactory

use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.

the class TestUtils method writeDataToStream.

/**
 * Write the specified number of messages to the specified {@code scope}/{@code stream}, using the
 * provided {@code writerClientConfig}.
 *
 * @param scope the scope
 * @param stream the stream
 * @param message the event message to write. If it is null, a default message will be used.
 * @param numMessages the number of event messages to write
 * @param writerClientConfig the {@link ClientConfig} object to use to connect to the server
 * @throws NullPointerException if {@code scope} or {@code stream} or {@code writerClientConfig} is null
 * @throws IllegalArgumentException if {@code numMessages} is less than 1
 * @throws RuntimeException if any exception is thrown by the client
 */
public static void writeDataToStream(@NonNull String scope, @NonNull String stream, String message, int numMessages, @NonNull ClientConfig writerClientConfig) {
    Preconditions.checkArgument(numMessages > 0);
    if (message == null) {
        message = "Test message";
    }
    @Cleanup final EventStreamClientFactory writerClientFactory = EventStreamClientFactory.withScope(scope, writerClientConfig);
    @Cleanup final EventStreamWriter<String> writer = writerClientFactory.createEventWriter(stream, new JavaSerializer<String>(), EventWriterConfig.builder().build());
    for (int i = 0; i < numMessages; i++) {
        writer.writeEvent(message);
    }
    writer.flush();
    log.info("Wrote {} message(s) to the stream {}/{}", numMessages, scope, stream);
}
Also used : EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) Cleanup(lombok.Cleanup)
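
A minimal sketch of calling this utility from a test, assuming a locally running controller (the endpoint and names below are placeholders, not from the Pravega sources):

// Hypothetical controller endpoint.
ClientConfig clientConfig = ClientConfig.builder()
        .controllerURI(URI.create("tcp://localhost:9090"))
        .build();
// Passing null for the message falls back to the default "Test message";
// numMessages must be at least 1 or the precondition check fails.
TestUtils.writeDataToStream("testScope", "testStream", null, 5, clientConfig);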

Aggregations

EventStreamClientFactory (io.pravega.client.EventStreamClientFactory): 57
Cleanup (lombok.Cleanup): 50
Test (org.junit.Test): 41
ReaderGroupManager (io.pravega.client.admin.ReaderGroupManager): 36
ClientConfig (io.pravega.client.ClientConfig): 21
ReaderGroup (io.pravega.client.stream.ReaderGroup): 19
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 19
StreamCut (io.pravega.client.stream.StreamCut): 19
HashMap (java.util.HashMap): 18
StreamManager (io.pravega.client.admin.StreamManager): 17
ReaderGroupConfig (io.pravega.client.stream.ReaderGroupConfig): 16
Stream (io.pravega.client.stream.Stream): 16
Map (java.util.Map): 16
Segment (io.pravega.client.segment.impl.Segment): 13
EventWriterConfig (io.pravega.client.stream.EventWriterConfig): 12
Controller (io.pravega.client.control.impl.Controller): 11
EventStreamWriter (io.pravega.client.stream.EventStreamWriter): 10
JavaSerializer (io.pravega.client.stream.impl.JavaSerializer): 10
Futures (io.pravega.common.concurrent.Futures): 10
ConnectionFactory (io.pravega.client.connection.impl.ConnectionFactory): 9