
Example 1 with ReaderGroupManagerImpl

Use of io.pravega.client.admin.impl.ReaderGroupManagerImpl in project pravega by pravega.

From the class EndToEndTxnWithScaleTest, method testTxnWithScale.

@Test(timeout = 10000)
public void testTxnWithScale() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scope("test").streamName("test").scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope("test").get();
    controller.createStream(config).get();
    @Cleanup ConnectionFactory connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup ClientFactory clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup EventStreamWriter<String> test = clientFactory.createEventWriter("test", new JavaSerializer<>(), EventWriterConfig.builder().transactionTimeoutScaleGracePeriod(10000).transactionTimeoutTime(10000).build());
    // Write and commit a first transactional event before scaling.
    Transaction<String> transaction = test.beginTxn();
    transaction.writeEvent("0", "txntest1");
    transaction.commit();
    // Scale the stream from one segment into three.
    Stream stream = new StreamImpl("test", "test");
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.33);
    map.put(0.33, 0.66);
    map.put(0.66, 1.0);
    Boolean result = controller.scaleStream(stream, Collections.singletonList(0), map, executorService).getFuture().get();
    assertTrue(result);
    // Write and commit a second transactional event after the scale.
    transaction = test.beginTxn();
    transaction.writeEvent("0", "txntest2");
    transaction.commit();
    // Read both committed events back through a reader group.
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory, connectionFactory);
    groupManager.createReaderGroup("reader", ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/test").build());
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", "reader", new JavaSerializer<>(), ReaderConfig.builder().build());
    EventRead<String> event = reader.readNextEvent(10000);
    assertNotNull(event);
    assertEquals("txntest1", event.getEvent());
    event = reader.readNextEvent(10000);
    assertNotNull(event);
    assertEquals("txntest2", event.getEvent());
}
Also used: ReaderGroupManager (io.pravega.client.admin.ReaderGroupManager), HashMap (java.util.HashMap), ClientFactory (io.pravega.client.ClientFactory), Controller (io.pravega.client.stream.impl.Controller), Cleanup (lombok.Cleanup), ConnectionFactory (io.pravega.client.netty.impl.ConnectionFactory), ClientFactoryImpl (io.pravega.client.stream.impl.ClientFactoryImpl), StreamImpl (io.pravega.client.stream.impl.StreamImpl), StreamConfiguration (io.pravega.client.stream.StreamConfiguration), Stream (io.pravega.client.stream.Stream), ReaderGroupManagerImpl (io.pravega.client.admin.impl.ReaderGroupManagerImpl), ConnectionFactoryImpl (io.pravega.client.netty.impl.ConnectionFactoryImpl), Test (org.junit.Test)
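
For comparison, application code normally reaches the same functionality through the public client factories rather than constructing ReaderGroupManagerImpl with a Controller and ConnectionFactory the way the test above does (ReaderGroupManager.withScope builds a ReaderGroupManagerImpl internally). The following is a minimal sketch, not taken from the project tests: the controller URI tcp://localhost:9090 and the scope, stream, and reader-group names are placeholders, and it assumes the stream already exists and holds events to read.

import java.net.URI;

import io.pravega.client.ClientConfig;
import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.stream.EventRead;
import io.pravega.client.stream.EventStreamReader;
import io.pravega.client.stream.ReaderConfig;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.impl.JavaSerializer;

public class ReaderGroupReadSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder controller endpoint; adjust for the target deployment.
        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create("tcp://localhost:9090"))
                .build();
        try (ReaderGroupManager groupManager = ReaderGroupManager.withScope("test", clientConfig);
             EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope("test", clientConfig)) {
            // Same reader-group setup as in the test, but via the public factory.
            groupManager.createReaderGroup("reader",
                    ReaderGroupConfig.builder().stream(Stream.of("test", "test")).build());
            try (EventStreamReader<String> reader = clientFactory.createReader(
                    "readerId", "reader", new JavaSerializer<>(), ReaderConfig.builder().build())) {
                EventRead<String> event = reader.readNextEvent(10000);
                System.out.println("Read event: " + event.getEvent());
            }
        }
    }
}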

Example 2 with ReaderGroupManagerImpl

Use of io.pravega.client.admin.impl.ReaderGroupManagerImpl in project pravega by pravega.

From the class ReadWriteTest, method readWriteTest.

@Test(timeout = 60000)
public void readWriteTest() throws InterruptedException, ExecutionException {
    String scope = "testMultiReaderWriterScope";
    String readerGroupName = "testMultiReaderWriterReaderGroup";
    // 20 readers -> 20 stream segments (to have max read parallelism)
    ScalingPolicy scalingPolicy = ScalingPolicy.fixed(20);
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
    eventsReadFromPravega = new ConcurrentLinkedQueue<>();
    // data used by each of the writers.
    eventData = new AtomicLong();
    // used by readers to maintain a count of events.
    eventReadCount = new AtomicLong();
    stopReadFlag = new AtomicBoolean(false);
    ClientConfig clientConfig = ClientConfig.builder().build();
    try (ConnectionPool cp = new ConnectionPoolImpl(clientConfig, new SocketConnectionFactoryImpl(clientConfig));
        StreamManager streamManager = new StreamManagerImpl(controller, cp)) {
        // create a scope
        Boolean createScopeStatus = streamManager.createScope(scope);
        log.info("Create scope status {}", createScopeStatus);
        // create a stream
        Boolean createStreamStatus = streamManager.createStream(scope, STREAM_NAME, config);
        log.info("Create stream status {}", createStreamStatus);
    }
    try (ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
        ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
        ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory)) {
        // start writing events to the stream
        log.info("Creating {} writers", NUM_WRITERS);
        List<CompletableFuture<Void>> writerList = new ArrayList<>();
        for (int i = 0; i < NUM_WRITERS; i++) {
            log.info("Starting writer{}", i);
            writerList.add(startNewWriter(eventData, clientFactory));
        }
        // create a reader group
        log.info("Creating Reader group : {}", readerGroupName);
        readerGroupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).build());
        log.info("Reader group name {} ", readerGroupManager.getReaderGroup(readerGroupName).getGroupName());
        log.info("Reader group scope {}", readerGroupManager.getReaderGroup(readerGroupName).getScope());
        // create readers
        log.info("Creating {} readers", NUM_READERS);
        List<CompletableFuture<Void>> readerList = new ArrayList<>();
        String readerName = "reader" + RandomFactory.create().nextInt(Integer.MAX_VALUE);
        // start reading events
        for (int i = 0; i < NUM_READERS; i++) {
            log.info("Starting reader{}", i);
            readerList.add(startNewReader(readerName + i, clientFactory, readerGroupName, eventsReadFromPravega, eventData, eventReadCount, stopReadFlag));
        }
        // wait for writers completion
        Futures.allOf(writerList).get();
        ExecutorServiceHelpers.shutdown(writerPool);
        // set stop read flag to true
        stopReadFlag.set(true);
        // wait for readers completion
        Futures.allOf(readerList).get();
        ExecutorServiceHelpers.shutdown(readerPool);
        // delete readergroup
        log.info("Deleting readergroup {}", readerGroupName);
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    log.info("All writers have stopped. Setting Stop_Read_Flag. Event Written Count:{}, Event Read " + "Count: {}", eventData.get(), eventsReadFromPravega.size());
    assertEquals(TOTAL_NUM_EVENTS, eventsReadFromPravega.size());
    // check unique events.
    assertEquals(TOTAL_NUM_EVENTS, new TreeSet<>(eventsReadFromPravega).size());
    // seal the stream
    CompletableFuture<Boolean> sealStreamStatus = controller.sealStream(scope, STREAM_NAME);
    log.info("Sealing stream {}", STREAM_NAME);
    assertTrue(sealStreamStatus.get());
    // delete the stream
    CompletableFuture<Boolean> deleteStreamStatus = controller.deleteStream(scope, STREAM_NAME);
    log.info("Deleting stream {}", STREAM_NAME);
    assertTrue(deleteStreamStatus.get());
    // delete the scope
    CompletableFuture<Boolean> deleteScopeStatus = controller.deleteScope(scope);
    log.info("Deleting scope {}", scope);
    assertTrue(deleteScopeStatus.get());
    log.info("Read write test succeeds");
}
Also used: ConnectionPool (io.pravega.client.connection.impl.ConnectionPool), ScalingPolicy (io.pravega.client.stream.ScalingPolicy), ReaderGroupManager (io.pravega.client.admin.ReaderGroupManager), ConnectionPoolImpl (io.pravega.client.connection.impl.ConnectionPoolImpl), ArrayList (java.util.ArrayList), StreamManagerImpl (io.pravega.client.admin.impl.StreamManagerImpl), SocketConnectionFactoryImpl (io.pravega.client.connection.impl.SocketConnectionFactoryImpl), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), AtomicLong (java.util.concurrent.atomic.AtomicLong), ConnectionFactory (io.pravega.client.connection.impl.ConnectionFactory), ClientFactoryImpl (io.pravega.client.stream.impl.ClientFactoryImpl), CompletableFuture (java.util.concurrent.CompletableFuture), StreamManager (io.pravega.client.admin.StreamManager), TreeSet (java.util.TreeSet), StreamConfiguration (io.pravega.client.stream.StreamConfiguration), ClientConfig (io.pravega.client.ClientConfig), ReaderGroupManagerImpl (io.pravega.client.admin.impl.ReaderGroupManagerImpl), Test (org.junit.Test)
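
The scope and stream bootstrap in this example wires StreamManagerImpl to an explicit ConnectionPool, which is the test-internal route; the public StreamManager.create factory hides that wiring. A minimal sketch of the equivalent setup, with a placeholder controller URI and a placeholder stream name standing in for the test's STREAM_NAME constant:

import java.net.URI;

import io.pravega.client.ClientConfig;
import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;

public class CreateScopeAndStreamSketch {
    public static void main(String[] args) {
        // Placeholder controller endpoint.
        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create("tcp://localhost:9090"))
                .build();
        // 20 fixed segments, matching the scaling policy used in the test above.
        StreamConfiguration config = StreamConfiguration.builder()
                .scalingPolicy(ScalingPolicy.fixed(20))
                .build();
        try (StreamManager streamManager = StreamManager.create(clientConfig)) {
            boolean scopeCreated = streamManager.createScope("testMultiReaderWriterScope");
            // "myStream" stands in for the test's STREAM_NAME constant.
            boolean streamCreated = streamManager.createStream("testMultiReaderWriterScope", "myStream", config);
            System.out.println("scope created: " + scopeCreated + ", stream created: " + streamCreated);
        }
    }
}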

Example 3 with ReaderGroupManagerImpl

Use of io.pravega.client.admin.impl.ReaderGroupManagerImpl in project pravega by pravega.

From the class LargeEventTest, method testReadWriteWithConnectionReconnect.

@Test(timeout = 60000)
public void testReadWriteWithConnectionReconnect() throws ExecutionException, InterruptedException, TimeoutException {
    String readerGroupName = "testLargeEventReconnectReaderGroup";
    String streamName = "ConnectionReconnect";
    StreamConfiguration config = getStreamConfiguration(NUM_READERS);
    createScopeStream(SCOPE_NAME, streamName, config);
    int numEvents = 1;
    Map<Integer, List<ByteBuffer>> data = generateEventData(NUM_WRITERS, 0, numEvents, LARGE_EVENT_SIZE);
    merge(eventsWrittenToPravega, data);
    Queue<ByteBuffer> reads = new ConcurrentLinkedQueue<>();
    // Close the connection after two `send` calls.
    AtomicInteger sendCount = new AtomicInteger(0);
    Supplier<Boolean> predicate = () -> sendCount.getAndIncrement() == CLOSE_WRITE_COUNT;
    AtomicReference<Boolean> latch = new AtomicReference<>(true);
    try (ConnectionExporter connectionFactory = new ConnectionExporter(ClientConfig.builder().build(), latch, null, predicate);
        ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE_NAME, controller, connectionFactory);
        ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(SCOPE_NAME, controller, clientFactory)) {
        // Start writing events to the stream.
        val writers = createEventWriters(streamName, NUM_WRITERS, clientFactory, eventsWrittenToPravega);
        // Create a ReaderGroup.
        createReaderGroup(readerGroupName, readerGroupManager, streamName);
        // Create Readers.
        val readers = createEventReaders(NUM_READERS, clientFactory, readerGroupName, reads);
        Futures.allOf(writers).get();
        stopReadFlag.set(true);
        Futures.allOf(readers).get();
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    validateCleanUp(streamName);
    validateEventReads(reads, eventsWrittenToPravega);
}
Also used: lombok.val (lombok.val), ReaderGroupManager (io.pravega.client.admin.ReaderGroupManager), AtomicReference (java.util.concurrent.atomic.AtomicReference), ByteBuffer (java.nio.ByteBuffer), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), ClientFactoryImpl (io.pravega.client.stream.impl.ClientFactoryImpl), StreamConfiguration (io.pravega.client.stream.StreamConfiguration), List (java.util.List), ArrayList (java.util.ArrayList), ConcurrentLinkedQueue (java.util.concurrent.ConcurrentLinkedQueue), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), ReaderGroupManagerImpl (io.pravega.client.admin.impl.ReaderGroupManagerImpl), Test (org.junit.Test)
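
This test drives large events through project-internal helpers (ConnectionExporter, createEventWriters, createEventReaders), so the writer configuration itself is not visible here. As a rough standalone sketch: large-event support is opt-in on the writer, and the snippet below assumes a recent client (0.10+) where the EventWriterConfig builder exposes an enableLargeEvents flag; the endpoint, scope, and stream names are placeholders, not values from the test.

import java.net.URI;
import java.nio.ByteBuffer;

import io.pravega.client.ClientConfig;
import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.stream.EventStreamWriter;
import io.pravega.client.stream.EventWriterConfig;
import io.pravega.client.stream.impl.ByteBufferSerializer;

public class LargeEventWriteSketch {
    public static void main(String[] args) {
        // Placeholder controller endpoint.
        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create("tcp://localhost:9090"))
                .build();
        // Assumed flag: routes oversized events through the large-event write path
        // instead of rejecting them at the regular 8 MB event-size cap.
        EventWriterConfig writerConfig = EventWriterConfig.builder()
                .enableLargeEvents(true)
                .build();
        try (EventStreamClientFactory clientFactory =
                     EventStreamClientFactory.withScope("largeEventScope", clientConfig);
             EventStreamWriter<ByteBuffer> writer = clientFactory.createEventWriter(
                     "largeEventStream", new ByteBufferSerializer(), writerConfig)) {
            // A 16 MB payload, above the regular event-size limit.
            ByteBuffer payload = ByteBuffer.allocate(16 * 1024 * 1024);
            writer.writeEvent(payload).join();
        }
    }
}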

Example 4 with ReaderGroupManagerImpl

Use of io.pravega.client.admin.impl.ReaderGroupManagerImpl in project pravega by pravega.

From the class LargeEventTest, method readWriteCycle.

private ConcurrentLinkedQueue<ByteBuffer> readWriteCycle(String streamName, String readerGroupName, Map<Integer, List<ByteBuffer>> writes) throws ExecutionException, InterruptedException {
    // Each read-cycle should clear these objects.
    stopReadFlag.set(false);
    // Reads should be cleared, but not writes.
    eventReadCount.set(0);
    // The reads should return all events written so far, not just the events about to be written (writes).
    ConcurrentLinkedQueue<ByteBuffer> reads = new ConcurrentLinkedQueue<>();
    try (ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
        ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE_NAME, controller, connectionFactory);
        ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(SCOPE_NAME, controller, clientFactory)) {
        // Start writing events to the stream.
        val writers = createEventWriters(streamName, writes.size(), clientFactory, writes);
        // Create a ReaderGroup.
        createReaderGroup(readerGroupName, readerGroupManager, streamName);
        // Create Readers.
        val readers = createEventReaders(NUM_READERS, clientFactory, readerGroupName, reads);
        Futures.allOf(writers).get();
        stopReadFlag.set(true);
        Futures.allOf(readers).get();
        log.info("Deleting ReaderGroup: {}", readerGroupName);
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    return reads;
}
Also used: lombok.val (lombok.val), ConnectionFactory (io.pravega.client.connection.impl.ConnectionFactory), ClientFactoryImpl (io.pravega.client.stream.impl.ClientFactoryImpl), ReaderGroupManager (io.pravega.client.admin.ReaderGroupManager), ConcurrentLinkedQueue (java.util.concurrent.ConcurrentLinkedQueue), SocketConnectionFactoryImpl (io.pravega.client.connection.impl.SocketConnectionFactoryImpl), ByteBuffer (java.nio.ByteBuffer), ReaderGroupManagerImpl (io.pravega.client.admin.impl.ReaderGroupManagerImpl)

Example 5 with ReaderGroupManagerImpl

Use of io.pravega.client.admin.impl.ReaderGroupManagerImpl in project pravega by pravega.

From the class LargeEventTest, method testReadWriteScaleStreamSeal.

/**
 * Tests that if a Stream is sealed, no data will continue to be produced by the {@link io.pravega.client.stream.impl.LargeEventWriter}.
 */
@Test(timeout = 60000)
public void testReadWriteScaleStreamSeal() throws ExecutionException, InterruptedException, TimeoutException {
    String readerGroupName = "testLargeEventScaleStreamSealReaderGroup";
    String streamName = "ScaleStreamSeal";
    StreamConfiguration config = StreamConfiguration.builder().retentionPolicy(RetentionPolicy.bySizeBytes(Long.MAX_VALUE)).build();
    createScopeStream(SCOPE_NAME, streamName, config);
    AtomicInteger generation = new AtomicInteger(0);
    // Creates some data to write.
    int events = 1;
    Map<Integer, List<ByteBuffer>> data = generateEventData(NUM_WRITERS, events * generation.getAndIncrement(), events, LARGE_EVENT_SIZE);
    merge(eventsWrittenToPravega, data);
    // Perform a basic read-write cycle.
    Queue<ByteBuffer> reads = readWriteCycle(streamName, readerGroupName, eventsWrittenToPravega);
    validateEventReads(reads, eventsWrittenToPravega);
    // Define the scale operation.
    Runnable scale = () -> {
        Stream stream = new StreamImpl(SCOPE_NAME, streamName);
        try {
            StreamSegments segments = controller.getCurrentSegments(SCOPE_NAME, streamName).get();
            List<Long> ids = segments.getSegments().stream().map(Segment::getSegmentId).collect(Collectors.toList());
            controller.startScale(stream, ids, Map.of(0.0, 0.5, 0.5, 1.0)).get();
        } catch (InterruptedException | ExecutionException e) {
            throw new RuntimeException(e);
        }
    };
    // Reset reads.
    eventReadCount.set(0);
    // Try to scale the segment after two `send` calls.
    AtomicInteger sendCount = new AtomicInteger(0);
    Supplier<Boolean> predicate = () -> sendCount.getAndIncrement() == CLOSE_WRITE_COUNT;
    AtomicReference<Boolean> latch = new AtomicReference<>(true);
    try (ConnectionExporter connectionFactory = new ConnectionExporter(ClientConfig.builder().build(), latch, scale, predicate);
        ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE_NAME, controller, connectionFactory);
        ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(SCOPE_NAME, controller, clientFactory)) {
        // Next set of writes.
        data = generateEventData(NUM_WRITERS, events * generation.getAndIncrement(), events, LARGE_EVENT_SIZE);
        merge(eventsWrittenToPravega, data);
        // Start writing events to the stream.
        val writers = createEventWriters(streamName, NUM_WRITERS, clientFactory, data);
        Futures.allOf(writers).get();
        // Wait for the scale event.
        TestUtils.await(() -> !latch.get(), 200, 2000);
        // Create a ReaderGroup.
        createReaderGroup(readerGroupName, readerGroupManager, streamName);
        // Create Readers.
        val readers = createEventReaders(NUM_READERS, clientFactory, readerGroupName, eventsReadFromPravega);
        stopReadFlag.set(true);
        Futures.allOf(readers).get();
        log.info("Deleting ReaderGroup: {}", readerGroupName);
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    StreamSegments segments = controller.getCurrentSegments(SCOPE_NAME, streamName).get();
    // Make sure that the scale event has happened.
    Assert.assertEquals("Expected 2 StreamSegments.", 2, segments.getSegments().size());
    // This time there are successor segments, so the data should have been accepted.
    validateEventReads(eventsReadFromPravega, eventsWrittenToPravega);
}
Also used: Segment (io.pravega.client.segment.impl.Segment), ClientFactoryImpl (io.pravega.client.stream.impl.ClientFactoryImpl), StreamConfiguration (io.pravega.client.stream.StreamConfiguration), List (java.util.List), ArrayList (java.util.ArrayList), Stream (io.pravega.client.stream.Stream), ExecutionException (java.util.concurrent.ExecutionException), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), ReaderGroupManagerImpl (io.pravega.client.admin.impl.ReaderGroupManagerImpl), lombok.val (lombok.val), ReaderGroupManager (io.pravega.client.admin.ReaderGroupManager), AtomicReference (java.util.concurrent.atomic.AtomicReference), ByteBuffer (java.nio.ByteBuffer), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), StreamImpl (io.pravega.client.stream.impl.StreamImpl), StreamSegments (io.pravega.client.stream.impl.StreamSegments), Test (org.junit.Test)
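
At the end of this test the segment count is checked through the Controller, and teardown goes through the project's validateCleanUp helper; the readWriteTest example above performs the seal/delete/delete cycle through the Controller directly. The same teardown is available on the public StreamManager, sketched below with placeholder endpoint, scope, and stream names:

import java.net.URI;

import io.pravega.client.ClientConfig;
import io.pravega.client.admin.StreamManager;

public class TeardownSketch {
    public static void main(String[] args) {
        // Placeholder controller endpoint.
        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create("tcp://localhost:9090"))
                .build();
        try (StreamManager streamManager = StreamManager.create(clientConfig)) {
            // Seal first: a stream must be sealed before it can be deleted.
            boolean sealed = streamManager.sealStream("myScope", "myStream");
            boolean streamDeleted = streamManager.deleteStream("myScope", "myStream");
            // The scope can only be deleted once it contains no streams.
            boolean scopeDeleted = streamManager.deleteScope("myScope");
            System.out.println("sealed=" + sealed + ", streamDeleted=" + streamDeleted + ", scopeDeleted=" + scopeDeleted);
        }
    }
}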

Aggregations

ReaderGroupManagerImpl (io.pravega.client.admin.impl.ReaderGroupManagerImpl): 42 usages
ReaderGroupManager (io.pravega.client.admin.ReaderGroupManager): 39 usages
ClientFactoryImpl (io.pravega.client.stream.impl.ClientFactoryImpl): 39 usages
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 38 usages
Test (org.junit.Test): 38 usages
SocketConnectionFactoryImpl (io.pravega.client.connection.impl.SocketConnectionFactoryImpl): 37 usages
Cleanup (lombok.Cleanup): 34 usages
ConnectionFactory (io.pravega.client.connection.impl.ConnectionFactory): 33 usages
Stream (io.pravega.client.stream.Stream): 22 usages
HashMap (java.util.HashMap): 21 usages
ClientConfig (io.pravega.client.ClientConfig): 20 usages
StreamImpl (io.pravega.client.stream.impl.StreamImpl): 18 usages
ReaderGroup (io.pravega.client.stream.ReaderGroup): 17 usages
Controller (io.pravega.client.control.impl.Controller): 16 usages
LocalController (io.pravega.controller.server.eventProcessor.LocalController): 14 usages
ScalingPolicy (io.pravega.client.stream.ScalingPolicy): 11 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 11 usages
StreamManager (io.pravega.client.admin.StreamManager): 10 usages
EventWriterConfig (io.pravega.client.stream.EventWriterConfig): 10 usages
ReaderGroupConfig (io.pravega.client.stream.ReaderGroupConfig): 10 usages