Search in sources :

Example 36 with JavaSerializer

Use of io.pravega.client.stream.impl.JavaSerializer in the project pravega, by pravega.

From the class CheckpointTest: method testGenerateStreamCuts.

@Test(timeout = 20000)
public void testGenerateStreamCuts() throws Exception {
    // Verifies that generateStreamCuts() stays pending while the reader is idle:
    // with automatic checkpoints disabled, the cuts can only be produced once the
    // reader makes further progress.
    String hostAddress = "localhost";
    String streamId = "testGenerateStreamCuts";
    String readerId = "reader";
    String groupName = "testGenerateStreamCuts-group";
    int listenPort = TestUtils.getAvailableListenPort();
    String payload = "Hello world\n";
    String scopeName = "Scope1";

    // Stand up an in-process segment store and connection listener for the test.
    StreamSegmentStore segmentStore = SERVICE_BUILDER.createStreamSegmentService();
    @Cleanup PravegaConnectionListener listener = new PravegaConnectionListener(false, listenPort, segmentStore, mock(TableStore.class), SERVICE_BUILDER.getLowPriorityExecutor());
    listener.startListening();

    // Create scope, single-segment stream, and a reader group with automatic
    // checkpoints turned off.
    @Cleanup MockStreamManager manager = new MockStreamManager(scopeName, hostAddress, listenPort);
    @Cleanup MockClientFactory factory = manager.getClientFactory();
    ReaderGroupConfig rgConfig = ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .stream(Stream.of(scopeName, streamId))
            .build();
    manager.createScope(scopeName);
    manager.createStream(scopeName, streamId, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build());
    manager.createReaderGroup(groupName, rgConfig);
    @Cleanup ReaderGroup group = manager.getReaderGroup(groupName);

    // Write three identical events so the reader has data to consume.
    JavaSerializer<String> stringSerializer = new JavaSerializer<>();
    @Cleanup EventStreamWriter<String> writer = factory.createEventWriter(streamId, stringSerializer, EventWriterConfig.builder().build());
    writer.writeEvent(payload);
    writer.writeEvent(payload);
    writer.writeEvent(payload);
    writer.flush();

    // The reader's notion of time is driven manually through this clock.
    AtomicLong manualClock = new AtomicLong();
    @Cleanup EventStreamReader<String> reader = factory.createReader(readerId, groupName, stringSerializer, ReaderConfig.builder().build(), manualClock::get, manualClock::get);

    // Consume two events, advancing the manual clock before each read.
    manualClock.addAndGet(CLOCK_ADVANCE_INTERVAL);
    EventRead<String> event = reader.readNextEvent(60000);
    assertEquals(payload, event.getEvent());
    manualClock.addAndGet(CLOCK_ADVANCE_INTERVAL);
    event = reader.readNextEvent(60000);
    assertEquals(payload, event.getEvent());
    manualClock.addAndGet(CLOCK_ADVANCE_INTERVAL);

    // Request stream cuts; the future must still be pending before the reader
    // performs another read.
    @Cleanup("shutdown") final InlineExecutor checkpointExecutor = new InlineExecutor();
    CompletableFuture<Map<Stream, StreamCut>> cutsFuture = group.generateStreamCuts(checkpointExecutor);
    assertFalse(cutsFuture.isDone());

    // One more read lets the reader participate in the cut generation.
    event = reader.readNextEvent(60000);
    assertEquals(payload, event.getEvent());
}
Also used : ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ReaderGroup(io.pravega.client.stream.ReaderGroup) Cleanup(lombok.Cleanup) PravegaConnectionListener(io.pravega.segmentstore.server.host.handler.PravegaConnectionListener) JavaSerializer(io.pravega.client.stream.impl.JavaSerializer) Checkpoint(io.pravega.client.stream.Checkpoint) MockClientFactory(io.pravega.client.stream.mock.MockClientFactory) TableStore(io.pravega.segmentstore.contracts.tables.TableStore) StreamSegmentStore(io.pravega.segmentstore.contracts.StreamSegmentStore) AtomicLong(java.util.concurrent.atomic.AtomicLong) InlineExecutor(io.pravega.test.common.InlineExecutor) MockStreamManager(io.pravega.client.stream.mock.MockStreamManager) Map(java.util.Map) Test(org.junit.Test)

Example 37 with JavaSerializer

Use of io.pravega.client.stream.impl.JavaSerializer in the project pravega, by pravega.

From the class ReadTest: method testEventPositions.

/**
 * This test performs concurrent writes, reads and position checks on a Stream. Readers are checkers exercising the
 * lazy construction of PositionImpl objects while the internal segmentOffsetUpdates list in EventStreamReaderImpl is
 * being updated due to new read events. This test generates enough events to make segmentOffsetUpdates list in
 * EventStreamReaderImpl to be filled and cleaned at least once. This test verifies the thread safety of the new
 * optimization in EventStreamReaderImpl to prevent generating segmentOffset maps on every event read, as well as
 * to check for the correctness of the segment offsets returned by PositionImpl.
 */
@Test(timeout = 60000)
public void testEventPositions() {
    String endpoint = "localhost";
    String streamName = "eventPositions";
    String readerGroup = "groupPositions";
    String scope = "scopePositions";
    // Generate enough events to make the internal segment offset update buffer in EventStreamReaderImpl to be
    // emptied and filled again.
    int eventsToWrite = 2000;
    BlockingQueue<Entry<Integer, PositionImpl>> readEventsPositions = new ArrayBlockingQueue<>(eventsToWrite);
    @Cleanup("shutdown") ScheduledExecutorService readersWritersAndCheckers = ExecutorServiceHelpers.newScheduledThreadPool(4, "readers-writers-checkers");
    AtomicInteger finishedProcesses = new AtomicInteger(0);
    int port = TestUtils.getAvailableListenPort();
    StreamSegmentStore store = SERVICE_BUILDER.createStreamSegmentService();
    TableStore tableStore = SERVICE_BUILDER.createTableStoreService();
    @Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, port, store, tableStore, NoOpScheduledExecutor.get());
    server.startListening();
    @Cleanup MockStreamManager streamManager = new MockStreamManager(scope, endpoint, port);
    @Cleanup MockClientFactory clientFactory = streamManager.getClientFactory();
    ReaderGroupConfig groupConfig = ReaderGroupConfig.builder().groupRefreshTimeMillis(1000).stream(Stream.of(scope, streamName)).build();
    streamManager.createScope(scope);
    // Create a Stream with 2 segments.
    streamManager.createStream(scope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).build());
    streamManager.createReaderGroup(readerGroup, groupConfig);
    JavaSerializer<String> serializer = new JavaSerializer<>();
    @Cleanup EventStreamWriter<String> producer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    @Cleanup EventStreamReader<String> reader1 = clientFactory.createReader("reader1", readerGroup, serializer, ReaderConfig.builder().build());
    @Cleanup EventStreamReader<String> reader2 = clientFactory.createReader("reader2", readerGroup, serializer, ReaderConfig.builder().build());
    // Leave some time for readers to re-balance the segments and acquire one each.
    Exceptions.handleInterrupted(() -> Thread.sleep(2000));
    // Start writers and readers in parallel. Use CompletableFuture<Void> (not the
    // raw type) since runAsync produces no value.
    CompletableFuture<Void> reader1Future = CompletableFuture.runAsync(() -> {
        readAndQueueEvents(reader1, eventsToWrite, readEventsPositions);
        finishedProcesses.incrementAndGet();
    }, readersWritersAndCheckers);
    CompletableFuture<Void> reader2Future = CompletableFuture.runAsync(() -> {
        readAndQueueEvents(reader2, eventsToWrite, readEventsPositions);
        finishedProcesses.incrementAndGet();
    }, readersWritersAndCheckers);
    CompletableFuture<Void> writerFuture = CompletableFuture.runAsync(() -> {
        for (int i = 0; i < eventsToWrite; i++) {
            // Routing keys pin events to a segment each, so every reader sees a
            // monotonically advancing offset on its own segment.
            producer.writeEvent("segment1", "a");
            producer.writeEvent("segment2", "b");
            Exceptions.handleInterrupted(() -> Thread.sleep(1));
        }
        finishedProcesses.incrementAndGet();
    }, readersWritersAndCheckers);
    // This process access the positions read by the reader threads, which means that this thread is concurrently
    // accessing the shared segmentOffsetUpdates list, whereas readers are appending data to it.
    CompletableFuture<Void> checkOffsets = CompletableFuture.runAsync(() -> {
        // Serialized size on the wire of each 1-char String event payload.
        // NOTE(review): 16 presumably includes serializer/wire framing overhead — confirm against JavaSerializer.
        int sizeOfEvent = 16;
        while (finishedProcesses.get() < 2) {
            Entry<Integer, PositionImpl> element;
            try {
                element = readEventsPositions.poll(1, TimeUnit.MINUTES);
            } catch (InterruptedException e) {
                // Restore the interrupt flag before propagating.
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
            if (element == null) {
                // poll() returns null on timeout; re-check the loop condition instead
                // of dereferencing null (the readers may have finished meanwhile).
                continue;
            }
            PositionImpl position = element.getValue();
            int numberOfSegments = position.getOwnedSegmentsWithOffsets().size();
            assertEquals("Reader owning too many segments.", 1, numberOfSegments);
            // The segment position should increase by sizeOfEvent every event.
            long segmentPositionOffset = position.getOwnedSegmentsWithOffsets().values().iterator().next();
            assertEquals("Wrong event position", sizeOfEvent * element.getKey(), segmentPositionOffset);
        }
        finishedProcesses.incrementAndGet();
    }, readersWritersAndCheckers);
    // Wait for all futures to complete.
    CompletableFuture.allOf(writerFuture, reader1Future, reader2Future, checkOffsets).join();
    // Any failure reading, writing or checking positions will make this assertion to fail.
    // (JUnit convention: expected value first, actual value second.)
    assertEquals(4, finishedProcesses.get());
    ExecutorServiceHelpers.shutdown(readersWritersAndCheckers);
}
Also used : PositionImpl(io.pravega.client.stream.impl.PositionImpl) Cleanup(lombok.Cleanup) PravegaConnectionListener(io.pravega.segmentstore.server.host.handler.PravegaConnectionListener) JavaSerializer(io.pravega.client.stream.impl.JavaSerializer) MockClientFactory(io.pravega.client.stream.mock.MockClientFactory) Entry(java.util.Map.Entry) ReadResultEntry(io.pravega.segmentstore.contracts.ReadResultEntry) CompletableFuture(java.util.concurrent.CompletableFuture) ArrayBlockingQueue(java.util.concurrent.ArrayBlockingQueue) ReaderGroupConfig(io.pravega.client.stream.ReaderGroupConfig) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) TableStore(io.pravega.segmentstore.contracts.tables.TableStore) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StreamSegmentStore(io.pravega.segmentstore.contracts.StreamSegmentStore) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MockStreamManager(io.pravega.client.stream.mock.MockStreamManager) Test(org.junit.Test)

Aggregations

JavaSerializer (io.pravega.client.stream.impl.JavaSerializer)37 Cleanup (lombok.Cleanup)34 Test (org.junit.Test)34 ReaderGroupConfig (io.pravega.client.stream.ReaderGroupConfig)22 ClientFactoryImpl (io.pravega.client.stream.impl.ClientFactoryImpl)21 StreamConfiguration (io.pravega.client.stream.StreamConfiguration)19 ClientConfig (io.pravega.client.ClientConfig)17 ScalingPolicy (io.pravega.client.stream.ScalingPolicy)16 StreamSegmentStore (io.pravega.segmentstore.contracts.StreamSegmentStore)16 TableStore (io.pravega.segmentstore.contracts.tables.TableStore)16 PravegaConnectionListener (io.pravega.segmentstore.server.host.handler.PravegaConnectionListener)16 SocketConnectionFactoryImpl (io.pravega.client.connection.impl.SocketConnectionFactoryImpl)15 EventWriterConfig (io.pravega.client.stream.EventWriterConfig)15 CompletableFuture (java.util.concurrent.CompletableFuture)15 Slf4j (lombok.extern.slf4j.Slf4j)15 ReaderGroupManager (io.pravega.client.admin.ReaderGroupManager)14 ConnectionFactory (io.pravega.client.connection.impl.ConnectionFactory)14 Controller (io.pravega.client.control.impl.Controller)14 Stream (io.pravega.client.stream.Stream)14 Map (java.util.Map)14