Use of io.pravega.client.stream.impl.PositionImpl in project pravega by pravega.
From the class ReadTest, method testEventPositions.
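As context for the test below, here is a minimal sketch of how a PositionImpl is obtained from a reader. The reader variable and the 1000 ms timeout are illustrative assumptions, not taken from the test itself:

// Minimal sketch (assumption: an existing EventStreamReader<String> named reader).
EventRead<String> event = reader.readNextEvent(1000);
if (event.getEvent() != null && !event.isCheckpoint()) {
    // The concrete type behind the Position interface is PositionImpl.
    PositionImpl position = (PositionImpl) event.getPosition();
    // Segments owned by this reader, mapped to their current read offsets.
    Map<Segment, Long> offsets = position.getOwnedSegmentsWithOffsets();
}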
/**
* This test performs concurrent writes, reads and position checks on a Stream. Readers and checkers exercise the
* lazy construction of PositionImpl objects while the internal segmentOffsetUpdates list in EventStreamReaderImpl is
* being updated due to newly read events. The test generates enough events to cause the segmentOffsetUpdates list in
* EventStreamReaderImpl to be filled and cleaned at least once. It verifies the thread safety of the optimization in
* EventStreamReaderImpl that avoids generating segmentOffset maps on every event read, and it checks the correctness
* of the segment offsets returned by PositionImpl.
*/
@Test(timeout = 60000)
public void testEventPositions() {
String endpoint = "localhost";
String streamName = "eventPositions";
String readerGroup = "groupPositions";
String scope = "scopePositions";
// Generate enough events to cause the internal segment offset update buffer in EventStreamReaderImpl to be
// emptied and filled again.
int eventsToWrite = 2000;
BlockingQueue<Entry<Integer, PositionImpl>> readEventsPositions = new ArrayBlockingQueue<>(eventsToWrite);
@Cleanup("shutdown") ScheduledExecutorService readersWritersAndCheckers = ExecutorServiceHelpers.newScheduledThreadPool(4, "readers-writers-checkers");
AtomicInteger finishedProcesses = new AtomicInteger(0);
int port = TestUtils.getAvailableListenPort();
StreamSegmentStore store = SERVICE_BUILDER.createStreamSegmentService();
TableStore tableStore = SERVICE_BUILDER.createTableStoreService();
@Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, port, store, tableStore, NoOpScheduledExecutor.get());
server.startListening();
@Cleanup MockStreamManager streamManager = new MockStreamManager(scope, endpoint, port);
@Cleanup MockClientFactory clientFactory = streamManager.getClientFactory();
ReaderGroupConfig groupConfig = ReaderGroupConfig.builder().groupRefreshTimeMillis(1000).stream(Stream.of(scope, streamName)).build();
streamManager.createScope(scope);
// Create a Stream with 2 segments.
streamManager.createStream(scope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).build());
streamManager.createReaderGroup(readerGroup, groupConfig);
JavaSerializer<String> serializer = new JavaSerializer<>();
@Cleanup EventStreamWriter<String> producer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
@Cleanup EventStreamReader<String> reader1 = clientFactory.createReader("reader1", readerGroup, serializer, ReaderConfig.builder().build());
@Cleanup EventStreamReader<String> reader2 = clientFactory.createReader("reader2", readerGroup, serializer, ReaderConfig.builder().build());
// Leave some time for readers to re-balance the segments and acquire one each.
Exceptions.handleInterrupted(() -> Thread.sleep(2000));
// Start writers and readers in parallel.
CompletableFuture<Void> reader1Future = CompletableFuture.runAsync(() -> {
readAndQueueEvents(reader1, eventsToWrite, readEventsPositions);
finishedProcesses.incrementAndGet();
}, readersWritersAndCheckers);
CompletableFuture<Void> reader2Future = CompletableFuture.runAsync(() -> {
readAndQueueEvents(reader2, eventsToWrite, readEventsPositions);
finishedProcesses.incrementAndGet();
}, readersWritersAndCheckers);
CompletableFuture<Void> writerFuture = CompletableFuture.runAsync(() -> {
for (int i = 0; i < eventsToWrite; i++) {
producer.writeEvent("segment1", "a");
producer.writeEvent("segment2", "b");
Exceptions.handleInterrupted(() -> Thread.sleep(1));
}
finishedProcesses.incrementAndGet();
}, readersWritersAndCheckers);
// This process accesses the positions read by the reader threads, which means that this thread is concurrently
// reading the shared segmentOffsetUpdates list while the readers are appending data to it.
CompletableFuture<Void> checkOffsets = CompletableFuture.runAsync(() -> {
// Serialized size (in bytes) of an event whose payload is a 1-character string.
int sizeOfEvent = 16;
while (finishedProcesses.get() < 2) {
Entry<Integer, PositionImpl> element;
try {
element = readEventsPositions.poll(1, TimeUnit.MINUTES);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
// poll() returns null on timeout; guard against dereferencing a null entry.
if (element == null) {
break;
}
int numberOfSegments = element.getValue().getOwnedSegmentsWithOffsets().size();
assertEquals("Reader owning too many segments.", 1, numberOfSegments);
// The segment position should increase by sizeOfEvent every event.
long segmentPositionOffset = element.getValue().getOwnedSegmentsWithOffsets().values().iterator().next();
assertEquals("Wrong event position", sizeOfEvent * element.getKey(), segmentPositionOffset);
}
finishedProcesses.incrementAndGet();
}, readersWritersAndCheckers);
// Wait for all futures to complete.
CompletableFuture.allOf(writerFuture, reader1Future, reader2Future, checkOffsets).join();
// Any failure reading, writing or checking positions will cause this assertion to fail.
assertEquals(4, finishedProcesses.get());
ExecutorServiceHelpers.shutdown(readersWritersAndCheckers);
}
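The test calls a readAndQueueEvents helper that is not shown in this snippet. A plausible sketch of it, assuming each successfully read, non-checkpoint event is queued together with a running event count (the exact implementation in ReadTest may differ):

private void readAndQueueEvents(EventStreamReader<String> reader, int eventsToWrite,
                                BlockingQueue<Entry<Integer, PositionImpl>> readEventsPositions) {
    int eventCount = 1;
    for (int i = 0; i < eventsToWrite; i++) {
        EventRead<String> event = reader.readNextEvent(1000);
        if (event.getEvent() != null && !event.isCheckpoint()) {
            // Pair the running event count with the lazily constructed position of this read.
            // add() is assumed safe here because the checker thread drains the bounded queue concurrently.
            readEventsPositions.add(new AbstractMap.SimpleEntry<>(eventCount, (PositionImpl) event.getPosition()));
            eventCount++;
        }
    }
}

Queuing the positions rather than checking them inside the read loop is what lets the checker thread touch the shared segmentOffsetUpdates list at the same time the readers append to it, which is exactly the race the test is designed to exercise.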