Use of io.pravega.client.stream.mock.MockClientFactory in project pravega by pravega.
Class ReaderGroupTest, method testMultiSegmentsPerReader:
@Test(timeout = 10000)
public void testMultiSegmentsPerReader() throws Exception {
    String endpoint = "localhost";
    int servicePort = TestUtils.getAvailableListenPort();
    @Cleanup ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
    serviceBuilder.initialize();
    StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
    TableStore tableStore = serviceBuilder.createTableStoreService();
    @Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, servicePort, store, tableStore, serviceBuilder.getLowPriorityExecutor());
    server.startListening();
    @Cleanup MockStreamManager streamManager = new MockStreamManager(SCOPE, endpoint, servicePort);
    streamManager.createScope(SCOPE);
    streamManager.createStream(SCOPE, STREAM_NAME, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).build());
    @Cleanup MockClientFactory clientFactory = streamManager.getClientFactory();
    ReaderGroupConfig groupConfig = ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(-1).stream(Stream.of(SCOPE, STREAM_NAME)).build();
    streamManager.createReaderGroup(READER_GROUP, groupConfig);
    writeEvents(100, clientFactory);
    new ReaderThread(100, "Reader", clientFactory).run();
    streamManager.deleteReaderGroup(READER_GROUP);
}
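The writeEvents and ReaderThread helpers used above are declared elsewhere in ReaderGroupTest and are not shown on this page. A minimal, hypothetical sketch of what such a writeEvents helper could look like, assuming the SCOPE and STREAM_NAME constants from the test class; the method body and names below are illustrative, not the project's actual helper:

// Hypothetical helper: writes 'count' small events to STREAM_NAME through the given client factory.
private void writeEvents(int count, MockClientFactory clientFactory) {
    JavaSerializer<String> serializer = new JavaSerializer<>();
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM_NAME, serializer,
            EventWriterConfig.builder().build());
    for (int i = 0; i < count; i++) {
        // Vary the routing key so events spread across both segments of the fixed(2) stream.
        writer.writeEvent("key-" + (i % 2), "event-" + i);
    }
    writer.flush();
}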
Use of io.pravega.client.stream.mock.MockClientFactory in project pravega by pravega.
Class ReadTest, method testEventPointer:
@Test(timeout = 10000)
public void testEventPointer() throws ReinitializationRequiredException, NoSuchEventException {
    String endpoint = "localhost";
    String streamName = "testEventPointer";
    String readerName = "reader";
    String readerGroup = "testEventPointer-group";
    int port = TestUtils.getAvailableListenPort();
    String testString = "Hello world ";
    String scope = "Scope1";
    StreamSegmentStore store = SERVICE_BUILDER.createStreamSegmentService();
    TableStore tableStore = SERVICE_BUILDER.createTableStoreService();
    @Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, port, store, tableStore, NoOpScheduledExecutor.get());
    server.startListening();
    @Cleanup MockStreamManager streamManager = new MockStreamManager(scope, endpoint, port);
    @Cleanup MockClientFactory clientFactory = streamManager.getClientFactory();
    ReaderGroupConfig groupConfig = ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(scope, streamName)).build();
    streamManager.createScope(scope);
    streamManager.createStream(scope, streamName, null);
    streamManager.createReaderGroup(readerGroup, groupConfig);
    JavaSerializer<String> serializer = new JavaSerializer<>();
    @Cleanup EventStreamWriter<String> producer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    for (int i = 0; i < 100; i++) {
        producer.writeEvent(testString + i);
    }
    producer.flush();
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader(readerName, readerGroup, serializer, ReaderConfig.builder().build());
    for (int i = 0; i < 100; i++) {
        EventPointer pointer = reader.readNextEvent(5000).getEventPointer();
        String read = reader.fetchEvent(pointer);
        assertEquals(testString + i, read);
    }
}
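Because fetchEvent accepts an EventPointer, a pointer obtained this way can also be persisted and used later to re-read a single event. A minimal sketch, assuming the same reader as above and that EventPointer's toBytes()/fromBytes() serialization methods are available in the client API you are building against:

// Sketch (hedged): round-trip an EventPointer through its byte form before fetching the event again.
EventPointer pointer = reader.readNextEvent(5000).getEventPointer();
java.nio.ByteBuffer serialized = pointer.toBytes();          // assumed API: EventPointer#toBytes
EventPointer restored = EventPointer.fromBytes(serialized);  // assumed API: EventPointer.fromBytes
String event = reader.fetchEvent(restored);                  // throws NoSuchEventException if the event is gone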
Use of io.pravega.client.stream.mock.MockClientFactory in project pravega by pravega.
Class ReadTest, method testEventPositions:
/**
 * This test performs concurrent writes, reads and position checks on a Stream. Readers and a checker thread
 * exercise the lazy construction of PositionImpl objects while the internal segmentOffsetUpdates list in
 * EventStreamReaderImpl is being updated by newly read events. The test generates enough events for the
 * segmentOffsetUpdates list in EventStreamReaderImpl to be filled and cleaned at least once. It verifies the
 * thread safety of the optimization in EventStreamReaderImpl that avoids generating segment-offset maps on
 * every event read, as well as the correctness of the segment offsets returned by PositionImpl.
 */
@Test(timeout = 60000)
public void testEventPositions() {
    String endpoint = "localhost";
    String streamName = "eventPositions";
    String readerGroup = "groupPositions";
    String scope = "scopePositions";
    // Generate enough events for the internal segment offset update buffer in EventStreamReaderImpl to be
    // emptied and filled again.
    int eventsToWrite = 2000;
    BlockingQueue<Entry<Integer, PositionImpl>> readEventsPositions = new ArrayBlockingQueue<>(eventsToWrite);
    @Cleanup("shutdown") ScheduledExecutorService readersWritersAndCheckers = ExecutorServiceHelpers.newScheduledThreadPool(4, "readers-writers-checkers");
    AtomicInteger finishedProcesses = new AtomicInteger(0);
    int port = TestUtils.getAvailableListenPort();
    StreamSegmentStore store = SERVICE_BUILDER.createStreamSegmentService();
    TableStore tableStore = SERVICE_BUILDER.createTableStoreService();
    @Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, port, store, tableStore, NoOpScheduledExecutor.get());
    server.startListening();
    @Cleanup MockStreamManager streamManager = new MockStreamManager(scope, endpoint, port);
    @Cleanup MockClientFactory clientFactory = streamManager.getClientFactory();
    ReaderGroupConfig groupConfig = ReaderGroupConfig.builder().groupRefreshTimeMillis(1000).stream(Stream.of(scope, streamName)).build();
    streamManager.createScope(scope);
    // Create a Stream with 2 segments.
    streamManager.createStream(scope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).build());
    streamManager.createReaderGroup(readerGroup, groupConfig);
    JavaSerializer<String> serializer = new JavaSerializer<>();
    @Cleanup EventStreamWriter<String> producer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    @Cleanup EventStreamReader<String> reader1 = clientFactory.createReader("reader1", readerGroup, serializer, ReaderConfig.builder().build());
    @Cleanup EventStreamReader<String> reader2 = clientFactory.createReader("reader2", readerGroup, serializer, ReaderConfig.builder().build());
    // Leave some time for readers to re-balance the segments and acquire one each.
    Exceptions.handleInterrupted(() -> Thread.sleep(2000));
    // Start writers and readers in parallel.
    CompletableFuture<Void> reader1Future = CompletableFuture.runAsync(() -> {
        readAndQueueEvents(reader1, eventsToWrite, readEventsPositions);
        finishedProcesses.incrementAndGet();
    }, readersWritersAndCheckers);
    CompletableFuture<Void> reader2Future = CompletableFuture.runAsync(() -> {
        readAndQueueEvents(reader2, eventsToWrite, readEventsPositions);
        finishedProcesses.incrementAndGet();
    }, readersWritersAndCheckers);
    CompletableFuture<Void> writerFuture = CompletableFuture.runAsync(() -> {
        for (int i = 0; i < eventsToWrite; i++) {
            producer.writeEvent("segment1", "a");
            producer.writeEvent("segment2", "b");
            Exceptions.handleInterrupted(() -> Thread.sleep(1));
        }
        finishedProcesses.incrementAndGet();
    }, readersWritersAndCheckers);
    // This task accesses the positions recorded by the reader threads, which means that this thread reads the
    // shared segmentOffsetUpdates list concurrently while the readers are appending data to it.
    CompletableFuture<Void> checkOffsets = CompletableFuture.runAsync(() -> {
        // Each event payload is a 1-character string, so every serialized event occupies 16 bytes in the segment.
        int sizeOfEvent = 16;
        while (finishedProcesses.get() < 2) {
            Entry<Integer, PositionImpl> element;
            try {
                element = readEventsPositions.poll(1, TimeUnit.MINUTES);
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
            int numberOfSegments = element.getValue().getOwnedSegmentsWithOffsets().size();
            assertEquals("Reader owning too many segments.", 1, numberOfSegments);
            // The segment offset should increase by sizeOfEvent with every event read.
            long segmentPositionOffset = element.getValue().getOwnedSegmentsWithOffsets().values().iterator().next();
            assertEquals("Wrong event position", sizeOfEvent * element.getKey(), segmentPositionOffset);
        }
        finishedProcesses.incrementAndGet();
    }, readersWritersAndCheckers);
    // Wait for all futures to complete.
    CompletableFuture.allOf(writerFuture, reader1Future, reader2Future, checkOffsets).join();
    // Any failure reading, writing or checking positions will make this assertion fail.
    assertEquals(finishedProcesses.get(), 4);
    ExecutorServiceHelpers.shutdown(readersWritersAndCheckers);
}
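The readAndQueueEvents method is a private helper of ReadTest that is not reproduced on this page. A rough, hypothetical sketch of what it could look like under the assumptions of this test (one counter per reader, positions cast to PositionImpl, a generic Queue and AbstractMap.SimpleEntry used for the pairs); the body below is illustrative, not the project's actual helper:

// Hypothetical helper: reads events and queues (eventCount, position) pairs for the checker thread.
private void readAndQueueEvents(EventStreamReader<String> reader, int eventsToWrite,
                                Queue<Entry<Integer, PositionImpl>> queue) {
    int eventCount = 1;
    for (int i = 0; i < eventsToWrite; i++) {
        EventRead<String> event = reader.readNextEvent(1000);
        if (event.getEvent() != null && !event.isCheckpoint()) {
            // Record how many events this reader has seen together with the position after the read.
            queue.offer(new AbstractMap.SimpleEntry<>(eventCount, (PositionImpl) event.getPosition()));
            eventCount++;
        }
    }
}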
Use of io.pravega.client.stream.mock.MockClientFactory in project pravega by pravega.
Class WatermarkReaderImplTest, method testUpdates:
@Test
public void testUpdates() {
    Stream stream = new StreamImpl("Scope", "streamName");
    MockSegmentStreamFactory segmentStreamFactory = new MockSegmentStreamFactory();
    @Cleanup MockClientFactory clientFactory = new MockClientFactory("Scope", segmentStreamFactory);
    String markStream = NameUtils.getMarkStreamForStream("streamName");
    createScopeAndStream("Scope", markStream, clientFactory.getController());
    RevisionedStreamClient<Watermark> writer = clientFactory.createRevisionedStreamClient(markStream, new WatermarkSerializer(), SynchronizerConfig.builder().build());
    InlineExecutor executor = new InlineExecutor();
    @Cleanup WatermarkReaderImpl impl = new WatermarkReaderImpl(stream, writer, executor);

    SegmentWithRange s0 = new SegmentWithRange(new Segment(stream.getScope(), stream.getStreamName(), 0), 0, 0.5);
    SegmentWithRange s1 = new SegmentWithRange(new Segment(stream.getScope(), stream.getStreamName(), 1), 0.5, 1);
    SegmentWithRange s2 = new SegmentWithRange(new Segment(stream.getScope(), stream.getStreamName(), 2), 0, 0.5);
    SegmentWithRange s3 = new SegmentWithRange(new Segment(stream.getScope(), stream.getStreamName(), 3), 0.5, 1);
    Map<SegmentWithRange, Long> m1 = ImmutableMap.of(s0, 0L, s1, 0L);
    Map<SegmentWithRange, Long> m2 = ImmutableMap.of(s0, 2L, s1, 0L);
    Map<SegmentWithRange, Long> m3 = ImmutableMap.of(s0, 2L, s1, 2L);
    Map<SegmentWithRange, Long> m4 = ImmutableMap.of(s2, 0L, s1, 2L);
    Map<SegmentWithRange, Long> m5 = ImmutableMap.of(s2, 4L, s1, 2L);
    Map<SegmentWithRange, Long> m6 = ImmutableMap.of(s2, 4L, s1, 4L);
    Map<SegmentWithRange, Long> m7 = ImmutableMap.of(s2, 4L, s3, 0L);
    Map<SegmentWithRange, Long> m8 = ImmutableMap.of(s2, 6L, s3, 4L);

    writer.writeUnconditionally(Watermark.builder().streamCut(convert(m1)).lowerTimeBound(10).upperTimeBound(19).build());
    writer.writeUnconditionally(Watermark.builder().streamCut(convert(m2)).lowerTimeBound(20).upperTimeBound(29).build());
    writer.writeUnconditionally(Watermark.builder().streamCut(convert(m3)).lowerTimeBound(30).upperTimeBound(39).build());
    writer.writeUnconditionally(Watermark.builder().streamCut(convert(m4)).lowerTimeBound(40).upperTimeBound(49).build());
    writer.writeUnconditionally(Watermark.builder().streamCut(convert(m5)).lowerTimeBound(50).upperTimeBound(59).build());
    writer.writeUnconditionally(Watermark.builder().streamCut(convert(m6)).lowerTimeBound(60).upperTimeBound(69).build());
    writer.writeUnconditionally(Watermark.builder().streamCut(convert(m7)).lowerTimeBound(70).upperTimeBound(79).build());
    writer.writeUnconditionally(Watermark.builder().streamCut(convert(m8)).lowerTimeBound(80).upperTimeBound(89).build());

    assertEquals(null, impl.getTimeWindow().getLowerTimeBound());
    assertEquals(null, impl.getTimeWindow().getUpperTimeBound());
    impl.advanceTo(ImmutableMap.of(s0, 1L, s1, 0L));
    assertEquals(10, impl.getTimeWindow().getLowerTimeBound().longValue());
    assertEquals(29, impl.getTimeWindow().getUpperTimeBound().longValue());
    impl.advanceTo(ImmutableMap.of(s0, 3L, s1, 0L));
    assertEquals(20, impl.getTimeWindow().getLowerTimeBound().longValue());
    assertEquals(49, impl.getTimeWindow().getUpperTimeBound().longValue());
    impl.advanceTo(ImmutableMap.of(s0, 5L, s1, 0L));
    assertEquals(20, impl.getTimeWindow().getLowerTimeBound().longValue());
    assertEquals(49, impl.getTimeWindow().getUpperTimeBound().longValue());
    impl.advanceTo(ImmutableMap.of(s0, 6L, s1, 0L));
    assertEquals(20, impl.getTimeWindow().getLowerTimeBound().longValue());
    assertEquals(49, impl.getTimeWindow().getUpperTimeBound().longValue());
    impl.advanceTo(ImmutableMap.of(s0, 6L, s1, 1L));
    assertEquals(20, impl.getTimeWindow().getLowerTimeBound().longValue());
    assertEquals(49, impl.getTimeWindow().getUpperTimeBound().longValue());
    impl.advanceTo(ImmutableMap.of(s0, 6L, s1, 3L));
    assertEquals(30, impl.getTimeWindow().getLowerTimeBound().longValue());
    assertEquals(69, impl.getTimeWindow().getUpperTimeBound().longValue());
    impl.advanceTo(ImmutableMap.of(s2, 0L, s1, 3L));
    assertEquals(40, impl.getTimeWindow().getLowerTimeBound().longValue());
    assertEquals(69, impl.getTimeWindow().getUpperTimeBound().longValue());
    impl.advanceTo(ImmutableMap.of(s2, 4L, s1, 3L));
    assertEquals(50, impl.getTimeWindow().getLowerTimeBound().longValue());
    assertEquals(69, impl.getTimeWindow().getUpperTimeBound().longValue());
    impl.advanceTo(ImmutableMap.of(s2, 4L, s1, 5L));
    assertEquals(60, impl.getTimeWindow().getLowerTimeBound().longValue());
    assertEquals(79, impl.getTimeWindow().getUpperTimeBound().longValue());
    impl.advanceTo(ImmutableMap.of(s2, 4L, s3, 1L));
    assertEquals(70, impl.getTimeWindow().getLowerTimeBound().longValue());
    assertEquals(89, impl.getTimeWindow().getUpperTimeBound().longValue());
    impl.advanceTo(ImmutableMap.of(s2, 5L, s3, 1L));
    assertEquals(70, impl.getTimeWindow().getLowerTimeBound().longValue());
    assertEquals(89, impl.getTimeWindow().getUpperTimeBound().longValue());
    impl.advanceTo(ImmutableMap.of(s2, 5L, s3, 5L));
    assertEquals(70, impl.getTimeWindow().getLowerTimeBound().longValue());
    assertEquals(89, impl.getTimeWindow().getUpperTimeBound().longValue());
    impl.advanceTo(ImmutableMap.of(s2, 6L, s3, 5L));
    assertEquals(80, impl.getTimeWindow().getLowerTimeBound().longValue());
    assertEquals(null, impl.getTimeWindow().getUpperTimeBound());
    impl.advanceTo(ImmutableMap.of(s2, 7L, s3, 7L));
    assertEquals(80, impl.getTimeWindow().getLowerTimeBound().longValue());
    assertEquals(null, impl.getTimeWindow().getUpperTimeBound());
}
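In application code the time window produced by this machinery is normally consumed through EventStreamReader rather than WatermarkReaderImpl directly. A brief sketch, assuming an existing reader on the watermarked stream (the reader variable and stream names below are assumptions for illustration):

// Sketch: reading the current time window from a reader after consuming an event.
EventRead<String> event = reader.readNextEvent(2000);
TimeWindow window = reader.getCurrentTimeWindow(Stream.of("Scope", "streamName"));
if (window.getLowerTimeBound() != null) {
    // Events from this point onward were written at or after this time.
    System.out.println("Lower time bound: " + window.getLowerTimeBound());
}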
Use of io.pravega.client.stream.mock.MockClientFactory in project pravega by pravega.
Class SynchronizerTest, method testCompaction:
@Test(timeout = 20000)
public void testCompaction() {
    String streamName = "streamName";
    String scope = "scope";
    MockSegmentStreamFactory ioFactory = new MockSegmentStreamFactory();
    @Cleanup MockClientFactory clientFactory = new MockClientFactory(scope, ioFactory);
    createScopeAndStream(streamName, scope, clientFactory.getController());
    StateSynchronizer<RevisionedImpl> sync = clientFactory.createStateSynchronizer(streamName, new JavaSerializer<>(), new JavaSerializer<>(), SynchronizerConfig.builder().build());
    assertEquals(0, sync.bytesWrittenSinceCompaction());
    AtomicInteger callCount = new AtomicInteger(0);
    sync.initialize(new RegularUpdate("a"));
    sync.updateState((state, updates) -> {
        callCount.incrementAndGet();
        updates.add(new RegularUpdate("b"));
    });
    assertEquals(sync.getState().getValue(), "b");
    assertEquals(1, callCount.get());
    long size = sync.bytesWrittenSinceCompaction();
    assertTrue(size > 0);
    sync.updateState((state, updates) -> {
        callCount.incrementAndGet();
        updates.add(new RegularUpdate("c"));
    });
    assertEquals(sync.getState().getValue(), "c");
    assertEquals(2, callCount.get());
    assertTrue(sync.bytesWrittenSinceCompaction() > size);
    sync.compact(state -> {
        callCount.incrementAndGet();
        return new RegularUpdate("c");
    });
    assertEquals(sync.getState().getValue(), "c");
    assertEquals(3, callCount.get());
    assertEquals(0, sync.bytesWrittenSinceCompaction());
    sync.updateState((state, updates) -> {
        callCount.incrementAndGet();
        updates.add(new RegularUpdate("e"));
    });
    assertEquals(sync.getState().getValue(), "e");
    assertEquals(5, callCount.get());
    assertEquals(size, sync.bytesWrittenSinceCompaction());
    sync.compact(state -> {
        callCount.incrementAndGet();
        return new RegularUpdate("e");
    });
    assertEquals(sync.getState().getValue(), "e");
    assertEquals(6, callCount.get());
    assertEquals(0, sync.bytesWrittenSinceCompaction());
}
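The RegularUpdate and RevisionedImpl types used above are small helper classes declared inside SynchronizerTest and not shown on this page. A rough, hypothetical sketch of what such a pair could look like, assuming the Revisioned, Update and InitialUpdate interfaces from io.pravega.client.state; the field names and behavior below are illustrative, not the project's actual helpers:

// Hypothetical state type: holds a single String value plus the bookkeeping Revisioned requires.
static class RevisionedImpl implements Revisioned, Serializable {
    private final String scopedStreamName;
    private final Revision revision;
    private final String value;

    RevisionedImpl(String scopedStreamName, Revision revision, String value) {
        this.scopedStreamName = scopedStreamName;
        this.revision = revision;
        this.value = value;
    }

    @Override
    public String getScopedStreamName() {
        return scopedStreamName;
    }

    @Override
    public Revision getRevision() {
        return revision;
    }

    public String getValue() {
        return value;
    }
}

// Hypothetical update type: usable both as a regular Update and as an InitialUpdate, so it can be
// passed to initialize(), updateState() and compact() as the test does.
static class RegularUpdate implements Update<RevisionedImpl>, InitialUpdate<RevisionedImpl>, Serializable {
    private final String value;

    RegularUpdate(String value) {
        this.value = value;
    }

    @Override
    public RevisionedImpl applyTo(RevisionedImpl oldState, Revision newRevision) {
        // Replace the whole state with the new value at the new revision.
        return new RevisionedImpl(oldState.getScopedStreamName(), newRevision, value);
    }

    @Override
    public RevisionedImpl create(String scopedStreamName, Revision revision) {
        return new RevisionedImpl(scopedStreamName, revision, value);
    }
}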