Example usage of io.pravega.client.ClientFactory from the pravega project: class ReaderGroupStateManagerTest, method testReleaseWhenReadersAdded.
// Verifies segment rebalancing: a reader holding all six segments must release
// them one per update window as new readers join, until ownership is balanced
// (3/3 with two readers, then 2/2/2 with three).
// Restored the timeout that had been commented out — without it a stuck state
// synchronizer would hang the whole suite.
@Test(timeout = 20000)
public void testReleaseWhenReadersAdded() throws ReinitializationRequiredException {
    String scope = "scope";
    String stream = "stream";
    PravegaNodeUri endpoint = new PravegaNodeUri("localhost", SERVICE_PORT);
    MockConnectionFactoryImpl connectionFactory = new MockConnectionFactoryImpl();
    MockController controller = new MockController(endpoint.getEndpoint(), endpoint.getPort(), connectionFactory);
    MockSegmentStreamFactory streamFactory = new MockSegmentStreamFactory();
    @Cleanup ClientFactory clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory, streamFactory, streamFactory, streamFactory);
    SynchronizerConfig config = SynchronizerConfig.builder().build();
    @Cleanup StateSynchronizer<ReaderGroupState> stateSynchronizer = createState(stream, clientFactory, config);
    // Manually advanced clock so update windows can be crossed deterministically.
    AtomicLong clock = new AtomicLong();
    // Six segments with distinct starting offsets, all initially unassigned.
    Map<Segment, Long> segments = new HashMap<>();
    segments.put(new Segment(scope, stream, 0), 0L);
    segments.put(new Segment(scope, stream, 1), 1L);
    segments.put(new Segment(scope, stream, 2), 2L);
    segments.put(new Segment(scope, stream, 3), 3L);
    segments.put(new Segment(scope, stream, 4), 4L);
    segments.put(new Segment(scope, stream, 5), 5L);
    ReaderGroupStateManager.initializeReaderGroup(stateSynchronizer, ReaderGroupConfig.builder().stream(Stream.of(scope, stream)).build(), segments);
    // reader1 grabs everything while it is alone in the group.
    ReaderGroupStateManager reader1 = new ReaderGroupStateManager("reader1", stateSynchronizer, controller, clock::get);
    reader1.initializeReader(0);
    Map<Segment, Long> segments1 = reader1.acquireNewSegmentsIfNeeded(0);
    assertEquals(6, segments1.size());
    // reader2 joins: nothing is unassigned yet, so it acquires nothing, and
    // reader1 is not asked to release within the same update window.
    ReaderGroupStateManager reader2 = new ReaderGroupStateManager("reader2", stateSynchronizer, controller, clock::get);
    reader2.initializeReader(0);
    assertTrue(reader2.acquireNewSegmentsIfNeeded(0).isEmpty());
    assertNull(reader1.findSegmentToReleaseIfRequired());
    // Each window crossing permits at most one release from reader1.
    clock.addAndGet(ReaderGroupStateManager.UPDATE_WINDOW.toNanos());
    assertNotNull(reader1.findSegmentToReleaseIfRequired());
    reader1.releaseSegment(new Segment(scope, stream, 3), 3, 0);
    assertNull(reader1.findSegmentToReleaseIfRequired());
    clock.addAndGet(ReaderGroupStateManager.UPDATE_WINDOW.toNanos());
    assertNotNull(reader1.findSegmentToReleaseIfRequired());
    reader1.releaseSegment(new Segment(scope, stream, 4), 4, 0);
    assertNull(reader1.findSegmentToReleaseIfRequired());
    clock.addAndGet(ReaderGroupStateManager.UPDATE_WINDOW.toNanos());
    assertNotNull(reader1.findSegmentToReleaseIfRequired());
    reader1.releaseSegment(new Segment(scope, stream, 5), 5, 0);
    assertNull(reader1.findSegmentToReleaseIfRequired());
    // Balanced at 3/3: no further release is required of reader1.
    clock.addAndGet(ReaderGroupStateManager.UPDATE_WINDOW.toNanos());
    assertNull(reader1.findSegmentToReleaseIfRequired());
    // reader2 picks up the three released segments.
    Map<Segment, Long> segments2 = reader2.acquireNewSegmentsIfNeeded(0);
    assertEquals(3, segments2.size());
    clock.addAndGet(ReaderGroupStateManager.UPDATE_WINDOW.toNanos());
    // reader3 joins: both existing readers must give up one segment each.
    ReaderGroupStateManager reader3 = new ReaderGroupStateManager("reader3", stateSynchronizer, controller, clock::get);
    reader3.initializeReader(0);
    assertTrue(reader3.acquireNewSegmentsIfNeeded(0).isEmpty());
    assertNotNull(reader1.findSegmentToReleaseIfRequired());
    reader1.releaseSegment(new Segment(scope, stream, 0), 0, 0);
    assertNull(reader1.findSegmentToReleaseIfRequired());
    assertNotNull(reader2.findSegmentToReleaseIfRequired());
    reader2.releaseSegment(new Segment(scope, stream, 3), 3, 0);
    assertNull(reader2.findSegmentToReleaseIfRequired());
    Map<Segment, Long> segments3 = reader3.acquireNewSegmentsIfNeeded(0);
    assertEquals(2, segments3.size());
    // Balanced at 2/2/2: no reader acquires or releases anything further.
    clock.addAndGet(ReaderGroupStateManager.UPDATE_WINDOW.toNanos());
    assertTrue(reader3.acquireNewSegmentsIfNeeded(0).isEmpty());
    assertNull(reader1.findSegmentToReleaseIfRequired());
    assertNull(reader2.findSegmentToReleaseIfRequired());
    assertNull(reader3.findSegmentToReleaseIfRequired());
}
Example usage of io.pravega.client.ClientFactory from the pravega project: class ReaderGroupStateManagerTest, method testCompaction.
// Verifies that compacting the reader-group state does not lose information:
// positions, segment assignments and unassigned segments must survive repeated
// compactions and agree across synchronizers created before and after each one.
@Test(timeout = 10000)
public void testCompaction() throws ReinitializationRequiredException {
String scope = "scope";
String stream = "stream";
PravegaNodeUri endpoint = new PravegaNodeUri("localhost", SERVICE_PORT);
MockConnectionFactoryImpl connectionFactory = new MockConnectionFactoryImpl();
MockController controller = new MockController(endpoint.getEndpoint(), endpoint.getPort(), connectionFactory);
MockSegmentStreamFactory streamFactory = new MockSegmentStreamFactory();
@Cleanup ClientFactory clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory, streamFactory, streamFactory, streamFactory);
SynchronizerConfig config = SynchronizerConfig.builder().build();
@Cleanup StateSynchronizer<ReaderGroupState> state1 = createState(stream, clientFactory, config);
Segment s1 = new Segment(scope, stream, 1);
Segment s2 = new Segment(scope, stream, 2);
Map<Segment, Long> segments = new HashMap<>();
segments.put(s1, 1L);
segments.put(s2, 2L);
// Manually advanced clock used to step past the update window between calls.
AtomicLong clock = new AtomicLong();
ReaderGroupStateManager.initializeReaderGroup(state1, ReaderGroupConfig.builder().stream(Stream.of(scope, stream)).build(), segments);
ReaderGroupStateManager r1 = new ReaderGroupStateManager("r1", state1, controller, clock::get);
r1.initializeReader(0);
// r1 is the only reader, so it acquires both segments.
r1.acquireNewSegmentsIfNeeded(0);
assertTrue(state1.getState().getUnassignedSegments().isEmpty());
// First compaction, then another acquire/compact cycle in a later window.
state1.compact(s -> new ReaderGroupState.CompactReaderGroupState(s));
clock.addAndGet(ReaderGroupStateManager.UPDATE_WINDOW.toNanos());
r1.acquireNewSegmentsIfNeeded(0);
state1.compact(s -> new ReaderGroupState.CompactReaderGroupState(s));
clock.addAndGet(ReaderGroupStateManager.UPDATE_WINDOW.toNanos());
// A synchronizer created after compaction must see the same positions.
@Cleanup StateSynchronizer<ReaderGroupState> state2 = createState(stream, clientFactory, config);
ReaderGroupStateManager r2 = new ReaderGroupStateManager("r2", state2, controller, clock::get);
r2.initializeReader(0);
assertEquals(state1.getState().getPositions(), state2.getState().getPositions());
state1.fetchUpdates();
// r1 hands s1 back; r2 must observe it as unassigned and pick it up.
assertTrue(r1.releaseSegment(s1, 1, 1));
state2.fetchUpdates();
assertFalse(state2.getState().getUnassignedSegments().isEmpty());
assertFalse(r2.acquireNewSegmentsIfNeeded(0).isEmpty());
state2.fetchUpdates();
assertTrue(state2.getState().getUnassignedSegments().isEmpty());
// Post-transfer ownership: r1 keeps s2, r2 now owns s1.
assertEquals(Collections.singleton(s2), state2.getState().getSegments("r1"));
assertEquals(Collections.singleton(s1), state2.getState().getSegments("r2"));
// Compact from the second synchronizer, then touch the state via both readers.
state2.compact(s -> new ReaderGroupState.CompactReaderGroupState(s));
r1.findSegmentToReleaseIfRequired();
r1.acquireNewSegmentsIfNeeded(0);
r2.getCheckpoint();
// A third, fresh synchronizer must converge to the same positions as both.
@Cleanup StateSynchronizer<ReaderGroupState> state3 = createState(stream, clientFactory, config);
state3.fetchUpdates();
assertEquals(state3.getState().getPositions(), state1.getState().getPositions());
assertEquals(state3.getState().getPositions(), state2.getState().getPositions());
}
Example usage of io.pravega.client.ClientFactory from the pravega project: class ReaderGroupStateManagerTest, method testSegmentMerge.
// Exercises the scale-down (merge) path: two parent segments merge into one
// successor. The reader must finish BOTH parents before the successor becomes
// acquirable, and a subsequent acquire must return nothing.
@Test(timeout = 20000)
public void testSegmentMerge() throws ReinitializationRequiredException {
    String scope = "scope";
    String stream = "stream";
    Segment parentA = new Segment(scope, stream, 0);
    Segment parentB = new Segment(scope, stream, 1);
    Segment mergedSegment = new Segment(scope, stream, 2);
    PravegaNodeUri endpoint = new PravegaNodeUri("localhost", SERVICE_PORT);
    MockConnectionFactoryImpl connectionFactory = new MockConnectionFactoryImpl();
    // Controller stub that reports the merged segment as successor of both parents (ids 0 and 1).
    StreamSegmentsWithPredecessors successorInfo = new StreamSegmentsWithPredecessors(
            Collections.singletonMap(new SegmentWithRange(mergedSegment, 0.0, 1.0), ImmutableList.of(0, 1)), "");
    MockController controller = new MockControllerWithSuccessors(endpoint.getEndpoint(), endpoint.getPort(), connectionFactory, successorInfo);
    MockSegmentStreamFactory streamFactory = new MockSegmentStreamFactory();
    @Cleanup ClientFactory clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory, streamFactory, streamFactory, streamFactory);
    SynchronizerConfig config = SynchronizerConfig.builder().build();
    @Cleanup StateSynchronizer<ReaderGroupState> stateSynchronizer = createState(stream, clientFactory, config);
    Map<Segment, Long> startingOffsets = new HashMap<>();
    startingOffsets.put(parentA, 1L);
    startingOffsets.put(parentB, 2L);
    ReaderGroupStateManager.initializeReaderGroup(stateSynchronizer,
            ReaderGroupConfig.builder().stream(Stream.of(scope, stream)).build(), startingOffsets);
    ReaderGroupStateManager readerState = new ReaderGroupStateManager("testReader", stateSynchronizer, controller, null);
    readerState.initializeReader(0);
    // The lone reader picks up both parents at their configured offsets.
    Map<Segment, Long> acquired = readerState.acquireNewSegmentsIfNeeded(0);
    assertEquals(2, acquired.size());
    assertEquals(Long.valueOf(1), acquired.get(parentA));
    assertEquals(Long.valueOf(2), acquired.get(parentB));
    // Completing only one parent must not surface the successor yet.
    readerState.handleEndOfSegment(parentA);
    acquired = readerState.acquireNewSegmentsIfNeeded(0);
    assertTrue(acquired.isEmpty());
    // Once the second parent completes, the merged successor is acquirable at offset 0.
    readerState.handleEndOfSegment(parentB);
    acquired = readerState.acquireNewSegmentsIfNeeded(0);
    assertEquals(1, acquired.size());
    assertEquals(Long.valueOf(0), acquired.get(mergedSegment));
    // Nothing further remains to acquire.
    acquired = readerState.acquireNewSegmentsIfNeeded(0);
    assertTrue(acquired.isEmpty());
}
Example usage of io.pravega.client.ClientFactory from the pravega project: class ReadWriteTest, method readWriteTest.
// End-to-end multi-reader/multi-writer test: NUM_WRITERS writers produce
// TOTAL_NUM_EVENTS events that NUM_READERS readers must consume exactly once;
// afterwards the stream is sealed and the stream and scope are deleted.
// Fix: the create-scope/create-stream statuses are held in primitive booleans
// instead of boxed Boolean locals (no boxing needed for a log argument).
@Test(timeout = 60000)
public void readWriteTest() throws InterruptedException, ExecutionException {
    String scope = "testMultiReaderWriterScope";
    String readerGroupName = "testMultiReaderWriterReaderGroup";
    // 20 readers -> 20 stream segments (to have max read parallelism).
    ScalingPolicy scalingPolicy = ScalingPolicy.fixed(20);
    StreamConfiguration config = StreamConfiguration.builder().scope(scope).streamName(STREAM_NAME).scalingPolicy(scalingPolicy).build();
    eventsReadFromPravega = new ConcurrentLinkedQueue<>();
    // Monotonic counter generating the data each writer emits.
    eventData = new AtomicLong();
    // Used by readers to maintain a count of events.
    eventReadCount = new AtomicLong();
    stopReadFlag = new AtomicBoolean(false);
    try (StreamManager streamManager = new StreamManagerImpl(controller)) {
        // Create the scope, then the stream inside it.
        boolean createScopeStatus = streamManager.createScope(scope);
        log.info("Create scope status {}", createScopeStatus);
        boolean createStreamStatus = streamManager.createStream(scope, STREAM_NAME, config);
        log.info("Create stream status {}", createStreamStatus);
    }
    try (ConnectionFactory connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
         ClientFactory clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
         ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory, connectionFactory)) {
        // Start writing events to the stream.
        log.info("Creating {} writers", NUM_WRITERS);
        List<CompletableFuture<Void>> writerList = new ArrayList<>();
        for (int i = 0; i < NUM_WRITERS; i++) {
            log.info("Starting writer{}", i);
            writerList.add(startNewWriter(eventData, clientFactory));
        }
        // Create a reader group over the whole stream.
        log.info("Creating Reader group : {}", readerGroupName);
        readerGroupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).build());
        log.info("Reader group name {} ", readerGroupManager.getReaderGroup(readerGroupName).getGroupName());
        log.info("Reader group scope {}", readerGroupManager.getReaderGroup(readerGroupName).getScope());
        // Create the readers; random suffix avoids collisions across runs.
        log.info("Creating {} readers", NUM_READERS);
        List<CompletableFuture<Void>> readerList = new ArrayList<>();
        String readerName = "reader" + new Random().nextInt(Integer.MAX_VALUE);
        // Start reading events.
        for (int i = 0; i < NUM_READERS; i++) {
            log.info("Starting reader{}", i);
            readerList.add(startNewReader(readerName + i, clientFactory, readerGroupName, eventsReadFromPravega, eventData, eventReadCount, stopReadFlag));
        }
        // Wait for all writers, signal the readers to drain and stop, then wait for them.
        Futures.allOf(writerList).get();
        stopReadFlag.set(true);
        Futures.allOf(readerList).get();
        // Delete the reader group.
        log.info("Deleting readergroup {}", readerGroupName);
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    log.info("All writers have stopped. Setting Stop_Read_Flag. Event Written Count:{}, Event Read " + "Count: {}", eventData.get(), eventsReadFromPravega.size());
    assertEquals(TOTAL_NUM_EVENTS, eventsReadFromPravega.size());
    // Check that every event read is unique (exactly-once consumption).
    assertEquals(TOTAL_NUM_EVENTS, new TreeSet<>(eventsReadFromPravega).size());
    // Seal the stream.
    CompletableFuture<Boolean> sealStreamStatus = controller.sealStream(scope, STREAM_NAME);
    log.info("Sealing stream {}", STREAM_NAME);
    assertTrue(sealStreamStatus.get());
    // Delete the stream.
    CompletableFuture<Boolean> deleteStreamStatus = controller.deleteStream(scope, STREAM_NAME);
    log.info("Deleting stream {}", STREAM_NAME);
    assertTrue(deleteStreamStatus.get());
    // Delete the scope.
    CompletableFuture<Boolean> deleteScopeStatus = controller.deleteScope(scope);
    log.info("Deleting scope {}", scope);
    assertTrue(deleteScopeStatus.get());
    log.info("Read write test succeeds");
}
Example usage of io.pravega.client.ClientFactory from the pravega project: class ReaderGroupNotificationTest, method testEndOfStreamNotifications.
// Verifies that an EndOfDataNotification is delivered once a sealed stream has
// been fully read: write, scale, write again, seal, then read to the end and
// expect the registered listener to fire.
@Test(timeout = 40000)
public void testEndOfStreamNotifications() throws Exception {
final String streamName = "stream2";
StreamConfiguration config = StreamConfiguration.builder().scope(SCOPE).streamName(streamName).scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
Controller controller = controllerWrapper.getController();
controllerWrapper.getControllerService().createScope(SCOPE).get();
controller.createStream(config).get();
@Cleanup ConnectionFactory connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
@Cleanup ClientFactory clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionFactory);
@Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<>(), EventWriterConfig.builder().build());
// First event goes to the original single segment.
writer.writeEvent("0", "data1").get();
// Scale segment 0 into two segments covering [0.0,0.5) and [0.5,1.0).
Stream stream = new StreamImpl(SCOPE, streamName);
Map<Double, Double> map = new HashMap<>();
map.put(0.0, 0.5);
map.put(0.5, 1.0);
Boolean result = controller.scaleStream(stream, Collections.singletonList(0), map, executor).getFuture().get();
assertTrue(result);
// Second event lands in a post-scale segment.
writer.writeEvent("0", "data2").get();
// Seal the stream so reading can reach end-of-data.
assertTrue(controller.sealStream(SCOPE, streamName).get());
@Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl(SCOPE, controller, clientFactory, connectionFactory);
ReaderGroup readerGroup = groupManager.createReaderGroup("reader", ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(SCOPE, streamName)).build());
@Cleanup EventStreamReader<String> reader1 = clientFactory.createReader("readerId", "reader", new JavaSerializer<>(), ReaderConfig.builder().build());
// Add segment event listener; it records the callback and releases the latch.
Listener<EndOfDataNotification> l1 = notification -> {
listenerInvoked.set(true);
listenerLatch.release();
};
readerGroup.getEndOfDataNotifier(executor).registerListener(l1);
// Read both events; the third read returns null as the stream is exhausted.
EventRead<String> event1 = reader1.readNextEvent(10000);
EventRead<String> event2 = reader1.readNextEvent(10000);
EventRead<String> event3 = reader1.readNextEvent(10000);
assertNotNull(event1);
assertEquals("data1", event1.getEvent());
assertNotNull(event2);
assertEquals("data2", event2.getEvent());
assertNull(event3.getEvent());
// Block until the end-of-data listener has fired, then confirm it ran.
listenerLatch.await();
assertTrue("Listener invoked", listenerInvoked.get());
}
Aggregations