Use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
In class MetricsTest, method metricsTimeBasedCacheEvictionTest.
@Test(timeout = 120000)
public void metricsTimeBasedCacheEvictionTest() throws Exception {
ClientConfig clientConfig = ClientConfig.builder().build();
try (ConnectionPool cp = new ConnectionPoolImpl(clientConfig, new SocketConnectionFactoryImpl(clientConfig));
StreamManager streamManager = new StreamManagerImpl(controller, cp)) {
boolean createScopeStatus = streamManager.createScope(scope);
log.info("Create scope status {}", createScopeStatus);
boolean createStreamStatus = streamManager.createStream(scope, STREAM_NAME, config);
log.info("Create stream status {}", createStreamStatus);
}
try (ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory)) {
@Cleanup EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM_NAME, new UTF8StringSerializer(), EventWriterConfig.builder().build());
String event = "12345";
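// Each appended event carries an 8-byte header (type code + length) in the segment, so the expected byte count is 8 + event.length() per event.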
long bytesWritten = TOTAL_NUM_EVENTS * (8 + event.length());
writeEvents(event, writer1);
String readerGroupName1 = readerGroupName + "1";
log.info("Creating Reader group : {}", readerGroupName1);
readerGroupManager.createReaderGroup(readerGroupName1, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).automaticCheckpointIntervalMillis(2000).build());
EventStreamReader<String> reader1 = clientFactory.createReader(readerName, readerGroupName1, new UTF8StringSerializer(), ReaderConfig.builder().build());
readAllEvents(reader1);
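// "0.#epoch.0" is the fully qualified name of segment 0 created in epoch 0; segmentTags builds the metric tag array for that segment.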
final String[] streamTags = segmentTags(scope + "/" + STREAM_NAME + "/0.#epoch.0");
assertEquals(bytesWritten, (long) MetricRegistryUtils.getCounter(SEGMENT_READ_BYTES, streamTags).count());
// Wait for cache eviction to happen
Thread.sleep(5000);
String readerGroupName2 = readerGroupName + "2";
log.info("Creating Reader group : {}", readerGroupName2);
readerGroupManager.createReaderGroup(readerGroupName2, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).automaticCheckpointIntervalMillis(2000).build());
EventStreamReader<String> reader2 = clientFactory.createReader(readerName, readerGroupName2, new UTF8StringSerializer(), ReaderConfig.builder().build());
readAllEvents(reader2);
// The metric is evicted from the cache after the cache eviction duration.
// The count therefore starts from 0 again, rather than adding to the previously read bytes.
assertEquals(bytesWritten, (long) MetricRegistryUtils.getCounter(SEGMENT_READ_BYTES, streamTags).count());
Map<Double, Double> map = new HashMap<>();
map.put(0.0, 1.0);
// Seal segment 0, create segment 1
CompletableFuture<Boolean> scaleStatus = controller.scaleStream(new StreamImpl(scope, STREAM_NAME), Collections.singletonList(0L), map, executorService()).getFuture();
Assert.assertTrue(scaleStatus.get());
@Cleanup EventStreamWriter<String> writer2 = clientFactory.createEventWriter(STREAM_NAME, new UTF8StringSerializer(), EventWriterConfig.builder().build());
writeEvents(event, writer2);
readAllEvents(reader1);
final String[] streamTags2nd = segmentTags(scope + "/" + STREAM_NAME + "/1.#epoch.1");
assertEquals(bytesWritten, (long) MetricRegistryUtils.getCounter(SEGMENT_READ_BYTES, streamTags2nd).count());
readerGroupManager.deleteReaderGroup(readerGroupName1);
readerGroupManager.deleteReaderGroup(readerGroupName2);
CompletableFuture<Boolean> sealStreamStatus = controller.sealStream(scope, STREAM_NAME);
log.info("Sealing stream {}", STREAM_NAME);
assertTrue(sealStreamStatus.get());
CompletableFuture<Boolean> deleteStreamStatus = controller.deleteStream(scope, STREAM_NAME);
log.info("Deleting stream {}", STREAM_NAME);
assertTrue(deleteStreamStatus.get());
CompletableFuture<Boolean> deleteScopeStatus = controller.deleteScope(scope);
log.info("Deleting scope {}", scope);
assertTrue(deleteScopeStatus.get());
}
log.info("Metrics Time based Cache Eviction test succeeds");
}
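The test above exercises ReaderGroupManager through the internal ReaderGroupManagerImpl constructor. As a standalone reference, here is a minimal sketch of the same lifecycle (create a group with an automatic checkpoint interval, attach a reader, delete the group) using the public ReaderGroupManager.withScope and EventStreamClientFactory.withScope factories. The controller URI, scope, stream, group and reader names are placeholders, and the scope and stream are assumed to already exist; this is not the test's actual code.

import java.net.URI;
import io.pravega.client.ClientConfig;
import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.stream.EventStreamReader;
import io.pravega.client.stream.ReaderConfig;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.impl.UTF8StringSerializer;

public class ReaderGroupLifecycleSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details; the scope and stream are assumed to already exist.
        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create("tcp://localhost:9090")).build();
        String scope = "myScope";
        String stream = "myStream";
        try (ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, clientConfig);
             EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig)) {
            // Create a reader group over the stream with a 2-second automatic checkpoint
            // interval, mirroring the configuration used in the test above.
            groupManager.createReaderGroup("metricsGroup", ReaderGroupConfig.builder()
                    .stream(Stream.of(scope, stream))
                    .automaticCheckpointIntervalMillis(2000)
                    .build());
            // Attach a reader to the group and read a single event.
            try (EventStreamReader<String> reader = clientFactory.createReader(
                    "reader-1", "metricsGroup", new UTF8StringSerializer(), ReaderConfig.builder().build())) {
                System.out.println("Read: " + reader.readNextEvent(10000).getEvent());
            }
            // Remove the group once its readers are done.
            groupManager.deleteReaderGroup("metricsGroup");
        }
    }
}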
Use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
In class WatermarkingTest, method watermarkingTests.
@Test
public void watermarkingTests() throws Exception {
final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
@Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), connectionFactory.getInternalExecutor());
// create 2 writers
@Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
JavaSerializer<Long> javaSerializer = new JavaSerializer<>();
@Cleanup EventStreamWriter<Long> writer1 = clientFactory.createEventWriter(STREAM, javaSerializer, EventWriterConfig.builder().build());
@Cleanup EventStreamWriter<Long> writer2 = clientFactory.createEventWriter(STREAM, javaSerializer, EventWriterConfig.builder().build());
AtomicBoolean stopFlag = new AtomicBoolean(false);
// write events
writeEvents(writer1, stopFlag);
writeEvents(writer2, stopFlag);
// scale the stream several times so that we get complex positions
Stream streamObj = Stream.of(SCOPE, STREAM);
scale(controller, streamObj);
@Cleanup ClientFactoryImpl syncClientFactory = new ClientFactoryImpl(SCOPE, new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), connectionFactory.getInternalExecutor()), connectionFactory);
String markStream = NameUtils.getMarkStreamForStream(STREAM);
RevisionedStreamClient<Watermark> watermarkReader = syncClientFactory.createRevisionedStreamClient(markStream, new WatermarkSerializer(), SynchronizerConfig.builder().build());
LinkedBlockingQueue<Watermark> watermarks = new LinkedBlockingQueue<>();
fetchWatermarks(watermarkReader, watermarks, stopFlag);
AssertExtensions.assertEventuallyEquals(true, () -> watermarks.size() >= 2, 100000);
// scale down one controller instance.
Futures.getAndHandleExceptions(controllerInstance.scaleService(1), ExecutionException::new);
// wait until at least 2 more watermarks are emitted
AssertExtensions.assertEventuallyEquals(true, () -> watermarks.size() >= 4, 100000);
stopFlag.set(true);
Watermark watermark0 = watermarks.take();
Watermark watermark1 = watermarks.take();
Watermark watermark2 = watermarks.take();
Watermark watermark3 = watermarks.take();
assertTrue(watermark0.getLowerTimeBound() <= watermark0.getUpperTimeBound());
assertTrue(watermark1.getLowerTimeBound() <= watermark1.getUpperTimeBound());
assertTrue(watermark2.getLowerTimeBound() <= watermark2.getUpperTimeBound());
assertTrue(watermark3.getLowerTimeBound() <= watermark3.getUpperTimeBound());
// verify that watermarks are increasing in time.
assertTrue(watermark0.getLowerTimeBound() < watermark1.getLowerTimeBound());
assertTrue(watermark1.getLowerTimeBound() < watermark2.getLowerTimeBound());
assertTrue(watermark2.getLowerTimeBound() < watermark3.getLowerTimeBound());
// use watermark as lower and upper bounds.
Map<Segment, Long> positionMap0 = watermark0.getStreamCut().entrySet().stream().collect(Collectors.toMap(x -> new Segment(SCOPE, STREAM, x.getKey().getSegmentId()), Map.Entry::getValue));
StreamCut streamCutStart = new StreamCutImpl(streamObj, positionMap0);
Map<Stream, StreamCut> start = Collections.singletonMap(streamObj, streamCutStart);
Map<Segment, Long> positionMap2 = watermark2.getStreamCut().entrySet().stream().collect(Collectors.toMap(x -> new Segment(SCOPE, STREAM, x.getKey().getSegmentId()), Map.Entry::getValue));
StreamCut streamCutEnd = new StreamCutImpl(streamObj, positionMap2);
Map<Stream, StreamCut> end = Collections.singletonMap(streamObj, streamCutEnd);
@Cleanup ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(SCOPE, controller, syncClientFactory);
String readerGroup = "rg";
readerGroupManager.createReaderGroup(readerGroup, ReaderGroupConfig.builder().stream(streamObj).startingStreamCuts(start).endingStreamCuts(end).build());
// create reader on the stream
@Cleanup final EventStreamReader<Long> reader = clientFactory.createReader("myreader", readerGroup, javaSerializer, ReaderConfig.builder().build());
// read events from the reader.
// verify that events read belong to the bound
EventRead<Long> event = reader.readNextEvent(10000L);
AtomicReference<TimeWindow> currentTimeWindow = new AtomicReference<>();
AssertExtensions.assertEventuallyEquals(true, () -> {
currentTimeWindow.set(reader.getCurrentTimeWindow(streamObj));
return currentTimeWindow.get() != null && currentTimeWindow.get().getLowerTimeBound() != null && currentTimeWindow.get().getUpperTimeBound() != null;
}, 100000);
log.info("current time window = {}", currentTimeWindow.get());
while (event.getEvent() != null) {
Long time = event.getEvent();
log.info("event read = {}", time);
event.getPosition();
assertTrue(time >= currentTimeWindow.get().getLowerTimeBound());
event = reader.readNextEvent(10000L);
if (event.isCheckpoint()) {
event = reader.readNextEvent(10000L);
}
}
}
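The heart of the watermarking test is feeding the stream cuts recovered from two watermarks into ReaderGroupConfig as start and end bounds. Below is a minimal sketch of just that step; it assumes the caller has already turned Watermark#getStreamCut() into StreamCut objects the way the test does, and the helper class and group name are illustrative only.

import io.pravega.client.ClientConfig;
import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamCut;

public final class BoundedReaderGroupSketch {
    // Creates a reader group restricted to the range between the two supplied cuts.
    // startCut and endCut are assumed to have been derived from watermarks
    // (Watermark#getStreamCut()), as in the test above.
    public static void createBoundedGroup(ClientConfig clientConfig, String scope, String stream,
                                          StreamCut startCut, StreamCut endCut) {
        try (ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, clientConfig)) {
            groupManager.createReaderGroup("boundedGroup", ReaderGroupConfig.builder()
                    // Bind the group to the stream and confine it to [startCut, endCut).
                    .stream(Stream.of(scope, stream), startCut, endCut)
                    .build());
        }
    }
}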
Use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
In class WriteBatchTest, method readWriteTest.
@Test(timeout = 60000)
public void readWriteTest() throws InterruptedException, ExecutionException {
String scope = "testBatchWrite";
String readerGroupName = "testBatchWriteRG";
// 20 readers -> 20 stream segments ( to have max read parallelism)
ScalingPolicy scalingPolicy = ScalingPolicy.fixed(20);
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
ConcurrentLinkedQueue<Long> eventsReadFromPravega = new ConcurrentLinkedQueue<>();
AtomicLong eventData = new AtomicLong();
AtomicLong eventReadCount = new AtomicLong();
AtomicBoolean stopReadFlag = new AtomicBoolean(false);
ClientConfig clientConfig = ClientConfig.builder().build();
try (ConnectionPool cp = new ConnectionPoolImpl(clientConfig, new SocketConnectionFactoryImpl(clientConfig));
StreamManager streamManager = new StreamManagerImpl(controller, cp)) {
// create a scope
Boolean createScopeStatus = streamManager.createScope(scope);
log.info("Create scope status {}", createScopeStatus);
// create a stream
Boolean createStreamStatus = streamManager.createStream(scope, STREAM_NAME, config);
log.info("Create stream status {}", createStreamStatus);
}
try (ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory)) {
// start writing events to the stream
log.info("Creating {} writers", NUM_WRITERS);
List<CompletableFuture<Void>> writerList = new ArrayList<>();
for (int i = 0; i < NUM_WRITERS; i++) {
log.info("Starting writer{}", i);
writerList.add(startNewWriter(eventData, clientFactory));
}
// create a reader group
log.info("Creating Reader group : {}", readerGroupName);
readerGroupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).build());
log.info("Reader group name {} ", readerGroupManager.getReaderGroup(readerGroupName).getGroupName());
log.info("Reader group scope {}", readerGroupManager.getReaderGroup(readerGroupName).getScope());
// create readers
log.info("Creating {} readers", NUM_READERS);
List<CompletableFuture<Void>> readerList = new ArrayList<>();
String readerName = "reader" + RandomFactory.create().nextInt(Integer.MAX_VALUE);
// start reading events
for (int i = 0; i < NUM_READERS; i++) {
log.info("Starting reader{}", i);
readerList.add(startNewReader(readerName + i, clientFactory, readerGroupName, eventsReadFromPravega, eventData, eventReadCount, stopReadFlag));
}
// wait for writers completion
Futures.allOf(writerList).get();
// set stop read flag to true
stopReadFlag.set(true);
// wait for readers completion
Futures.allOf(readerList).get();
ExecutorServiceHelpers.shutdown(writerPool);
ExecutorServiceHelpers.shutdown(readerPool);
// delete readergroup
log.info("Deleting readergroup {}", readerGroupName);
readerGroupManager.deleteReaderGroup(readerGroupName);
}
log.info("All writers have stopped. Setting Stop_Read_Flag. Event Written Count:{}, Event Read " + "Count: {}", eventData.get(), eventsReadFromPravega.size());
assertEquals(totalNumberOfEvents.get(), eventsReadFromPravega.size());
// check unique events.
assertEquals(totalNumberOfEvents.get(), new TreeSet<>(eventsReadFromPravega).size());
// seal the stream
CompletableFuture<Boolean> sealStreamStatus = controller.sealStream(scope, STREAM_NAME);
log.info("Sealing stream {}", STREAM_NAME);
assertTrue(sealStreamStatus.get());
// delete the stream
CompletableFuture<Boolean> deleteStreamStatus = controller.deleteStream(scope, STREAM_NAME);
log.info("Deleting stream {}", STREAM_NAME);
assertTrue(deleteStreamStatus.get());
// delete the scope
CompletableFuture<Boolean> deleteScopeStatus = controller.deleteScope(scope);
log.info("Deleting scope {}", scope);
assertTrue(deleteScopeStatus.get());
log.info("Read write test succeeds");
}
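For reference, the administrative skeleton of the test above can be condensed into the sketch below, which uses the public StreamManager.create and ReaderGroupManager.withScope factories instead of the internal StreamManagerImpl/ReaderGroupManagerImpl constructors. The scope, stream and group names are placeholders and the writer/reader work is elided.

import io.pravega.client.ClientConfig;
import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamConfiguration;

public final class BatchAdminSketch {
    public static void main(String[] args) {
        ClientConfig clientConfig = ClientConfig.builder().build();
        String scope = "testBatchWrite";
        String stream = "testStream";
        try (StreamManager streamManager = StreamManager.create(clientConfig);
             ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, clientConfig)) {
            // 20 fixed segments allow up to 20 readers to read in parallel, as in the test.
            streamManager.createScope(scope);
            streamManager.createStream(scope, stream,
                    StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(20)).build());
            groupManager.createReaderGroup("testBatchWriteRG",
                    ReaderGroupConfig.builder().stream(Stream.of(scope, stream)).build());
            System.out.println("group: " + groupManager.getReaderGroup("testBatchWriteRG").getGroupName());
            // ... writers and readers would run here ...
            // Tear down in the same order as the test: reader group, stream, scope.
            groupManager.deleteReaderGroup("testBatchWriteRG");
            streamManager.sealStream(scope, stream);
            streamManager.deleteStream(scope, stream);
            streamManager.deleteScope(scope);
        }
    }
}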
Use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
In class EventProcessorTest, method testEventProcessorRebalance.
@Test(timeout = 60000)
public void testEventProcessorRebalance() throws Exception {
final String scope = "scope";
final String streamName = "stream";
final String readerGroupName = "readerGroup";
controller.createScope(scope).join();
final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(4)).build();
controller.createStream(scope, streamName, config).join();
eventSerializer = new EventSerializer<>(new TestSerializer());
@Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
@Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
CheckpointConfig.CheckpointPeriod period = CheckpointConfig.CheckpointPeriod.builder().numEvents(1).numSeconds(1).build();
CheckpointConfig checkpointConfig = CheckpointConfig.builder().type(CheckpointConfig.Type.Periodic).checkpointPeriod(period).build();
EventProcessorGroupConfig eventProcessorGroupConfig = EventProcessorGroupConfigImpl.builder().eventProcessorCount(1).readerGroupName(readerGroupName).streamName(streamName).checkpointConfig(checkpointConfig).build();
LinkedBlockingQueue<Integer> queue1 = new LinkedBlockingQueue<>();
EventProcessorConfig<TestEvent> eventProcessorConfig1 = EventProcessorConfig.<TestEvent>builder().supplier(() -> new TestEventProcessor2(queue1)).serializer(eventSerializer).decider((Throwable e) -> ExceptionHandler.Directive.Stop).config(eventProcessorGroupConfig).minRebalanceIntervalMillis(Duration.ofMillis(100).toMillis()).build();
// create a group and verify that all events can be written and read by readers in this group.
EventProcessorSystem system1 = new EventProcessorSystemImpl("Controller", "process1", scope, clientFactory, new ReaderGroupManagerImpl(scope, controller, clientFactory));
@Cleanup EventProcessorGroup<TestEvent> eventProcessorGroup1 = system1.createEventProcessorGroup(eventProcessorConfig1, CheckpointStoreFactory.createInMemoryStore(), executorService());
eventProcessorGroup1.awaitRunning();
log.info("first event processor started");
@Cleanup EventStreamWriter<TestEvent> writer = clientFactory.createEventWriter(streamName, eventSerializer, EventWriterConfig.builder().build());
// write 10 events and read them back from the queue passed to the first event processor
List<Integer> input = IntStream.range(0, 10).boxed().collect(Collectors.toList());
ConcurrentSkipListSet<Integer> output = new ConcurrentSkipListSet<>();
for (int val : input) {
writer.writeEvent(new TestEvent(val));
}
writer.flush();
// now wait until all the entries are read back.
for (int i = 0; i < 10; i++) {
// read 10 events back
Integer entry = queue1.take();
output.add(entry);
}
assertEquals(10, output.size());
log.info("first event processor read all the messages");
LinkedBlockingQueue<Integer> queue2 = new LinkedBlockingQueue<>();
EventProcessorConfig<TestEvent> eventProcessorConfig2 = EventProcessorConfig.<TestEvent>builder().supplier(() -> new TestEventProcessor2(queue2)).serializer(eventSerializer).decider((Throwable e) -> ExceptionHandler.Directive.Stop).config(eventProcessorGroupConfig).minRebalanceIntervalMillis(Duration.ofMillis(100).toMillis()).build();
// add another system and event processor group (effectively add a new set of readers to the readergroup)
EventProcessorSystem system2 = new EventProcessorSystemImpl("Controller", "process2", scope, clientFactory, new ReaderGroupManagerImpl(scope, controller, clientFactory));
@Cleanup EventProcessorGroup<TestEvent> eventProcessorGroup2 = system2.createEventProcessorGroup(eventProcessorConfig2, CheckpointStoreFactory.createInMemoryStore(), executorService());
eventProcessorGroup2.awaitRunning();
log.info("second event processor started");
AtomicInteger queue1EntriesFound = new AtomicInteger(0);
AtomicInteger queue2EntriesFound = new AtomicInteger(0);
ConcurrentSkipListSet<Integer> output2 = new ConcurrentSkipListSet<>();
// wait until the rebalance has had a chance to happen.
@Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory);
ReaderGroup readerGroup = groupManager.getReaderGroup(readerGroupName);
AtomicBoolean allAssigned = new AtomicBoolean(false);
Futures.loop(() -> !allAssigned.get(), () -> Futures.delayedFuture(Duration.ofMillis(100), executorService()).thenAccept(v -> {
ReaderSegmentDistribution distribution = readerGroup.getReaderSegmentDistribution();
int numberOfReaders = distribution.getReaderSegmentDistribution().size();
allAssigned.set(numberOfReaders == 2 && distribution.getReaderSegmentDistribution().values().stream().noneMatch(x -> x == 0));
}), executorService()).join();
// write 10 new events
for (int val : input) {
writer.writeEvent(new TestEvent(val));
}
writer.flush();
// drain both queues until all 10 new events are read; both queues should contribute at least one entry
CompletableFuture.allOf(CompletableFuture.runAsync(() -> {
while (output2.size() < 10) {
Integer entry = queue1.poll();
if (entry != null) {
log.info("entry read from queue 1: {}", entry);
queue1EntriesFound.incrementAndGet();
output2.add(entry);
} else {
Exceptions.handleInterrupted(() -> Thread.sleep(100));
}
}
}), CompletableFuture.runAsync(() -> {
while (output2.size() < 10) {
Integer entry = queue2.poll();
if (entry != null) {
log.info("entry read from queue 2: {}", entry);
queue2EntriesFound.incrementAndGet();
output2.add(entry);
} else {
Exceptions.handleInterrupted(() -> Thread.sleep(100));
}
}
})).join();
assertTrue(queue1EntriesFound.get() > 0);
assertTrue(queue2EntriesFound.get() > 0);
assertEquals(10, output2.size());
}
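The rebalance wait in the test polls ReaderGroup#getReaderSegmentDistribution until both readers own segments. The sketch below restates that check as a plain polling loop; the helper class, method name and attempt count are assumptions for illustration, while the Futures.loop-based version above is what the test actually uses.

import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.stream.ReaderGroup;
import io.pravega.client.stream.ReaderSegmentDistribution;

public final class RebalanceCheckSketch {
    // Returns true once the expected number of readers each own at least one segment.
    public static boolean waitForBalancedGroup(ReaderGroupManager groupManager, String groupName,
                                               int expectedReaders, int attempts) throws InterruptedException {
        ReaderGroup readerGroup = groupManager.getReaderGroup(groupName);
        for (int i = 0; i < attempts; i++) {
            ReaderSegmentDistribution distribution = readerGroup.getReaderSegmentDistribution();
            boolean allAssigned = distribution.getReaderSegmentDistribution().size() == expectedReaders
                    && distribution.getReaderSegmentDistribution().values().stream().noneMatch(x -> x == 0);
            if (allAssigned) {
                return true;
            }
            Thread.sleep(100); // back off before polling again
        }
        return false;
    }
}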
Use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
In class EndToEndChannelLeakTest, method testDetectChannelLeakSegmentSealedPooled.
@Test(timeout = 30000)
public void testDetectChannelLeakSegmentSealedPooled() throws Exception {
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
Controller controller = controllerWrapper.getController();
controllerWrapper.getControllerService().createScope(SCOPE, 0L).get();
controller.createStream(SCOPE, STREAM_NAME, config).get();
// Set the max number of connections to verify channel creation behaviour
final ClientConfig clientConfig = ClientConfig.builder().maxConnectionsPerSegmentStore(5).build();
@Cleanup SocketConnectionFactoryImpl connectionFactory = new SocketConnectionFactoryImpl(clientConfig, new InlineExecutor());
@Cleanup ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, connectionFactory);
@Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionPool);
// Create a writer.
@Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM_NAME, serializer, writerConfig);
// Write an event.
writer.writeEvent("0", "zero").get();
assertChannelCount(1, connectionPool, connectionFactory);
@Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl(SCOPE, controller, clientFactory);
groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(0).stream(Stream.of(SCOPE, STREAM_NAME)).build());
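// Automatic checkpoints are disabled and groupRefreshTimeMillis(0) makes the reader refresh the reader group state on every read, so segment reassignments after the scale below are picked up immediately.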
@Cleanup EventStreamReader<String> reader1 = clientFactory.createReader("readerId1", READER_GROUP, serializer, ReaderConfig.builder().disableTimeWindows(true).build());
// Read an event.
EventRead<String> event = reader1.readNextEvent(10000);
assertEquals("zero", event.getEvent());
// scale
Stream stream = new StreamImpl(SCOPE, STREAM_NAME);
Map<Double, Double> map = new HashMap<>();
map.put(0.0, 0.33);
map.put(0.33, 0.66);
map.put(0.66, 1.0);
Boolean result = controller.scaleStream(stream, Collections.singletonList(0L), map, executor).getFuture().get();
assertTrue(result);
event = reader1.readNextEvent(0);
assertNull(event.getEvent());
@Cleanup ReaderGroup readerGroup = groupManager.getReaderGroup(READER_GROUP);
readerGroup.initiateCheckpoint("cp", executor);
event = reader1.readNextEvent(5000);
assertEquals("cp", event.getCheckpointName());
// Write more events.
writer.writeEvent("0", "one").get();
writer.writeEvent("0", "two").get();
writer.writeEvent("1", "three").get();
event = reader1.readNextEvent(10000);
assertNotNull(event.getEvent());
assertChannelCount(5, connectionPool, connectionFactory);
event = reader1.readNextEvent(10000);
assertNotNull(event.getEvent());
assertChannelCount(5, connectionPool, connectionFactory);
event = reader1.readNextEvent(10000);
assertNotNull(event.getEvent());
assertChannelCount(5, connectionPool, connectionFactory);
}
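After the scale, reader1 returns a null event until a checkpoint lets it transition to the successor segments; the test triggers that with ReaderGroup#initiateCheckpoint. The sketch below shows the same pattern in isolation, with assumed names and a locally created executor rather than the test's shared one.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import io.pravega.client.stream.Checkpoint;
import io.pravega.client.stream.EventRead;
import io.pravega.client.stream.EventStreamReader;
import io.pravega.client.stream.ReaderGroup;

public final class CheckpointAfterScaleSketch {
    public static void readPastScale(ReaderGroup readerGroup, EventStreamReader<String> reader) throws Exception {
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        try {
            // Ask the group to checkpoint; the reader observes it as a special event.
            CompletableFuture<Checkpoint> checkpoint = readerGroup.initiateCheckpoint("cp", executor);
            EventRead<String> event = reader.readNextEvent(5000);
            if (event.isCheckpoint()) {
                System.out.println("checkpoint observed: " + event.getCheckpointName());
            }
            // Subsequent reads come from the post-scale (successor) segments.
            event = reader.readNextEvent(10000);
            System.out.println("next event: " + event.getEvent());
            checkpoint.join();
        } finally {
            executor.shutdown();
        }
    }
}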