Example usage of io.pravega.client.admin.ReaderGroupManager in the pravega project — class EndToEndChannelLeakTest, method testDetectChannelLeakSegmentSealed.
/**
 * Verifies that no client channels are leaked when a stream segment is sealed by a
 * scale operation: reading past the sealed segment should open sockets to the three
 * new segments and release the socket of the sealed one (net +2), after which the
 * channel count must remain constant across further reads.
 */
@Test(timeout = 30000)
public void testDetectChannelLeakSegmentSealed() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scope(SCOPE).streamName(STREAM_NAME).scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope(SCOPE).get();
    controller.createStream(config).get();
    @Cleanup ConnectionFactoryImpl connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup ClientFactory clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionFactory);
    // Create a writer. Fix: the stream name is STREAM_NAME — previously SCOPE was
    // passed here, so events were written to a different stream than the one the
    // reader group below subscribes to.
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM_NAME, new JavaSerializer<>(), EventWriterConfig.builder().build());
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl(SCOPE, controller, clientFactory, connectionFactory);
    groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(SCOPE, STREAM_NAME)).build());
    @Cleanup EventStreamReader<String> reader1 = clientFactory.createReader("readerId1", READER_GROUP, new JavaSerializer<>(), ReaderConfig.builder().build());
    // Write an event.
    writer.writeEvent("0", "zero").get();
    // Read it back to confirm the write/read path works before scaling.
    EventRead<String> event = reader1.readNextEvent(10000);
    assertNotNull(event);
    assertEquals("zero", event.getEvent());
    // Scale: split segment 0 into three segments covering the whole key space.
    // Fix: scale the stream under test (SCOPE/STREAM_NAME) — previously SCOPE was
    // passed twice, targeting a stream named after the scope.
    Stream stream = new StreamImpl(SCOPE, STREAM_NAME);
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.33);
    map.put(0.33, 0.66);
    map.put(0.66, 1.0);
    Boolean result = controller.scaleStream(stream, Collections.singletonList(0), map, executor).getFuture().get();
    assertTrue(result);
    // Write more events, landing in the post-scale segments.
    writer.writeEvent("0", "one").get();
    writer.writeEvent("0", "two").get();
    writer.writeEvent("1", "three").get();
    // Store the open channel count before reading.
    int channelCount = connectionFactory.getActiveChannelCount();
    event = reader1.readNextEvent(10000);
    assertNotNull(event.getEvent());
    // Number of sockets increases by 2 (+3 for the new segments, -1 since the older segment is sealed).
    assertEquals(channelCount + 2, connectionFactory.getActiveChannelCount());
    event = reader1.readNextEvent(10000);
    assertNotNull(event.getEvent());
    // No changes to the socket count.
    assertEquals(channelCount + 2, connectionFactory.getActiveChannelCount());
    event = reader1.readNextEvent(10000);
    assertNotNull(event.getEvent());
    // No changes to the socket count.
    assertEquals(channelCount + 2, connectionFactory.getActiveChannelCount());
}
Example usage of io.pravega.client.admin.ReaderGroupManager in the pravega project — class EndToEndChannelLeakTest, method testDetectChannelLeakMultiReader.
/**
 * Ensures that channel usage stays leak-free when two readers share a reader group
 * across a stream scale operation. The expected channel count is asserted after
 * every step that opens or closes segment connections.
 */
@Test(timeout = 30000)
public void testDetectChannelLeakMultiReader() throws Exception {
    StreamConfiguration streamConfig = StreamConfiguration.builder().scope(SCOPE).streamName(STREAM_NAME).scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope(SCOPE).get();
    controller.createStream(streamConfig).get();
    @Cleanup ConnectionFactoryImpl connFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup ClientFactoryImpl factory = new ClientFactoryImpl(SCOPE, controller, connFactory);
    // Writer for the test stream.
    @Cleanup EventStreamWriter<String> streamWriter = factory.createEventWriter(STREAM_NAME, new JavaSerializer<>(), EventWriterConfig.builder().build());
    @Cleanup ReaderGroupManager rgManager = new ReaderGroupManagerImpl(SCOPE, controller, factory, connFactory);
    rgManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(SCOPE, STREAM_NAME)).build());
    // First reader in the group.
    @Cleanup EventStreamReader<String> firstReader = factory.createReader("readerId1", READER_GROUP, serializer, ReaderConfig.builder().build());
    // Write one event.
    streamWriter.writeEvent("0", "zero").get();
    // Four sockets are expected here: the writer holds 1 (segment 0 of the stream) and
    // the reader holds 3 (metadata client + writer to _RGreader/0 + reader from _RGreader/0).
    assertEquals(4, connFactory.getActiveChannelCount());
    int expectedChannels = 4;
    // Consume the event.
    EventRead<String> read = firstReader.readNextEvent(10000);
    assertNotNull(read);
    assertEquals("zero", read.getEvent());
    // Reading opened one extra socket to segment 0 of the stream.
    expectedChannels = expectedChannels + 1;
    assertEquals(expectedChannels, connFactory.getActiveChannelCount());
    // Trigger a scale: replace segment 0 with three segments covering the key space.
    Stream stream = new StreamImpl(SCOPE, STREAM_NAME);
    Map<Double, Double> newRanges = new HashMap<>();
    newRanges.put(0.0, 0.33);
    newRanges.put(0.33, 0.66);
    newRanges.put(0.66, 1.0);
    Boolean scaled = controller.scaleStream(stream, Collections.singletonList(0), newRanges, executor).getFuture().get();
    assertTrue(scaled);
    // The scale by itself does not alter the channel count.
    assertEquals(expectedChannels, connFactory.getActiveChannelCount());
    // Write to the post-scale segments.
    streamWriter.writeEvent("0", "one").get();
    streamWriter.writeEvent("0", "two").get();
    streamWriter.writeEvent("1", "three").get();
    // Writer gains 3 sockets (segments 1, 2, 3 after the scale) and drops 1
    // (the sealed segment 0), a net change of +2.
    expectedChannels = expectedChannels + 2;
    assertEquals(expectedChannels, connFactory.getActiveChannelCount());
    // Bring up a second reader; like the first, it opens 3 sockets.
    @Cleanup EventStreamReader<String> secondReader = factory.createReader("readerId2", READER_GROUP, serializer, ReaderConfig.builder().build());
    expectedChannels = expectedChannels + 3;
    assertEquals(expectedChannels, connFactory.getActiveChannelCount());
    read = firstReader.readNextEvent(10000);
    assertNotNull(read);
    // Net +1: the socket to sealed segment 0 closes (-1) while the reader connects to
    // the 2 post-scale segments assigned to it (with 2 readers and 3 segments,
    // reader1 gets 2 of them).
    expectedChannels = expectedChannels + 1;
    assertEquals(expectedChannels, connFactory.getActiveChannelCount());
    read = secondReader.readNextEvent(10000);
    assertNotNull(read);
    // The second reader connects to the single remaining segment (+1).
    assertEquals(expectedChannels + 1, connFactory.getActiveChannelCount());
}
Example usage of io.pravega.client.admin.ReaderGroupManager in the pravega project — class EndToEndTxnWithScaleTest, method testTxnWithScale.
/**
 * Verifies that transactional writes committed before and after a stream scale are
 * both readable, in order, by a reader created after the scale has completed.
 */
@Test(timeout = 10000)
public void testTxnWithScale() throws Exception {
    StreamConfiguration streamConfig = StreamConfiguration.builder().scope("test").streamName("test").scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope("test").get();
    controller.createStream(streamConfig).get();
    @Cleanup ConnectionFactory connFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup ClientFactory factory = new ClientFactoryImpl("test", controller, connFactory);
    @Cleanup EventStreamWriter<String> txnWriter = factory.createEventWriter("test", new JavaSerializer<>(), EventWriterConfig.builder().transactionTimeoutScaleGracePeriod(10000).transactionTimeoutTime(10000).build());
    // Commit a transaction while the stream still has its original single segment.
    Transaction<String> firstTxn = txnWriter.beginTxn();
    firstTxn.writeEvent("0", "txntest1");
    firstTxn.commit();
    // Scale the stream: replace segment 0 with three segments covering the key space.
    Stream stream = new StreamImpl("test", "test");
    Map<Double, Double> keyRanges = new HashMap<>();
    keyRanges.put(0.0, 0.33);
    keyRanges.put(0.33, 0.66);
    keyRanges.put(0.66, 1.0);
    Boolean scaled = controller.scaleStream(stream, Collections.singletonList(0), keyRanges, executorService).getFuture().get();
    assertTrue(scaled);
    // Commit a second transaction against the post-scale segments.
    Transaction<String> secondTxn = txnWriter.beginTxn();
    secondTxn.writeEvent("0", "txntest2");
    secondTxn.commit();
    // Read both committed events back in commit order.
    @Cleanup ReaderGroupManager rgManager = new ReaderGroupManagerImpl("test", controller, factory, connFactory);
    rgManager.createReaderGroup("reader", ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/test").build());
    @Cleanup EventStreamReader<String> reader = factory.createReader("readerId", "reader", new JavaSerializer<>(), ReaderConfig.builder().build());
    EventRead<String> read = reader.readNextEvent(10000);
    assertNotNull(read);
    assertEquals("txntest1", read.getEvent());
    read = reader.readNextEvent(10000);
    assertNotNull(read);
    assertEquals("txntest2", read.getEvent());
}
Example usage of io.pravega.client.admin.ReaderGroupManager in the pravega project — class EndToEndAutoScaleUpWithTxnTest, method main.
// End-to-end driver (run as a standalone main, not a JUnit test) that checks
// transactional writes still work across a manual scale, and that segment-store
// traffic eventually triggers an automatic scale-up of the "test" stream.
// Exits 0 on success, 1 if auto-scale is not observed, -1 on any other failure.
public static void main(String[] args) throws Exception {
try {
// Bootstrap an in-process ZooKeeper and controller.
@Cleanup TestingServer zkTestServer = new TestingServerStarter().start();
int port = Config.SERVICE_PORT;
@Cleanup ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), port);
Controller controller = controllerWrapper.getController();
// Internal scope is needed by the stats recorder's internal client factory below.
controllerWrapper.getControllerService().createScope(NameUtils.INTERNAL_SCOPE_NAME).get();
@Cleanup ConnectionFactory connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
@Cleanup ClientFactory internalCF = new ClientFactoryImpl(NameUtils.INTERNAL_SCOPE_NAME, controller, connectionFactory);
// Stand up an in-memory segment store with a stats recorder configured so the
// auto-scaler reacts immediately (no mute/cooldown windows).
ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
serviceBuilder.initialize();
StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
@Cleanup SegmentStatsFactory segmentStatsFactory = new SegmentStatsFactory();
SegmentStatsRecorder statsRecorder = segmentStatsFactory.createSegmentStatsRecorder(store, internalCF, AutoScalerConfig.builder().with(AutoScalerConfig.MUTE_IN_SECONDS, 0).with(AutoScalerConfig.COOLDOWN_IN_SECONDS, 0).build());
@Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, "localhost", 12345, store, statsRecorder, null, null, null);
server.startListening();
controllerWrapper.awaitRunning();
// Create the scope/stream under test.
controllerWrapper.getControllerService().createScope("test").get();
controller.createStream(CONFIG).get();
@Cleanup MockClientFactory clientFactory = new MockClientFactory("test", controller);
// Mocking pravega service by putting scale up and scale down requests for the stream
EventWriterConfig writerConfig = EventWriterConfig.builder().transactionTimeoutTime(30000).transactionTimeoutScaleGracePeriod(30000).build();
EventStreamWriter<String> test = clientFactory.createEventWriter("test", new JavaSerializer<>(), writerConfig);
// region Successful commit tests
// Open a transaction before the scale; it is committed only after the scale below,
// exercising commit of a txn that spans a segment-seal boundary.
Transaction<String> txn1 = test.beginTxn();
txn1.writeEvent("1");
txn1.flush();
// Manually scale segment 0 into three segments over the full key space.
Map<Double, Double> map = new HashMap<>();
map.put(0.0, 1.0 / 3.0);
map.put(1.0 / 3.0, 2.0 / 3.0);
map.put(2.0 / 3.0, 1.0);
Stream stream = new StreamImpl("test", "test");
ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
controller.scaleStream(stream, Collections.singletonList(0), map, executor).getFuture().get();
// Second txn opened and committed entirely after the scale; note txn2 commits
// before txn1, so read order below is "1" then "2" only by event-time ordering
// within the reader group.
Transaction<String> txn2 = test.beginTxn();
txn2.writeEvent("2");
txn2.flush();
txn2.commit();
txn1.commit();
// Give the async commits time to land before reading.
Thread.sleep(1000);
@Cleanup ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl("test", controller, clientFactory, connectionFactory);
readerGroupManager.createReaderGroup("readergrp", ReaderGroupConfig.builder().stream("test").build());
final EventStreamReader<String> reader = clientFactory.createReader("1", "readergrp", new JavaSerializer<>(), ReaderConfig.builder().build());
// Both committed events must be readable (long timeout: commit is asynchronous).
String event1 = reader.readNextEvent(SECONDS.toMillis(60)).getEvent();
String event2 = reader.readNextEvent(SECONDS.toMillis(60)).getEvent();
assert event1.equals("1");
assert event2.equals("2");
// Keep writing in the background so the stats recorder sees sustained traffic,
// then poll until the controller reports more than 3 segments (auto-scale fired).
final AtomicBoolean done = new AtomicBoolean(false);
startWriter(test, done);
Retry.withExpBackoff(10, 10, 100, 10000).retryingOn(NotDoneException.class).throwingOn(RuntimeException.class).runAsync(() -> controller.getCurrentSegments("test", "test").thenAccept(streamSegments -> {
if (streamSegments.getSegments().size() > 3) {
System.err.println("Success");
log.info("Success");
System.exit(0);
} else {
throw new NotDoneException();
}
}), Executors.newSingleThreadScheduledExecutor()).exceptionally(e -> {
System.err.println("Failure");
log.error("Failure");
System.exit(1);
return null;
}).get();
} catch (Throwable e) {
System.err.print("Test failed with exception: " + e.getMessage());
log.error("Test failed with exception: {}", e);
System.exit(-1);
}
System.exit(0);
}
Example usage of io.pravega.client.admin.ReaderGroupManager in the pravega project — class BatchClientTest, method testBatchClient.
/**
 * Exercises the batch client: enumerates every segment a stream has ever had across
 * a scale-up and a scale-down, reads all events back segment-by-segment, and then
 * reads a bounded offset range from segment 0.
 */
@Test(timeout = 50000)
public void testBatchClient() throws Exception {
    StreamConfiguration streamConfig = StreamConfiguration.builder().scope(SCOPE).streamName(STREAM).scalingPolicy(ScalingPolicy.fixed(1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope(SCOPE).get();
    controller.createStream(streamConfig).get();
    // Set up the client factory, reader group and writer for the test stream.
    @Cleanup ClientFactory factory = ClientFactory.withScope(SCOPE, controllerUri);
    @Cleanup ReaderGroupManager rgManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    rgManager.createReaderGroup("group", ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(SCOPE, STREAM)).build());
    @Cleanup EventStreamWriter<String> streamWriter = factory.createEventWriter(STREAM, serializer, EventWriterConfig.builder().build());
    // First batch of events lands in the single initial segment.
    writeEvents(streamWriter);
    // Scale up (1 segment -> 3 segments) and write another batch.
    Stream stream = new StreamImpl(SCOPE, STREAM);
    Map<Double, Double> keyRanges = new HashMap<>();
    keyRanges.put(0.0, 0.33);
    keyRanges.put(0.33, 0.66);
    keyRanges.put(0.66, 1.0);
    Boolean scaled = controller.scaleStream(stream, Collections.singletonList(0), keyRanges, executor).getFuture().get();
    assertTrue("Scale up operation", scaled);
    writeEvents(streamWriter);
    // Scale down (3 segments -> 2 segments) and write a final batch.
    keyRanges = new HashMap<>();
    keyRanges.put(0.0, 0.5);
    keyRanges.put(0.5, 1.0);
    scaled = controller.scaleStream(stream, Arrays.asList(1, 2, 3), keyRanges, executor).getFuture().get();
    assertTrue("Scale down operation result", scaled);
    writeEvents(streamWriter);
    BatchClient batchClient = factory.createBatchClient();
    // All segments across the stream's history must be listed: 1 + 3 + 2 = 6.
    ArrayList<SegmentRange> allSegments = Lists.newArrayList(batchClient.getSegments(stream, null, null).getIterator());
    assertEquals("Expected number of segments", 6, allSegments.size());
    // Batch-read the full contents of every segment; 3 batches of 3 events each.
    List<String> allEvents = new ArrayList<>();
    for (SegmentRange segmentRange : allSegments) {
        @Cleanup SegmentIterator<String> segmentEvents = batchClient.readSegment(segmentRange, serializer);
        allEvents.addAll(Lists.newArrayList(segmentEvents));
    }
    assertEquals("Event count", 9, allEvents.size());
    // Read a single event from segment 0 using an explicit start/end offset window.
    Segment segmentZero = new Segment(SCOPE, STREAM, 0);
    SegmentRange boundedRange = SegmentRangeImpl.builder().segment(segmentZero).startOffset(60).endOffset(90).build();
    @Cleanup SegmentIterator<String> boundedIterator = batchClient.readSegment(boundedRange, serializer);
    ArrayList<String> eventsAtOffset = Lists.newArrayList(boundedIterator);
    assertEquals(1, eventsAtOffset.size());
    assertEquals(DATA_OF_SIZE_30, eventsAtOffset.get(0));
}
End of aggregated ReaderGroupManager usage examples.