Use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega: class EndToEndChannelLeakTest, method testDetectChannelLeakSegmentSealed.
@Test(timeout = 30000)
public void testDetectChannelLeakSegmentSealed() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope(SCOPE, 0L).get();
    controller.createStream(SCOPE, STREAM_NAME, config).get();
    // Set the max number of connections high to verify channel creation behaviour without pooling.
    final ClientConfig clientConfig = ClientConfig.builder().maxConnectionsPerSegmentStore(500).build();
    @Cleanup
    SocketConnectionFactoryImpl connectionFactory = new SocketConnectionFactoryImpl(clientConfig, executor);
    @Cleanup
    ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, connectionFactory);
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionPool);
    // No channels should be open before any client activity.
    int channelCount = 0;
    assertChannelCount(channelCount, connectionPool, connectionFactory);
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(SCOPE, controller, clientFactory);
    groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(0).stream(Stream.of(SCOPE, STREAM_NAME)).build());
    // Creating the reader group should not add any connections.
    assertChannelCount(channelCount, connectionPool, connectionFactory);
    // Create a writer. Fix: the writer must target the stream (STREAM_NAME) created above,
    // not SCOPE — sibling tests in this class pass STREAM_NAME here.
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM_NAME, serializer, writerConfig);
    // Write an event; this opens one connection to segment 0.
    writer.writeEvent("0", "zero").get();
    channelCount += 1;
    assertChannelCount(channelCount, connectionPool, connectionFactory);
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("readerId1", READER_GROUP, serializer, ReaderConfig.builder().disableTimeWindows(true).build());
    // 4 connections from the reader's state synchronizer.
    channelCount += 4;
    assertChannelCount(channelCount, connectionPool, connectionFactory);
    // Read the event back; the segment read opens one more connection.
    EventRead<String> event = reader1.readNextEvent(10000);
    assertEquals("zero", event.getEvent());
    channelCount += 1;
    assertChannelCount(channelCount, connectionPool, connectionFactory);
    // Scale the stream from one segment to three.
    // Fix: the scale target is SCOPE/STREAM_NAME, not SCOPE/SCOPE (a nonexistent stream).
    Stream stream = new StreamImpl(SCOPE, STREAM_NAME);
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.33);
    map.put(0.33, 0.66);
    map.put(0.66, 1.0);
    Boolean result = controller.scaleStream(stream, Collections.singletonList(0L), map, executor).getFuture().get();
    assertTrue(result);
    event = reader1.readNextEvent(0);
    assertNull(event.getEvent());
    // Reader sees end-of-segment and drops the connection to the sealed segment.
    channelCount -= 1;
    assertChannelCount(channelCount, connectionPool, connectionFactory);
    // The writer detects the sealed segment on the next write.
    writer.writeEvent("1", "one").get();
    // It closes 1 connection (sealed segment) and opens 3 (new segments): net +2.
    channelCount += 2;
    assertChannelCount(channelCount, connectionPool, connectionFactory);
    ReaderGroup readerGroup = groupManager.getReaderGroup(READER_GROUP);
    readerGroup.getMetrics().unreadBytes();
    CompletableFuture<Checkpoint> future = readerGroup.initiateCheckpoint("cp1", executor);
    // 4 more connections from the reader group's state synchronizer.
    channelCount += 4;
    assertChannelCount(channelCount, connectionPool, connectionFactory);
    event = reader1.readNextEvent(5000);
    assertEquals("cp1", event.getCheckpointName());
    event = reader1.readNextEvent(10000);
    assertEquals("one", event.getEvent());
    // 3 more connections from reading the new segments.
    channelCount += 3;
    assertChannelCount(channelCount, connectionPool, connectionFactory);
    future.join();
    // Closing the reader group should close its connections back down.
    readerGroup.close();
    channelCount -= 4;
    assertChannelCount(channelCount, connectionPool, connectionFactory);
    // Write more events to the already-connected segments.
    writer.writeEvent("2", "two").get();
    writer.writeEvent("3", "three").get();
    writer.writeEvent("4", "four").get();
    // No changes to socket count.
    assertChannelCount(channelCount, connectionPool, connectionFactory);
    event = reader1.readNextEvent(10000);
    assertNotNull(event.getEvent());
    // Still no changes to socket count.
    assertChannelCount(channelCount, connectionPool, connectionFactory);
    reader1.close();
    // Closing the reader releases 3 segment connections and 4 group-state connections.
    channelCount -= 7;
    assertChannelCount(channelCount, connectionPool, connectionFactory);
    groupManager.close();
    writer.close();
    // Everything is closed: no channels may remain open (leak check).
    assertChannelCount(0, connectionPool, connectionFactory);
}
Use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega: class EndToEndChannelLeakTest, method testDetectChannelLeakMultiReaderPooled.
@Test(timeout = 30000)
public void testDetectChannelLeakMultiReaderPooled() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    // Cap the pool at 5 connections so channel reuse (pooling) can be observed.
    final ClientConfig clientConfig = ClientConfig.builder().maxConnectionsPerSegmentStore(5).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope(SCOPE, 0L).get();
    controller.createStream(SCOPE, STREAM_NAME, config).get();
    @Cleanup
    SocketConnectionFactoryImpl connectionFactory = new SocketConnectionFactoryImpl(clientConfig, executor);
    @Cleanup
    ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, connectionFactory);
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionPool);
    // No sockets should be open before any client is used.
    int openChannels = 0;
    assertChannelCount(openChannels, connectionPool, connectionFactory);
    // Create a writer and write one event.
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM_NAME, serializer, writerConfig);
    writer.writeEvent("0", "zero").get();
    // The write opens a single connection to segment 0.
    openChannels += 1;
    assertChannelCount(openChannels, connectionPool, connectionFactory);
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(SCOPE, controller, clientFactory);
    // Creating the manager alone opens nothing.
    assertChannelCount(openChannels, connectionPool, connectionFactory);
    groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(0).stream(Stream.of(SCOPE, STREAM_NAME)).build());
    // Create a reader and read the event back.
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("readerId1", READER_GROUP, serializer, ReaderConfig.builder().disableTimeWindows(true).build());
    // The reader's revisioned stream client needs 4 flows (read, write, metadataClient, conditionalUpdates).
    EventRead<String> readEvent = reader1.readNextEvent(10000);
    assertEquals("zero", readEvent.getEvent());
    // The segment read does not grow the socket count: the pool is already at its cap of 5.
    assertChannelCount(5, connectionPool, connectionFactory);
    // Scale the stream from one segment to three.
    Stream scaledStream = new StreamImpl(SCOPE, STREAM_NAME);
    Map<Double, Double> keyRanges = new HashMap<>();
    keyRanges.put(0.0, 0.33);
    keyRanges.put(0.33, 0.66);
    keyRanges.put(0.66, 1.0);
    Boolean scaled = controller.scaleStream(scaledStream, Collections.singletonList(0L), keyRanges, executor).getFuture().get();
    assertTrue(scaled);
    // Scaling by itself does not change the channel count.
    assertChannelCount(5, connectionPool, connectionFactory);
    // The reader reaches the end of the sealed segment.
    readEvent = reader1.readNextEvent(1000);
    assertNull(readEvent.getEvent());
    // Write more events; they land on the post-scale segments.
    writer.writeEvent("1", "one").get();
    writer.writeEvent("2", "two").get();
    writer.writeEvent("3", "three").get();
    writer.writeEvent("4", "four").get();
    writer.writeEvent("5", "five").get();
    writer.writeEvent("6", "six").get();
    // Net flow change: +3 to segments 1,2,3 after the scale, -1 to sealed segment 0;
    // with pooling the socket count stays pinned at the cap.
    assertChannelCount(5, connectionPool, connectionFactory);
    ReaderGroup readerGroup = groupManager.getReaderGroup(READER_GROUP);
    CompletableFuture<Checkpoint> checkpointFuture = readerGroup.initiateCheckpoint("cp1", executor);
    // State-synchronizer traffic is likewise absorbed by the pooled channels.
    assertChannelCount(5, connectionPool, connectionFactory);
    readEvent = reader1.readNextEvent(5000);
    assertEquals("cp1", readEvent.getCheckpointName());
    readEvent = reader1.readNextEvent(5000);
    assertNotNull(readEvent.getEvent());
    checkpointFuture.join();
    // Closing the reader group must not leak channels past the cap.
    readerGroup.close();
    assertChannelCount(5, connectionPool, connectionFactory);
    readEvent = reader1.readNextEvent(10000);
    assertNotNull(readEvent.getEvent());
    assertChannelCount(5, connectionPool, connectionFactory);
}
Use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega: class EndToEndChannelLeakTest, method testDetectChannelLeakMultiReader.
@Test(timeout = 30000)
public void testDetectChannelLeakMultiReader() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    // A generous connection cap so every flow gets a channel of its own.
    final ClientConfig clientConfig = ClientConfig.builder().maxConnectionsPerSegmentStore(500).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope(SCOPE, 0L).get();
    controller.createStream(SCOPE, STREAM_NAME, config).get();
    @Cleanup
    SocketConnectionFactoryImpl connectionFactory = new SocketConnectionFactoryImpl(clientConfig, new InlineExecutor());
    @Cleanup
    ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(clientConfig, connectionFactory);
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionPool);
    // No sockets are open before any client activity.
    int openChannels = 0;
    assertChannelCount(openChannels, connectionPool, connectionFactory);
    // Create a writer and write one event.
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM_NAME, serializer, writerConfig);
    writer.writeEvent("0", "zero").get();
    // One connection to segment 0.
    openChannels += 1;
    assertChannelCount(openChannels, connectionPool, connectionFactory);
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(SCOPE, controller, clientFactory);
    // Creating the manager opens nothing.
    assertChannelCount(openChannels, connectionPool, connectionFactory);
    groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(0).stream(Stream.of(SCOPE, STREAM_NAME)).build());
    // Create a reader and read the event back.
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("readerId1", READER_GROUP, serializer, ReaderConfig.builder().disableTimeWindows(true).build());
    // The reader's revisioned stream client opens 4 sockets (read, write, metadataClient, conditionalUpdates).
    openChannels += 4;
    EventRead<String> readEvent = reader1.readNextEvent(10000);
    // The segment read itself opens one more connection to segment 0.
    openChannels += 1;
    assertEquals("zero", readEvent.getEvent());
    assertChannelCount(openChannels, connectionPool, connectionFactory);
    // Scale the stream from one segment to three.
    Stream scaledStream = new StreamImpl(SCOPE, STREAM_NAME);
    Map<Double, Double> keyRanges = new HashMap<>();
    keyRanges.put(0.0, 0.33);
    keyRanges.put(0.33, 0.66);
    keyRanges.put(0.66, 1.0);
    Boolean scaled = controller.scaleStream(scaledStream, Collections.singletonList(0L), keyRanges, executor).getFuture().get();
    assertTrue(scaled);
    // Scaling alone leaves the channel count unchanged.
    assertChannelCount(openChannels, connectionPool, connectionFactory);
    readEvent = reader1.readNextEvent(0);
    assertNull(readEvent.getEvent());
    readEvent = reader1.readNextEvent(0);
    assertNull(readEvent.getEvent());
    // Hitting end-of-segment closes the connection to the sealed segment.
    openChannels -= 1;
    assertChannelCount(openChannels, connectionPool, connectionFactory);
    // Write more events; they land on the new segments.
    writer.writeEvent("1", "one").get();
    writer.writeEvent("2", "two").get();
    writer.writeEvent("3", "three").get();
    writer.writeEvent("4", "four").get();
    writer.writeEvent("5", "five").get();
    writer.writeEvent("6", "six").get();
    // The writer opens 3 connections to the new segments and drops 1 to the sealed one: net +2.
    openChannels += 2;
    assertChannelCount(openChannels, connectionPool, connectionFactory);
    ReaderGroup readerGroup = groupManager.getReaderGroup(READER_GROUP);
    CompletableFuture<Checkpoint> checkpointFuture = readerGroup.initiateCheckpoint("cp1", executor);
    // 4 more from the reader group's state synchronizer.
    openChannels += 4;
    assertChannelCount(openChannels, connectionPool, connectionFactory);
    readEvent = reader1.readNextEvent(5000);
    assertEquals("cp1", readEvent.getCheckpointName());
    // Add a second reader.
    @Cleanup
    EventStreamReader<String> reader2 = clientFactory.createReader("readerId2", READER_GROUP, serializer, ReaderConfig.builder().disableTimeWindows(true).build());
    // Its revisioned stream client opens another 4 sockets.
    openChannels += 4;
    assertChannelCount(openChannels, connectionPool, connectionFactory);
    readEvent = reader1.readNextEvent(5000);
    assertNotNull(readEvent.getEvent());
    readEvent = reader2.readNextEvent(5000);
    assertNotNull(readEvent.getEvent());
    // Reading from the three new segments opens 3 more.
    openChannels += 3;
    assertChannelCount(openChannels, connectionPool, connectionFactory);
    checkpointFuture.join();
    // Closing the reader group releases its 4 state-synchronizer channels.
    readerGroup.close();
    openChannels -= 4;
    assertChannelCount(openChannels, connectionPool, connectionFactory);
    reader1.close();
    reader2.close();
    // Both readers' state-synchronizer flows (8) plus the 3 segment connections go away.
    openChannels -= 8 + 3;
    assertChannelCount(openChannels, connectionPool, connectionFactory);
}
Use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega: class EndToEndTruncationTest, method testWriteDuringTruncationAndDeletion.
@Test(timeout = 30000)
public void testWriteDuringTruncationAndDeletion() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 2)).build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    String streamName = "testWriteDuringTruncationAndDeletion";
    controller.createScope("test").get();
    controller.createStream("test", streamName, config).get();
    // Relax the minimum segment count so the stream is allowed to scale down to one segment.
    config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    controller.updateStream("test", streamName, config).get();
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<>(), EventWriterConfig.builder().build());
    // Routing key "0" maps to key 0.8, so this event goes to segment 1.
    writer.writeEvent("0", "truncationTest1").get();
    // Scale down to a single segment covering the whole key space.
    Stream stream = new StreamImpl("test", streamName);
    Map<Double, Double> fullRange = new HashMap<>();
    fullRange.put(0.0, 1.0);
    assertTrue("Stream Scale down", controller.scaleStream(stream, Lists.newArrayList(0L, 1L), fullRange, executorService()).getFuture().get());
    // Truncate the stream at segment 2, offset 0.
    Map<Long, Long> streamCutPositions = new HashMap<>();
    streamCutPositions.put(computeSegmentId(2, 1), 0L);
    assertTrue("Truncate stream", controller.truncateStream("test", streamName, streamCutPositions).get());
    // Routing key "2" maps to key 0.2, which would be segment 0 — but segment 0 is
    // truncated away, so the write must land on segment 2 instead.
    writer.writeEvent("2", "truncationTest2").get();
    String group = "testWriteDuringTruncationAndDeletion-group";
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/" + streamName).build());
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", group, new JavaSerializer<>(), ReaderConfig.builder().build());
    EventRead<String> readEvent = reader.readNextEvent(10000);
    assertNotNull(readEvent);
    assertEquals("truncationTest2", readEvent.getEvent());
    // Seal and delete the stream.
    assertTrue(controller.sealStream("test", streamName).get());
    assertTrue(controller.deleteStream("test", streamName).get());
    // A pending write by an existing writer to the deleted stream completes exceptionally.
    assertFutureThrows("Should throw NoSuchSegmentException", writer.writeEvent("2", "write to deleted stream"), e -> NoSuchSegmentException.class.isAssignableFrom(e.getClass()));
    // Subsequent writes surface a RuntimeException directly to the application.
    assertThrows(RuntimeException.class, () -> writer.writeEvent("test"));
}
Use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega: class EndToEndTruncationTest, method testSimpleOffsetTruncation.
/**
 * Checks basic truncation with offsets: two events are written to a single-segment Stream,
 * the Stream is truncated just after the first event, and a fresh reader is then expected to
 * read only the second event (after internally surfacing a TruncatedDataException), since the
 * first event has been truncated away.
 *
 * @throws ReinitializationRequiredException If a checkpoint or reset is performed on the reader group.
 */
@Test(timeout = 30000)
public void testSimpleOffsetTruncation() throws ReinitializationRequiredException {
    final String scope = "truncationTests";
    final String streamName = "testSimpleOffsetTruncation";
    final String readerGroupName = "RGTestSimpleOffsetTruncation";
    StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    @Cleanup
    StreamManager streamManager = StreamManager.create(PRAVEGA.getControllerURI());
    streamManager.createScope(scope);
    streamManager.createStream(scope, streamName, streamConfiguration);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, PRAVEGA.getControllerURI());
    groupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(scope + "/" + streamName).build());
    @Cleanup
    ReaderGroup readerGroup = groupManager.getReaderGroup(readerGroupName);
    // Write two events to the Stream.
    writeEvents(clientFactory, streamName, 2);
    // Consume only the first event, then release the reader.
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader(readerGroupName + "1", readerGroupName, new UTF8StringSerializer(), ReaderConfig.builder().build());
    assertEquals(reader.readNextEvent(5000).getEvent(), "0");
    reader.close();
    // Take a checkpoint, derive a StreamCut from it, and truncate the Stream at that point.
    Checkpoint checkpoint = readerGroup.initiateCheckpoint("myCheckpoint", executorService()).join();
    StreamCut cutPoint = checkpoint.asImpl().getPositions().values().iterator().next();
    assertTrue(streamManager.truncateStream(scope, streamName, cutPoint));
    // A brand-new reader group should now read from event 1 onwards.
    final String newReaderGroupName = readerGroupName + "new";
    groupManager.createReaderGroup(newReaderGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build());
    @Cleanup
    final EventStreamReader<String> newReader = clientFactory.createReader(newReaderGroupName + "2", newReaderGroupName, new UTF8StringSerializer(), ReaderConfig.builder().build());
    assertEquals("Expected read event: ", "1", newReader.readNextEvent(5000).getEvent());
    assertNull(newReader.readNextEvent(5000).getEvent());
}
Aggregations