Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From the class EndToEndTruncationTest, method testWriteDuringTruncationAndDeletion.
@Test(timeout = 30000)
public void testWriteDuringTruncationAndDeletion() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 2)).build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    String streamName = "testWriteDuringTruncationAndDeletion";
    controller.createScope("test").get();
    controller.createStream("test", streamName, config).get();
    config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    controller.updateStream("test", streamName, config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<>(), EventWriterConfig.builder().build());
    // routing key "0" translates to key 0.8. This write happens to segment 1.
    writer.writeEvent("0", "truncationTest1").get();
    // scale down to one segment.
    Stream stream = new StreamImpl("test", streamName);
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 1.0);
    assertTrue("Stream Scale down", controller.scaleStream(stream, Lists.newArrayList(0L, 1L), map, executorService()).getFuture().get());
    // truncate stream at segment 2, offset 0.
    Map<Long, Long> streamCutPositions = new HashMap<>();
    streamCutPositions.put(computeSegmentId(2, 1), 0L);
    assertTrue("Truncate stream", controller.truncateStream("test", streamName, streamCutPositions).get());
// routing key "2" translates to key 0.2.
// this write translates to a write to Segment 0, but since segment 0 is truncated the write should happen on segment 2.
// write to segment 0
writer.writeEvent("2", "truncationTest2").get();
    String group = "testWriteDuringTruncationAndDeletion-group";
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/" + streamName).build());
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", group, new JavaSerializer<>(), ReaderConfig.builder().build());
    EventRead<String> event = reader.readNextEvent(10000);
    assertNotNull(event);
    assertEquals("truncationTest2", event.getEvent());
    // Seal and Delete stream.
    assertTrue(controller.sealStream("test", streamName).get());
    assertTrue(controller.deleteStream("test", streamName).get());
    // write by an existing writer to a deleted stream should complete exceptionally.
    assertFutureThrows("Should throw NoSuchSegmentException", writer.writeEvent("2", "write to deleted stream"), e -> NoSuchSegmentException.class.isAssignableFrom(e.getClass()));
    // subsequent writes will throw an exception to the application.
    assertThrows(RuntimeException.class, () -> writer.writeEvent("test"));
}
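The stream cut above is keyed by a segment id built with computeSegmentId(2, 1), i.e. segment number 2 created in epoch 1. Below is a minimal sketch of the assumed id encoding (creation epoch in the high 32 bits, segment number in the low 32 bits); composeSegmentId is an illustrative helper, not the library method.

import java.util.Collections;
import java.util.Map;

final class SegmentIdSketch {
    // Assumed encoding: creation epoch in the high 32 bits, segment number in the low 32 bits.
    static long composeSegmentId(int segmentNumber, int epoch) {
        return ((long) epoch << 32) + segmentNumber;
    }

    public static void main(String[] args) {
        // Offset 0 of segment 2 created in epoch 1, matching the truncation call in the test above.
        Map<Long, Long> streamCut = Collections.singletonMap(composeSegmentId(2, 1), 0L);
        System.out.println(streamCut); // prints {4294967298=0} under this assumed encoding
    }
}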
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From the class RetentionTest, method testRetentionSize.
@Test(timeout = 30000)
public void testRetentionSize() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).retentionPolicy(RetentionPolicy.bySizeBytes(10)).build();
    LocalController controller = (LocalController) controllerWrapper.getController();
    String name = "testsize";
    Stream stream = new StreamImpl(name, name);
    controllerWrapper.getControllerService().createScope(name, 0L).get();
    controller.createStream(name, name, config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(controllerURI).build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(name, controller, connectionFactory);
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(name, new JavaSerializer<>(), EventWriterConfig.builder().build());
    Map<Segment, Long> x = controller.getSegmentsAtTime(stream, 0L).join();
    assertTrue(x.values().stream().allMatch(a -> a == 0));
    AtomicBoolean continueLoop = new AtomicBoolean(true);
    Futures.loop(continueLoop::get, () -> writer.writeEvent("a"), executor);
    AssertExtensions.assertEventuallyEquals(true, () -> controller.getSegmentsAtTime(stream, 0L).join().values().stream().anyMatch(a -> a > 0), 30 * 1000L);
    continueLoop.set(false);
}
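A minimal sketch of the background-write pattern used above, assuming writer and executor are set up as in the test; the join on the loop future is an addition here and assumes Futures.loop returns a future that completes once the condition turns false.

AtomicBoolean keepWriting = new AtomicBoolean(true);
// Each iteration starts a new write after the previous one completes, until the flag is cleared.
CompletableFuture<Void> writeLoop = Futures.loop(keepWriting::get, () -> writer.writeEvent("a"), executor);
// ... assert on controller.getSegmentsAtTime(...) as the test does ...
keepWriting.set(false);
writeLoop.join(); // assumption: the loop future completes after the condition turns false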
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From the class RetentionTest, method testRetentionTime.
@Test(timeout = 30000)
public void testRetentionTime() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).retentionPolicy(RetentionPolicy.byTime(Duration.ofSeconds(1))).build();
    LocalController controller = (LocalController) controllerWrapper.getController();
    String name = "testtime";
    Stream stream = new StreamImpl(name, name);
    controllerWrapper.getControllerService().createScope(name, 0L).get();
    controller.createStream(name, name, config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(controllerURI).build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(name, controller, connectionFactory);
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(name, new JavaSerializer<>(), EventWriterConfig.builder().build());
    Map<Segment, Long> x = controller.getSegmentsAtTime(stream, 0L).join();
    assertTrue(x.values().stream().allMatch(a -> a == 0));
    AtomicBoolean continueLoop = new AtomicBoolean(true);
    Futures.loop(continueLoop::get, () -> writer.writeEvent("a"), executor);
    AssertExtensions.assertEventuallyEquals(true, () -> controller.getSegmentsAtTime(stream, 0L).join().values().stream().anyMatch(a -> a > 0), 30 * 1000L);
    continueLoop.set(false);
}
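The only configuration difference between testRetentionSize and testRetentionTime is the retention policy; both tests leave truncation to the controller's retention service and only assert that some segment start offset eventually becomes non-zero. The two policies side by side, taken from the tests above:

StreamConfiguration bySize = StreamConfiguration.builder()
        .scalingPolicy(ScalingPolicy.fixed(2))
        .retentionPolicy(RetentionPolicy.bySizeBytes(10))                // keep roughly the most recent 10 bytes
        .build();
StreamConfiguration byTime = StreamConfiguration.builder()
        .scalingPolicy(ScalingPolicy.fixed(2))
        .retentionPolicy(RetentionPolicy.byTime(Duration.ofSeconds(1)))  // keep roughly the most recent second of data
        .build();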
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From the class ReaderGroupNotificationTest, method testEndOfStreamNotifications.
@Test(timeout = 40000)
public void testEndOfStreamNotifications() throws Exception {
    final String streamName = "stream2";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope(SCOPE, 0L).get();
    controller.createStream(SCOPE, streamName, config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionFactory);
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<>(), EventWriterConfig.builder().build());
    writer.writeEvent("0", "data1").get();
    // scale
    Stream stream = new StreamImpl(SCOPE, streamName);
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.5);
    map.put(0.5, 1.0);
    Boolean result = controller.scaleStream(stream, Collections.singletonList(0L), map, executorService()).getFuture().get();
    assertTrue(result);
    writer.writeEvent("0", "data2").get();
    // seal stream
    assertTrue(controller.sealStream(SCOPE, streamName).get());
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl(SCOPE, controller, clientFactory);
    groupManager.createReaderGroup("reader", ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(SCOPE, streamName)).groupRefreshTimeMillis(0).build());
    @Cleanup ReaderGroup readerGroup = groupManager.getReaderGroup("reader");
    @Cleanup EventStreamReader<String> reader1 = clientFactory.createReader("readerId", "reader", new JavaSerializer<>(), ReaderConfig.builder().initialAllocationDelay(0).build());
    // Add an end-of-data listener.
    Listener<EndOfDataNotification> l1 = notification -> {
        listenerInvoked.set(true);
        listenerLatch.release();
    };
    EndOfDataNotifier endOfDataNotifier = (EndOfDataNotifier) readerGroup.getEndOfDataNotifier(executorService());
    endOfDataNotifier.registerListener(l1);
    EventRead<String> event1 = reader1.readNextEvent(10000);
    assertEquals("data1", event1.getEvent());
    EventRead<String> emptyEvent = reader1.readNextEvent(0);
    assertNull(emptyEvent.getEvent());
    assertFalse(emptyEvent.isCheckpoint());
    readerGroup.initiateCheckpoint("cp", executorService());
    EventRead<String> cpEvent = reader1.readNextEvent(10000);
    assertTrue(cpEvent.isCheckpoint());
    EventRead<String> event2 = reader1.readNextEvent(10000);
    assertEquals("data2", event2.getEvent());
    emptyEvent = reader1.readNextEvent(0);
    assertNull(emptyEvent.getEvent());
    assertFalse(emptyEvent.isCheckpoint());
    emptyEvent = reader1.readNextEvent(0);
    assertNull(emptyEvent.getEvent());
    assertFalse(emptyEvent.isCheckpoint());
    readerGroup.initiateCheckpoint("cp2", executorService());
    cpEvent = reader1.readNextEvent(10000);
    assertTrue(cpEvent.isCheckpoint());
    emptyEvent = reader1.readNextEvent(0);
    assertNull(emptyEvent.getEvent());
    assertFalse(emptyEvent.isCheckpoint());
    endOfDataNotifier.pollNow();
    listenerLatch.await();
    assertTrue("Listener invoked", listenerInvoked.get());
}
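A minimal sketch of the same registration through the public Observable API, assuming readerGroup and executorService() exist as in the test and using a plain java.util.concurrent.CountDownLatch in place of the test's latch; the cast to EndOfDataNotifier above is only needed for pollNow(), which forces an immediate check instead of waiting for the periodic poll.

CountDownLatch endOfData = new CountDownLatch(1);
// Register directly on the Observable returned by the reader group.
readerGroup.getEndOfDataNotifier(executorService()).registerListener(notification -> endOfData.countDown());
// ... read until all segments of the sealed stream are consumed, checkpointing as in the test ...
endOfData.await(); // unblocks once the end-of-data notification fires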
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From the class ReaderGroupNotificationTest, method testSegmentNotifications.
@Test(timeout = 40000)
public void testSegmentNotifications() throws Exception {
    final String streamName = "stream1";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope(SCOPE, 0L).get();
    controller.createStream(SCOPE, streamName, config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionFactory);
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<>(), EventWriterConfig.builder().build());
    writer.writeEvent("0", "data1").get();
    // scale
    Stream stream = new StreamImpl(SCOPE, streamName);
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.5);
    map.put(0.5, 1.0);
    Boolean result = controller.scaleStream(stream, Collections.singletonList(0L), map, executorService()).getFuture().get();
    assertTrue(result);
    writer.writeEvent("0", "data2").get();
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl(SCOPE, controller, clientFactory);
    groupManager.createReaderGroup("reader", ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(SCOPE, streamName)).groupRefreshTimeMillis(0).build());
    @Cleanup ReaderGroup readerGroup = groupManager.getReaderGroup("reader");
    @Cleanup EventStreamReader<String> reader1 = clientFactory.createReader("readerId", "reader", new JavaSerializer<>(), ReaderConfig.builder().initialAllocationDelay(0).build());
    val notificationResults = new ArrayBlockingQueue<SegmentNotification>(2);
    // Add segment event listener
    Listener<SegmentNotification> l1 = notification -> {
        log.info("Number of Segments: {}, Number of Readers: {}", notification.getNumOfSegments(), notification.getNumOfReaders());
        notificationResults.add(notification);
    };
    SegmentNotifier segmentNotifier = (SegmentNotifier) readerGroup.getSegmentNotifier(executorService());
    segmentNotifier.registerListener(l1);
    // Read first event and validate notification.
    EventRead<String> event1 = reader1.readNextEvent(5000);
    assertEquals("data1", event1.getEvent());
    segmentNotifier.pollNow();
    SegmentNotification initialSegmentNotification = notificationResults.take();
    assertNotNull(initialSegmentNotification);
    assertEquals(1, initialSegmentNotification.getNumOfReaders());
    assertEquals(1, initialSegmentNotification.getNumOfSegments());
    EventRead<String> emptyEvent = reader1.readNextEvent(0);
    assertNull(emptyEvent.getEvent());
    assertFalse(emptyEvent.isCheckpoint());
    readerGroup.initiateCheckpoint("cp", executorService());
    EventRead<String> cpEvent = reader1.readNextEvent(1000);
    assertTrue(cpEvent.isCheckpoint());
    // Read second event and validate notification.
    EventRead<String> event2 = reader1.readNextEvent(10000);
    assertEquals("data2", event2.getEvent());
    segmentNotifier.pollNow();
    SegmentNotification segmentNotificationPostScale = notificationResults.take();
    assertEquals(1, segmentNotificationPostScale.getNumOfReaders());
    assertEquals(2, segmentNotificationPostScale.getNumOfSegments());
}
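As with the end-of-data notifier, only the Observable interface is needed to register a listener; the cast to SegmentNotifier above exists so the test can call pollNow(). A minimal registration sketch, assuming readerGroup, log, and executorService() as in the test:

Observable<SegmentNotification> segmentObservable = readerGroup.getSegmentNotifier(executorService());
segmentObservable.registerListener(n ->
        log.info("Segments: {}, Readers: {}", n.getNumOfSegments(), n.getNumOfReaders()));
// Without pollNow(), the notifier reports segment-count changes on its periodic schedule.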