Use of io.pravega.client.stream.EventStreamWriter in project pravega by pravega.
From the class EndToEndCBRTest, method testReaderGroupAutoRetention. This test exercises consumption-based retention (CBR) with automatic release: the stream is truncated once the subscribing reader group completes a checkpoint.
@Test(timeout = 60000)
public void testReaderGroupAutoRetention() throws Exception {
    String scope = "test";
    String streamName = "testReaderGroupAutoRetention";
    String groupName = "testReaderGroupAutoRetention-group";
    // Size-based, consumption-based retention bounds: at least 10 bytes, effectively no upper bound.
    StreamConfiguration config = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(1))
            .retentionPolicy(RetentionPolicy.bySizeBytes(10, Long.MAX_VALUE))
            .build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope(scope).get();
    controller.createStream(scope, streamName, config).get();
    Stream stream = Stream.of(scope, streamName);
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    // Write two events.
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    writer.writeEvent("1", "e1").join();
    writer.writeEvent("2", "e2").join();
    // Create a reader group that subscribes to the stream with automatic release at the last checkpoint.
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory);
    groupManager.createReaderGroup(groupName, ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
            .stream(stream)
            .build());
    // Create a reader with a controllable clock.
    AtomicLong clock = new AtomicLong();
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("reader1", groupName, serializer, ReaderConfig.builder().build(), clock::get, clock::get);
    clock.addAndGet(CLOCK_ADVANCE_INTERVAL);
    EventRead<String> read = reader.readNextEvent(60000);
    assertEquals("e1", read.getEvent());
    clock.addAndGet(CLOCK_ADVANCE_INTERVAL);
    @Cleanup("shutdown")
    final InlineExecutor backgroundExecutor = new InlineExecutor();
    ReaderGroup readerGroup = groupManager.getReaderGroup(groupName);
    CompletableFuture<Checkpoint> checkpoint = readerGroup.initiateCheckpoint("Checkpoint", backgroundExecutor);
    assertFalse(checkpoint.isDone());
    // The checkpoint is delivered to the reader as a special event with no payload.
    read = reader.readNextEvent(60000);
    assertTrue(read.isCheckpoint());
    assertEquals("Checkpoint", read.getCheckpointName());
    assertNull(read.getEvent());
    clock.addAndGet(CLOCK_ADVANCE_INTERVAL);
    read = reader.readNextEvent(60000);
    assertEquals("e2", read.getEvent());
    Checkpoint cpResult = checkpoint.get(5, TimeUnit.SECONDS);
    assertTrue(checkpoint.isDone());
    assertEquals("Checkpoint", cpResult.getName());
    read = reader.readNextEvent(100);
    assertNull(read.getEvent());
    assertFalse(read.isCheckpoint());
    // The completed checkpoint lets the controller truncate the stream: wait until segment 0's head offset moves past 0.
    AssertExtensions.assertEventuallyEquals(true, () -> controller.getSegmentsAtTime(new StreamImpl(scope, streamName), 0L).join().values().stream().anyMatch(off -> off > 0), 30 * 1000L);
    // A fresh reader group starts from the truncated head of the stream, so the first readable event is now e2.
    String group2 = groupName + "2";
    groupManager.createReaderGroup(group2, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(NameUtils.getScopedStreamName(scope, streamName)).build());
    EventStreamReader<String> reader2 = clientFactory.createReader("reader2", group2, serializer, ReaderConfig.builder().build());
    EventRead<String> eventRead2 = reader2.readNextEvent(10000);
    assertEquals("e2", eventRead2.getEvent());
}
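Distilled from the test above, a minimal sketch of wiring up consumption-based retention with automatic release. This is an illustration, not a helper from the project: the method name, the scope/stream/group names, and the injected StreamManager, ReaderGroupManager, and executor are assumptions, while the Pravega calls themselves (bySizeBytes, retentionType, initiateCheckpoint) are the ones the test exercises.

static CompletableFuture<Checkpoint> setUpAutoCbr(StreamManager streamManager,
                                                  ReaderGroupManager readerGroupManager,
                                                  ScheduledExecutorService executor) {
    // Stream with a consumption-based size retention policy (lower bound 10 bytes, effectively no upper bound).
    StreamConfiguration cfg = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(1))
            .retentionPolicy(RetentionPolicy.bySizeBytes(10, Long.MAX_VALUE))
            .build();
    streamManager.createStream("myScope", "myStream", cfg);
    // Subscribing reader group: data before its last completed checkpoint becomes eligible for truncation.
    readerGroupManager.createReaderGroup("mySubscriber", ReaderGroupConfig.builder()
            .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
            .stream(Stream.of("myScope", "myStream"))
            .build());
    // Completing a checkpoint is what advances the group's published position;
    // readers must keep calling readNextEvent() for the checkpoint to complete.
    ReaderGroup group = readerGroupManager.getReaderGroup("mySubscriber");
    return group.initiateCheckpoint("cp-1", executor);
}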
Use of io.pravega.client.stream.EventStreamWriter in project pravega by pravega.
From the class EndToEndCBRTest, method testReaderGroupManualRetention. Here truncation is driven by a stream-cut that the application publishes explicitly, rather than by a checkpoint.
@Test(timeout = 60000)
public void testReaderGroupManualRetention() throws Exception {
    String scope = "test";
    String streamName = "testReaderGroupManualRetention";
    String groupName = "testReaderGroupManualRetention-group";
    StreamConfiguration config = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(1))
            .retentionPolicy(RetentionPolicy.bySizeBytes(10, Long.MAX_VALUE))
            .build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope(scope).get();
    controller.createStream(scope, streamName, config).get();
    Stream stream = Stream.of(scope, streamName);
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    // Write two events.
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    writer.writeEvent("1", "e1").join();
    writer.writeEvent("2", "e2").join();
    // Create a reader group that subscribes with manual release at a user-supplied stream-cut.
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory);
    groupManager.createReaderGroup(groupName, ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT)
            .stream(stream)
            .build());
    // Create a reader with a controllable clock and consume both events.
    AtomicLong clock = new AtomicLong();
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("reader1", groupName, serializer, ReaderConfig.builder().build(), clock::get, clock::get);
    clock.addAndGet(CLOCK_ADVANCE_INTERVAL);
    EventRead<String> read = reader.readNextEvent(60000);
    assertEquals("e1", read.getEvent());
    clock.addAndGet(CLOCK_ADVANCE_INTERVAL);
    read = reader.readNextEvent(60000);
    assertEquals("e2", read.getEvent());
    // Publish a stream-cut at offset 17 of segment 0, the boundary after the first event.
    ReaderGroup readerGroup = groupManager.getReaderGroup(groupName);
    Map<Segment, Long> segmentMap = new HashMap<>();
    segmentMap.put(new Segment(scope, streamName, 0), 17L);
    Map<Stream, StreamCut> scResult2 = new HashMap<>();
    scResult2.put(stream, new StreamCutImpl(stream, segmentMap));
    readerGroup.updateRetentionStreamCut(scResult2);
    // Wait until the controller truncates the stream (segment 0's head offset moves past 0).
    AssertExtensions.assertEventuallyEquals(true, () -> controller.getSegmentsAtTime(stream, 0L).join().values().stream().anyMatch(off -> off > 0), 30 * 1000L);
    // A fresh reader group starts from the truncated head, so the first readable event is now e2.
    String group2 = groupName + "2";
    groupManager.createReaderGroup(group2, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(NameUtils.getScopedStreamName(scope, streamName)).build());
    EventStreamReader<String> reader2 = clientFactory.createReader("reader2", group2, serializer, ReaderConfig.builder().build());
    EventRead<String> eventRead2 = reader2.readNextEvent(10000);
    assertEquals("e2", eventRead2.getEvent());
}
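For the manual mode, the essential call is updateRetentionStreamCut with a map from stream to the cut below which data may be released. A minimal sketch of that step, with the caveat that, like the test, it builds the cut by hand from a known segment offset using the internal StreamCutImpl class, and that the offset must fall on an event boundary; the method name and parameters are illustrative.

static void releaseUpTo(ReaderGroup subscriberGroup, String scope, String streamName, long offset) {
    Stream stream = Stream.of(scope, streamName);
    // For a fixed(1) stream there is a single segment, number 0.
    Map<Segment, Long> positions = new HashMap<>();
    positions.put(new Segment(scope, streamName, 0), offset);
    StreamCut cut = new StreamCutImpl(stream, positions);
    // Tell the controller this subscriber no longer needs data before the cut.
    subscriberGroup.updateRetentionStreamCut(Collections.singletonMap(stream, cut));
}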
Use of io.pravega.client.stream.EventStreamWriter in project pravega by pravega.
From the class SingleSubscriberUpdateRetentionStreamCutTest, method singleSubscriberCBRTest. A single subscriber repeatedly generates stream-cuts and publishes them as its retention position, and the test verifies that truncation happens at the expected offsets.
@Test
public void singleSubscriberCBRTest() throws Exception {
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM, new JavaSerializer<>(), EventWriterConfig.builder().build());
    // Write a single event.
    log.info("Writing event e1 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e1", SIZE_30_EVENT).join();
    @Cleanup
    ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
    readerGroupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder()
            .retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT)
            .disableAutomaticCheckpoints()
            .stream(Stream.of(SCOPE, STREAM))
            .build());
    ReaderGroup readerGroup = readerGroupManager.getReaderGroup(READER_GROUP);
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader(READER_GROUP + "-" + 1, READER_GROUP, new JavaSerializer<>(), readerConfig);
    // Read one event.
    log.info("Reading event e1 from {}/{}", SCOPE, STREAM);
    EventRead<String> read = reader.readNextEvent(READ_TIMEOUT);
    assertFalse(read.isCheckpoint());
    assertEquals("data of size 30", read.getEvent());
    // Update the retention stream-cut.
    log.info("{} generating stream-cuts for {}/{}", READER_GROUP, SCOPE, STREAM);
    CompletableFuture<Map<Stream, StreamCut>> futureCuts = readerGroup.generateStreamCuts(streamCutExecutor);
    // Wait for 5 seconds to force a reader group state update. This allows the silent
    // checkpoint event generated as part of generateStreamCuts to be picked up and processed.
    Exceptions.handleInterrupted(() -> TimeUnit.SECONDS.sleep(5));
    // This read carries the silent checkpoint rather than a user event.
    EventRead<String> emptyEvent = reader.readNextEvent(READ_TIMEOUT);
    assertTrue("Stream-cut generation did not complete", Futures.await(futureCuts, 10_000));
    Map<Stream, StreamCut> streamCuts = futureCuts.join();
    log.info("{} updating its retention stream-cut to {}", READER_GROUP, streamCuts);
    readerGroup.updateRetentionStreamCut(streamCuts);
    // Write two more events.
    log.info("Writing event e2 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e2", SIZE_30_EVENT).join();
    log.info("Writing event e3 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e3", SIZE_30_EVENT).join();
    // Check that truncation happened after the first event.
    // The timeout is 5 minutes because the retention period is set to 2 minutes: this allows two
    // retention cycles to fully complete, plus a little longer to confirm the truncation.
    AssertExtensions.assertEventuallyEquals("Truncation did not take place at offset 30.", true, () -> controller.getSegmentsAtTime(new StreamImpl(SCOPE, STREAM), 0L).join().values().stream().anyMatch(off -> off >= 30), 1000, 5 * 60 * 1000L);
    // Read the next event.
    log.info("Reading event e2 from {}/{}", SCOPE, STREAM);
    read = reader.readNextEvent(READ_TIMEOUT);
    assertFalse(read.isCheckpoint());
    assertEquals("data of size 30", read.getEvent());
    // Update the retention stream-cut again.
    log.info("{} generating stream-cuts for {}/{}", READER_GROUP, SCOPE, STREAM);
    CompletableFuture<Map<Stream, StreamCut>> futureCuts2 = readerGroup.generateStreamCuts(streamCutExecutor);
    // As above, wait for the silent checkpoint to be picked up and processed.
    Exceptions.handleInterrupted(() -> TimeUnit.SECONDS.sleep(5));
    EventRead<String> emptyEvent2 = reader.readNextEvent(READ_TIMEOUT);
    assertTrue("Stream-cut generation did not complete", Futures.await(futureCuts2, 10_000));
    Map<Stream, StreamCut> streamCuts2 = futureCuts2.join();
    log.info("{} updating its retention stream-cut to {}", READER_GROUP, streamCuts2);
    readerGroup.updateRetentionStreamCut(streamCuts2);
    // Check that truncation happened after the second event, with the same timeout rationale as above.
    AssertExtensions.assertEventuallyEquals("Truncation did not take place at offset 60.", true, () -> controller.getSegmentsAtTime(new StreamImpl(SCOPE, STREAM), 0L).join().values().stream().anyMatch(off -> off >= 60), 1000, 5 * 60 * 1000L);
}
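The generate-then-publish sequence appears twice above; a sketch of it as a helper. The 5-second sleep in the test forces a reader group state update so the silent checkpoint behind generateStreamCuts gets processed; here that is approximated by a single read, and the method name, parameters, and timeout are illustrative.

static void advanceRetentionCut(ReaderGroup group, EventStreamReader<?> reader,
                                ScheduledExecutorService executor, long readTimeoutMillis) {
    CompletableFuture<Map<Stream, StreamCut>> futureCuts = group.generateStreamCuts(executor);
    // generateStreamCuts piggybacks on a silent checkpoint; at least one read
    // is needed for the reader to process it and let the future complete.
    reader.readNextEvent(readTimeoutMillis);
    Map<Stream, StreamCut> cuts = futureCuts.join();
    // Publish the generated cuts as this subscriber's retention position.
    group.updateRetentionStreamCut(cuts);
}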
Use of io.pravega.client.stream.EventStreamWriter in project pravega by pravega.
From the class AutoScaleProcessorTest, method writerCreationTest. The test covers three writer-creation paths: a factory whose createEventWriter always throws, a factory that returns a mock writer, and a writer handed to the processor directly.
@Test(timeout = 10000)
public void writerCreationTest() throws Exception {
    EventStreamClientFactory clientFactory = mock(EventStreamClientFactory.class);
    CompletableFuture<Void> createWriterLatch = new CompletableFuture<>();
    doAnswer(x -> {
        createWriterLatch.complete(null);
        throw new RuntimeException();
    }).when(clientFactory).createEventWriter(any(), any(), any());
    TestAutoScaleProcessor failingWriterProcessor = new TestAutoScaleProcessor(AutoScalerConfig.builder().with(AutoScalerConfig.CONTROLLER_URI, "tcp://localhost:9090").build(), clientFactory, executorService());
    String segmentStreamName = "scope/myStreamSegment/0.#epoch.0";
    failingWriterProcessor.notifyCreated(segmentStreamName);
    assertFalse(failingWriterProcessor.isInitializeStarted());
    AtomicReference<EventStreamWriter<AutoScaleEvent>> w = new AtomicReference<>();
    AssertExtensions.assertThrows("Bootstrap should not be initiated until isInitializeStarted is true", () -> failingWriterProcessor.bootstrapOnce(clientFactory, w), e -> Exceptions.unwrap(e) instanceof RuntimeException);
    // Report metrics; since the cooldown time hasn't elapsed, no scale event should be attempted,
    // so no writer should be initialized yet.
    failingWriterProcessor.report(segmentStreamName, 1, 0L, 10.0, 10.0, 10.0, 10.0);
    assertFalse(failingWriterProcessor.isInitializeStarted());
    failingWriterProcessor.setTimeMillis(20 * 60000L);
    failingWriterProcessor.report(segmentStreamName, 1, 0L, 10.0, 10.0, 10.0, 10.0);
    // The above should initiate the bootstrap.
    assertTrue(failingWriterProcessor.isInitializeStarted());
    // Since writer creation throws, wait until the writer has been requested at least once.
    createWriterLatch.join();
    // Now close the processor; the writer future should get cancelled.
    failingWriterProcessor.close();
    assertTrue(failingWriterProcessor.getWriterFuture().isCancelled());
    // Create a new processor and let the writer get created.
    TestAutoScaleProcessor processor = new TestAutoScaleProcessor(AutoScalerConfig.builder().with(AutoScalerConfig.CONTROLLER_URI, "tcp://localhost:9090").build(), clientFactory, executorService());
    LinkedBlockingQueue<AutoScaleEvent> queue = new LinkedBlockingQueue<>();
    EventStreamWriter<AutoScaleEvent> writerMock = createWriter(queue::add);
    doAnswer(x -> writerMock).when(clientFactory).createEventWriter(any(), any(), any());
    processor.notifyCreated(segmentStreamName);
    // Report a low rate to trigger a scale-down.
    processor.setTimeMillis(21 * 60000L);
    processor.report(segmentStreamName, 10, 0L, 1.0, 1.0, 1.0, 1.0);
    assertTrue(processor.isInitializeStarted());
    AssertExtensions.assertEventuallyEquals(writerMock, () -> processor.getWriterFuture().join(), 10000L);
    AutoScaleEvent event = queue.take();
    assertEquals(event.getDirection(), AutoScaleEvent.DOWN);
    processor.close();
    // Create a third writer, this time supplying the writer directly.
    EventStreamWriter<AutoScaleEvent> writer = spy(createWriter(e -> {
    }));
    // Verify that when the writer is set, the processor can still be initialized.
    TestAutoScaleProcessor processor2 = new TestAutoScaleProcessor(writer, AutoScalerConfig.builder().with(AutoScalerConfig.CONTROLLER_URI, "tcp://localhost:9090").build(), executorService());
    processor2.notifyCreated(segmentStreamName);
    assertFalse(processor2.isInitializeStarted());
    processor2.setTimeMillis(20 * 60000L);
    processor2.report(segmentStreamName, 1, 0L, 10.0, 10.0, 10.0, 10.0);
    // The above should create a writer future.
    assertTrue(processor2.isInitializeStarted());
    assertTrue(Futures.isSuccessful(processor2.getWriterFuture()));
    processor2.close();
    verify(writer, times(1)).close();
}
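The failing-factory setup at the top of the test is a reusable Mockito idiom: a latch records that creation was attempted before the stub throws, so the test can block until the code under test has tried at least once. A sketch of just that piece, lifted from the lines above (the variable names and exception message are illustrative):

EventStreamClientFactory clientFactory = mock(EventStreamClientFactory.class);
CompletableFuture<Void> creationAttempted = new CompletableFuture<>();
doAnswer(invocation -> {
    creationAttempted.complete(null);                      // signal: createEventWriter was called
    throw new RuntimeException("writer creation failed");  // then fail the attempt
}).when(clientFactory).createEventWriter(any(), any(), any());
// ... drive the component that needs a writer ...
creationAttempted.join(); // returns once at least one creation attempt has happened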
Use of io.pravega.client.stream.EventStreamWriter in project pravega by pravega.
From the class EndToEndAutoScaleUpTest, method main. A standalone program that stands up an in-process Pravega cluster, writes continuously to one routing key, and exits with status 0 once the stream scales beyond three segments.
public static void main(String[] args) throws Exception {
    try {
        @Cleanup
        TestingServer zkTestServer = new TestingServerStarter().start();
        int port = Config.SERVICE_PORT;
        @Cleanup
        ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), port, false);
        Controller controller = controllerWrapper.getController();
        ClientFactoryImpl internalCF = new ClientFactoryImpl(NameUtils.INTERNAL_SCOPE_NAME, controller, new SocketConnectionFactoryImpl(ClientConfig.builder().build()));
        @Cleanup("shutdownNow")
        val executor = ExecutorServiceHelpers.newScheduledThreadPool(1, "test");
        @Cleanup
        ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
        serviceBuilder.initialize();
        StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
        TableStore tableStore = serviceBuilder.createTableStoreService();
        // Auto-scale monitor with no mute or cooldown period, so scale events can fire immediately.
        @Cleanup
        AutoScaleMonitor autoScaleMonitor = new AutoScaleMonitor(store, internalCF, AutoScalerConfig.builder().with(AutoScalerConfig.MUTE_IN_SECONDS, 0).with(AutoScalerConfig.COOLDOWN_IN_SECONDS, 0).build());
        @Cleanup
        PravegaConnectionListener server = new PravegaConnectionListener(false, false, "localhost", 12345, store, tableStore, autoScaleMonitor.getStatsRecorder(), autoScaleMonitor.getTableSegmentStatsRecorder(), null, null, null, true, serviceBuilder.getLowPriorityExecutor(), Config.TLS_PROTOCOL_VERSION.toArray(new String[Config.TLS_PROTOCOL_VERSION.size()]));
        server.startListening();
        controllerWrapper.awaitRunning();
        controllerWrapper.getControllerService().createScope("test", 0L).get();
        controller.createStream("test", "test", CONFIG).get();
        @Cleanup
        MockClientFactory clientFactory = new MockClientFactory("test", controller, internalCF.getConnectionPool());
        @Cleanup
        EventStreamWriter<String> test = clientFactory.createEventWriter("test", new JavaSerializer<>(), EventWriterConfig.builder().build());
        // Keep writing to a single routing key for up to three minutes; the sustained
        // write rate should cause the stream to scale up.
        long start = System.currentTimeMillis();
        char[] chars = new char[1];
        Arrays.fill(chars, 'a');
        String str = new String(chars);
        CompletableFuture.runAsync(() -> {
            while (System.currentTimeMillis() - start < Duration.ofMinutes(3).toMillis()) {
                try {
                    test.writeEvent("0", str).get();
                } catch (Throwable e) {
                    System.err.println("test exception writing events " + e.getMessage());
                    break;
                }
            }
        });
        // Poll the controller with exponential backoff until the stream has scaled beyond 3 segments.
        Retry.withExpBackoff(10, 10, 100, 10000).retryingOn(NotDoneException.class).throwingOn(RuntimeException.class).runAsync(() -> controller.getCurrentSegments("test", "test").thenAccept(streamSegments -> {
            if (streamSegments.getSegments().size() > 3) {
                System.err.println("Success");
                log.info("Success");
                System.exit(0);
            } else {
                throw new NotDoneException();
            }
        }), executor).exceptionally(e -> {
            System.err.println("Failure");
            log.error("Failure");
            System.exit(1);
            return null;
        }).get();
    } catch (Throwable e) {
        System.err.print("Test failed with exception: " + e.getMessage());
        System.exit(-1);
    }
    System.exit(0);
}
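The success check at the end uses Pravega's Retry helper to poll the controller with exponential backoff, treating NotDoneException as retryable. The same pattern, distilled from the code above so the control flow is easier to see (the backoff parameters and the segment-count threshold mirror the test; controller and executor are assumed to be in scope):

Retry.withExpBackoff(10, 10, 100, 10000)
     .retryingOn(NotDoneException.class)     // retryable: condition not met yet
     .throwingOn(RuntimeException.class)     // anything else is a real failure
     .runAsync(() -> controller.getCurrentSegments("test", "test")
             .thenAccept(streamSegments -> {
                 if (streamSegments.getSegments().size() <= 3) {
                     // Not scaled yet: throw the retryable exception to back off and poll again.
                     throw new NotDoneException();
                 }
             }), executor)
     .get();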