use of io.pravega.client.stream.impl.JavaSerializer in project pravega by pravega.
the class EndToEndTruncationTest method testWriteOnSealedStream.
@Test(timeout = 50000)
public void testWriteOnSealedStream() throws Exception {
    JavaSerializer<String> serializer = new JavaSerializer<>();
    EventWriterConfig writerConfig = EventWriterConfig.builder().build();
    String scope = "testSeal";
    String streamName = "testWriteOnSealedStream";
    StreamConfiguration config = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.byEventRate(10, 2, 2))
            .build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope(scope).get();
    controller.createStream(scope, streamName, config).get();
    config = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1))
            .build();
    controller.updateStream(scope, streamName, config).get();
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, writerConfig);
    // Write an event.
    writer.writeEvent("0", "data").get();
    // Seal the stream.
    assertTrue(controller.sealStream(scope, streamName).get());
    // A write by an existing writer to a sealed stream should complete exceptionally.
    assertFutureThrows("Should throw IllegalStateException",
            writer.writeEvent("2", "Write to sealed stream"),
            e -> IllegalStateException.class.isAssignableFrom(e.getClass()));
    // Subsequent writes throw an exception.
    assertThrows(IllegalStateException.class, () -> writer.writeEvent("testEvent"));
    // Creating a writer against a sealed stream also throws an exception.
    assertThrows(IllegalStateException.class, () -> clientFactory.createEventWriter(streamName, serializer, writerConfig));
}
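For reference, JavaSerializer is a Serializer implementation backed by standard Java serialization, which is why the tests on this page can pass plain strings to it. A minimal round-trip sketch, assuming only that the payload type implements java.io.Serializable:

    JavaSerializer<String> serializer = new JavaSerializer<>();
    // serialize(T) returns a ByteBuffer containing the Java-serialized form of the value.
    ByteBuffer buffer = serializer.serialize("data");
    // deserialize(ByteBuffer) reconstructs the original value from that buffer.
    String roundTripped = serializer.deserialize(buffer);
    assert "data".equals(roundTripped);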
use of io.pravega.client.stream.impl.JavaSerializer in project pravega by pravega.
the class SingleSubscriberUpdateRetentionStreamCutTest method singleSubscriberCBRTest.
@Test
public void singleSubscriberCBRTest() throws Exception {
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM, new JavaSerializer<>(),
            EventWriterConfig.builder().build());
    // Write a single event.
    log.info("Writing event e1 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e1", SIZE_30_EVENT).join();
    @Cleanup
    ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
    readerGroupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder()
            .retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT)
            .disableAutomaticCheckpoints()
            .stream(Stream.of(SCOPE, STREAM))
            .build());
    ReaderGroup readerGroup = readerGroupManager.getReaderGroup(READER_GROUP);
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader(READER_GROUP + "-" + 1, READER_GROUP,
            new JavaSerializer<>(), readerConfig);
    // Read one event.
    log.info("Reading event e1 from {}/{}", SCOPE, STREAM);
    EventRead<String> read = reader.readNextEvent(READ_TIMEOUT);
    assertFalse(read.isCheckpoint());
    assertEquals("data of size 30", read.getEvent());
    // Update the retention stream-cut.
    log.info("{} generating stream-cuts for {}/{}", READER_GROUP, SCOPE, STREAM);
    CompletableFuture<Map<Stream, StreamCut>> futureCuts = readerGroup.generateStreamCuts(streamCutExecutor);
    // Wait for 5 seconds to force a reader group state update. This allows the silent
    // checkpoint event generated as part of generateStreamCuts to be picked up and processed.
    Exceptions.handleInterrupted(() -> TimeUnit.SECONDS.sleep(5));
    EventRead<String> emptyEvent = reader.readNextEvent(READ_TIMEOUT);
    assertTrue("Stream-cut generation did not complete", Futures.await(futureCuts, 10_000));
    Map<Stream, StreamCut> streamCuts = futureCuts.join();
    log.info("{} updating its retention stream-cut to {}", READER_GROUP, streamCuts);
    readerGroup.updateRetentionStreamCut(streamCuts);
    // Write two more events.
    log.info("Writing event e2 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e2", SIZE_30_EVENT).join();
    log.info("Writing event e3 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e3", SIZE_30_EVENT).join();
    // Check that truncation happened after the first event.
    // The timeout is 5 minutes because the retention period is set to 2 minutes: we allow two full
    // retention cycles, plus a little longer, to confirm that truncation has taken place.
    AssertExtensions.assertEventuallyEquals("Truncation did not take place at offset 30.", true,
            () -> controller.getSegmentsAtTime(new StreamImpl(SCOPE, STREAM), 0L).join()
                    .values().stream().anyMatch(off -> off >= 30),
            1000, 5 * 60 * 1000L);
    // Read the next event.
    log.info("Reading event e2 from {}/{}", SCOPE, STREAM);
    read = reader.readNextEvent(READ_TIMEOUT);
    assertFalse(read.isCheckpoint());
    assertEquals("data of size 30", read.getEvent());
    // Update the retention stream-cut again.
    log.info("{} generating stream-cuts for {}/{}", READER_GROUP, SCOPE, STREAM);
    CompletableFuture<Map<Stream, StreamCut>> futureCuts2 = readerGroup.generateStreamCuts(streamCutExecutor);
    // Wait for 5 seconds to force a reader group state update. This allows the silent
    // checkpoint event generated as part of generateStreamCuts to be picked up and processed.
    Exceptions.handleInterrupted(() -> TimeUnit.SECONDS.sleep(5));
    EventRead<String> emptyEvent2 = reader.readNextEvent(READ_TIMEOUT);
    assertTrue("Stream-cut generation did not complete", Futures.await(futureCuts2, 10_000));
    Map<Stream, StreamCut> streamCuts2 = futureCuts2.join();
    log.info("{} updating its retention stream-cut to {}", READER_GROUP, streamCuts2);
    readerGroup.updateRetentionStreamCut(streamCuts2);
    // Check that truncation happened after the second event, with the same 5-minute allowance.
    AssertExtensions.assertEventuallyEquals("Truncation did not take place at offset 60", true,
            () -> controller.getSegmentsAtTime(new StreamImpl(SCOPE, STREAM), 0L).join()
                    .values().stream().anyMatch(off -> off >= 60),
            1000, 5 * 60 * 1000L);
}
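The 2-minute retention period referenced in the comments above is configured on the stream itself rather than in this test. A minimal sketch of such a setup, assuming a time-based retention policy and a streamManager created elsewhere (the policy values and the streamManager variable are illustrative, not taken from the test class):

    StreamConfiguration cbrConfig = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(1))
            // Retention cycles run against this policy; consumption-based retention additionally
            // respects the subscriber reader group's published stream-cut.
            .retentionPolicy(RetentionPolicy.byTime(Duration.ofMinutes(2)))
            .build();
    streamManager.createStream(SCOPE, STREAM, cbrConfig);

With a subscriber reader group attached (as in the test above), each retention cycle truncates the stream no further than the subscriber's last published stream-cut.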
use of io.pravega.client.stream.impl.JavaSerializer in project pravega by pravega.
the class AutoScaleTest method scaleUpTxnTest.
/**
 * Invokes the scale-up test with transactional writes. Produces traffic from multiple writers in parallel, each
 * writer writing using transactions. The test periodically checks whether a scale event has occurred by querying
 * the controller via the controller client.
 *
 * @return a future that completes once scale-up is observed, or fails once the retries are exhausted.
 */
private CompletableFuture<Void> scaleUpTxnTest() {
    ControllerImpl controller = getController();
    final AtomicBoolean exit = new AtomicBoolean(false);
    ClientFactoryImpl clientFactory = getClientFactory();
    startWritingIntoTxn(clientFactory.createTransactionalEventWriter("writer", SCALE_UP_TXN_STREAM_NAME,
            new JavaSerializer<>(), EventWriterConfig.builder().build()), exit);
    // Overall, wait up to about 260 seconds (roughly 4.3 minutes) for the test to complete,
    // or until scale-up is observed, whichever happens first.
    return Retry.withExpBackoff(10, 10, 30, Duration.ofSeconds(10).toMillis())
            .retryingOn(ScaleOperationNotDoneException.class)
            .throwingOn(RuntimeException.class)
            .runAsync(() -> controller.getCurrentSegments(SCOPE, SCALE_UP_TXN_STREAM_NAME).thenAccept(x -> {
                if (x.getSegments().size() == 1) {
                    throw new ScaleOperationNotDoneException();
                } else {
                    log.info("txn test scale up done successfully");
                    exit.set(true);
                }
            }), scaleExecutorService);
}
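startWritingIntoTxn is defined elsewhere in the test class. A minimal sketch of what such a helper could look like, assuming one event per transaction (the body below is illustrative, not the project's actual implementation):

    private CompletableFuture<Void> startWritingIntoTxn(TransactionalEventStreamWriter<String> writer, AtomicBoolean exit) {
        return CompletableFuture.runAsync(() -> {
            while (!exit.get()) {
                try {
                    // Each iteration opens a transaction, writes one event, and commits it atomically.
                    Transaction<String> txn = writer.beginTxn();
                    txn.writeEvent("routingKey", "test event");
                    txn.commit();
                } catch (TxnFailedException e) {
                    log.warn("Transaction failed; continuing with a new one", e);
                }
            }
            writer.close();
        });
    }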
use of io.pravega.client.stream.impl.JavaSerializer in project pravega by pravega.
the class BatchClientSimpleTest method readFromRanges.
private int readFromRanges(List<SegmentRange> ranges, BatchClientFactory batchClient) {
    List<CompletableFuture<Integer>> eventCounts = ranges.parallelStream()
            .map(range -> CompletableFuture.supplyAsync(() -> batchClient.readSegment(range, new JavaSerializer<>()))
                    .thenApplyAsync(segmentIterator -> {
                        log.debug("Thread " + Thread.currentThread().getId() + " reading events.");
                        int numEvents = Lists.newArrayList(segmentIterator).size();
                        segmentIterator.close();
                        return numEvents;
                    }))
            .collect(Collectors.toList());
    return eventCounts.stream().map(CompletableFuture::join).mapToInt(Integer::intValue).sum();
}
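For context, the SegmentRange list handed to this helper typically comes from the batch client itself. A minimal caller sketch, assuming unbounded start and end stream-cuts and SCOPE/STREAM constants defined elsewhere in the test class:

    // Enumerate all segment ranges in the stream between the two stream-cuts.
    StreamSegmentsIterator segments = batchClient.getSegments(Stream.of(SCOPE, STREAM),
            StreamCut.UNBOUNDED, StreamCut.UNBOUNDED);
    List<SegmentRange> ranges = Lists.newArrayList(segments.getIterator());
    int totalEvents = readFromRanges(ranges, batchClient);
    log.info("Read {} events from {} segment ranges.", totalEvents, ranges.size());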
use of io.pravega.client.stream.impl.JavaSerializer in project pravega by pravega.
the class ReadWithAutoScaleTest method scaleTestsWithReader.
@Test
public void scaleTestsWithReader() {
    URI controllerUri = getControllerURI();
    Controller controller = getController();
    testState = new TestState(true);
    final AtomicBoolean stopWriteFlag = new AtomicBoolean(false);
    final AtomicBoolean stopReadFlag = new AtomicBoolean(false);
    @Cleanup
    EventStreamClientFactory clientFactory = getClientFactory();
    // 1. Start writing events to the stream.
    List<CompletableFuture<Void>> writers = new ArrayList<>();
    writers.add(startWritingIntoTxn(clientFactory.createTransactionalEventWriter("initWriter", STREAM_NAME,
            new JavaSerializer<>(), EventWriterConfig.builder().transactionTimeoutTime(25000).build()), stopWriteFlag));
    // 2. Start a reader group with 2 readers (the stream is configured with 2 segments).
    // 2.1 Create a reader group.
    log.info("Creating Reader group : {}", READER_GROUP_NAME);
    @Cleanup
    ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE, Utils.buildClientConfig(controllerUri));
    readerGroupManager.createReaderGroup(READER_GROUP_NAME, ReaderGroupConfig.builder()
            .stream(Stream.of(SCOPE, STREAM_NAME))
            .build());
    // 2.2 Create readers.
    CompletableFuture<Void> reader1 = startReading(clientFactory.createReader("reader1", READER_GROUP_NAME,
            new JavaSerializer<>(), ReaderConfig.builder().build()), stopReadFlag);
    CompletableFuture<Void> reader2 = startReading(clientFactory.createReader("reader2", READER_GROUP_NAME,
            new JavaSerializer<>(), ReaderConfig.builder().build()), stopReadFlag);
    // 3. Increase the number of transactional writers to trigger a scale operation.
    log.info("Increasing the number of writers to 6");
    for (int i = 0; i < 5; i++) {
        writers.add(startWritingIntoTxn(clientFactory.createTransactionalEventWriter("writer-" + i, STREAM_NAME,
                new JavaSerializer<>(), EventWriterConfig.builder().transactionTimeoutTime(25000).build()), stopWriteFlag));
    }
    // 4. Wait until the scale operation is triggered (else time out), then validate the data read by
    //    the readers, ensuring all events are read and there are no duplicates.
    CompletableFuture<Void> testResult = Retry.withExpBackoff(10, 10, 40, ofSeconds(10).toMillis())
            .retryingOn(ScaleOperationNotDoneException.class)
            .throwingOn(RuntimeException.class)
            .runAsync(() -> controller.getCurrentSegments(SCOPE, STREAM_NAME).thenAccept(x -> {
                int currentNumOfSegments = x.getSegments().size();
                if (currentNumOfSegments == 2) {
                    // The scale operation has not happened yet; retry.
                    log.info("The current number of segments is equal to 2, ScaleOperation did not happen");
                    throw new ScaleOperationNotDoneException();
                } else if (currentNumOfSegments > 2) {
                    // Scale operation successful.
                    log.info("Current number of segments is {}", currentNumOfSegments);
                    stopWriteFlag.set(true);
                } else {
                    Assert.fail("Current number of segments reduced to less than 2; failing the test");
                }
            }), scaleExecutorService)
            .thenCompose(v -> Futures.allOf(writers))
            .thenRun(this::waitForTxnsToComplete)
            .thenCompose(v -> {
                stopReadFlag.set(true);
                log.info("All writers have stopped. Setting stopReadFlag. Event Written Count: {}, Event Read Count: {}",
                        testState.writtenEvents, testState.readEvents);
                return CompletableFuture.allOf(reader1, reader2);
            })
            .thenRun(this::validateResults);
    Futures.getAndHandleExceptions(testResult.whenComplete((r, e) ->
            recordResult(testResult, "ScaleUpWithTxnWithReaderGroup")), RuntimeException::new);
    readerGroupManager.deleteReaderGroup(READER_GROUP_NAME);
}
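startReading, like startWritingIntoTxn, is defined elsewhere in the test class. A plausible minimal sketch of such a reader loop, assuming the shared testState exposes an atomic read-event counter (illustrative, not the project's actual code):

    private CompletableFuture<Void> startReading(EventStreamReader<String> reader, AtomicBoolean stopReadFlag) {
        return CompletableFuture.runAsync(() -> {
            while (!stopReadFlag.get()) {
                // getEvent() is null when the call timed out or delivered a checkpoint marker.
                EventRead<String> eventRead = reader.readNextEvent(10_000);
                if (eventRead.getEvent() != null) {
                    testState.readEvents.incrementAndGet(); // assumes an AtomicLong-style counter on testState
                }
            }
            reader.close();
        });
    }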