Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
The class StreamCutsTest, method streamCutsTest.
/**
 * This test verifies the correct operation of readers using StreamCuts. Concretely, the test creates two streams
 * with a different number of segments and writes some events (TOTAL_EVENTS / 2) to them. Then, the test creates a
 * list of StreamCuts that encompasses both streams every CUT_SIZE events. The test asserts that new groups of
 * readers can be initialized at these sequential StreamCut intervals and that only CUT_SIZE events are read. The
 * test also checks the correctness of different combinations of StreamCuts that have not been sequentially created.
 * After creating StreamCuts and testing the correctness of reads, the test also checks resetting a reader group to a
 * specific initial read point. The whole process is repeated twice, before and after scaling the streams, to test
 * whether StreamCuts work correctly under scaling events (thus writing TOTAL_EVENTS in total). Finally, the test
 * checks reading different StreamCut combinations in both streams for all events (encompassing events before and
 * after scaling).
 */
@Test
public void streamCutsTest() {
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    @Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
    readerGroupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder()
            .stream(Stream.of(SCOPE, STREAM_ONE))
            .stream(Stream.of(SCOPE, STREAM_TWO))
            .build());
    @Cleanup ReaderGroup readerGroup = readerGroupManager.getReaderGroup(READER_GROUP);
    // Perform the write of events, the slice-by-slice StreamCuts test, and the StreamCut combinations test.
    log.info("Write, slice by slice and combinations test before scaling.");
    final int parallelismBeforeScale = RG_PARALLELISM_ONE + RG_PARALLELISM_TWO;
    List<Map<Stream, StreamCut>> slicesBeforeScale = writeEventsAndCheckSlices(clientFactory, readerGroup,
            readerGroupManager, parallelismBeforeScale);
    // Now, we perform a manual scale on the first stream and wait until it completes.
    CompletableFuture<Boolean> scaleStreamOne = scaleStream(SCOPE, STREAM_ONE, RG_PARALLELISM_ONE * 2, executor);
    checkScaleStatus(scaleStreamOne);
    // Perform the same test again on the stream segments after scaling.
    final int parallelSegmentsAfterScale = RG_PARALLELISM_ONE * 2 + RG_PARALLELISM_TWO;
    final String newReaderGroupName = READER_GROUP + "new";
    final Map<Stream, StreamCut> streamCutBeforeScale = slicesBeforeScale.get(slicesBeforeScale.size() - 1);
    readerGroupManager.createReaderGroup(newReaderGroupName, ReaderGroupConfig.builder()
            .stream(Stream.of(SCOPE, STREAM_ONE))
            .stream(Stream.of(SCOPE, STREAM_TWO))
            .startingStreamCuts(streamCutBeforeScale)
            .build());
    @Cleanup ReaderGroup newReaderGroup = readerGroupManager.getReaderGroup(newReaderGroupName);
    log.info("Checking slices again starting from {}.", streamCutBeforeScale);
    List<Map<Stream, StreamCut>> slicesAfterScale = writeEventsAndCheckSlices(clientFactory, newReaderGroup,
            readerGroupManager, parallelSegmentsAfterScale);
    // Perform combinations including StreamCuts from before and after the scale event. The first slice after
    // scaling starts at streamCutBeforeScale, which is already the last element of slicesBeforeScale, so drop
    // it to avoid a duplicate.
    slicesAfterScale.remove(0);
    slicesBeforeScale.addAll(slicesAfterScale);
    log.info("Performing combinations in the whole stream.");
    combineSlicesAndVerify(readerGroupManager, clientFactory, parallelSegmentsAfterScale, slicesBeforeScale);
    log.info("All events correctly read from StreamCut slices on multiple Streams. StreamCuts test passed.");
}
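The core API this test exercises is ReaderGroupConfig's support for StreamCut bounds: a reader group can be told exactly where in each stream to start (and, optionally, stop) reading. Below is a minimal sketch of a bounded reader group; the scope, stream, group name, and controller URI are illustrative, and in the test above the cuts come from writeEventsAndCheckSlices rather than StreamCut.UNBOUNDED.

import io.pravega.client.ClientConfig;
import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamCut;
import java.net.URI;

public class BoundedReaderGroupSketch {
    public static void main(String[] args) {
        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create("tcp://localhost:9090")) // illustrative URI
                .build();
        // Illustrative bounds; a real test would capture concrete StreamCuts, e.g. via
        // ReaderGroup#getStreamCuts() or generateStreamCuts(), and pass them here.
        StreamCut start = StreamCut.UNBOUNDED;
        StreamCut end = StreamCut.UNBOUNDED;
        ReaderGroupConfig config = ReaderGroupConfig.builder()
                .stream(Stream.of("myScope", "myStream"), start, end)
                .build();
        try (ReaderGroupManager manager = ReaderGroupManager.withScope("myScope", clientConfig)) {
            manager.createReaderGroup("boundedGroup", config);
        }
    }
}

Readers created against boundedGroup will then see only the events between the two cuts.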
Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
The class AutoScaleProcessorTest, method writerCreationTest.
@Test(timeout = 10000)
public void writerCreationTest() throws Exception {
    EventStreamClientFactory clientFactory = mock(EventStreamClientFactory.class);
    CompletableFuture<Void> createWriterLatch = new CompletableFuture<>();
    doAnswer(x -> {
        createWriterLatch.complete(null);
        throw new RuntimeException();
    }).when(clientFactory).createEventWriter(any(), any(), any());
    TestAutoScaleProcessor failingWriterProcessor = new TestAutoScaleProcessor(
            AutoScalerConfig.builder().with(AutoScalerConfig.CONTROLLER_URI, "tcp://localhost:9090").build(),
            clientFactory, executorService());
    String segmentStreamName = "scope/myStreamSegment/0.#epoch.0";
    failingWriterProcessor.notifyCreated(segmentStreamName);
    assertFalse(failingWriterProcessor.isInitializeStarted());
    AtomicReference<EventStreamWriter<AutoScaleEvent>> w = new AtomicReference<>();
    AssertExtensions.assertThrows("Bootstrap should not be initiated until isInitializeStarted is true",
            () -> failingWriterProcessor.bootstrapOnce(clientFactory, w),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    // Report, but since the cooldown time hasn't elapsed, no scale event should be attempted,
    // so no writer should be initialized yet.
    failingWriterProcessor.report(segmentStreamName, 1, 0L, 10.0, 10.0, 10.0, 10.0);
    assertFalse(failingWriterProcessor.isInitializeStarted());
    failingWriterProcessor.setTimeMillis(20 * 60000L);
    failingWriterProcessor.report(segmentStreamName, 1, 0L, 10.0, 10.0, 10.0, 10.0);
    // The above should initiate the bootstrap.
    assertTrue(failingWriterProcessor.isInitializeStarted());
    // Since writer creation throws, wait until createEventWriter has been invoked at least once.
    createWriterLatch.join();
    // Now close the processor. The writer future should get cancelled.
    failingWriterProcessor.close();
    assertTrue(failingWriterProcessor.getWriterFuture().isCancelled());
    // Create a new processor and let the writer get created.
    TestAutoScaleProcessor processor = new TestAutoScaleProcessor(
            AutoScalerConfig.builder().with(AutoScalerConfig.CONTROLLER_URI, "tcp://localhost:9090").build(),
            clientFactory, executorService());
    LinkedBlockingQueue<AutoScaleEvent> queue = new LinkedBlockingQueue<>();
    EventStreamWriter<AutoScaleEvent> writerMock = createWriter(queue::add);
    doAnswer(x -> writerMock).when(clientFactory).createEventWriter(any(), any(), any());
    processor.notifyCreated(segmentStreamName);
    // Report a low rate to trigger a scale down.
    processor.setTimeMillis(21 * 60000L);
    processor.report(segmentStreamName, 10, 0L, 1.0, 1.0, 1.0, 1.0);
    assertTrue(processor.isInitializeStarted());
    AssertExtensions.assertEventuallyEquals(writerMock, () -> processor.getWriterFuture().join(), 10000L);
    AutoScaleEvent event = queue.take();
    assertEquals(event.getDirection(), AutoScaleEvent.DOWN);
    processor.close();
    // Create a third writer; this time supply the writer directly.
    EventStreamWriter<AutoScaleEvent> writer = spy(createWriter(e -> {
    }));
    // Verify that when the writer is supplied, the processor can still be initialized.
    TestAutoScaleProcessor processor2 = new TestAutoScaleProcessor(writer,
            AutoScalerConfig.builder().with(AutoScalerConfig.CONTROLLER_URI, "tcp://localhost:9090").build(),
            executorService());
    processor2.notifyCreated(segmentStreamName);
    assertFalse(processor2.isInitializeStarted());
    processor2.setTimeMillis(20 * 60000L);
    processor2.report(segmentStreamName, 1, 0L, 10.0, 10.0, 10.0, 10.0);
    // The above should create a writer future.
    assertTrue(processor2.isInitializeStarted());
    assertTrue(Futures.isSuccessful(processor2.getWriterFuture()));
    processor2.close();
    verify(writer, times(1)).close();
}
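The createWriter helper referenced above is not shown in this excerpt. A plausible minimal version, assuming its only job is to hand each written AutoScaleEvent to a callback and report success, could be built on Mockito as sketched below (the name and details are an assumption, not the project's actual helper; it also assumes the test class's usual Mockito static imports plus java.util.function.Consumer and org.mockito.stubbing.Answer):

// Hypothetical stand-in for the createWriter helper used above (not the project's actual code):
// builds an EventStreamWriter mock whose writeEvent overloads forward the event to the supplied
// callback and return an already-completed future.
@SuppressWarnings("unchecked")
private EventStreamWriter<AutoScaleEvent> createWriter(Consumer<AutoScaleEvent> consumer) {
    EventStreamWriter<AutoScaleEvent> writer = mock(EventStreamWriter.class);
    Answer<CompletableFuture<Void>> record = invocation -> {
        // The event is the last argument in both writeEvent overloads.
        consumer.accept(invocation.getArgument(invocation.getArguments().length - 1));
        return CompletableFuture.completedFuture(null);
    };
    doAnswer(record).when(writer).writeEvent(any());
    doAnswer(record).when(writer).writeEvent(anyString(), any());
    return writer;
}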
Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
The class AutoScaleProcessorTest, method testSteadyStateExpiry.
@Test
public void testSteadyStateExpiry() {
    HashMap<String, Pair<Long, Long>> map = new HashMap<>();
    HashMap<String, Long> lastAccessedTime = new HashMap<>();
    List<String> evicted = new ArrayList<>();
    @SuppressWarnings("unchecked") SimpleCache<String, Pair<Long, Long>> simpleCache = mock(SimpleCache.class);
    AtomicLong clock = new AtomicLong(0L);
    Function<Void, Void> cleanup = m -> {
        // Remove all entries that should have expired. Use removeIf so that entries can be
        // removed safely while iterating (calling remove() inside a for-each loop over the
        // entry set would throw ConcurrentModificationException).
        lastAccessedTime.entrySet().removeIf(e -> {
            if (e.getValue() < clock.get()) {
                map.remove(e.getKey());
                evicted.add(e.getKey());
                return true;
            }
            return false;
        });
        return null;
    };
    doAnswer(x -> {
        cleanup.apply(null);
        return map.get(x.getArgument(0));
    }).when(simpleCache).get(anyString());
    doAnswer(x -> {
        cleanup.apply(null);
        map.put(x.getArgument(0), x.getArgument(1));
        return map.get(x.getArgument(0));
    }).when(simpleCache).put(anyString(), any());
    doAnswer(x -> cleanup.apply(null)).when(simpleCache).cleanUp();
    AutoScalerConfig config = AutoScalerConfig.builder()
            .with(AutoScalerConfig.CONTROLLER_URI, "tcp://localhost:9090")
            .with(AutoScalerConfig.TLS_ENABLED, false)
            .build();
    ClientConfig objectUnderTest = AutoScaleProcessor.prepareClientConfig(config);
    @Cleanup EventStreamClientFactory eventStreamClientFactory = EventStreamClientFactory.withScope(SCOPE, objectUnderTest);
    @Cleanup TestAutoScaleProcessor monitor = new TestAutoScaleProcessor(AutoScalerConfig.builder()
            .with(AutoScalerConfig.MUTE_IN_SECONDS, 0)
            .with(AutoScalerConfig.COOLDOWN_IN_SECONDS, 0)
            .with(AutoScalerConfig.AUTH_ENABLED, authEnabled)
            .with(AutoScalerConfig.CACHE_CLEANUP_IN_SECONDS, 150)
            .with(AutoScalerConfig.CACHE_EXPIRY_IN_SECONDS, 60)
            .build(), eventStreamClientFactory, executorService(), simpleCache);
    String streamSegmentName1 = NameUtils.getQualifiedStreamSegmentName(SCOPE, STREAM1, 0L);
    monitor.setTimeMillis(0L);
    clock.set(0L);
    monitor.notifyCreated(streamSegmentName1);
    monitor.put(streamSegmentName1, new ImmutablePair<>(5L, 5L));
    monitor.setTimeMillis(30 * 1000L);
    clock.set(30L);
    monitor.report(streamSegmentName1, 10L, 0L, 10D, 10D, 10D, 10D);
    monitor.setTimeMillis(80 * 1000L);
    clock.set(80L);
    simpleCache.cleanUp();
    // Nothing should have expired or been evicted at this point.
    assertNotNull(monitor.get(streamSegmentName1));
    assertNotNull(simpleCache.get(streamSegmentName1));
    assertTrue(evicted.isEmpty());
    AssertExtensions.assertThrows("NPE should be thrown",
            () -> new AutoScaleProcessor(null, config, executorService()),
            e -> e instanceof NullPointerException);
    AssertExtensions.assertThrows("NPE should be thrown",
            () -> new AutoScaleProcessor(null, eventStreamClientFactory, executorService()),
            e -> e instanceof NullPointerException);
    AssertExtensions.assertThrows("NPE should be thrown",
            () -> new AutoScaleProcessor(AutoScalerConfig.builder()
                    .with(AutoScalerConfig.MUTE_IN_SECONDS, 0)
                    .with(AutoScalerConfig.COOLDOWN_IN_SECONDS, 0)
                    .with(AutoScalerConfig.AUTH_ENABLED, authEnabled)
                    .with(AutoScalerConfig.CACHE_CLEANUP_IN_SECONDS, 150)
                    .with(AutoScalerConfig.CACHE_EXPIRY_IN_SECONDS, 60)
                    .build(), eventStreamClientFactory, null),
            e -> e instanceof NullPointerException);
    AssertExtensions.assertThrows("NPE should be thrown",
            () -> new AutoScaleProcessor(AutoScalerConfig.builder()
                    .with(AutoScalerConfig.MUTE_IN_SECONDS, 0)
                    .with(AutoScalerConfig.COOLDOWN_IN_SECONDS, 0)
                    .with(AutoScalerConfig.AUTH_ENABLED, authEnabled)
                    .with(AutoScalerConfig.CACHE_CLEANUP_IN_SECONDS, 150)
                    .with(AutoScalerConfig.CACHE_EXPIRY_IN_SECONDS, 60)
                    .build(), eventStreamClientFactory, null, simpleCache),
            e -> e instanceof NullPointerException);
    AssertExtensions.assertThrows("NPE should be thrown",
            () -> new AutoScaleProcessor(null, eventStreamClientFactory, executorService(), simpleCache),
            e -> e instanceof NullPointerException);
    monitor.notifySealed(streamSegmentName1);
}
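What makes this test deterministic is the fake-clock pattern: expiry is driven by a manually advanced AtomicLong rather than wall-clock time, and every cache operation first runs an eviction pass against it. The same technique in isolation, as a self-contained sketch (all names illustrative):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;

public class ManualClockEvictionDemo {
    public static void main(String[] args) {
        AtomicLong clock = new AtomicLong(0L);
        Map<String, Long> lastAccessedTime = new HashMap<>();
        List<String> evicted = new ArrayList<>();

        lastAccessedTime.put("segment-0", 0L);

        // Advance the fake clock past the entry's timestamp, then run one eviction pass.
        clock.set(80L);
        lastAccessedTime.entrySet().removeIf(e -> {
            if (e.getValue() < clock.get()) {
                evicted.add(e.getKey());
                return true;
            }
            return false;
        });

        System.out.println(evicted); // prints [segment-0]
    }
}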
Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
The class TestUtils, method readNextEventMessages.
/**
 * Returns the specified number of unread messages from the given {@code scope}/{@code stream}.
 *
 * @param scope the scope
 * @param stream the stream
 * @param numMessages the number of event messages to read
 * @param readerClientConfig the {@link ClientConfig} object to use to connect to the server
 * @param readerGroup the name of the reader group
 * @return the event messages
 * @throws NullPointerException if {@code scope}, {@code stream} or {@code readerClientConfig} is null
 * @throws IllegalArgumentException if {@code numMessages} is less than 1
 * @throws RuntimeException if any exception is thrown by the client
 */
public static List<String> readNextEventMessages(@NonNull String scope, @NonNull String stream, int numMessages,
                                                 @NonNull ClientConfig readerClientConfig, @NonNull String readerGroup) {
    Preconditions.checkArgument(numMessages > 0);
    @Cleanup EventStreamClientFactory readerClientFactory = EventStreamClientFactory.withScope(scope, readerClientConfig);
    log.debug("Created the readerClientFactory");
    ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder()
            .stream(Stream.of(scope, stream))
            .disableAutomaticCheckpoints()
            .build();
    @Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, readerClientConfig);
    readerGroupManager.createReaderGroup(readerGroup, readerGroupConfig);
    log.debug("Created reader group with name {}", readerGroup);
    @Cleanup EventStreamReader<String> reader = readerClientFactory.createReader("readerId", readerGroup,
            new JavaSerializer<String>(), ReaderConfig.builder().initialAllocationDelay(0).build());
    log.debug("Created an event reader");
    // Keep the read timeout large so that there is ample time for reading the event even in
    // case of abnormal delays in test environments.
    List<String> result = new ArrayList<>();
    for (int i = 0; i < numMessages; i++) {
        result.add(reader.readNextEvent(20000).getEvent());
    }
    log.info("Done reading {} events", numMessages);
    return result;
}
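Usage is then a one-liner from any test. In the sketch below the URI and names are illustrative, and an slf4j logger is assumed to be in scope:

// Illustrative caller of the helper above; the URI and names are placeholders.
ClientConfig clientConfig = ClientConfig.builder()
        .controllerURI(URI.create("tcp://localhost:9090"))
        .build();
List<String> events = TestUtils.readNextEventMessages(
        "testScope", "testStream", 3, clientConfig, "testReaderGroup");
events.forEach(e -> log.info("Read event: {}", e));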
Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
The class DelegationTokenTest, method writeAnEvent.
private void writeAnEvent(int tokenTtlInSeconds) throws ExecutionException, InterruptedException {
    ClusterWrapper pravegaCluster = ClusterWrapper.builder()
            .authEnabled(true)
            .tokenTtlInSeconds(tokenTtlInSeconds)
            .build();
    try {
        pravegaCluster.start();
        String scope = "testscope";
        String streamName = "teststream";
        int numSegments = 1;
        String message = "test message";
        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create(pravegaCluster.controllerUri()))
                .credentials(new DefaultCredentials("1111_aaaa", "admin"))
                .build();
        log.debug("Done creating client config.");
        createScopeStream(scope, streamName, numSegments, clientConfig);
        @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
        @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
                new JavaSerializer<String>(), EventWriterConfig.builder().build());
        // Note: A TokenException is thrown here if token verification fails on the server.
        writer.writeEvent(message).get();
        log.debug("Done writing message '{}' to stream '{} / {}'", message, scope, streamName);
    } finally {
        pravegaCluster.close();
    }
}
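A caller then picks a TTL appropriate to the scenario under test; for instance (the test name and value below are illustrative, not taken from the project):

@Test
public void writeSucceedsWithGenerousTokenTtl() throws ExecutionException, InterruptedException {
    // 600 seconds comfortably outlives the single write, so the delegation token
    // stays valid for the duration of the test.
    writeAnEvent(600);
}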