Example usage of io.pravega.client.EventStreamClientFactory in the pravega project: class BoundedStreamReaderTest, method testBoundedStreamWithTruncationTest.
@Test(timeout = 60000)
public void testBoundedStreamWithTruncationTest() throws Exception {
    createScope(SCOPE);
    createStream(STREAM3);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(controllerUri).build());
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM3, serializer,
            EventWriterConfig.builder().build());
    // Seed the stream with four events, each 30 bytes long.
    for (int eventNumber = 1; eventNumber <= 4; eventNumber++) {
        writer.writeEvent(keyGenerator.get(), getEventData.apply(eventNumber)).get();
    }
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    // Stream cuts just after event 1 (offset 30) and just after event 2 (offset 2 * 30 = 60).
    StreamCut cutAfterFirstEvent = getStreamCut(STREAM3, 30L, 0);
    StreamCut cutAfterSecondEvent = getStreamCut(STREAM3, 60L, 0);
    // Bound the group so that only event 2 falls inside [start, end).
    groupManager.createReaderGroup("group", ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .stream(Stream.of(SCOPE, STREAM3), cutAfterFirstEvent, cutAfterSecondEvent)
            .build());
    final ReaderGroup readerGroup = groupManager.getReaderGroup("group");
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());
    // Only event 2 lies inside the configured bounds.
    readAndVerify(reader, 2);
    // Events 3 and 4 sit past the endStreamCut and must not be returned.
    Assert.assertNull("Null is expected", reader.readNextEvent(2000).getEvent());
    truncateStream(STREAM3, cutAfterSecondEvent);
    // The reader has already consumed past the truncation point, so it is unaffected.
    Assert.assertNull("Null is expected", reader.readNextEvent(2000).getEvent());
    // Reset the group to start from a stream cut that has just been truncated away.
    readerGroup.resetReaderGroup(ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .stream(Stream.of(SCOPE, STREAM3), cutAfterFirstEvent, StreamCut.UNBOUNDED)
            .build());
    verifyReinitializationRequiredException(reader);
    // A fresh reader first observes the truncation...
    @Cleanup
    EventStreamReader<String> reader2 = clientFactory.createReader("readerId2", "group", serializer,
            ReaderConfig.builder().build());
    assertThrows(TruncatedDataException.class, () -> reader2.readNextEvent(10000));
    // ...and then resumes from the first event past the truncation point:
    // event 3, since the stream was truncated at offset 30 * 2.
    readAndVerify(reader2, 3);
}
Example usage of io.pravega.client.EventStreamClientFactory in the pravega project: class BoundedStreamReaderTest, method testReaderGroupWithSameBounds.
@Test(timeout = 60000)
public void testReaderGroupWithSameBounds() throws Exception {
    createScope(SCOPE);
    createStream(STREAM1);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(controllerUri).build());
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM1, serializer,
            EventWriterConfig.builder().build());
    // 1. Seed the stream with two events, each 30 bytes long.
    for (int eventNumber = 1; eventNumber <= 2; eventNumber++) {
        writer.writeEvent(keyGenerator.get(), getEventData.apply(eventNumber)).get();
    }
    // 2. A single cut at offset 30 serves as both the lower and the upper bound.
    StreamCut boundary = getStreamCut(STREAM1, 30L, 0);
    // 3. Create a reader group whose start and end stream cuts are identical.
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    groupManager.createReaderGroup("group", ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .stream(Stream.of(SCOPE, STREAM1), boundary, boundary)
            .build());
    // 4. Create a reader.
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());
    // 5. An empty bounded range must yield no events.
    Assert.assertNull("Null is expected", reader.readNextEvent(1000).getEvent());
}
Example usage of io.pravega.client.EventStreamClientFactory in the pravega project: class BoundedStreamReaderTest, method testBoundedStreamWithScaleTest.
@Test(timeout = 60000)
public void testBoundedStreamWithScaleTest() throws Exception {
    createScope(SCOPE);
    createStream(STREAM1);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(controllerUri).build());
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM1, serializer,
            EventWriterConfig.builder().build());
    // 1. Write two events (30 bytes each) before scaling.
    for (int eventNumber = 1; eventNumber <= 2; eventNumber++) {
        writer.writeEvent(keyGenerator.get(), getEventData.apply(eventNumber)).get();
    }
    // 2. Scale the stream into three segments.
    Map<Double, Double> keyRanges = new HashMap<>();
    keyRanges.put(0.0, 0.33);
    keyRanges.put(0.33, 0.66);
    keyRanges.put(0.66, 1.0);
    scaleStream(STREAM1, keyRanges);
    // 3. Write three more events (30 bytes each) after scaling.
    for (int eventNumber = 3; eventNumber <= 5; eventNumber++) {
        writer.writeEvent(keyGenerator.get(), getEventData.apply(eventNumber)).get();
    }
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    // Start at the current HEAD of the stream, end after two events
    // (i.e. 2 * 30(event size) = offset 60 in segment 0).
    ReaderGroupConfig preScaleConfig = ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .groupRefreshTimeMillis(0)
            .stream(Stream.of(SCOPE, STREAM1), StreamCut.UNBOUNDED, getStreamCut(STREAM1, 60L, 0))
            .build();
    groupManager.createReaderGroup("group", preScaleConfig);
    ReaderGroup readerGroup = groupManager.getReaderGroup("group");
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("readerId1", "group", serializer,
            ReaderConfig.builder().build());
    // Only the two pre-scale events fall inside the configured bounds.
    readAndVerify(reader1, 1, 2);
    // Events 3-5 lie past the endStreamCut, so no further events are returned.
    Assert.assertNull("Null is expected", reader1.readNextEvent(2000).getEvent());
    // Re-bound the group: start at offset 60 in segment 0, end at the cut built
    // from offset 90 over the post-scale segments 1, 2 and 3.
    final ReaderGroupConfig postScaleConfig = ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .groupRefreshTimeMillis(0)
            .stream(Stream.of(SCOPE, STREAM1),
                    getStreamCut(STREAM1, 60L, 0),
                    getStreamCut(STREAM1, 90L, 1, 2, 3))
            .build();
    readerGroup.resetReaderGroup(postScaleConfig);
    verifyReinitializationRequiredException(reader1);
    // A fresh reader picks up the reset configuration.
    @Cleanup
    EventStreamReader<String> reader2 = clientFactory.createReader("readerId2", "group", serializer,
            ReaderConfig.builder().build());
    assertNull(reader2.readNextEvent(100).getEvent());
    readerGroup.initiateCheckpoint("c1", executorService());
    // The post-scale events are now readable up to the end bound.
    readAndVerify(reader2, 3, 4, 5);
    Assert.assertNull("Null is expected", reader2.readNextEvent(2000).getEvent());
}
Example usage of io.pravega.client.EventStreamClientFactory in the pravega project: class MultiReadersEndToEndTest, method runTest.
/**
 * Writes {@code NUM_TEST_EVENTS} integer events to each of the given streams and verifies that
 * {@code numParallelReaders} readers in one reader group collectively read every event.
 *
 * @param streamNames        names of the streams to create, write to, and read from
 * @param numParallelReaders number of readers to run in parallel within the reader group
 * @param numSegments        number of fixed segments for each created stream
 * @throws Exception if stream setup, writing, or reading fails
 */
private void runTest(final Set<String> streamNames, final int numParallelReaders, final int numSegments) throws Exception {
    @Cleanup
    StreamManager streamManager = StreamManager.create(
            ClientConfig.builder().controllerURI(SETUP_UTILS.getControllerUri()).build());
    streamManager.createScope(SETUP_UTILS.getScope());
    // Collection.forEach is enough here; the original .stream().forEach(...) added nothing.
    streamNames.forEach(stream -> {
        streamManager.createStream(SETUP_UTILS.getScope(), stream,
                StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(numSegments)).build());
        log.info("Created stream: {}", stream);
    });
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SETUP_UTILS.getScope(),
            ClientConfig.builder().controllerURI(SETUP_UTILS.getControllerUri()).build());
    streamNames.forEach(stream -> {
        @Cleanup
        EventStreamWriter<Integer> eventWriter = clientFactory.createEventWriter(stream,
                new IntegerSerializer(), EventWriterConfig.builder().build());
        // Primitive loop counter: the original `Integer i` boxed on every iteration.
        for (int i = 0; i < NUM_TEST_EVENTS; i++) {
            eventWriter.writeEvent(String.valueOf(i), i);
        }
        eventWriter.flush();
        log.info("Wrote {} events", NUM_TEST_EVENTS);
    });
    final String readerGroupName = "testreadergroup" + RandomStringUtils.randomAlphanumeric(10).toLowerCase();
    @Cleanup
    ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SETUP_UTILS.getScope(),
            ClientConfig.builder().controllerURI(SETUP_UTILS.getControllerUri()).build());
    ReaderGroupConfig.ReaderGroupConfigBuilder builder = ReaderGroupConfig.builder();
    streamNames.forEach(s -> builder.stream(Stream.of(SETUP_UTILS.getScope(), s)));
    readerGroupManager.createReaderGroup(readerGroupName, builder.build());
    Collection<Integer> read = readAllEvents(numParallelReaders, clientFactory, readerGroupName, numSegments);
    // Every event from every stream must have been read.
    Assert.assertEquals(NUM_TEST_EVENTS * streamNames.size(), read.size());
    // Check unique events: the distinct payloads are exactly 0..NUM_TEST_EVENTS-1.
    Assert.assertEquals(NUM_TEST_EVENTS, new TreeSet<>(read).size());
    readerGroupManager.deleteReaderGroup(readerGroupName);
}
Example usage of io.pravega.client.EventStreamClientFactory in the pravega project: class ReadWithReadPermissionsTest, method readsFromADifferentScopeTest.
@Test
public void readsFromADifferentScopeTest() {
    final String writerUserName = "writer";
    final String readerUserName = "reader";
    final String password = "test-password";
    final String marketDataScope = "marketdata";
    final String computeScope = "compute";
    final String stream1 = "stream1";
    // Authorization entries for the two test users.
    final Map<String, String> passwordInputFileEntries = new HashMap<>();
    passwordInputFileEntries.put(writerUserName, String.join(";",
            // Allows the user to create the "marketdata" scope, for this test
            "prn::/,READ_UPDATE",
            // Allows the user to create streams (and other scope children)
            "prn::/scope:marketdata,READ_UPDATE",
            // Grants the user full access to child objects of the "marketdata" scope
            "prn::/scope:marketdata/*,READ_UPDATE"));
    passwordInputFileEntries.put(readerUserName, String.join(";",
            // Allows the user to create the "compute" home scope
            "prn::/,READ_UPDATE",
            // Allows the user to create reader-groups under its home scope
            "prn::/scope:compute,READ_UPDATE",
            // Grants the user full access to child objects of the "compute" scope
            "prn::/scope:compute/*,READ_UPDATE",
            // Grants the user read access to the "marketdata/stream1" stream.
            "prn::/scope:marketdata/stream:stream1,READ"));
    // Set up and start a secured cluster.
    @Cleanup
    final ClusterWrapper cluster = ClusterWrapper.builder()
            .authEnabled(true)
            .tokenSigningKeyBasis("secret")
            .tokenTtlInSeconds(600)
            .rgWritesWithReadPermEnabled(false)
            .passwordAuthHandlerEntries(TestUtils.preparePasswordInputFileEntries(passwordInputFileEntries, password))
            .build();
    cluster.start();
    // Client config for the writer, whose home scope is "marketdata".
    final ClientConfig writerClientConfig = ClientConfig.builder()
            .controllerURI(URI.create(cluster.controllerUri()))
            .credentials(new DefaultCredentials(password, writerUserName))
            .build();
    // Create scope/stream `marketdata/stream1` and write one message to it.
    TestUtils.createScopeAndStreams(writerClientConfig, marketDataScope, Arrays.asList(stream1));
    TestUtils.writeDataToStream(marketDataScope, stream1, "test message", writerClientConfig);
    // Client config for the reader, whose home scope is "compute".
    final ClientConfig readerClientConfig = ClientConfig.builder()
            .controllerURI(URI.create(cluster.controllerUri()))
            .credentials(new DefaultCredentials(password, readerUserName))
            .build();
    // Create scope `compute` without any streams.
    TestUtils.createScopeAndStreams(readerClientConfig, computeScope, new ArrayList<>());
    // Reader group living in the reader's home scope that reads `marketdata/stream1`.
    final ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder()
            .stream(Stream.of(marketDataScope, stream1))
            .disableAutomaticCheckpoints()
            .build();
    @Cleanup
    ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(computeScope, readerClientConfig);
    readerGroupManager.createReaderGroup("testRg", readerGroupConfig);
    @Cleanup
    EventStreamClientFactory readerClientFactory = EventStreamClientFactory.withScope(computeScope, readerClientConfig);
    @Cleanup
    EventStreamReader<String> reader = readerClientFactory.createReader("readerId", "testRg",
            new JavaSerializer<String>(), ReaderConfig.builder().initialAllocationDelay(0).build());
    // The cross-scope read succeeds with only READ permission on the source stream.
    String readMessage = reader.readNextEvent(5000).getEvent();
    assertEquals("test message", readMessage);
}
Aggregations