use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
the class BoundedStreamReaderTest method testBoundedStreamTest.
@Test(timeout = 60000)
public void testBoundedStreamTest() throws Exception {
    createScope(SCOPE);
    createStream(STREAM1);
    createStream(STREAM2);
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(controllerUri).build());
    @Cleanup EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM1, serializer,
            EventWriterConfig.builder().build());
    // Prep the stream with data.
    // 1. Write events with an event size of 30.
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(1)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(2)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(3)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(4)).get();
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    groupManager.createReaderGroup("group", ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .stream(Stream.of(SCOPE, STREAM1),
                    // startStreamCut points to the current HEAD of the stream.
                    StreamCut.UNBOUNDED,
                    // endStreamCut points to the offset after two events (i.e. 2 * 30 (event size) = 60).
                    getStreamCut(STREAM1, 60L, 0))
            .stream(Stream.of(SCOPE, STREAM2))
            .build());
    // Create a reader.
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());
    // 2. Verify that the endStreamCut configuration is enforced.
    readAndVerify(reader, 1, 2);
    // The following read should not return events 3 and 4, due to the endStreamCut configuration.
    Assert.assertNull("Null is expected", reader.readNextEvent(2000).getEvent());
    // 3. Write events to STREAM2.
    @Cleanup EventStreamWriter<String> writer2 = clientFactory.createEventWriter(STREAM2, serializer,
            EventWriterConfig.builder().build());
    writer2.writeEvent(keyGenerator.get(), getEventData.apply(5)).get();
    writer2.writeEvent(keyGenerator.get(), getEventData.apply(6)).get();
    // 4. Verify that events can be read from STREAM2. (Events from STREAM1 are not read, since its endStreamCut has been reached.)
    readAndVerify(reader, 5, 6);
    Assert.assertNull("Null is expected", reader.readNextEvent(2000).getEvent());
}
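The getStreamCut(STREAM1, 60L, 0) call above relies on a helper that is not part of this snippet. A minimal sketch of such a helper, assuming it maps each listed segment number of the stream to the same byte offset; StreamCutImpl and Segment are Pravega implementation classes, but the signature and body here are an assumption rather than the test's actual code:

import com.google.common.collect.ImmutableMap;
import io.pravega.client.segment.impl.Segment;
import io.pravega.client.stream.Stream;
import io.pravega.client.stream.StreamCut;
import io.pravega.client.stream.impl.StreamCutImpl;

// Sketch: build a StreamCut that places the given offset on each of the
// listed segment numbers of the stream.
private StreamCut getStreamCut(String streamName, long offset, int... segmentNumbers) {
    ImmutableMap.Builder<Segment, Long> positions = ImmutableMap.builder();
    for (int segmentNumber : segmentNumbers) {
        positions.put(new Segment(SCOPE, streamName, segmentNumber), offset);
    }
    return new StreamCutImpl(Stream.of(SCOPE, streamName), positions.build());
}

With offset 60 and segment 0, this yields exactly the bound used in the test: a cut placed after the first two 30-byte events, which is why the reader returns events 1 and 2 and then null.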
use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
the class LargeEventTest method testReadWriteWithSegmentStoreRestart.
@Test(timeout = 60000)
public void testReadWriteWithSegmentStoreRestart() throws ExecutionException, InterruptedException {
    String readerGroupName = "testLargeEventFailoverReaderGroup";
    String streamName = "SegmentStoreRestart";
    StreamConfiguration config = getStreamConfiguration(NUM_READERS);
    createScopeStream(SCOPE_NAME, streamName, config);
    int events = 1;
    AtomicInteger generation = new AtomicInteger(0);
    merge(eventsWrittenToPravega, generateEventData(NUM_WRITERS, events * generation.getAndIncrement(), events, LARGE_EVENT_SIZE));
    eventsReadFromPravega = readWriteCycle(streamName, readerGroupName, eventsWrittenToPravega);
    validateEventReads(eventsReadFromPravega, eventsWrittenToPravega);
    // Passing in this restart callback overrides the default behavior of closing the connection.
    Runnable restart = () -> {
        // Reset the server, in effect clearing the AppendProcessor and PravegaRequestProcessor.
        this.server.close();
        this.server = new PravegaConnectionListener(false, servicePort, store, tableStore,
                serviceBuilder.getLowPriorityExecutor());
        this.server.startListening();
    };
    restart.run();
    Map<Integer, List<ByteBuffer>> data = generateEventData(NUM_WRITERS, events * generation.getAndIncrement(), events, LARGE_EVENT_SIZE);
    merge(eventsWrittenToPravega, data);
    eventsReadFromPravega = readWriteCycle(streamName, readerGroupName, data);
    validateEventReads(eventsReadFromPravega, eventsWrittenToPravega);
    // Reset the objects used for read-write validation.
    stopReadFlag = new AtomicBoolean(false);
    eventsReadFromPravega.clear();
    eventReadCount.set(0);
    // Generate new data.
    data = generateEventData(NUM_WRITERS, events * generation.getAndIncrement(), events, LARGE_EVENT_SIZE);
    merge(eventsWrittenToPravega, data);
    AtomicInteger sendCount = new AtomicInteger(0);
    Supplier<Boolean> predicate = () -> sendCount.getAndIncrement() == CLOSE_WRITE_COUNT;
    // Now try the restart *during* a large event write.
    AtomicReference<Boolean> latch = new AtomicReference<>(true);
    try (ConnectionExporter connectionFactory = new ConnectionExporter(ClientConfig.builder().build(), latch, restart, predicate);
         ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE_NAME, controller, connectionFactory);
         ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(SCOPE_NAME, controller, clientFactory)) {
        // Start writing events to the stream.
        val writers = createEventWriters(streamName, NUM_WRITERS, clientFactory, data);
        Futures.allOf(writers).get();
        // Create a ReaderGroup.
        createReaderGroup(readerGroupName, readerGroupManager, streamName);
        // Create readers.
        val readers = createEventReaders(NUM_READERS, clientFactory, readerGroupName, eventsReadFromPravega);
        stopReadFlag.set(true);
        Futures.allOf(readers).get();
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    validateEventReads(eventsReadFromPravega, eventsWrittenToPravega);
    validateCleanUp(streamName);
}
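createReaderGroup, createEventWriters, and createEventReaders are helpers defined elsewhere in LargeEventTest. As a rough sketch, the reader-group creation presumably reduces to a single ReaderGroupManager call like the following; the exact configuration is an assumption, and the real helper may set additional options:

import io.pravega.client.admin.ReaderGroupManager;
import io.pravega.client.stream.ReaderGroupConfig;
import io.pravega.client.stream.Stream;

// Sketch, not the test's actual helper: subscribe the group to the single
// test stream with default reader-group settings.
private void createReaderGroup(String groupName, ReaderGroupManager manager, String streamName) {
    manager.createReaderGroup(groupName, ReaderGroupConfig.builder()
            .stream(Stream.of(SCOPE_NAME, streamName))
            .build());
}

Note the test constructs ReaderGroupManagerImpl directly over the shared controller and the custom connection factory, so the restart callback fires on the same connections the readers and writers use; deleting the group at the end of the try block releases its state before validateCleanUp runs.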