Use of io.pravega.client.admin.impl.ReaderGroupManagerImpl in project pravega by pravega.
The class EndToEndAutoScaleUpWithTxnTest, method main.
public static void main(String[] args) throws Exception {
    try {
        @Cleanup TestingServer zkTestServer = new TestingServerStarter().start();
        int port = Config.SERVICE_PORT;
        @Cleanup ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), port);
        Controller controller = controllerWrapper.getController();
        controllerWrapper.getControllerService().createScope(NameUtils.INTERNAL_SCOPE_NAME, 0L).get();
        @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
        @Cleanup ClientFactoryImpl internalCF = new ClientFactoryImpl(NameUtils.INTERNAL_SCOPE_NAME, controller, connectionFactory);
        @Cleanup("shutdownNow") val executor = ExecutorServiceHelpers.newScheduledThreadPool(1, "test");
        @Cleanup ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
        serviceBuilder.initialize();
        StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
        TableStore tableStore = serviceBuilder.createTableStoreService();
        @Cleanup AutoScaleMonitor autoScaleMonitor = new AutoScaleMonitor(store, internalCF,
                AutoScalerConfig.builder()
                        .with(AutoScalerConfig.MUTE_IN_SECONDS, 0)
                        .with(AutoScalerConfig.COOLDOWN_IN_SECONDS, 0)
                        .build());
        @Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, false, "localhost", 12345,
                store, tableStore, autoScaleMonitor.getStatsRecorder(), autoScaleMonitor.getTableSegmentStatsRecorder(),
                null, null, null, true, serviceBuilder.getLowPriorityExecutor(),
                Config.TLS_PROTOCOL_VERSION.toArray(new String[Config.TLS_PROTOCOL_VERSION.size()]));
        server.startListening();
        controllerWrapper.awaitRunning();
        controllerWrapper.getControllerService().createScope("test", 0L).get();
        controller.createStream("test", "test", CONFIG).get();
        @Cleanup MockClientFactory clientFactory = new MockClientFactory("test", controller, internalCF.getConnectionPool());
        // Mock the Pravega service by issuing scale-up and scale-down requests for the stream.
        EventWriterConfig writerConfig = EventWriterConfig.builder().transactionTimeoutTime(30000).build();
        TransactionalEventStreamWriter<String> test = clientFactory.createTransactionalEventWriter("writer", "test",
                new UTF8StringSerializer(), writerConfig);
        // region Successful commit tests
        Transaction<String> txn1 = test.beginTxn();
        txn1.writeEvent("1");
        txn1.flush();
        Map<Double, Double> map = new HashMap<>();
        map.put(0.0, 1.0 / 3.0);
        map.put(1.0 / 3.0, 2.0 / 3.0);
        map.put(2.0 / 3.0, 1.0);
        Stream stream = new StreamImpl("test", "test");
        controller.startScale(stream, Collections.singletonList(0L), map).get();
        Transaction<String> txn2 = test.beginTxn();
        txn2.writeEvent("2");
        txn2.flush();
        txn2.commit();
        txn1.commit();
        Thread.sleep(1000);
        @Cleanup ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
        readerGroupManager.createReaderGroup("readergrp", ReaderGroupConfig.builder().stream("test/test").build());
        final EventStreamReader<String> reader = clientFactory.createReader("1", "readergrp",
                new JavaSerializer<>(), ReaderConfig.builder().build());
        String event1 = reader.readNextEvent(SECONDS.toMillis(60)).getEvent();
        String event2 = reader.readNextEvent(SECONDS.toMillis(60)).getEvent();
        assert event1.equals("1");
        assert event2.equals("2");
        final AtomicBoolean done = new AtomicBoolean(false);
        startWriter(test, done);
        // Poll until a segment from an epoch beyond 5 appears, i.e. auto-scaling has kicked in.
        Retry.withExpBackoff(10, 10, 100, 10000)
             .retryingOn(NotDoneException.class)
             .throwingOn(RuntimeException.class)
             .runAsync(() -> controller.getCurrentSegments("test", "test").thenAccept(streamSegments -> {
                 if (streamSegments.getSegments().stream().anyMatch(x -> NameUtils.getEpoch(x.getSegmentId()) > 5)) {
                     System.err.println("Success");
                     log.info("Success");
                     System.exit(0);
                 } else {
                     throw new NotDoneException();
                 }
             }), executor).exceptionally(e -> {
                 System.err.println("Failure");
                 log.error("Failure");
                 System.exit(1);
                 return null;
             }).get();
    } catch (Throwable e) {
        System.err.println("Test failed with exception: " + e.getMessage());
        log.error("Test failed with exception", e);
        System.exit(-1);
    }
    System.exit(0);
}
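Note that the test instantiates ReaderGroupManagerImpl directly so it can wire in its own Controller and client factory. Application code would normally go through the public factory method on the ReaderGroupManager interface, which constructs the same implementation internally. A minimal sketch, assuming a controller listening at tcp://localhost:9090:
    ClientConfig clientConfig = ClientConfig.builder()
            .controllerURI(URI.create("tcp://localhost:9090")) // assumed endpoint; adjust for your deployment
            .build();
    // ReaderGroupManager implements AutoCloseable, so try-with-resources cleans it up.
    try (ReaderGroupManager manager = ReaderGroupManager.withScope("test", clientConfig)) {
        manager.createReaderGroup("readergrp", ReaderGroupConfig.builder().stream("test/test").build());
    }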
Use of io.pravega.client.admin.impl.ReaderGroupManagerImpl in project pravega by pravega.
The class StreamCutsTest, method testReaderGroupCuts.
@Test(timeout = 40000)
public void testReaderGroupCuts() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope("test", 0L).get();
    controller.createStream("test", "test", config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter("test", new JavaSerializer<>(), EventWriterConfig.builder().build());
    writer.writeEvent("0", "fpj was here").get();
    writer.writeEvent("0", "fpj was here again").get();
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    groupManager.createReaderGroup("cuts", ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/test").groupRefreshTimeMillis(0).build());
    @Cleanup ReaderGroup readerGroup = groupManager.getReaderGroup("cuts");
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", "cuts", new JavaSerializer<>(), ReaderConfig.builder().initialAllocationDelay(0).build());
    EventRead<String> firstEvent = reader.readNextEvent(5000);
    assertNotNull(firstEvent.getEvent());
    assertEquals("fpj was here", firstEvent.getEvent());
    readerGroup.initiateCheckpoint("cp1", executor);
    EventRead<String> cpEvent = reader.readNextEvent(5000);
    assertEquals("cp1", cpEvent.getCheckpointName());
    EventRead<String> secondEvent = reader.readNextEvent(5000);
    assertNotNull(secondEvent.getEvent());
    assertEquals("fpj was here again", secondEvent.getEvent());
    Map<Stream, StreamCut> cuts = readerGroup.getStreamCuts();
    validateCuts(readerGroup, cuts, Collections.singleton(getQualifiedStreamSegmentName("test", "test", 0L)));
    // Scale the stream to verify that we get more segments in the cut.
    Stream stream = Stream.of("test", "test");
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.5);
    map.put(0.5, 1.0);
    Boolean result = controller.scaleStream(stream, Collections.singletonList(0L), map, executor).getFuture().get();
    assertTrue(result);
    log.info("Finished 1st scaling");
    writer.writeEvent("0", "fpj was here again0").get();
    writer.writeEvent("1", "fpj was here again1").get();
    EventRead<String> eosEvent = reader.readNextEvent(100);
    // The reader does not yet see the new data because there has been no checkpoint.
    assertNull(eosEvent.getEvent());
    CompletableFuture<Checkpoint> checkpoint = readerGroup.initiateCheckpoint("cp2", executor);
    cpEvent = reader.readNextEvent(100);
    EventRead<String> event0 = reader.readNextEvent(100);
    EventRead<String> event1 = reader.readNextEvent(100);
    cuts = checkpoint.get(5, TimeUnit.SECONDS).asImpl().getPositions();
    // Validate that the reader did not release the segments before the checkpoint.
    // This is important because it means that once the checkpoint is initiated no segments change readers.
    Set<String> segmentNames = ImmutableSet.of(getQualifiedStreamSegmentName("test", "test", computeSegmentId(0, 0)));
    validateCuts(readerGroup, cuts, segmentNames);
    CompletableFuture<Map<Stream, StreamCut>> futureCuts = readerGroup.generateStreamCuts(executor);
    EventRead<String> emptyEvent = reader.readNextEvent(100);
    cuts = futureCuts.get();
    segmentNames = ImmutableSet.of(getQualifiedStreamSegmentName("test", "test", computeSegmentId(1, 1)),
            getQualifiedStreamSegmentName("test", "test", computeSegmentId(2, 1)));
    validateCuts(readerGroup, cuts, segmentNames);
    // Scale down to verify that the number of segments drops back.
    map = new HashMap<>();
    map.put(0.0, 1.0);
    ArrayList<Long> toSeal = new ArrayList<>();
    toSeal.add(computeSegmentId(1, 1));
    toSeal.add(computeSegmentId(2, 1));
    result = controller.scaleStream(stream, Collections.unmodifiableList(toSeal), map, executor).getFuture().get();
    assertTrue(result);
    log.info("Finished 2nd scaling");
    writer.writeEvent("0", "fpj was here again2").get();
    // The reader sees that the segment is empty.
    emptyEvent = reader.readNextEvent(100);
    assertNull(emptyEvent.getEvent());
    checkpoint = readerGroup.initiateCheckpoint("cp3", executor);
    cpEvent = reader.readNextEvent(100);
    assertEquals("cp3", cpEvent.getCheckpointName());
    // The reader releases its segments here.
    event0 = reader.readNextEvent(5000);
    assertTrue(event0.getEvent().endsWith("2"));
    cuts = readerGroup.getStreamCuts();
    long three = computeSegmentId(3, 2);
    validateCuts(readerGroup, cuts, Collections.singleton(getQualifiedStreamSegmentName("test", "test", three)));
    // Scale up to 4 segments again.
    map = new HashMap<>();
    map.put(0.0, 0.25);
    map.put(0.25, 0.5);
    map.put(0.5, 0.75);
    map.put(0.75, 1.0);
    result = controller.scaleStream(stream, Collections.singletonList(three), map, executor).getFuture().get();
    assertTrue(result);
    log.info("Finished 3rd scaling");
    writer.writeEvent("0", "fpj was here again3").get();
    // The reader sees that the segment is empty.
    emptyEvent = reader.readNextEvent(100);
    assertNull(emptyEvent.getEvent());
    readerGroup.initiateCheckpoint("cp4", executor);
    cpEvent = reader.readNextEvent(1000);
    assertEquals("cp4", cpEvent.getCheckpointName());
    // The reader releases its segments here.
    event0 = reader.readNextEvent(5000);
    assertNotNull(event0.getEvent());
    cuts = readerGroup.getStreamCuts();
    segmentNames = new HashSet<>();
    long four = computeSegmentId(4, 3);
    long five = computeSegmentId(5, 3);
    long six = computeSegmentId(6, 3);
    long seven = computeSegmentId(7, 3);
    segmentNames.add(getQualifiedStreamSegmentName("test", "test", four));
    segmentNames.add(getQualifiedStreamSegmentName("test", "test", five));
    segmentNames.add(getQualifiedStreamSegmentName("test", "test", six));
    segmentNames.add(getQualifiedStreamSegmentName("test", "test", seven));
    validateCuts(readerGroup, cuts, Collections.unmodifiableSet(segmentNames));
}
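validateCuts and getQualifiedStreamSegmentName are helpers from the test suite that are not reproduced on this page. As a rough sketch of what the validation amounts to (the body below is an assumption, not the project's actual helper), it compares the segments covered by the returned cuts against the expected set of fully qualified segment names:
    // Hypothetical sketch of a validateCuts-style check; the real helper lives in the test base class.
    private void validateCuts(ReaderGroup readerGroup, Map<Stream, StreamCut> cuts, Set<String> expectedSegmentNames) {
        Set<String> observed = cuts.values().stream()
                .flatMap(cut -> cut.asImpl().getPositions().keySet().stream())
                .map(Segment::getScopedName)
                .collect(Collectors.toSet());
        assertEquals(expectedSegmentNames, observed);
    }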
Use of io.pravega.client.admin.impl.ReaderGroupManagerImpl in project pravega by pravega.
The class LargeEventTest, method testReadWriteWithSegmentStoreRestart.
@Test(timeout = 60000)
public void testReadWriteWithSegmentStoreRestart() throws ExecutionException, InterruptedException {
    String readerGroupName = "testLargeEventFailoverReaderGroup";
    String streamName = "SegmentStoreRestart";
    StreamConfiguration config = getStreamConfiguration(NUM_READERS);
    createScopeStream(SCOPE_NAME, streamName, config);
    int events = 1;
    AtomicInteger generation = new AtomicInteger(0);
    merge(eventsWrittenToPravega, generateEventData(NUM_WRITERS, events * generation.getAndIncrement(), events, LARGE_EVENT_SIZE));
    eventsReadFromPravega = readWriteCycle(streamName, readerGroupName, eventsWrittenToPravega);
    validateEventReads(eventsReadFromPravega, eventsWrittenToPravega);
    // Passing in this restart callback will override the default behavior of closing the connection.
    Runnable restart = () -> {
        // Reset the server, in effect clearing the AppendProcessor and PravegaRequestProcessor.
        this.server.close();
        this.server = new PravegaConnectionListener(false, servicePort, store, tableStore, serviceBuilder.getLowPriorityExecutor());
        this.server.startListening();
    };
    restart.run();
    Map<Integer, List<ByteBuffer>> data = generateEventData(NUM_WRITERS, events * generation.getAndIncrement(), events, LARGE_EVENT_SIZE);
    merge(eventsWrittenToPravega, data);
    eventsReadFromPravega = readWriteCycle(streamName, readerGroupName, data);
    validateEventReads(eventsReadFromPravega, eventsWrittenToPravega);
    // Clear objects necessary for read-write validation.
    stopReadFlag = new AtomicBoolean(false);
    eventsReadFromPravega.clear();
    eventReadCount.set(0);
    // Generate new data.
    data = generateEventData(NUM_WRITERS, events * generation.getAndIncrement(), events, LARGE_EVENT_SIZE);
    merge(eventsWrittenToPravega, data);
    AtomicInteger sendCount = new AtomicInteger(0);
    Supplier<Boolean> predicate = () -> sendCount.getAndIncrement() == CLOSE_WRITE_COUNT;
    // Now try the restart *during* a large event write.
    AtomicReference<Boolean> latch = new AtomicReference<>(true);
    try (ConnectionExporter connectionFactory = new ConnectionExporter(ClientConfig.builder().build(), latch, restart, predicate);
         ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE_NAME, controller, connectionFactory);
         ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(SCOPE_NAME, controller, clientFactory)) {
        // Start writing events to the stream.
        val writers = createEventWriters(streamName, NUM_WRITERS, clientFactory, data);
        Futures.allOf(writers).get();
        // Create a ReaderGroup.
        createReaderGroup(readerGroupName, readerGroupManager, streamName);
        // Create Readers.
        val readers = createEventReaders(NUM_READERS, clientFactory, readerGroupName, eventsReadFromPravega);
        stopReadFlag.set(true);
        Futures.allOf(readers).get();
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    validateEventReads(eventsReadFromPravega, eventsWrittenToPravega);
    validateCleanUp(streamName);
}
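generateEventData, merge, and readWriteCycle are helpers defined elsewhere in LargeEventTest. For orientation, here is a plausible sketch of the data-generation step, assuming one list of fixed-size random payloads per writer id; the signature matches the calls above, but the body is an assumption:
    // Hypothetical sketch of generateEventData; the real helper lives in LargeEventTest.
    private Map<Integer, List<ByteBuffer>> generateEventData(int numWriters, int startId, int eventsPerWriter, int eventSize) {
        // startId offsets event identifiers across generations in the real test; omitted here for brevity.
        Map<Integer, List<ByteBuffer>> data = new HashMap<>();
        Random random = new Random();
        for (int writer = 0; writer < numWriters; writer++) {
            List<ByteBuffer> payloads = new ArrayList<>();
            for (int i = 0; i < eventsPerWriter; i++) {
                byte[] bytes = new byte[eventSize];
                random.nextBytes(bytes);
                payloads.add(ByteBuffer.wrap(bytes));
            }
            data.put(writer, payloads);
        }
        return data;
    }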
Use of io.pravega.client.admin.impl.ReaderGroupManagerImpl in project pravega by pravega.
The class EndToEndReaderGroupTest, method testDeleteReaderGroup.
@Test(timeout = 30000)
public void testDeleteReaderGroup() throws Exception {
    StreamConfiguration config = getStreamConfig();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    String streamName = "testDeleteReaderGroup";
    controller.createScope("test").get();
    controller.createStream("test", streamName, config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    // Create a ReaderGroup.
    String groupName = "testDeleteReaderGroup-group";
    groupManager.createReaderGroup(groupName, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/" + streamName).build());
    // Create a reader.
    EventStreamReader<String> reader = clientFactory.createReader("reader1", groupName, serializer, ReaderConfig.builder().build());
    // Write events into the stream.
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    writer.writeEvent("0", "data1").get();
    EventRead<String> eventRead = reader.readNextEvent(10000);
    assertEquals("data1", eventRead.getEvent());
    // Close the reader; this internally invokes ReaderGroup#readerOffline.
    reader.close();
    // Delete the reader group.
    groupManager.deleteReaderGroup(groupName);
    // Create a new reader group with the same name; it starts from the head of the stream,
    // so the same event is read again.
    groupManager.createReaderGroup(groupName, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/" + streamName).build());
    reader = clientFactory.createReader("reader1", groupName, new JavaSerializer<>(), ReaderConfig.builder().build());
    eventRead = reader.readNextEvent(10000);
    assertEquals("data1", eventRead.getEvent());
}
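The close() call above reports the reader offline along with its final position. If a reader process dies without closing cleanly, the same bookkeeping can be done explicitly through ReaderGroup#readerOffline so its segments are redistributed to the remaining readers; a minimal sketch:
    ReaderGroup group = groupManager.getReaderGroup(groupName);
    // A null position tells the group to fall back to the reader's last checkpointed position.
    group.readerOffline("reader1", null);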
Use of io.pravega.client.admin.impl.ReaderGroupManagerImpl in project pravega by pravega.
The class EndToEndReaderGroupTest, method testResetNonSubscriberToSubscriberReaderGroup.
@Test(timeout = 30000)
public void testResetNonSubscriberToSubscriberReaderGroup() throws InterruptedException, ExecutionException {
    StreamConfiguration config = getStreamConfig();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    String streamName = "testResetNonSubscriberToSubscriberReaderGroup";
    controller.createScope("test").get();
    controller.createStream("test", streamName, config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    String group = "testResetNonSubscriberToSubscriberReaderGroup-group";
    // Create a non-subscriber ReaderGroup.
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/" + streamName).build());
    List<String> subs = controller.listSubscribers("test", streamName).get();
    assertFalse("Subscriber list should not contain the reader group", subs.contains("test/" + group));
    // Reset the group with a retention type, which turns it into a subscriber.
    ReaderGroup subGroup = groupManager.getReaderGroup(group);
    subGroup.resetReaderGroup(ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/" + streamName).retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT).build());
    subs = controller.listSubscribers("test", streamName).get();
    assertTrue("Subscriber list should contain the reader group", subs.contains("test/" + group));
}
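Resetting an existing group is one route to subscriber status; including the retention type in the initial configuration should likewise register the group as a subscriber from the start, using the same builder API as above:
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .stream("test/" + streamName)
            .retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT)
            .build());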