Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class EndToEndReaderGroupTest, method testDeleteReaderGroup.
@Test(timeout = 30000)
public void testDeleteReaderGroup() throws Exception {
    StreamConfiguration config = getStreamConfig();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    String streamName = "testDeleteReaderGroup";
    controller.createScope("test").get();
    controller.createStream("test", streamName, config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    // Create a ReaderGroup.
    String groupName = "testDeleteReaderGroup-group";
    groupManager.createReaderGroup(groupName, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/" + streamName).build());
    // Create a Reader.
    EventStreamReader<String> reader = clientFactory.createReader("reader1", groupName, serializer, ReaderConfig.builder().build());
    // Write events into the stream.
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    writer.writeEvent("0", "data1").get();
    EventRead<String> eventRead = reader.readNextEvent(10000);
    assertEquals("data1", eventRead.getEvent());
    // Close the reader; this internally invokes ReaderGroup#readerOffline.
    reader.close();
    // Delete the reader group.
    groupManager.deleteReaderGroup(groupName);
    // Create a new reader group with the same name.
    groupManager.createReaderGroup(groupName, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/" + streamName).build());
    reader = clientFactory.createReader("reader1", groupName, new JavaSerializer<>(), ReaderConfig.builder().build());
    eventRead = reader.readNextEvent(10000);
    assertEquals("data1", eventRead.getEvent());
}
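The snippet references two members of the test class that are not shown here: the serializer field (a String serializer, consistent with the JavaSerializer<String> used when the reader is recreated) and the getStreamConfig() helper. A minimal sketch of what such a helper might look like, assuming a single fixed segment; the project's actual configuration may differ.

// Hypothetical sketch of the getStreamConfig() helper used above; the real test class
// may choose a different scaling policy.
private StreamConfiguration getStreamConfig() {
    // A single fixed segment is the simplest configuration for this kind of test.
    return StreamConfiguration.builder()
                              .scalingPolicy(ScalingPolicy.fixed(1))
                              .build();
}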
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class EndToEndReaderGroupTest, method testResetNonSubscriberToSubscriberReaderGroup.
@Test(timeout = 30000)
public void testResetNonSubscriberToSubscriberReaderGroup() throws InterruptedException, ExecutionException {
    StreamConfiguration config = getStreamConfig();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    String streamName = "testResetNonSubscriberToSubscriberReaderGroup";
    controller.createScope("test").get();
    controller.createStream("test", streamName, config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    String group = "testResetNonSubscriberToSubscriberReaderGroup-group";
    // Create a ReaderGroup that is not a subscriber.
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/" + streamName).build());
    List<String> subs = controller.listSubscribers("test", streamName).get();
    assertFalse("Subscriber list contains required reader group", subs.contains("test/" + group));
    // Reset the ReaderGroup with a retention type, which turns it into a subscriber.
    ReaderGroup subGroup = groupManager.getReaderGroup(group);
    subGroup.resetReaderGroup(ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/" + streamName).retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT).build());
    subs = controller.listSubscribers("test", streamName).get();
    assertTrue("Subscriber list does not contain required reader group", subs.contains("test/" + group));
}
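For comparison, the same subscriber configuration can also be supplied when the reader group is first created, instead of being applied through a reset. A short sketch reusing the names from the snippet above; the group name subscriberGroup is illustrative.

// Hypothetical: create the reader group as a subscriber directly, rather than via reset.
String subscriberGroup = group + "-subscriber";
groupManager.createReaderGroup(subscriberGroup, ReaderGroupConfig.builder()
        .disableAutomaticCheckpoints()
        .stream("test/" + streamName)
        .retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT)
        .build());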
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class EndToEndTransactionOrderTest, method setUp.
@Before
public void setUp() throws Exception {
    zkTestServer = new TestingServerStarter().start();
    int port = Config.SERVICE_PORT;
    controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), false, controllerPort, serviceHost, servicePort, Config.HOST_STORE_CONTAINER_COUNT);
    controller = controllerWrapper.getController();
    connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    internalCF = new ClientFactoryImpl(NameUtils.INTERNAL_SCOPE_NAME, controller, connectionFactory);
    // Bootstrap an in-memory segment store and table store.
    ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
    serviceBuilder.initialize();
    StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
    TableStore tableStore = serviceBuilder.createTableStoreService();
    controllerWrapper.getControllerService().createScope(NameUtils.INTERNAL_SCOPE_NAME, 0L).get();
    autoScaleMonitor = new AutoScaleMonitor(store, internalCF, AutoScalerConfig.builder().with(AutoScalerConfig.MUTE_IN_SECONDS, 0).with(AutoScalerConfig.COOLDOWN_IN_SECONDS, 0).build());
    server = new PravegaConnectionListener(false, false, "localhost", servicePort, store, tableStore, autoScaleMonitor.getStatsRecorder(), autoScaleMonitor.getTableSegmentStatsRecorder(), null, null, null, true, serviceBuilder.getLowPriorityExecutor(), SecurityConfigDefaults.TLS_PROTOCOL_VERSION);
    server.startListening();
    controllerWrapper.awaitRunning();
    // Create the test scope and stream, then a reader group and a reader for the tests.
    controllerWrapper.getControllerService().createScope("test", 0L).get();
    controller.createStream("test", "test", config).get();
    clientFactory = new MockClientFactory("test", controller, internalCF.getConnectionPool());
    readerGroupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    readerGroupManager.createReaderGroup("readergrp", ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(2000).groupRefreshTimeMillis(1000).stream("test/test").build());
    reader = clientFactory.createReader("1", "readergrp", new IntegerSerializer(), ReaderConfig.builder().build());
}
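This setUp method assigns to fields of the test class that are not part of the snippet. A sketch of the declarations it implies, with types inferred from how each name is used above; the ports and stream configuration are illustrative assumptions.

// Hypothetical field declarations implied by the setUp method; the actual test class may differ.
private TestingServer zkTestServer;
private ControllerWrapper controllerWrapper;
private Controller controller;
private ConnectionFactory connectionFactory;
private ClientFactoryImpl internalCF;
private AutoScaleMonitor autoScaleMonitor;
private PravegaConnectionListener server;
private MockClientFactory clientFactory;
private ReaderGroupManager readerGroupManager;
private EventStreamReader<Integer> reader;
private final String serviceHost = "localhost";
private final int controllerPort = 9090;   // assumed port
private final int servicePort = 12345;     // assumed port
private final StreamConfiguration config = StreamConfiguration.builder()
        .scalingPolicy(ScalingPolicy.fixed(1))   // assumed scaling policy
        .build();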
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class EndToEndStatsTest, method testStatsCount.
@Test(timeout = 10000)
@SuppressWarnings("deprecation")
public void testStatsCount() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope("test", 0L).get();
    controller.createStream("test", "test", config).get();
    @Cleanup EventStreamClientFactory clientFactory = new ClientFactoryImpl("test", controller, ClientConfig.builder().build());
    EventWriterConfig writerConfig = EventWriterConfig.builder().transactionTimeoutTime(10000).build();
    @Cleanup EventStreamWriter<String> eventWriter = clientFactory.createEventWriter("test", new JavaSerializer<>(), writerConfig);
    @Cleanup TransactionalEventStreamWriter<String> txnWriter = clientFactory.createTransactionalEventWriter("test", new JavaSerializer<>(), writerConfig);
    String[] tags = segmentTags(NameUtils.getQualifiedStreamSegmentName("test", "test", 0L));
    // Non-transactional writes are counted as soon as they are acknowledged.
    for (int i = 0; i < 10; i++) {
        eventWriter.writeEvent("test").get();
    }
    assertEventuallyEquals(10, () -> (int) (statsRecorder.getRegistry().counter(SEGMENT_WRITE_EVENTS, tags).count()), 2000);
    assertEventuallyEquals(190, () -> (int) (statsRecorder.getRegistry().counter(SEGMENT_WRITE_BYTES, tags).count()), 100);
    // Transactional writes are not reflected in the parent segment's counters until the commit.
    Transaction<String> transaction = txnWriter.beginTxn();
    for (int i = 0; i < 10; i++) {
        transaction.writeEvent("0", "txntest1");
    }
    assertEventuallyEquals(10, () -> (int) (statsRecorder.getRegistry().counter(SEGMENT_WRITE_EVENTS, tags).count()), 2000);
    assertEventuallyEquals(190, () -> (int) (statsRecorder.getRegistry().counter(SEGMENT_WRITE_BYTES, tags).count()), 100);
    transaction.commit();
    // After the commit, both counters include the transactional events as well.
    assertEventuallyEquals(20, () -> (int) (statsRecorder.getRegistry().counter(SEGMENT_WRITE_EVENTS, tags).count()), 10000);
    assertEventuallyEquals(420, () -> (int) (statsRecorder.getRegistry().counter(SEGMENT_WRITE_BYTES, tags).count()), 100);
}
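The assertEventuallyEquals helper used above comes from the project's test utilities and polls until the metric reaches the expected value or the timeout elapses. A rough, simplified sketch of how such a helper might be written; this is not the project's actual implementation.

// Simplified sketch of a poll-until-equal assertion; the project's own helper may differ.
private static <T> void assertEventuallyEquals(T expected, Callable<T> actual, long timeoutMillis) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!expected.equals(actual.call()) && System.currentTimeMillis() < deadline) {
        Thread.sleep(10); // back off briefly between polls
    }
    assertEquals(expected, actual.call());
}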
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class EndToEndTruncationTest, method testSegmentTruncationWhileReading.
/**
 * This test checks the behavior of a reader (or group of readers) depending on whether segment truncation takes
 * place while reading (first part of the test) or before reading starts (second part).
 *
 * @throws InterruptedException If the current thread is interrupted while waiting for the Controller service.
 */
@Test(timeout = 60000)
public void testSegmentTruncationWhileReading() throws InterruptedException {
    final int totalEvents = 100;
    final String scope = "truncationTests";
    final String streamName = "testSegmentTruncationWhileReading";
    final String readerGroupName = "RGTestSegmentTruncationWhileReading";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope(scope).join();
    controller.createStream(scope, streamName, config).join();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    // Write half of totalEvents to the Stream.
    writeEvents(clientFactory, streamName, totalEvents / 2);
    // Seal the current segment (0) and split it into two segments (1, 2).
    Stream stream = new StreamImpl(scope, streamName);
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.5);
    map.put(0.5, 1.0);
    assertTrue(controller.scaleStream(stream, Lists.newArrayList(0L), map, executorService()).getFuture().join());
    long one = computeSegmentId(1, 1);
    long two = computeSegmentId(2, 1);
    // Write the rest of the events to the new Stream segments.
    ReadWriteUtils.writeEvents(clientFactory, streamName, totalEvents, totalEvents / 2);
    // Instantiate a reader to consume from the Stream.
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory);
    groupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(100).stream(Stream.of(scope, streamName)).build());
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader(String.valueOf(0), readerGroupName, new UTF8StringSerializer(), ReaderConfig.builder().build());
    int read = 0;
    while (read < 75) {
        EventRead<String> event = reader.readNextEvent(1000);
        if (event.getEvent() != null) {
            read++;
        }
    }
    // Let the reader consume some events, then truncate the stream while it is still reading.
    Exceptions.handleInterrupted(() -> Thread.sleep(500));
    Map<Long, Long> streamCutPositions = new HashMap<>();
    streamCutPositions.put(one, 0L);
    streamCutPositions.put(two, 0L);
    assertTrue(controller.truncateStream(scope, streamName, streamCutPositions).join());
    // Wait for the reader to complete and assert that it has read all the events (totalEvents).
    while (read < totalEvents) {
        EventRead<String> event = reader.readNextEvent(1000);
        if (event.getEvent() != null) {
            read++;
        }
    }
    assertEquals(totalEvents, read);
    assertEquals(null, reader.readNextEvent(0).getEvent());
    // Assert that from the truncation call onwards, the available segments are the ones created by scaling.
    List<Long> currentSegments = controller.getCurrentSegments(scope, streamName).join().getSegments().stream().map(Segment::getSegmentId).sorted().collect(toList());
    currentSegments.removeAll(Lists.newArrayList(one, two));
    assertTrue(currentSegments.isEmpty());
    // A new set of readers should only read the events beyond the truncation point (segments 1 and 2).
    final String newReaderGroupName = readerGroupName + "new";
    groupManager.createReaderGroup(newReaderGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build());
    List<CompletableFuture<Integer>> futures = readEvents(clientFactory, newReaderGroupName, 1);
    Futures.allOf(futures).join();
    assertEquals(totalEvents / 2, (int) futures.stream().map(CompletableFuture::join).reduce((a, b) -> a + b).get());
}
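The writeEvents and readEvents helpers come from the project's ReadWriteUtils test utility and are not shown here. A rough sketch of a writer helper with the four-argument shape used above; the exact signature and event payloads are assumptions, and the three-argument overload used earlier in the test presumably delegates to it with an initial id of 0.

// Hypothetical sketch of a writeEvents helper; ReadWriteUtils' real implementation may differ.
static void writeEvents(EventStreamClientFactory clientFactory, String streamName, int totalEvents, int initialId) {
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
            new UTF8StringSerializer(), EventWriterConfig.builder().build());
    for (int i = initialId; i < totalEvents; i++) {
        // Write one event per index; the payload content is illustrative.
        writer.writeEvent(String.valueOf(i)).join();
    }
    writer.flush();
}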