Use of io.pravega.client.connection.impl.ConnectionFactory in project pravega by pravega.
The class EventProcessorTest, method testEventProcessorFailover.
@Test(timeout = 60000)
public void testEventProcessorFailover() throws Exception {
final String scope = "controllerScope2";
final String streamName = "stream2";
final String readerGroup = "readerGroup2";
@Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
controller.createScope(scope).join();
final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
controller.createStream(scope, streamName, config).join();
eventSerializer = new EventSerializer<>(new TestSerializer());
@Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
@Cleanup EventStreamWriter<TestEvent> producer = clientFactory.createEventWriter(streamName, eventSerializer, EventWriterConfig.builder().build());
TestEvent event1 = new TestEvent(0);
producer.writeEvent("key", event1).join();
TestEvent event2 = new TestEvent(1);
producer.writeEvent("key", event2).join();
producer.flush();
EventProcessorSystem system = new EventProcessorSystemImpl("Controller", host, scope, clientFactory, new ReaderGroupManagerImpl(scope, controller, clientFactory));
CheckpointConfig checkpointConfig = CheckpointConfig.builder().type(CheckpointConfig.Type.None).build();
EventProcessorGroupConfig eventProcessorGroupConfig = EventProcessorGroupConfigImpl.builder().eventProcessorCount(1).readerGroupName(readerGroup).streamName(streamName).checkpointConfig(checkpointConfig).build();
LinkedBlockingQueue<TestEvent> eventsProcessed = new LinkedBlockingQueue<>();
EventProcessorConfig<TestEvent> eventProcessorConfig = EventProcessorConfig.<TestEvent>builder()
        .supplier(() -> new EventProcessor<TestEvent>() {
            @Override
            protected void process(TestEvent event, Position position) {
                try {
                    eventsProcessed.offer(event);
                    // keep sending null position
                    getCheckpointer().store(null);
                } catch (CheckpointStoreException e) {
                    e.printStackTrace();
                }
            }
        })
        .serializer(eventSerializer)
        .decider((Throwable e) -> ExceptionHandler.Directive.Stop)
        .config(eventProcessorGroupConfig)
        .build();
@Cleanup EventProcessorGroup<TestEvent> eventProcessorGroup = system.createEventProcessorGroup(eventProcessorConfig, CheckpointStoreFactory.createInMemoryStore(), executorService());
eventProcessorGroup.awaitRunning();
// wait until both events are read
assertEquals(event1, eventsProcessed.take());
assertEquals(event2, eventsProcessed.take());
assertTrue(eventsProcessed.isEmpty());
// shutdown event processor
// upon shutdown readerGroup.offline and reader.close should have been called.
eventProcessorGroup.stopAsync();
eventProcessorGroup.awaitTerminated();
@Cleanup ConnectionFactory connectionFactory2 = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
@Cleanup ClientFactoryImpl clientFactory2 = new ClientFactoryImpl(scope, controller, connectionFactory2);
system = new EventProcessorSystemImpl("Controller2", host, scope, clientFactory2, new ReaderGroupManagerImpl(scope, controller, clientFactory2));
EventProcessorConfig<TestEvent> eventProcessorConfig2 = EventProcessorConfig.<TestEvent>builder()
        .supplier(() -> new EventProcessor<TestEvent>() {
            @Override
            protected void process(TestEvent event, Position position) {
                try {
                    eventsProcessed.offer(event);
                    getCheckpointer().store(null);
                } catch (CheckpointStoreException e) {
                    e.printStackTrace();
                }
            }
        })
        .serializer(eventSerializer)
        .decider((Throwable e) -> ExceptionHandler.Directive.Stop)
        .config(eventProcessorGroupConfig)
        .build();
@Cleanup EventProcessorGroup<TestEvent> eventProcessorGroup2 = system.createEventProcessorGroup(eventProcessorConfig2, CheckpointStoreFactory.createInMemoryStore(), executorService());
eventProcessorGroup2.awaitRunning();
// verify that both events are read again
assertEquals(event1, eventsProcessed.take());
assertEquals(event2, eventsProcessed.take());
assertTrue(eventsProcessed.isEmpty());
eventProcessorGroup2.stopAsync();
eventProcessorGroup2.awaitTerminated();
}
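TestEvent and TestSerializer are helper types defined elsewhere in EventProcessorTest and are not part of this snippet. A minimal hypothetical sketch of what the test requires from them, an event wrapping an int with value-based equality (so the assertEquals calls above work) and a Serializer for it that simply delegates to Pravega's JavaSerializer, could look like this:

// Hypothetical stand-ins for the helper classes the test assumes; not the actual Pravega test code.
static class TestEvent implements Serializable {
    private final int number;

    TestEvent(int number) {
        this.number = number;
    }

    int getNumber() {
        return number;
    }

    @Override
    public boolean equals(Object other) {
        return other instanceof TestEvent && ((TestEvent) other).number == this.number;
    }

    @Override
    public int hashCode() {
        return Integer.hashCode(number);
    }
}

static class TestSerializer implements Serializer<TestEvent> {
    private final JavaSerializer<TestEvent> delegate = new JavaSerializer<>();

    @Override
    public ByteBuffer serialize(TestEvent value) {
        return delegate.serialize(value);
    }

    @Override
    public TestEvent deserialize(ByteBuffer serializedValue) {
        return delegate.deserialize(serializedValue);
    }
}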
Use of io.pravega.client.connection.impl.ConnectionFactory in project pravega by pravega.
The class EventProcessorTest, method testEventProcessorRebalance.
@Test(timeout = 60000)
public void testEventProcessorRebalance() throws Exception {
final String scope = "scope";
final String streamName = "stream";
final String readerGroupName = "readerGroup";
controller.createScope(scope).join();
final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(4)).build();
controller.createStream(scope, streamName, config).join();
eventSerializer = new EventSerializer<>(new TestSerializer());
@Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
@Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
CheckpointConfig.CheckpointPeriod period = CheckpointConfig.CheckpointPeriod.builder().numEvents(1).numSeconds(1).build();
CheckpointConfig checkpointConfig = CheckpointConfig.builder().type(CheckpointConfig.Type.Periodic).checkpointPeriod(period).build();
EventProcessorGroupConfig eventProcessorGroupConfig = EventProcessorGroupConfigImpl.builder().eventProcessorCount(1).readerGroupName(readerGroupName).streamName(streamName).checkpointConfig(checkpointConfig).build();
LinkedBlockingQueue<Integer> queue1 = new LinkedBlockingQueue<>();
EventProcessorConfig<TestEvent> eventProcessorConfig1 = EventProcessorConfig.<TestEvent>builder().supplier(() -> new TestEventProcessor2(queue1)).serializer(eventSerializer).decider((Throwable e) -> ExceptionHandler.Directive.Stop).config(eventProcessorGroupConfig).minRebalanceIntervalMillis(Duration.ofMillis(100).toMillis()).build();
// create a group and verify that all events can be written and read by readers in this group.
EventProcessorSystem system1 = new EventProcessorSystemImpl("Controller", "process1", scope, clientFactory, new ReaderGroupManagerImpl(scope, controller, clientFactory));
@Cleanup EventProcessorGroup<TestEvent> eventProcessorGroup1 = system1.createEventProcessorGroup(eventProcessorConfig1, CheckpointStoreFactory.createInMemoryStore(), executorService());
eventProcessorGroup1.awaitRunning();
log.info("first event processor started");
@Cleanup EventStreamWriter<TestEvent> writer = clientFactory.createEventWriter(streamName, eventSerializer, EventWriterConfig.builder().build());
// write 10 events and read them back from the queue passed to the first event processor
List<Integer> input = IntStream.range(0, 10).boxed().collect(Collectors.toList());
ConcurrentSkipListSet<Integer> output = new ConcurrentSkipListSet<>();
for (int val : input) {
    writer.writeEvent(new TestEvent(val));
}
writer.flush();
// now wait until all the entries are read back.
for (int i = 0; i < 10; i++) {
    // read 10 events back
    Integer entry = queue1.take();
    output.add(entry);
}
assertEquals(10, output.size());
log.info("first event processor read all the messages");
LinkedBlockingQueue<Integer> queue2 = new LinkedBlockingQueue<>();
EventProcessorConfig<TestEvent> eventProcessorConfig2 = EventProcessorConfig.<TestEvent>builder().supplier(() -> new TestEventProcessor2(queue2)).serializer(eventSerializer).decider((Throwable e) -> ExceptionHandler.Directive.Stop).config(eventProcessorGroupConfig).minRebalanceIntervalMillis(Duration.ofMillis(100).toMillis()).build();
// add another system and event processor group (effectively adding a new set of readers to the reader group)
EventProcessorSystem system2 = new EventProcessorSystemImpl("Controller", "process2", scope, clientFactory, new ReaderGroupManagerImpl(scope, controller, clientFactory));
@Cleanup EventProcessorGroup<TestEvent> eventProcessorGroup2 = system2.createEventProcessorGroup(eventProcessorConfig2, CheckpointStoreFactory.createInMemoryStore(), executorService());
eventProcessorGroup2.awaitRunning();
log.info("second event processor started");
AtomicInteger queue1EntriesFound = new AtomicInteger(0);
AtomicInteger queue2EntriesFound = new AtomicInteger(0);
ConcurrentSkipListSet<Integer> output2 = new ConcurrentSkipListSet<>();
// wait until the rebalance has had a chance to happen and both readers own at least one segment.
@Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory);
ReaderGroup readerGroup = groupManager.getReaderGroup(readerGroupName);
AtomicBoolean allAssigned = new AtomicBoolean(false);
Futures.loop(() -> !allAssigned.get(),
        () -> Futures.delayedFuture(Duration.ofMillis(100), executorService()).thenAccept(v -> {
            ReaderSegmentDistribution distribution = readerGroup.getReaderSegmentDistribution();
            int numberOfReaders = distribution.getReaderSegmentDistribution().size();
            allAssigned.set(numberOfReaders == 2 && distribution.getReaderSegmentDistribution().values().stream().noneMatch(x -> x == 0));
        }), executorService()).join();
// write 10 new events
for (int val : input) {
    writer.writeEvent(new TestEvent(val));
}
writer.flush();
// drain both queues until all 10 new events have been seen; after the rebalance each reader should contribute at least one.
CompletableFuture.allOf(CompletableFuture.runAsync(() -> {
    while (output2.size() < 10) {
        Integer entry = queue1.poll();
        if (entry != null) {
            log.info("entry read from queue 1: {}", entry);
            queue1EntriesFound.incrementAndGet();
            output2.add(entry);
        } else {
            Exceptions.handleInterrupted(() -> Thread.sleep(100));
        }
    }
}), CompletableFuture.runAsync(() -> {
    while (output2.size() < 10) {
        Integer entry = queue2.poll();
        if (entry != null) {
            log.info("entry read from queue 2: {}", entry);
            queue2EntriesFound.incrementAndGet();
            output2.add(entry);
        } else {
            Exceptions.handleInterrupted(() -> Thread.sleep(100));
        }
    }
})).join();
assertTrue(queue1EntriesFound.get() > 0);
assertTrue(queue2EntriesFound.get() > 0);
assertEquals(10, output2.size());
}
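TestEventProcessor2 is likewise a helper defined elsewhere in EventProcessorTest. All the test relies on is that it pushes an identifier for each event into the queue handed to its constructor and checkpoints the position it receives; a hypothetical sketch consistent with that usage (getNumber() being the accessor from the TestEvent sketch above) is:

// Hypothetical sketch, not the actual test class: forward the event's number to the queue and
// checkpoint the position passed to process().
static class TestEventProcessor2 extends EventProcessor<TestEvent> {
    private final LinkedBlockingQueue<Integer> queue;

    TestEventProcessor2(LinkedBlockingQueue<Integer> queue) {
        this.queue = queue;
    }

    @Override
    protected void process(TestEvent event, Position position) {
        try {
            queue.offer(event.getNumber());
            getCheckpointer().store(position);
        } catch (CheckpointStoreException e) {
            e.printStackTrace();
        }
    }
}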
Use of io.pravega.client.connection.impl.ConnectionFactory in project pravega by pravega.
The class EndToEndTruncationTest, method testWriteDuringTruncationAndDeletion.
@Test(timeout = 30000)
public void testWriteDuringTruncationAndDeletion() throws Exception {
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 2)).build();
LocalController controller = (LocalController) PRAVEGA.getLocalController();
String streamName = "testWriteDuringTruncationAndDeletion";
controller.createScope("test").get();
controller.createStream("test", streamName, config).get();
config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
controller.updateStream("test", streamName, config).get();
@Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
@Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
@Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<>(), EventWriterConfig.builder().build());
// routing key "0" translates to key 0.8. This write happens to segment 1.
writer.writeEvent("0", "truncationTest1").get();
// scale down to one segment.
Stream stream = new StreamImpl("test", streamName);
Map<Double, Double> map = new HashMap<>();
map.put(0.0, 1.0);
assertTrue("Stream Scale down", controller.scaleStream(stream, Lists.newArrayList(0L, 1L), map, executorService()).getFuture().get());
// truncate stream at segment 2, offset 0.
Map<Long, Long> streamCutPositions = new HashMap<>();
streamCutPositions.put(computeSegmentId(2, 1), 0L);
assertTrue("Truncate stream", controller.truncateStream("test", streamName, streamCutPositions).get());
// routing key "2" translates to key 0.2.
// this write translates to a write to Segment 0, but since segment 0 is truncated the write should happen on segment 2.
// write to segment 0
writer.writeEvent("2", "truncationTest2").get();
String group = "testWriteDuringTruncationAndDeletion-group";
@Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
groupManager.createReaderGroup(group, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/" + streamName).build());
@Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", group, new JavaSerializer<>(), ReaderConfig.builder().build());
EventRead<String> event = reader.readNextEvent(10000);
assertNotNull(event);
assertEquals("truncationTest2", event.getEvent());
// Seal and Delete stream.
assertTrue(controller.sealStream("test", streamName).get());
assertTrue(controller.deleteStream("test", streamName).get());
// write by an existing writer to a deleted stream should complete exceptionally.
assertFutureThrows("Should throw NoSuchSegmentException", writer.writeEvent("2", "write to deleted stream"), e -> NoSuchSegmentException.class.isAssignableFrom(e.getClass()));
// subsequent writes will throw an exception to the application.
assertThrows(RuntimeException.class, () -> writer.writeEvent("test"));
}
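The statically imported computeSegmentId helper, assumed here to be Pravega's NameUtils.computeSegmentId (StreamSegmentNameUtils in older releases), packs the creation epoch into the high 32 bits of the segment id; that is why the truncation point for the single post-scale segment is written as computeSegmentId(2, 1) above. A minimal sketch of the assumed packing:

// Sketch of the assumed segment-id layout: epoch in the high 32 bits, segment number in the low 32 bits,
// so computeSegmentId(2, 1) == (1L << 32) + 2.
static long computeSegmentId(int segmentNumber, int epoch) {
    return ((long) epoch << 32) + segmentNumber;
}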
Use of io.pravega.client.connection.impl.ConnectionFactory in project pravega by pravega.
The class EndToEndReaderGroupTest, method testLaggingResetReaderGroup.
@Test(timeout = 30000)
public void testLaggingResetReaderGroup() throws Exception {
StreamConfiguration config = getStreamConfig();
LocalController controller = (LocalController) PRAVEGA.getLocalController();
controller.createScope("test").get();
controller.createStream("test", "testLaggingResetReaderGroup", config).get();
controller.createStream("test", "testLaggingResetReaderGroup2", config).get();
@Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
@Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
@Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
UUID rgId = UUID.randomUUID();
ReaderGroupConfig rgConf = ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/testLaggingResetReaderGroup").retentionType(ReaderGroupConfig.StreamDataRetention.NONE).build();
rgConf = ReaderGroupConfig.cloneConfig(rgConf, rgId, 0L);
// Create a ReaderGroup
groupManager.createReaderGroup("testLaggingResetReaderGroup-group", rgConf);
ReaderGroupConfig updateConf = ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/testLaggingResetReaderGroup2").retentionType(ReaderGroupConfig.StreamDataRetention.NONE).build();
updateConf = ReaderGroupConfig.cloneConfig(updateConf, rgId, 0L);
// Update from the controller end
controller.updateReaderGroup("test", "testLaggingResetReaderGroup-group", updateConf).join();
ReaderGroup group = groupManager.getReaderGroup("testLaggingResetReaderGroup-group");
// Reset from client end
group.resetReaderGroup(ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/testLaggingResetReaderGroup").build());
}
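The test ends without assertions. A hedged follow-up sketch (hypothetical, not part of the original test) that would exercise the group after the client-side reset, reusing only APIs already shown elsewhere on this page:

// Hypothetical continuation: attach a reader to the reset group; with no events written to the
// stream, a timed read returns no event.
@Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", "testLaggingResetReaderGroup-group", new JavaSerializer<>(), ReaderConfig.builder().build());
assertNull(reader.readNextEvent(100).getEvent());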
Use of io.pravega.client.connection.impl.ConnectionFactory in project pravega by pravega.
The class EndToEndReaderGroupTest, method testReaderOffline.
@Test(timeout = 30000)
public void testReaderOffline() throws Exception {
StreamConfiguration config = getStreamConfig();
LocalController controller = (LocalController) PRAVEGA.getLocalController();
String scopeName = "test";
String streamName = "testReaderOffline";
controller.createScope(scopeName).get();
controller.createStream(scopeName, streamName, config).get();
@Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
@Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(scopeName, controller, connectionFactory);
@Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scopeName, controller, clientFactory);
String groupName = "testReaderOffline-group";
groupManager.createReaderGroup(groupName, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(scopeName + "/" + streamName).build());
final ReaderGroup readerGroup = groupManager.getReaderGroup(groupName);
// create a reader
@Cleanup EventStreamReader<String> reader1 = clientFactory.createReader("reader1", groupName, new JavaSerializer<>(), ReaderConfig.builder().build());
EventRead<String> eventRead = reader1.readNextEvent(100);
assertNull("Event read should be null since no events are written", eventRead.getEvent());
@Cleanup EventStreamReader<String> reader2 = clientFactory.createReader("reader2", groupName, new JavaSerializer<>(), ReaderConfig.builder().build());
// make reader1 offline
readerGroup.readerOffline("reader1", null);
// write events into the stream.
@Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<>(), EventWriterConfig.builder().build());
writer.writeEvent("0", "data1").get();
writer.writeEvent("0", "data2").get();
eventRead = reader2.readNextEvent(10000);
assertEquals("data1", eventRead.getEvent());
}