Use of io.pravega.client.stream.ReaderSegmentDistribution in project pravega by pravega.
In class ReaderGroupImplTest, method readerGroupSegmentDistribution:
@Test
@SuppressWarnings("unchecked")
public void readerGroupSegmentDistribution() {
    ReaderGroupState state = mock(ReaderGroupState.class);
    when(synchronizer.getState()).thenReturn(state);
    Set<String> readers = new HashSet<>();
    readers.add("1");
    readers.add("2");
    readers.add("3");
    when(state.getOnlineReaders()).thenReturn(readers);
    SegmentWithRange segment = mock(SegmentWithRange.class);
    Map<SegmentWithRange, Long> map = Collections.singletonMap(segment, 0L);
    when(state.getAssignedSegments(anyString())).thenReturn(map);
    when(state.getNumberOfUnassignedSegments()).thenReturn(2);
    ReaderSegmentDistribution readerSegmentDistribution = readerGroup.getReaderSegmentDistribution();
    Map<String, Integer> distribution = readerSegmentDistribution.getReaderSegmentDistribution();
    assertEquals(3, distribution.size());
    assertTrue(distribution.containsKey("1"));
    assertTrue(distribution.containsKey("2"));
    assertTrue(distribution.containsKey("3"));
    assertEquals(2, readerSegmentDistribution.getUnassignedSegments());
    assertEquals(1, distribution.get("1").intValue());
    assertEquals(1, distribution.get("2").intValue());
    assertEquals(1, distribution.get("3").intValue());
}
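As the test shows, the snapshot pairs a per-reader map (reader ID to assigned-segment count) with a separate unassigned-segment count. A minimal sketch, not from the Pravega sources, of how a caller might inspect such a snapshot; it assumes only the API exercised in this test (getReaderSegmentDistribution and getUnassignedSegments), and the class name DistributionReport is hypothetical:

import io.pravega.client.stream.ReaderSegmentDistribution;

import java.util.Map;

public final class DistributionReport {
    // Hypothetical helper, not part of Pravega: prints each reader's share of the stream.
    public static void print(ReaderSegmentDistribution snapshot) {
        Map<String, Integer> perReader = snapshot.getReaderSegmentDistribution();
        int unassigned = snapshot.getUnassignedSegments();
        // Total segments = sum of per-reader assignments plus the unassigned count,
        // mirroring the arithmetic in EventProcessorGroupImpl#rebalance below.
        int total = perReader.values().stream().mapToInt(Integer::intValue).sum() + unassigned;
        perReader.forEach((readerId, count) ->
                System.out.printf("reader %s holds %d of %d segments%n", readerId, count, total));
        System.out.printf("unassigned segments: %d%n", unassigned);
    }
}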
Use of io.pravega.client.stream.ReaderSegmentDistribution in project pravega by pravega.
In class EventProcessorGroupImpl, method rebalance:
@VisibleForTesting
void rebalance() {
    try {
        ReaderSegmentDistribution readerSegmentDistribution = readerGroup.getReaderSegmentDistribution();
        Map<String, Integer> distribution = readerSegmentDistribution.getReaderSegmentDistribution();
        int readerCount = distribution.size();
        int unassigned = readerSegmentDistribution.getUnassignedSegments();
        int segmentCount = distribution.values().stream().reduce(0, Integer::sum) + unassigned;
        // If there are idle readers (readers with no segment assignments), identify and replace overloaded readers.
        boolean idleReaders = distribution.entrySet().stream()
                .anyMatch(x -> !Strings.isNullOrEmpty(x.getKey()) && x.getValue() == 0);
        if (idleReaders) {
            distribution.forEach((readerId, assigned) -> {
                if (!Strings.isNullOrEmpty(readerId)) {
                    // Check that the reader belongs to this group and is eligible for rebalance.
                    if (eventProcessorMap.containsKey(readerId) && isRebalanceCandidate(assigned, readerCount, segmentCount)) {
                        replaceCell(readerId);
                    }
                }
            });
        }
    } catch (Exception e) {
        Throwable realException = Exceptions.unwrap(e);
        log.warn("Re-balance failed with exception {} {}", realException.getClass().getSimpleName(), realException.getMessage());
    }
}
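The helper isRebalanceCandidate is not shown in this snippet. A hedged sketch of what it could look like, assuming a reader counts as overloaded when it holds more than the ceiling of the fair share (segmentCount / readerCount); this assumption is consistent with every case in the test below, but the actual Pravega implementation may differ:

// Hypothetical sketch -- the real isRebalanceCandidate may differ.
// Treat a reader as overloaded when it holds more segments than the
// fair share, i.e. more than ceil(segmentCount / readerCount).
private boolean isRebalanceCandidate(int assigned, int readerCount, int segmentCount) {
    if (readerCount == 0) {
        return false; // no readers, nothing to balance
    }
    int fairShare = (segmentCount + readerCount - 1) / readerCount; // ceiling division
    return assigned > fairShare;
}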
Use of io.pravega.client.stream.ReaderSegmentDistribution in project pravega by pravega.
In class EventProcessorTest, method testEventProcessorGroupRebalance:
@Test(timeout = 10000)
@SuppressWarnings("unchecked")
public void testEventProcessorGroupRebalance() throws CheckpointStoreException, ReinitializationRequiredException {
    String systemName = "rebalance";
    String readerGroupName = "rebalance";
    CheckpointStore checkpointStore = spy(CheckpointStoreFactory.createInMemoryStore());
    checkpointStore.addReaderGroup(PROCESS, readerGroupName);
    EventProcessorGroupConfig config = createEventProcessorGroupConfig(2);
    EventStreamClientFactory clientFactory = Mockito.mock(EventStreamClientFactory.class);
    EventStreamReader<TestEvent> reader = Mockito.mock(EventStreamReader.class);
    Mockito.when(reader.readNextEvent(anyLong())).thenReturn(Mockito.mock(EventReadImpl.class));
    Mockito.when(clientFactory.createReader(anyString(), anyString(), any(), any())).thenAnswer(x -> reader);
    Mockito.when(clientFactory.<String>createEventWriter(anyString(), any(), any())).thenReturn(new EventStreamWriterMock<>());
    ReaderGroup readerGroup = Mockito.mock(ReaderGroup.class);
    Mockito.when(readerGroup.getGroupName()).thenReturn(readerGroupName);
    ReaderGroupManager readerGroupManager = Mockito.mock(ReaderGroupManager.class);
    Mockito.when(readerGroupManager.getReaderGroup(anyString())).then(invocation -> readerGroup);
    EventProcessorSystemImpl system = new EventProcessorSystemImpl(systemName, PROCESS, SCOPE, clientFactory, readerGroupManager);
    EventProcessorConfig<TestEvent> eventProcessorConfig = EventProcessorConfig.<TestEvent>builder()
            .supplier(() -> new TestEventProcessor(false))
            .serializer(new EventSerializer<>())
            .decider((Throwable e) -> ExceptionHandler.Directive.Stop)
            .config(config)
            .minRebalanceIntervalMillis(0L)
            .build();
    // Create the EventProcessorGroup.
    @Cleanup
    EventProcessorGroupImpl<TestEvent> group = (EventProcessorGroupImpl<TestEvent>) system.createEventProcessorGroup(eventProcessorConfig, checkpointStore, executor);
    group.awaitRunning();
    ConcurrentHashMap<String, EventProcessorCell<TestEvent>> eventProcessorMap = group.getEventProcessorMap();
    assertEquals(2, eventProcessorMap.size());
    List<String> readerIds = eventProcessorMap.entrySet().stream().map(Map.Entry::getKey).collect(Collectors.toList());
    // region case 1: even distribution - 2 readers with 2 segments each
    HashMap<String, Integer> distribution = new HashMap<>();
    distribution.put(readerIds.get(0), 2);
    distribution.put(readerIds.get(1), 2);
    ReaderSegmentDistribution readerSegmentDistribution = ReaderSegmentDistribution.builder()
            .readerSegmentDistribution(distribution).unassignedSegments(0).build();
    Mockito.when(readerGroup.getReaderSegmentDistribution()).thenReturn(readerSegmentDistribution);
    // Call rebalance. No new readers should be added and no existing readers removed.
    group.rebalance();
    eventProcessorMap = group.getEventProcessorMap();
    assertEquals(2, eventProcessorMap.size());
    // The original readers should not have been replaced.
    assertTrue(eventProcessorMap.containsKey(readerIds.get(0)));
    assertTrue(eventProcessorMap.containsKey(readerIds.get(1)));
    // endregion
    // region case 2: two external readers with 0 segments assigned and 2 overloaded readers in the
    // reader group; unassigned = 0
    String reader2 = "reader2";
    String reader3 = "reader3";
    distribution = new HashMap<>();
    distribution.put(readerIds.get(0), 2);
    distribution.put(readerIds.get(1), 2);
    distribution.put(reader2, 0);
    distribution.put(reader3, 0);
    readerSegmentDistribution = ReaderSegmentDistribution.builder()
            .readerSegmentDistribution(distribution).unassignedSegments(0).build();
    Mockito.when(readerGroup.getReaderSegmentDistribution()).thenReturn(readerSegmentDistribution);
    // Call rebalance. This should replace the existing overloaded readers.
    group.rebalance();
    eventProcessorMap = group.getEventProcessorMap();
    assertEquals(2, eventProcessorMap.size());
    assertFalse(eventProcessorMap.containsKey(readerIds.get(0)));
    assertFalse(eventProcessorMap.containsKey(readerIds.get(1)));
    Enumeration<String> keys = eventProcessorMap.keys();
    String firstReplacement = keys.nextElement();
    String secondReplacement = keys.nextElement();
    // Verify that checkpointStore.addReader is called twice.
    verify(checkpointStore, times(2)).addReader(any(), any(), eq(firstReplacement));
    verify(checkpointStore, times(2)).addReader(any(), any(), eq(secondReplacement));
    // Update the reader ids to the replacements in the reader group.
    readerIds = eventProcessorMap.entrySet().stream().map(Map.Entry::getKey).collect(Collectors.toList());
    // endregion
    // region case 3: even distribution among 4 readers
    distribution = new HashMap<>();
    distribution.put(readerIds.get(0), 1);
    distribution.put(readerIds.get(1), 1);
    distribution.put(reader2, 1);
    distribution.put(reader3, 1);
    readerSegmentDistribution = ReaderSegmentDistribution.builder()
            .readerSegmentDistribution(distribution).unassignedSegments(0).build();
    Mockito.when(readerGroup.getReaderSegmentDistribution()).thenReturn(readerSegmentDistribution);
    // Call rebalance. Nothing should happen.
    group.rebalance();
    // No change to the group.
    eventProcessorMap = group.getEventProcessorMap();
    assertEquals(2, eventProcessorMap.size());
    assertTrue(eventProcessorMap.containsKey(readerIds.get(0)));
    assertTrue(eventProcessorMap.containsKey(readerIds.get(1)));
    // endregion
    // region case 4: 1 overloaded reader and 2 unassigned segments
    distribution = new HashMap<>();
    distribution.put(readerIds.get(0), 2);
    distribution.put(readerIds.get(1), 0);
    distribution.put(reader2, 0);
    distribution.put(reader3, 0);
    readerSegmentDistribution = ReaderSegmentDistribution.builder()
            .readerSegmentDistribution(distribution).unassignedSegments(2).build();
    Mockito.when(readerGroup.getReaderSegmentDistribution()).thenReturn(readerSegmentDistribution);
    // Call rebalance. The overloaded reader should be replaced.
    group.rebalance();
    // reader0 should have been replaced.
    eventProcessorMap = group.getEventProcessorMap();
    assertEquals(2, eventProcessorMap.size());
    assertFalse(eventProcessorMap.containsKey(readerIds.get(0)));
    assertTrue(eventProcessorMap.containsKey(readerIds.get(1)));
    // endregion
    // region case 5: failure cases
    readerIds = eventProcessorMap.entrySet().stream().map(Map.Entry::getKey).collect(Collectors.toList());
    distribution = new HashMap<>();
    distribution.put(readerIds.get(0), 2);
    distribution.put(readerIds.get(1), 0);
    distribution.put(reader2, 0);
    distribution.put(reader3, 0);
    readerSegmentDistribution = ReaderSegmentDistribution.builder()
            .readerSegmentDistribution(distribution).unassignedSegments(2).build();
    doThrow(new RuntimeException("reader group throws")).when(readerGroup).getReaderSegmentDistribution();
    // The exception should be handled, with no state change in the event processor group.
    group.rebalance();
    eventProcessorMap = group.getEventProcessorMap();
    assertEquals(2, eventProcessorMap.size());
    assertTrue(eventProcessorMap.containsKey(readerIds.get(0)));
    assertTrue(eventProcessorMap.containsKey(readerIds.get(1)));
    // Now reset the distribution.
    doReturn(readerSegmentDistribution).when(readerGroup).getReaderSegmentDistribution();
    // Throw from the checkpoint store.
    doThrow(new CheckpointStoreException("checkpoint store exception")).when(checkpointStore).addReader(anyString(), anyString(), anyString());
    // The exception should have been thrown and handled.
    group.rebalance();
    eventProcessorMap = group.getEventProcessorMap();
    assertEquals(2, eventProcessorMap.size());
    assertTrue(eventProcessorMap.containsKey(readerIds.get(0)));
    assertTrue(eventProcessorMap.containsKey(readerIds.get(1)));
    // endregion
    // Stop the group and await its termination.
    group.stopAsync();
    group.awaitTerminated();
    // region case 6: rebalance on a terminated group
    // Call rebalance after shutdown so that replaceCell is invoked - this triggers a precondition failure.
    readerIds = eventProcessorMap.entrySet().stream().map(Map.Entry::getKey).collect(Collectors.toList());
    distribution = new HashMap<>();
    distribution.put(readerIds.get(0), 2);
    distribution.put(readerIds.get(1), 2);
    distribution.put(reader2, 0);
    distribution.put(reader3, 0);
    readerSegmentDistribution = ReaderSegmentDistribution.builder()
            .readerSegmentDistribution(distribution).unassignedSegments(0).build();
    Mockito.when(readerGroup.getReaderSegmentDistribution()).thenReturn(readerSegmentDistribution);
    // Calling rebalance on a terminated group results in a precondition failure; the exception is
    // logged and ignored, and no rebalance occurs.
    group.rebalance();
    eventProcessorMap = group.getEventProcessorMap();
    assertEquals(2, eventProcessorMap.size());
    assertTrue(eventProcessorMap.containsKey(readerIds.get(0)));
    assertTrue(eventProcessorMap.containsKey(readerIds.get(1)));
    // endregion
}
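Distilled from the test above, a minimal sketch of the Mockito stubbing needed to drive one rebalance pass against a synthetic distribution. The class name RebalanceStubs and the reader names are illustrative, not from the Pravega sources:

import io.pravega.client.stream.ReaderGroup;
import io.pravega.client.stream.ReaderSegmentDistribution;
import org.mockito.Mockito;

import java.util.HashMap;
import java.util.Map;

final class RebalanceStubs {
    // Returns a mocked ReaderGroup whose distribution snapshot shows one
    // overloaded reader ("readerA") and one idle reader ("readerB").
    static ReaderGroup overloadedGroup() {
        Map<String, Integer> synthetic = new HashMap<>();
        synthetic.put("readerA", 3); // overloaded
        synthetic.put("readerB", 0); // idle
        ReaderSegmentDistribution snapshot = ReaderSegmentDistribution.builder()
                .readerSegmentDistribution(synthetic)
                .unassignedSegments(1)
                .build();
        ReaderGroup readerGroup = Mockito.mock(ReaderGroup.class);
        Mockito.when(readerGroup.getReaderSegmentDistribution()).thenReturn(snapshot);
        return readerGroup;
    }
}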
Use of io.pravega.client.stream.ReaderSegmentDistribution in project pravega by pravega.
In class EventProcessorTest, method testEventProcessorRebalance:
@Test(timeout = 60000)
public void testEventProcessorRebalance() throws Exception {
    final String scope = "scope";
    final String streamName = "stream";
    final String readerGroupName = "readerGroup";
    controller.createScope(scope).join();
    final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(4)).build();
    controller.createStream(scope, streamName, config).join();
    eventSerializer = new EventSerializer<>(new TestSerializer());
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    CheckpointConfig.CheckpointPeriod period = CheckpointConfig.CheckpointPeriod.builder().numEvents(1).numSeconds(1).build();
    CheckpointConfig checkpointConfig = CheckpointConfig.builder().type(CheckpointConfig.Type.Periodic).checkpointPeriod(period).build();
    EventProcessorGroupConfig eventProcessorGroupConfig = EventProcessorGroupConfigImpl.builder()
            .eventProcessorCount(1)
            .readerGroupName(readerGroupName)
            .streamName(streamName)
            .checkpointConfig(checkpointConfig)
            .build();
    LinkedBlockingQueue<Integer> queue1 = new LinkedBlockingQueue<>();
    EventProcessorConfig<TestEvent> eventProcessorConfig1 = EventProcessorConfig.<TestEvent>builder()
            .supplier(() -> new TestEventProcessor2(queue1))
            .serializer(eventSerializer)
            .decider((Throwable e) -> ExceptionHandler.Directive.Stop)
            .config(eventProcessorGroupConfig)
            .minRebalanceIntervalMillis(Duration.ofMillis(100).toMillis())
            .build();
    // Create a group and verify that all events can be written and read by readers in this group.
    EventProcessorSystem system1 = new EventProcessorSystemImpl("Controller", "process1", scope, clientFactory, new ReaderGroupManagerImpl(scope, controller, clientFactory));
    @Cleanup
    EventProcessorGroup<TestEvent> eventProcessorGroup1 = system1.createEventProcessorGroup(eventProcessorConfig1, CheckpointStoreFactory.createInMemoryStore(), executorService());
    eventProcessorGroup1.awaitRunning();
    log.info("first event processor started");
    @Cleanup
    EventStreamWriter<TestEvent> writer = clientFactory.createEventWriter(streamName, eventSerializer, EventWriterConfig.builder().build());
    // Write 10 events and read them back from the queue passed to the first event processor.
    List<Integer> input = IntStream.range(0, 10).boxed().collect(Collectors.toList());
    ConcurrentSkipListSet<Integer> output = new ConcurrentSkipListSet<>();
    for (int val : input) {
        writer.writeEvent(new TestEvent(val));
    }
    writer.flush();
    // Now wait until all the entries are read back.
    for (int i = 0; i < 10; i++) {
        // Read the 10 events back.
        Integer entry = queue1.take();
        output.add(entry);
    }
    assertEquals(10, output.size());
    log.info("first event processor read all the messages");
    LinkedBlockingQueue<Integer> queue2 = new LinkedBlockingQueue<>();
    EventProcessorConfig<TestEvent> eventProcessorConfig2 = EventProcessorConfig.<TestEvent>builder()
            .supplier(() -> new TestEventProcessor2(queue2))
            .serializer(eventSerializer)
            .decider((Throwable e) -> ExceptionHandler.Directive.Stop)
            .config(eventProcessorGroupConfig)
            .minRebalanceIntervalMillis(Duration.ofMillis(100).toMillis())
            .build();
    // Add another system and event processor group (effectively adding a new set of readers to the reader group).
    EventProcessorSystem system2 = new EventProcessorSystemImpl("Controller", "process2", scope, clientFactory, new ReaderGroupManagerImpl(scope, controller, clientFactory));
    @Cleanup
    EventProcessorGroup<TestEvent> eventProcessorGroup2 = system2.createEventProcessorGroup(eventProcessorConfig2, CheckpointStoreFactory.createInMemoryStore(), executorService());
    eventProcessorGroup2.awaitRunning();
    log.info("second event processor started");
    AtomicInteger queue1EntriesFound = new AtomicInteger(0);
    AtomicInteger queue2EntriesFound = new AtomicInteger(0);
    ConcurrentSkipListSet<Integer> output2 = new ConcurrentSkipListSet<>();
    // Wait until a rebalance may have happened.
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory);
    ReaderGroup readerGroup = groupManager.getReaderGroup(readerGroupName);
    AtomicBoolean allAssigned = new AtomicBoolean(false);
    Futures.loop(() -> !allAssigned.get(),
            () -> Futures.delayedFuture(Duration.ofMillis(100), executorService()).thenAccept(v -> {
                ReaderSegmentDistribution distribution = readerGroup.getReaderSegmentDistribution();
                int numberOfReaders = distribution.getReaderSegmentDistribution().size();
                allAssigned.set(numberOfReaders == 2 && distribution.getReaderSegmentDistribution().values().stream().noneMatch(x -> x == 0));
            }), executorService()).join();
    // Write 10 new events.
    for (int val : input) {
        writer.writeEvent(new TestEvent(val));
    }
    writer.flush();
    // Wait until all events are drained, with at least one event read from each queue.
    CompletableFuture.allOf(CompletableFuture.runAsync(() -> {
        while (output2.size() < 10) {
            Integer entry = queue1.poll();
            if (entry != null) {
                log.info("entry read from queue 1: {}", entry);
                queue1EntriesFound.incrementAndGet();
                output2.add(entry);
            } else {
                Exceptions.handleInterrupted(() -> Thread.sleep(100));
            }
        }
    }), CompletableFuture.runAsync(() -> {
        while (output2.size() < 10) {
            Integer entry = queue2.poll();
            if (entry != null) {
                log.info("entry read from queue 2: {}", entry);
                queue2EntriesFound.incrementAndGet();
                output2.add(entry);
            } else {
                Exceptions.handleInterrupted(() -> Thread.sleep(100));
            }
        }
    })).join();
    assertTrue(queue1EntriesFound.get() > 0);
    assertTrue(queue2EntriesFound.get() > 0);
    assertEquals(10, output2.size());
}
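The polling loop in this test can be factored into a reusable helper. A sketch, not from the Pravega sources, using the same Futures.loop / Futures.delayedFuture pattern the test relies on; the class name RebalanceAwait and method name awaitBalanced are assumptions:

import io.pravega.client.stream.ReaderGroup;
import io.pravega.client.stream.ReaderSegmentDistribution;
import io.pravega.common.concurrent.Futures;

import java.time.Duration;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.atomic.AtomicBoolean;

final class RebalanceAwait {
    // Hypothetical helper: polls the reader group every 100 ms until
    // `expectedReaders` readers are present and each holds at least one
    // segment, mirroring the wait loop in the test above.
    static CompletableFuture<Void> awaitBalanced(ReaderGroup readerGroup, int expectedReaders,
                                                 ScheduledExecutorService executor) {
        AtomicBoolean balanced = new AtomicBoolean(false);
        return Futures.loop(() -> !balanced.get(),
                () -> Futures.delayedFuture(Duration.ofMillis(100), executor).thenAccept(v -> {
                    ReaderSegmentDistribution d = readerGroup.getReaderSegmentDistribution();
                    Map<String, Integer> perReader = d.getReaderSegmentDistribution();
                    balanced.set(perReader.size() == expectedReaders
                            && perReader.values().stream().noneMatch(x -> x == 0));
                }), executor);
    }
}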