Usage of org.zalando.nakadi.domain.PartitionEndStatistics in the zalando/nakadi project.
Example: method cursorsLag of class CursorOperationsService.
/**
 * Computes, for every given cursor, the lag on its partition: the oldest available position,
 * the newest available position, and the distance from the cursor to that newest position.
 *
 * @param eventTypeName name of the event type whose active timelines are inspected
 * @param cursors       cursors to compute lag for; each must reference an existing partition
 * @return one {@link NakadiCursorLag} per input cursor, in the same order as {@code cursors}
 * @throws InvalidCursorOperation with {@code PARTITION_NOT_FOUND} when a cursor's partition is
 *                                absent from the loaded partition statistics
 */
public List<NakadiCursorLag> cursorsLag(final String eventTypeName, final List<NakadiCursor> cursors)
        throws InvalidCursorOperation {
    try {
        final List<Timeline> timelines = timelineService.getActiveTimelinesOrdered(eventTypeName);
        // Next 2 calls could be optimized to 1 storage call, instead of possible 2 calls.
        // But it is simpler not to do anything, cause timelines are not switched every day and almost all the time
        // (except retention time after switch) there will be only 1 active timeline, and this option is covered.
        final List<PartitionStatistics> oldestStats = getStatsForTimeline(timelines.get(0));
        final List<PartitionStatistics> newestStats = timelines.size() == 1 ? oldestStats
                : getStatsForTimeline(timelines.get(timelines.size() - 1));
        return cursors.stream().map(c -> {
            // Locate this cursor's partition in both the oldest and the newest timeline statistics.
            final PartitionStatistics oldestStat = oldestStats.stream()
                    .filter(item -> item.getPartition().equalsIgnoreCase(c.getPartition()))
                    .findAny()
                    .orElseThrow(() -> new InvalidCursorOperation(PARTITION_NOT_FOUND));
            NakadiCursor newestPosition = newestStats.stream()
                    .filter(item -> item.getPartition().equalsIgnoreCase(c.getPartition()))
                    .map(PartitionEndStatistics::getLast)
                    .findAny()
                    .orElseThrow(() -> new InvalidCursorOperation(PARTITION_NOT_FOUND));
            // Fall back through older timelines while the newest position is not countable
            // (numberOfEventsBeforeCursor returning -1 — presumably the timeline holds no
            // events at that position; TODO confirm against numberOfEventsBeforeCursor).
            while (numberOfEventsBeforeCursor(newestPosition) == -1) {
                final int prevOrder = newestPosition.getTimeline().getOrder() - 1;
                final Timeline prevTimeline = timelines.stream()
                        .filter(t -> t.getOrder() == prevOrder)
                        .findAny()
                        .orElse(null);
                if (null == prevTimeline) {
                    // No older timeline exists — keep the current newest position as-is.
                    break;
                }
                // We moved back, so timeline definitely have latest position set
                newestPosition = prevTimeline.getLatestPosition()
                        .toNakadiCursor(prevTimeline, newestPosition.getPartition());
            }
            return new NakadiCursorLag(oldestStat.getFirst(), newestPosition, calculateDistance(c, newestPosition));
        }).collect(Collectors.toList());
    } catch (final NakadiException e) {
        // Wrap checked storage/lookup failures into the project's runtime exception, keeping the cause.
        throw new MyNakadiRuntimeException1("error", e);
    }
}
Usage of org.zalando.nakadi.domain.PartitionEndStatistics in the zalando/nakadi project.
Example: method loadPartitionEndStatistics of class SubscriptionService.
/**
 * Loads the end (newest) partition statistics for the active timelines of the given event types.
 *
 * <p>Timelines are grouped by the topic repository that stores them so that each repository is
 * queried exactly once.
 *
 * @param eventTypes event types whose active timelines should be inspected
 * @return end statistics for every partition of every active timeline
 * @throws ServiceUnavailableException when a topic repository fails to provide the statistics
 */
private List<PartitionEndStatistics> loadPartitionEndStatistics(final Collection<EventType> eventTypes)
        throws ServiceUnavailableException {
    // Group each event type's active timeline under its storage repository.
    final Map<TopicRepository, List<Timeline>> timelinesPerRepository = new HashMap<>();
    for (final EventType eventType : eventTypes) {
        final Timeline activeTimeline = timelineService.getActiveTimeline(eventType);
        timelinesPerRepository
                .computeIfAbsent(timelineService.getTopicRepository(activeTimeline), repo -> new ArrayList<>())
                .add(activeTimeline);
    }
    // One statistics call per repository, accumulating all partition results.
    final List<PartitionEndStatistics> result = new ArrayList<>();
    for (final Map.Entry<TopicRepository, List<Timeline>> entry : timelinesPerRepository.entrySet()) {
        result.addAll(entry.getKey().loadTopicEndStatistics(entry.getValue()));
    }
    return result;
}
Usage of org.zalando.nakadi.domain.PartitionEndStatistics in the zalando/nakadi project.
Example: method loadTopicEndStatistics of class KafkaTopicRepository.
/**
 * Fetches, for every partition of every given timeline's topic, the offset of the last
 * existing event.
 *
 * @param timelines timelines whose Kafka topics are inspected
 * @return end statistics for each partition, tagged with the timeline it belongs to
 * @throws ServiceUnavailableException when the offsets cannot be fetched from Kafka
 */
@Override
public List<PartitionEndStatistics> loadTopicEndStatistics(final Collection<Timeline> timelines)
        throws ServiceUnavailableException {
    try (Consumer<byte[], byte[]> consumer = kafkaFactory.getConsumer()) {
        // Remember which timeline owns each kafka partition, so results can be tagged later.
        final Map<TopicPartition, Timeline> timelineByPartition = new HashMap<>();
        for (final Timeline timeline : timelines) {
            consumer.partitionsFor(timeline.getTopic())
                    .forEach(p -> timelineByPartition.put(new TopicPartition(p.topic(), p.partition()), timeline));
        }
        final List<TopicPartition> assignedPartitions = newArrayList(timelineByPartition.keySet());
        consumer.assign(assignedPartitions);
        consumer.seekToEnd(assignedPartitions);
        final List<PartitionEndStatistics> result = newArrayList();
        for (final Map.Entry<TopicPartition, Timeline> entry : timelineByPartition.entrySet()) {
            final TopicPartition partition = entry.getKey();
            // position() after seekToEnd is the next offset to be written; the last existing
            // event therefore sits at position - 1.
            result.add(new KafkaPartitionEndStatistics(
                    entry.getValue(), partition.partition(), consumer.position(partition) - 1));
        }
        return result;
    } catch (final Exception e) {
        throw new ServiceUnavailableException("Error occurred when fetching partitions offsets", e);
    }
}
Usage of org.zalando.nakadi.domain.PartitionEndStatistics in the zalando/nakadi project.
Example: test method canLoadPartitionEndStatistics of class KafkaTopicRepositoryTest.
/**
 * Verifies that loadTopicEndStatistics reports, for every known partition of both topics,
 * the latest existing offset (latestOffset - 1) tagged with the owning timeline.
 */
@Test
public void canLoadPartitionEndStatistics() throws Exception {
    // One mocked timeline per test topic.
    final Timeline firstTimeline = mock(Timeline.class);
    when(firstTimeline.getTopic()).thenReturn(MY_TOPIC);
    final Timeline secondTimeline = mock(Timeline.class);
    when(secondTimeline.getTopic()).thenReturn(ANOTHER_TOPIC);

    final List<PartitionEndStatistics> actual =
            kafkaTopicRepository.loadTopicEndStatistics(ImmutableList.of(firstTimeline, secondTimeline));

    // Build the expected statistics from the test fixture partitions.
    final Set<PartitionEndStatistics> expected = PARTITIONS.stream()
            .map(p -> new KafkaPartitionEndStatistics(
                    p.topic.equals(MY_TOPIC) ? firstTimeline : secondTimeline,
                    p.partition,
                    p.latestOffset - 1))
            .collect(Collectors.toSet());
    assertThat(newHashSet(actual), equalTo(expected));
}
Usage of org.zalando.nakadi.domain.PartitionEndStatistics in the zalando/nakadi project.
Example: test method whenGetSubscriptionStatThenOk of class SubscriptionControllerTest.
/**
 * Verifies that the subscription stats endpoint returns 200 and reports, for a single assigned
 * partition, the stream id, the assignment state and the number of unconsumed events
 * (as stubbed via cursorOperationsService.calculateDistance).
 */
@Test
public void whenGetSubscriptionStatThenOk() throws Exception {
    // Subscription over one event type, with partition "0" assigned to session "xz".
    final Subscription subscription = builder().withEventType(TIMELINE.getEventType()).build();
    final Collection<Partition> partitions = Collections.singleton(
            new Partition(TIMELINE.getEventType(), "0", "xz", null, Partition.State.ASSIGNED));
    final ZkSubscriptionNode zkSubscriptionNode =
            new ZkSubscriptionNode(partitions, Arrays.asList(new Session("xz", 0)));
    when(subscriptionRepository.getSubscription(subscription.getId())).thenReturn(subscription);
    when(zkSubscriptionClient.getZkSubscriptionNodeLocked()).thenReturn(Optional.of(zkSubscriptionNode));
    // Committed offset for partition "0" is "3".
    final SubscriptionCursorWithoutToken currentOffset =
            new SubscriptionCursorWithoutToken(TIMELINE.getEventType(), "0", "3");
    final EventTypePartition etp = new EventTypePartition(TIMELINE.getEventType(), "0");
    final Map<EventTypePartition, SubscriptionCursorWithoutToken> offsets = new HashMap<>();
    offsets.put(etp, currentOffset);
    when(zkSubscriptionClient.getOffsets(Collections.singleton(etp))).thenReturn(offsets);
    when(eventTypeRepository.findByName(TIMELINE.getEventType()))
            .thenReturn(EventTypeTestBuilder.builder().name(TIMELINE.getEventType()).build());
    // The topic repository reports 13 as the newest offset of the partition.
    final List<PartitionEndStatistics> statistics =
            Collections.singletonList(new KafkaPartitionEndStatistics(TIMELINE, 0, 13));
    when(topicRepository.loadTopicEndStatistics(eq(Collections.singletonList(TIMELINE)))).thenReturn(statistics);
    final NakadiCursor currentCursor = mock(NakadiCursor.class);
    when(currentCursor.getEventTypePartition()).thenReturn(new EventTypePartition(TIMELINE.getEventType(), "0"));
    when(cursorConverter.convert((List<SubscriptionCursorWithoutToken>) any()))
            .thenReturn(Collections.singletonList(currentCursor));
    // Distance between the committed cursor and the partition end is stubbed as 10.
    when(cursorOperationsService.calculateDistance(eq(currentCursor), eq(statistics.get(0).getLast())))
            .thenReturn(10L);
    // Expected payload: partition "0" assigned to "xz" with 10 unconsumed events.
    final List<SubscriptionEventTypeStats> expectedStats = Collections.singletonList(
            new SubscriptionEventTypeStats(TIMELINE.getEventType(), Collections.singletonList(
                    new SubscriptionEventTypeStats.Partition("0", "assigned", 10L, "xz", AUTO))));
    getSubscriptionStats(subscription.getId())
            .andExpect(status().isOk())
            .andExpect(content().string(TestUtils.JSON_TEST_HELPER.matchesObject(new ItemsWrapper<>(expectedStats))));
}
Aggregations