Use of org.zalando.nakadi.domain.PartitionStatistics in project nakadi by zalando.
From the class PartitionsController, method listPartitions:
@RequestMapping(value = "/event-types/{name}/partitions", method = RequestMethod.GET)
public ResponseEntity<?> listPartitions(@PathVariable("name") final String eventTypeName,
                                        final NativeWebRequest request) {
    LOG.trace("Get partitions endpoint for event-type '{}' is called", eventTypeName);
    try {
        final EventType eventType = eventTypeRepository.findByName(eventTypeName);
        authorizationValidator.authorizeStreamRead(eventType);
        final List<Timeline> timelines = timelineService.getActiveTimelinesOrdered(eventTypeName);
        final List<PartitionStatistics> firstStats = timelineService.getTopicRepository(timelines.get(0))
                .loadTopicStatistics(Collections.singletonList(timelines.get(0)));
        final List<PartitionStatistics> lastStats;
        if (timelines.size() == 1) {
            lastStats = firstStats;
        } else {
            lastStats = timelineService.getTopicRepository(timelines.get(timelines.size() - 1))
                    .loadTopicStatistics(Collections.singletonList(timelines.get(timelines.size() - 1)));
        }
        final List<EventTypePartitionView> result = firstStats.stream().map(first -> {
            final PartitionStatistics last = lastStats.stream()
                    .filter(l -> l.getPartition().equals(first.getPartition()))
                    .findAny().get();
            return new EventTypePartitionView(
                    eventTypeName,
                    first.getPartition(),
                    cursorConverter.convert(first.getFirst()).getOffset(),
                    cursorConverter.convert(last.getLast()).getOffset());
        }).collect(Collectors.toList());
        return ok().body(result);
    } catch (final NoSuchEventTypeException e) {
        return create(Problem.valueOf(NOT_FOUND, "topic not found"), request);
    } catch (final NakadiException e) {
        LOG.error("Could not list partitions. Respond with SERVICE_UNAVAILABLE.", e);
        return create(e.asProblem(), request);
    }
}
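What listPartitions returns is, per partition, the oldest available offset taken from the first (oldest) timeline and the newest offset taken from the last (newest) timeline; the two statistics lists are joined by partition id. A minimal sketch of that join, using a hypothetical Stat value class instead of PartitionStatistics and plain strings instead of EventTypePartitionView:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class PartitionJoinSketch {

    // Hypothetical stand-in for PartitionStatistics: a partition id plus one offset.
    static final class Stat {
        final String partition;
        final long offset;
        Stat(final String partition, final long offset) {
            this.partition = partition;
            this.offset = offset;
        }
    }

    public static void main(final String[] args) {
        // "first" stats come from the oldest timeline, "last" stats from the newest one.
        final List<Stat> firstStats = Arrays.asList(new Stat("0", 0L), new Stat("1", 5L));
        final List<Stat> lastStats = Arrays.asList(new Stat("0", 87L), new Stat("1", 34L));

        // Join the two lists by partition id, the same pattern listPartitions uses.
        final List<String> views = firstStats.stream().map(first -> {
            final Stat last = lastStats.stream()
                    .filter(l -> l.partition.equals(first.partition))
                    .findAny()
                    .orElseThrow(IllegalStateException::new);
            return "partition=" + first.partition
                    + " oldestAvailableOffset=" + first.offset
                    + " newestAvailableOffset=" + last.offset;
        }).collect(Collectors.toList());

        views.forEach(System.out::println);
    }
}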
Use of org.zalando.nakadi.domain.PartitionStatistics in project nakadi by zalando.
From the class KafkaTopicRepositoryTest, method canLoadPartitionStatistics:
@Test
public void canLoadPartitionStatistics() throws Exception {
    final Timeline t1 = mock(Timeline.class);
    when(t1.getTopic()).thenReturn(MY_TOPIC);
    final Timeline t2 = mock(Timeline.class);
    when(t2.getTopic()).thenReturn(ANOTHER_TOPIC);
    final ImmutableList<Timeline> timelines = ImmutableList.of(t1, t2);
    final List<PartitionStatistics> stats = kafkaTopicRepository.loadTopicStatistics(timelines);
    final Set<PartitionStatistics> expected = PARTITIONS.stream().map(p -> {
        final Timeline timeline = p.topic.equals(MY_TOPIC) ? t1 : t2;
        return new KafkaPartitionStatistics(timeline, p.partition, p.earliestOffset, p.latestOffset - 1);
    }).collect(Collectors.toSet());
    assertThat(newHashSet(stats), equalTo(expected));
}
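The "p.latestOffset - 1" in the expected value reflects that Kafka reports an end offset pointing one past the last written record, while the statistics are expected to hold the offset of the last existing record. A sketch of reading both boundaries directly from a Kafka consumer, assuming a broker on localhost:9092 and a topic named my-topic (illustrative only, not Nakadi code):

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetProbeSketch {

    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());

        final TopicPartition tp = new TopicPartition("my-topic", 0); // hypothetical topic

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            final Map<TopicPartition, Long> begin = consumer.beginningOffsets(Collections.singletonList(tp));
            final Map<TopicPartition, Long> end = consumer.endOffsets(Collections.singletonList(tp));

            final long earliest = begin.get(tp); // offset of the oldest readable record
            final long newest = end.get(tp) - 1; // end offset points past the last record
            System.out.println("earliest=" + earliest + ", newest=" + newest);
        }
    }
}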
Use of org.zalando.nakadi.domain.PartitionStatistics in project nakadi by zalando.
From the class TimelineService, method createTimeline:
public void createTimeline(final String eventTypeName, final String storageId)
        throws AccessDeniedException, TimelineException, TopicRepositoryException,
        InconsistentStateException, RepositoryProblemException, DbWriteOperationsBlockedException {
    if (featureToggleService.isFeatureEnabled(FeatureToggleService.Feature.DISABLE_DB_WRITE_OPERATIONS)) {
        throw new DbWriteOperationsBlockedException(
                "Cannot create timeline: write operations on DB are blocked by feature flag.");
    }
    try {
        final EventType eventType = eventTypeCache.getEventType(eventTypeName);
        if (!adminService.isAdmin(AuthorizationService.Operation.WRITE)) {
            final Resource resource = new EventTypeResource(eventTypeName, eventType.getAuthorization());
            throw new AccessDeniedException(AuthorizationService.Operation.ADMIN, resource);
        }
        final Storage storage = storageDbRepository.getStorage(storageId)
                .orElseThrow(() -> new UnableProcessException("No storage with id: " + storageId));
        final Timeline activeTimeline = getActiveTimeline(eventType);
        final TopicRepository currentTopicRepo = topicRepositoryHolder.getTopicRepository(activeTimeline.getStorage());
        final TopicRepository nextTopicRepo = topicRepositoryHolder.getTopicRepository(storage);
        final List<PartitionStatistics> partitionStatistics =
                currentTopicRepo.loadTopicStatistics(Collections.singleton(activeTimeline));
        final String newTopic = nextTopicRepo.createTopic(
                partitionStatistics.size(), eventType.getOptions().getRetentionTime());
        final Timeline nextTimeline = Timeline.createTimeline(
                activeTimeline.getEventType(), activeTimeline.getOrder() + 1, storage, newTopic, new Date());
        switchTimelines(activeTimeline, nextTimeline);
    } catch (final TopicCreationException | ServiceUnavailableException | InternalNakadiException e) {
        throw new TimelineException("Internal service error", e);
    } catch (final NoSuchEventTypeException e) {
        throw new NotFoundException("EventType \"" + eventTypeName + "\" does not exist", e);
    }
}
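Note that the new topic is created with partitionStatistics.size() partitions, i.e. one per statistics entry of the active timeline, so the event type keeps its partition count when it moves to the new storage. The handover can be pictured with the following sketch, which uses hypothetical Stat and TopicRepo stand-ins rather than Nakadi's PartitionStatistics and TopicRepository:

import java.util.Arrays;
import java.util.List;
import java.util.UUID;

public class TimelineSwitchSketch {

    // Hypothetical stand-in for PartitionStatistics: one entry per partition.
    static final class Stat {
        final int partition;
        Stat(final int partition) {
            this.partition = partition;
        }
    }

    // Hypothetical stand-in for TopicRepository.
    interface TopicRepo {
        List<Stat> loadTopicStatistics();
        String createTopic(int partitionCount, long retentionTimeMs);
    }

    static String switchStorage(final TopicRepo current, final TopicRepo next, final long retentionTimeMs) {
        // One statistics entry per partition, so size() is the partition count to carry over.
        final List<Stat> stats = current.loadTopicStatistics();
        return next.createTopic(stats.size(), retentionTimeMs);
    }

    public static void main(final String[] args) {
        final TopicRepo current = new TopicRepo() {
            public List<Stat> loadTopicStatistics() {
                return Arrays.asList(new Stat(0), new Stat(1), new Stat(2));
            }
            public String createTopic(final int partitionCount, final long retentionTimeMs) {
                throw new UnsupportedOperationException();
            }
        };
        final TopicRepo next = new TopicRepo() {
            public List<Stat> loadTopicStatistics() {
                throw new UnsupportedOperationException();
            }
            public String createTopic(final int partitionCount, final long retentionTimeMs) {
                return "topic-" + UUID.randomUUID() + "-" + partitionCount + "p";
            }
        };
        // Example retention of 4 days, in milliseconds.
        System.out.println(switchStorage(current, next, 345_600_000L));
    }
}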
Use of org.zalando.nakadi.domain.PartitionStatistics in project nakadi by zalando.
From the class EventStreamControllerTest, method whenNoCursorsThenLatestOffsetsAreUsed:
@Test
public void whenNoCursorsThenLatestOffsetsAreUsed() throws NakadiException, IOException, InvalidCursorException {
    when(eventTypeRepository.findByName(TEST_EVENT_TYPE_NAME)).thenReturn(EVENT_TYPE);
    final List<PartitionStatistics> tps2 = ImmutableList.of(
            new KafkaPartitionStatistics(timeline, 0, 0, 87),
            new KafkaPartitionStatistics(timeline, 1, 0, 34));
    when(timelineService.getActiveTimeline(any(EventType.class))).thenReturn(timeline);
    when(topicRepositoryMock.loadTopicStatistics(eq(Collections.singletonList(timeline)))).thenReturn(tps2);
    final ArgumentCaptor<EventStreamConfig> configCaptor = ArgumentCaptor.forClass(EventStreamConfig.class);
    final EventStream eventStreamMock = mock(EventStream.class);
    when(eventStreamFactoryMock.createEventStream(any(), any(), configCaptor.capture(), any()))
            .thenReturn(eventStreamMock);
    final StreamingResponseBody responseBody = createStreamingResponseBody(1, 0, 1, 1, 0, null);
    responseBody.writeTo(new ByteArrayOutputStream());
    final EventStreamConfig streamConfig = configCaptor.getValue();
    assertThat(streamConfig.getCursors(),
            equalTo(tps2.stream().map(PartitionStatistics::getLast).collect(Collectors.toList())));
}
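The stream config built by the controller is never returned to the test directly, so the test intercepts it with an ArgumentCaptor at the point where it is handed to the stream factory, and then asserts that the default cursors are the getLast() positions. A stripped-down version of that capture pattern, with hypothetical StreamFactory and startStream stand-ins and plain Mockito/JUnit 4:

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.junit.Test;
import org.mockito.ArgumentCaptor;

public class CaptorSketchTest {

    // Hypothetical collaborator: builds a stream from a config that the caller never sees again.
    interface StreamFactory {
        Runnable createStream(String config);
    }

    // Hypothetical unit under test: derives a default config when the caller passes none.
    static Runnable startStream(final StreamFactory factory, final String maybeConfig) {
        final String config = maybeConfig != null ? maybeConfig : "start-from=latest";
        return factory.createStream(config);
    }

    @Test
    public void whenNoConfigThenLatestIsUsed() {
        final StreamFactory factory = mock(StreamFactory.class);
        final ArgumentCaptor<String> configCaptor = ArgumentCaptor.forClass(String.class);
        when(factory.createStream(configCaptor.capture())).thenReturn(() -> { });

        startStream(factory, null).run();

        // The captured value reveals the internally computed default, just like the Nakadi test.
        assertThat(configCaptor.getValue(), equalTo("start-from=latest"));
    }
}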
Use of org.zalando.nakadi.domain.PartitionStatistics in project nakadi by zalando.
From the class KafkaTopicRepository, method convertToKafkaCursors:
private Map<NakadiCursor, KafkaCursor> convertToKafkaCursors(final List<NakadiCursor> cursors)
        throws ServiceUnavailableException, InvalidCursorException {
    final List<Timeline> timelines = cursors.stream().map(NakadiCursor::getTimeline).distinct().collect(toList());
    final List<PartitionStatistics> statistics = loadTopicStatistics(timelines);
    final Map<NakadiCursor, KafkaCursor> result = new HashMap<>();
    for (final NakadiCursor position : cursors) {
        validateCursorForNulls(position);
        final Optional<PartitionStatistics> partition = statistics.stream()
                .filter(t -> Objects.equals(t.getPartition(), position.getPartition()))
                .filter(t -> Objects.equals(t.getTimeline().getTopic(), position.getTopic()))
                .findAny();
        if (!partition.isPresent()) {
            throw new InvalidCursorException(PARTITION_NOT_FOUND, position);
        }
        final KafkaCursor toCheck = position.asKafkaCursor();
        // Checking oldest position
        final KafkaCursor oldestCursor = KafkaCursor.fromNakadiCursor(partition.get().getBeforeFirst());
        if (toCheck.compareTo(oldestCursor) < 0) {
            throw new InvalidCursorException(UNAVAILABLE, position);
        }
        // Checking newest position
        final KafkaCursor newestPosition = KafkaCursor.fromNakadiCursor(partition.get().getLast());
        if (toCheck.compareTo(newestPosition) > 0) {
            throw new InvalidCursorException(UNAVAILABLE, position);
        } else {
            result.put(position, toCheck);
        }
    }
    return result;
}
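The two comparisons form a closed-range check: assuming getBeforeFirst() denotes the position immediately preceding the oldest available record, a cursor is accepted only if it is not older than that position and not newer than getLast(). The same check reduced to plain long offsets (illustrative only, not Nakadi's KafkaCursor):

public class CursorRangeSketch {

    // Returns true when the offset is still addressable, i.e. within [beforeFirst, last].
    static boolean isAvailable(final long offsetToCheck, final long beforeFirst, final long last) {
        if (offsetToCheck < beforeFirst) {
            return false; // older than anything still retained in the partition
        }
        return offsetToCheck <= last; // beyond the newest offset means not yet written
    }

    public static void main(final String[] args) {
        final long beforeFirst = 9; // position just before the oldest readable record (offset 10)
        final long last = 87;       // offset of the newest record
        System.out.println(isAvailable(5, beforeFirst, last));   // false: already deleted
        System.out.println(isAvailable(42, beforeFirst, last));  // true
        System.out.println(isAvailable(100, beforeFirst, last)); // false: not yet written
    }
}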