Use of org.zalando.nakadi.repository.EventConsumer in project nakadi by zalando.
The class StreamingState, method reconfigureKafkaConsumer.
private void reconfigureKafkaConsumer(final boolean forceSeek) {
    if (eventConsumer == null) {
        throw new IllegalStateException(
                "eventConsumer should not be null when calling reconfigureKafkaConsumer method");
    }
    final Set<EventTypePartition> newAssignment = offsets.keySet().stream()
            .filter(o -> !this.releasingPartitions.containsKey(o))
            .collect(Collectors.toSet());
    if (forceSeek) {
        // Remove all current assignments from the real consumer, so that the
        // reassign below is guaranteed to seek to the sent offsets.
        try {
            eventConsumer.reassign(Collections.emptyList());
        } catch (final NakadiException | InvalidCursorException ex) {
            throw new NakadiRuntimeException(ex);
        }
    }
    final Set<EventTypePartition> currentAssignment = eventConsumer.getAssignment();
    getLog().info("Changing kafka assignment from {} to {}",
            Arrays.deepToString(currentAssignment.toArray()),
            Arrays.deepToString(newAssignment.toArray()));
    if (!currentAssignment.equals(newAssignment)) {
        try {
            final Map<EventTypePartition, NakadiCursor> beforeFirst = getBeforeFirstCursors(newAssignment);
            final List<NakadiCursor> cursors = newAssignment.stream()
                    .map(pk -> {
                        final NakadiCursor beforeFirstAvailable = beforeFirst.get(pk);
                        // Check that the current cursor is still available in storage.
                        offsets.get(pk).ensureDataAvailable(beforeFirstAvailable);
                        return offsets.get(pk).getSentOffset();
                    })
                    .collect(Collectors.toList());
            eventConsumer.reassign(cursors);
        } catch (final NakadiException | InvalidCursorException ex) {
            throw new NakadiRuntimeException(ex);
        }
    }
}
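The method touches the consumer through only two calls, getAssignment and reassign. A minimal sketch of that contract, inferred purely from the calls above rather than from the real org.zalando.nakadi.repository.EventConsumer interface (which may expose more), could look like this:

import java.util.Collection;
import java.util.Set;

// Hypothetical reduction of the consumer contract used by
// reconfigureKafkaConsumer; names and signatures are inferred from the
// snippet above, not copied from the real Nakadi interface.
interface ReassignableConsumer {
    // The (event type, partition) pairs currently assigned to this consumer.
    Set<EventTypePartition> getAssignment();

    // Replaces the whole assignment, seeking each partition to the given
    // cursor. An empty collection drops all partitions, which is how the
    // forceSeek branch guarantees that the subsequent reassign re-seeks.
    void reassign(Collection<NakadiCursor> cursors) throws NakadiException, InvalidCursorException;
}

Under this reading, forceSeek works by first clearing the assignment so that currentAssignment can never equal newAssignment, forcing the second reassign to run and seek every partition to its last sent offset.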
Use of org.zalando.nakadi.repository.EventConsumer in project nakadi by zalando.
The class EventStreamController, method streamEvents.
@RequestMapping(value = "/event-types/{name}/events", method = RequestMethod.GET)
public StreamingResponseBody streamEvents(
        @PathVariable("name") final String eventTypeName,
        @Nullable @RequestParam(value = "batch_limit", required = false) final Integer batchLimit,
        @Nullable @RequestParam(value = "stream_limit", required = false) final Integer streamLimit,
        @Nullable @RequestParam(value = "batch_flush_timeout", required = false) final Integer batchTimeout,
        @Nullable @RequestParam(value = "stream_timeout", required = false) final Integer streamTimeout,
        @Nullable @RequestParam(value = "stream_keep_alive_limit", required = false) final Integer streamKeepAliveLimit,
        @Nullable @RequestHeader(name = "X-nakadi-cursors", required = false) final String cursorsStr,
        final HttpServletRequest request,
        final HttpServletResponse response,
        final Client client) {
    final String flowId = FlowIdUtils.peek();
    return outputStream -> {
        FlowIdUtils.push(flowId);
        if (blacklistService.isConsumptionBlocked(eventTypeName, client.getClientId())) {
            writeProblemResponse(response, outputStream,
                    Problem.valueOf(Response.Status.FORBIDDEN, "Application or event type is blocked"));
            return;
        }
        final AtomicBoolean connectionReady = closedConnectionsCrutch.listenForConnectionClose(request);
        Counter consumerCounter = null;
        EventStream eventStream = null;
        List<ConnectionSlot> connectionSlots = ImmutableList.of();
        final AtomicBoolean needCheckAuthorization = new AtomicBoolean(false);
        LOG.info("[X-NAKADI-CURSORS] \"{}\" {}", eventTypeName, Optional.ofNullable(cursorsStr).orElse("-"));
        try (Closeable ignore = eventTypeChangeListener.registerListener(
                et -> needCheckAuthorization.set(true), Collections.singletonList(eventTypeName))) {
            final EventType eventType = eventTypeRepository.findByName(eventTypeName);
            authorizeStreamRead(eventTypeName);
            // Validate parameters.
            final EventStreamConfig streamConfig = EventStreamConfig.builder()
                    .withBatchLimit(batchLimit)
                    .withStreamLimit(streamLimit)
                    .withBatchTimeout(batchTimeout)
                    .withStreamTimeout(streamTimeout)
                    .withStreamKeepAliveLimit(streamKeepAliveLimit)
                    .withEtName(eventTypeName)
                    .withConsumingClient(client)
                    .withCursors(getStreamingStart(eventType, cursorsStr))
                    .withMaxMemoryUsageBytes(maxMemoryUsageBytes)
                    .build();
            // Acquire connection slots to limit the number of simultaneous connections from one client.
            if (featureToggleService.isFeatureEnabled(LIMIT_CONSUMERS_NUMBER)) {
                final List<String> partitions = streamConfig.getCursors().stream()
                        .map(NakadiCursor::getPartition)
                        .collect(Collectors.toList());
                connectionSlots = consumerLimitingService.acquireConnectionSlots(
                        client.getClientId(), eventTypeName, partitions);
            }
            consumerCounter = metricRegistry.counter(metricNameFor(eventTypeName, CONSUMERS_COUNT_METRIC_NAME));
            consumerCounter.inc();
            final String kafkaQuotaClientId = getKafkaQuotaClientId(eventTypeName, client);
            response.setStatus(HttpStatus.OK.value());
            response.setHeader("Warning", "299 - nakadi - the Low-level API is deprecated and will "
                    + "be removed from a future release. Please consider migrating to the Subscriptions API.");
            response.setContentType("application/x-json-stream");
            final EventConsumer eventConsumer =
                    timelineService.createEventConsumer(kafkaQuotaClientId, streamConfig.getCursors());
            final String bytesFlushedMetricName =
                    MetricUtils.metricNameForLoLAStream(client.getClientId(), eventTypeName);
            final Meter bytesFlushedMeter = this.streamMetrics.meter(bytesFlushedMetricName);
            eventStream = eventStreamFactory.createEventStream(
                    outputStream, eventConsumer, streamConfig, bytesFlushedMeter);
            // Flush the status code to the client.
            outputStream.flush();
            eventStream.streamEvents(connectionReady, () -> {
                if (needCheckAuthorization.getAndSet(false)) {
                    authorizeStreamRead(eventTypeName);
                }
            });
        } catch (final UnparseableCursorException e) {
            LOG.debug("Incorrect syntax of X-nakadi-cursors header: {}. Respond with BAD_REQUEST.",
                    e.getCursors(), e);
            writeProblemResponse(response, outputStream, BAD_REQUEST, e.getMessage());
        } catch (final NoSuchEventTypeException e) {
            writeProblemResponse(response, outputStream, NOT_FOUND, "topic not found");
        } catch (final NoConnectionSlotsException e) {
            LOG.debug("Connection creation failed due to exceeding max connection count");
            writeProblemResponse(response, outputStream, e.asProblem());
        } catch (final NakadiException e) {
            LOG.error("Error while trying to stream events.", e);
            writeProblemResponse(response, outputStream, e.asProblem());
        } catch (final InvalidCursorException e) {
            writeProblemResponse(response, outputStream, PRECONDITION_FAILED, e.getMessage());
        } catch (final AccessDeniedException e) {
            writeProblemResponse(response, outputStream, FORBIDDEN, e.explain());
        } catch (final Exception e) {
            LOG.error("Error while trying to stream events. Respond with INTERNAL_SERVER_ERROR.", e);
            writeProblemResponse(response, outputStream, INTERNAL_SERVER_ERROR, e.getMessage());
        } finally {
            connectionReady.set(false);
            consumerLimitingService.releaseConnectionSlots(connectionSlots);
            if (consumerCounter != null) {
                consumerCounter.dec();
            }
            if (eventStream != null) {
                eventStream.close();
            }
            try {
                outputStream.flush();
            } finally {
                outputStream.close();
            }
        }
    };
}
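For illustration, here is how a client might consume the endpoint above. This is a minimal sketch assuming a Nakadi instance on localhost:8080 and a hypothetical event type name order.created, with authentication omitted; it uses only the query parameters and the X-nakadi-cursors header that the controller declares. Whether "BEGIN" is a valid starting offset depends on the deployment.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.stream.Stream;

public final class LolaStreamReader {
    public static void main(final String[] args) throws Exception {
        final HttpClient client = HttpClient.newHttpClient();
        final HttpRequest request = HttpRequest.newBuilder()
                // Host, port and event type name are placeholders.
                .uri(URI.create("http://localhost:8080/event-types/order.created/events"
                        + "?batch_limit=1&stream_timeout=60"))
                // Optional: resume from explicit cursors instead of the defaults.
                .header("X-nakadi-cursors", "[{\"partition\":\"0\",\"offset\":\"BEGIN\"}]")
                .build();

        // application/x-json-stream delivers one JSON batch per line, so the
        // response body can be consumed line by line until the server closes it.
        final HttpResponse<Stream<String>> response =
                client.send(request, HttpResponse.BodyHandlers.ofLines());
        response.body().forEach(System.out::println);
    }
}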
Use of org.zalando.nakadi.repository.EventConsumer in project nakadi by zalando.
The class EventStreamControllerTest, method whenNormalCaseThenParametersArePassedToConfigAndStreamStarted.
@Test
public void whenNormalCaseThenParametersArePassedToConfigAndStreamStarted() throws Exception {
    final EventConsumer eventConsumerMock = mock(EventConsumer.class);
    when(eventTypeRepository.findByName(TEST_EVENT_TYPE_NAME)).thenReturn(EVENT_TYPE);
    when(timelineService.createEventConsumer(
            eq(KAFKA_CLIENT_ID),
            eq(ImmutableList.of(NakadiCursor.of(timeline, "0", "000000000000000000")))))
            .thenReturn(eventConsumerMock);
    when(timelineService.getActiveTimeline(eq(EVENT_TYPE))).thenReturn(timeline);

    final ArgumentCaptor<Integer> statusCaptor = getStatusCaptor();
    final ArgumentCaptor<String> contentTypeCaptor = getContentTypeCaptor();
    final ArgumentCaptor<EventStreamConfig> configCaptor = ArgumentCaptor.forClass(EventStreamConfig.class);
    final EventStream eventStreamMock = mock(EventStream.class);
    when(eventStreamFactoryMock.createEventStream(any(), any(), configCaptor.capture(), any()))
            .thenReturn(eventStreamMock);

    final StreamingResponseBody responseBody = createStreamingResponseBody(
            1, 2, 3, 4, 5, "[{\"partition\":\"0\",\"offset\":\"000000000000000000\"}]");
    final OutputStream outputStream = mock(OutputStream.class);
    responseBody.writeTo(outputStream);

    final EventStreamConfig streamConfig = configCaptor.getValue();
    assertThat(streamConfig, equalTo(EventStreamConfig.builder()
            .withCursors(ImmutableList.of(NakadiCursor.of(timeline, "0", "000000000000000000")))
            .withBatchLimit(1)
            .withStreamLimit(2)
            .withBatchTimeout(3)
            .withStreamTimeout(4)
            .withStreamKeepAliveLimit(5)
            .build()));
    assertThat(statusCaptor.getValue(), equalTo(HttpStatus.OK.value()));
    assertThat(contentTypeCaptor.getValue(), equalTo("application/x-json-stream"));
    verify(timelineService, times(1)).createEventConsumer(
            eq(KAFKA_CLIENT_ID),
            eq(ImmutableList.of(NakadiCursor.of(timeline, "0", "000000000000000000"))));
    verify(eventStreamFactoryMock, times(1))
            .createEventStream(eq(outputStream), eq(eventConsumerMock), eq(streamConfig), any());
    verify(eventStreamMock, times(1)).streamEvents(any(), any());
    verify(outputStream, times(2)).flush();
    verify(outputStream, times(1)).close();
}
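The test drives the controller through a createStreamingResponseBody helper that is not shown here. Judging by the controller signature in the previous example, it presumably just forwards the six arguments along with test fixtures; a purely hypothetical sketch, where controller, requestMock, responseMock and FULL_ACCESS_CLIENT are assumed fixture names:

// Hypothetical sketch of the helper; the real test class may define it
// differently. The argument order mirrors EventStreamController.streamEvents,
// which is why (1, 2, 3, 4, 5) map to batch_limit, stream_limit,
// batch_flush_timeout, stream_timeout and stream_keep_alive_limit above.
private StreamingResponseBody createStreamingResponseBody(
        final Integer batchLimit, final Integer streamLimit, final Integer batchTimeout,
        final Integer streamTimeout, final Integer streamKeepAliveLimit, final String cursorsStr) {
    return controller.streamEvents(TEST_EVENT_TYPE_NAME, batchLimit, streamLimit,
            batchTimeout, streamTimeout, streamKeepAliveLimit, cursorsStr,
            requestMock, responseMock, FULL_ACCESS_CLIENT);
}

Note that the two expected flush() calls match the controller's flow: one flush right after the status code is written, and one in the finally block before the stream is closed.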