Use of org.zalando.nakadi.security.Client in project nakadi by zalando.
The class StreamingStateTest, method prepareMocks:
@Before
public void prepareMocks() throws Exception {
    state = new StreamingState();

    final StreamingContext contextMock = mock(StreamingContext.class);
    when(contextMock.getCursorComparator()).thenReturn(Comparator.comparing(NakadiCursor::getOffset));
    when(contextMock.getSessionId()).thenReturn(SESSION_ID);
    when(contextMock.isInState(Mockito.same(state))).thenReturn(true);

    subscription = mock(Subscription.class);
    when(contextMock.getSubscription()).thenReturn(subscription);

    timelineService = mock(TimelineService.class);
    when(contextMock.getTimelineService()).thenReturn(timelineService);

    final MetricRegistry metricRegistry = mock(MetricRegistry.class);
    when(metricRegistry.register(any(), any())).thenReturn(null);
    when(contextMock.getMetricRegistry()).thenReturn(metricRegistry);

    zkMock = mock(ZkSubscriptionClient.class);
    when(contextMock.getZkClient()).thenReturn(zkMock);

    cursorConverter = mock(CursorConverter.class);
    when(contextMock.getCursorConverter()).thenReturn(cursorConverter);

    final Client client = mock(Client.class);
    when(client.getClientId()).thenReturn("consumingAppId");
    final StreamParameters spMock = createStreamParameters(1000, 100L, 100, 100L, 100, 100, 100, client);
    when(contextMock.getParameters()).thenReturn(spMock);

    state.setContext(contextMock, "test");
}
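For orientation, here is a minimal, self-contained sketch of the Mockito pattern the fixture above relies on: stubbing only the two Client methods the streaming code actually reads (getClientId() and getRealm()). The test class name and the realm value are hypothetical.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.junit.Assert;
import org.junit.Test;
import org.zalando.nakadi.security.Client;

public class ClientMockSketchTest {

    @Test
    public void mockedClientAnswersStubbedValues() {
        // Stub only what the code under test reads: the client id feeds metric
        // and KPI names, the realm is reported in the streamed-data KPI event.
        final Client client = mock(Client.class);
        when(client.getClientId()).thenReturn("consumingAppId");
        when(client.getRealm()).thenReturn("/services"); // hypothetical realm value

        Assert.assertEquals("consumingAppId", client.getClientId());
        Assert.assertEquals("/services", client.getRealm());
    }
}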
Use of org.zalando.nakadi.security.Client in project nakadi by zalando.
The class StreamingState, method publishKpi:
private void publishKpi(final String eventTypeName) {
    final Client client = getContext().getParameters().getConsumingClient();
    final NakadiKpiPublisher kpiPublisher = getContext().getKpiPublisher();
    final long bytes = kpiDataPerEventType.get(eventTypeName).getAndResetBytesSent();
    final long count = kpiDataPerEventType.get(eventTypeName).getAndResetNumberOfEventsSent();
    final String appNameHashed = kpiPublisher.hash(client.getClientId());
    getLog().info("[SLO] [streamed-data] api={} eventTypeName={} app={} appHashed={} "
            + "numberOfEvents={} bytesStreamed={} subscription={}", "hila", eventTypeName,
            client.getClientId(), appNameHashed, count, bytes, getContext().getSubscription().getId());
    kpiPublisher.publish(getContext().getKpiDataStreamedEventType(), () -> new JSONObject()
            .put("api", "hila").put("subscription", getContext().getSubscription().getId())
            .put("event_type", eventTypeName).put("app", client.getClientId())
            .put("app_hashed", appNameHashed).put("token_realm", client.getRealm())
            .put("number_of_events", count).put("bytes_streamed", bytes));
}
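For reference, a minimal sketch of the payload shape that publishKpi() submits. Only the field names come from the method above; every value below is a made-up sample.

import org.json.JSONObject;

public class KpiPayloadSketch {
    public static void main(final String[] args) {
        // Same field names as the JSONObject built in publishKpi() above;
        // all values are hypothetical samples.
        final JSONObject kpiEvent = new JSONObject()
                .put("api", "hila")
                .put("subscription", "16120729-4a57-4607-ad3a-d526a4590e75") // hypothetical id
                .put("event_type", "order.ORDER_RECEIVED")                   // hypothetical name
                .put("app", "consumingAppId")
                .put("app_hashed", "3e9b0c86")                               // hypothetical hash
                .put("token_realm", "/services")                             // hypothetical realm
                .put("number_of_events", 42L)
                .put("bytes_streamed", 10240L);
        System.out.println(kpiEvent.toString(2));
    }
}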
Use of org.zalando.nakadi.security.Client in project nakadi by zalando.
The class EventStreamController, method streamEvents:
@RequestMapping(value = "/event-types/{name}/events", method = RequestMethod.GET)
public StreamingResponseBody streamEvents(
        @PathVariable("name") final String eventTypeName,
        @Nullable @RequestParam(value = "batch_limit", required = false) final Integer batchLimit,
        @Nullable @RequestParam(value = "stream_limit", required = false) final Integer streamLimit,
        @Nullable @RequestParam(value = "batch_flush_timeout", required = false) final Integer batchTimeout,
        @Nullable @RequestParam(value = "stream_timeout", required = false) final Integer streamTimeout,
        @Nullable @RequestParam(value = "stream_keep_alive_limit", required = false) final Integer streamKeepAliveLimit,
        @Nullable @RequestHeader(name = "X-nakadi-cursors", required = false) final String cursorsStr,
        final HttpServletRequest request, final HttpServletResponse response, final Client client) {
    final String flowId = FlowIdUtils.peek();
    return outputStream -> {
        FlowIdUtils.push(flowId);
        if (blacklistService.isConsumptionBlocked(eventTypeName, client.getClientId())) {
            writeProblemResponse(response, outputStream,
                    Problem.valueOf(Response.Status.FORBIDDEN, "Application or event type is blocked"));
            return;
        }
        final AtomicBoolean connectionReady = closedConnectionsCrutch.listenForConnectionClose(request);
        Counter consumerCounter = null;
        EventStream eventStream = null;
        List<ConnectionSlot> connectionSlots = ImmutableList.of();
        final AtomicBoolean needCheckAuthorization = new AtomicBoolean(false);
        LOG.info("[X-NAKADI-CURSORS] \"{}\" {}", eventTypeName, Optional.ofNullable(cursorsStr).orElse("-"));
        try (Closeable ignore = eventTypeChangeListener.registerListener(
                et -> needCheckAuthorization.set(true), Collections.singletonList(eventTypeName))) {
            final EventType eventType = eventTypeRepository.findByName(eventTypeName);
            authorizeStreamRead(eventTypeName);
            // validate parameters
            final EventStreamConfig streamConfig = EventStreamConfig.builder()
                    .withBatchLimit(batchLimit)
                    .withStreamLimit(streamLimit)
                    .withBatchTimeout(batchTimeout)
                    .withStreamTimeout(streamTimeout)
                    .withStreamKeepAliveLimit(streamKeepAliveLimit)
                    .withEtName(eventTypeName)
                    .withConsumingClient(client)
                    .withCursors(getStreamingStart(eventType, cursorsStr))
                    .withMaxMemoryUsageBytes(maxMemoryUsageBytes)
                    .build();
            // acquire connection slots to limit the number of simultaneous connections from one client
            if (featureToggleService.isFeatureEnabled(LIMIT_CONSUMERS_NUMBER)) {
                final List<String> partitions = streamConfig.getCursors().stream()
                        .map(NakadiCursor::getPartition).collect(Collectors.toList());
                connectionSlots = consumerLimitingService.acquireConnectionSlots(
                        client.getClientId(), eventTypeName, partitions);
            }
            consumerCounter = metricRegistry.counter(metricNameFor(eventTypeName, CONSUMERS_COUNT_METRIC_NAME));
            consumerCounter.inc();
            final String kafkaQuotaClientId = getKafkaQuotaClientId(eventTypeName, client);
            response.setStatus(HttpStatus.OK.value());
            response.setHeader("Warning", "299 - nakadi - the Low-level API is deprecated and will "
                    + "be removed from a future release. Please consider migrating to the Subscriptions API.");
            response.setContentType("application/x-json-stream");
            final EventConsumer eventConsumer = timelineService.createEventConsumer(
                    kafkaQuotaClientId, streamConfig.getCursors());
            final String bytesFlushedMetricName = MetricUtils.metricNameForLoLAStream(
                    client.getClientId(), eventTypeName);
            final Meter bytesFlushedMeter = this.streamMetrics.meter(bytesFlushedMetricName);
            eventStream = eventStreamFactory.createEventStream(
                    outputStream, eventConsumer, streamConfig, bytesFlushedMeter);
            // Flush status code to client
            outputStream.flush();
            eventStream.streamEvents(connectionReady, () -> {
                if (needCheckAuthorization.getAndSet(false)) {
                    authorizeStreamRead(eventTypeName);
                }
            });
        } catch (final UnparseableCursorException e) {
            LOG.debug("Incorrect syntax of X-nakadi-cursors header: {}. Respond with BAD_REQUEST.",
                    e.getCursors(), e);
            writeProblemResponse(response, outputStream, BAD_REQUEST, e.getMessage());
        } catch (final NoSuchEventTypeException e) {
            writeProblemResponse(response, outputStream, NOT_FOUND, "topic not found");
        } catch (final NoConnectionSlotsException e) {
            LOG.debug("Connection creation failed due to exceeding max connection count");
            writeProblemResponse(response, outputStream, e.asProblem());
        } catch (final NakadiException e) {
            LOG.error("Error while trying to stream events.", e);
            writeProblemResponse(response, outputStream, e.asProblem());
        } catch (final InvalidCursorException e) {
            writeProblemResponse(response, outputStream, PRECONDITION_FAILED, e.getMessage());
        } catch (final AccessDeniedException e) {
            writeProblemResponse(response, outputStream, FORBIDDEN, e.explain());
        } catch (final Exception e) {
            LOG.error("Error while trying to stream events. Respond with INTERNAL_SERVER_ERROR.", e);
            writeProblemResponse(response, outputStream, INTERNAL_SERVER_ERROR, e.getMessage());
        } finally {
            connectionReady.set(false);
            consumerLimitingService.releaseConnectionSlots(connectionSlots);
            if (consumerCounter != null) {
                consumerCounter.dec();
            }
            if (eventStream != null) {
                eventStream.close();
            }
            try {
                outputStream.flush();
            } finally {
                outputStream.close();
            }
        }
    };
}
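To illustrate the consumer side, here is a minimal sketch of calling this endpoint over plain HTTP. The host, event type, token, and cursor values are hypothetical; the query parameter names map one-to-one to the @RequestParam arguments of streamEvents() above, and the X-nakadi-cursors header matches the @RequestHeader it declares.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class LowLevelStreamConsumerSketch {
    public static void main(final String[] args) throws Exception {
        // Hypothetical host and event type name.
        final URL url = new URL("https://nakadi.example.org/event-types/order.ORDER_RECEIVED/events"
                + "?batch_limit=10&batch_flush_timeout=5&stream_timeout=60");
        final HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        connection.setRequestProperty("Authorization", "Bearer <token>"); // token elided
        // Optional: start from explicit positions instead of the most recent offsets
        // (hypothetical cursor value).
        connection.setRequestProperty("X-nakadi-cursors", "[{\"partition\":\"0\",\"offset\":\"BEGIN\"}]");

        // The response is application/x-json-stream: one JSON batch per line.
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) {
            String batch;
            while ((batch = reader.readLine()) != null) {
                System.out.println(batch);
            }
        }
    }
}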
Use of org.zalando.nakadi.security.Client in project nakadi by zalando.
The class EventStream, method streamEvents:
public void streamEvents(final AtomicBoolean connectionReady, final Runnable checkAuthorization) {
    try {
        int messagesRead = 0;
        final Map<String, Integer> keepAliveInARow = createMapWithPartitionKeys(partition -> 0);
        final Map<String, List<byte[]>> currentBatches = createMapWithPartitionKeys(partition -> Lists.newArrayList());
        // Partition to NakadiCursor.
        final Map<String, NakadiCursor> latestOffsets = config.getCursors().stream()
                .collect(Collectors.toMap(NakadiCursor::getPartition, c -> c));
        final long start = currentTimeMillis();
        final Map<String, Long> batchStartTimes = createMapWithPartitionKeys(partition -> start);
        final List<ConsumedEvent> consumedEvents = new LinkedList<>();
        long lastKpiEventSent = System.currentTimeMillis();
        long bytesInMemory = 0;
        while (connectionReady.get() && !blacklistService.isConsumptionBlocked(
                config.getEtName(), config.getConsumingClient().getClientId())) {
            checkAuthorization.run();
            if (consumedEvents.isEmpty()) {
                // TODO: There are a lot of optimizations here, one can significantly improve code by
                // processing all events at the same time, instead of processing one by one.
                consumedEvents.addAll(eventConsumer.readEvents());
            }
            final Optional<ConsumedEvent> eventOrEmpty = consumedEvents.isEmpty()
                    ? Optional.empty() : Optional.of(consumedEvents.remove(0));
            if (eventOrEmpty.isPresent()) {
                final ConsumedEvent event = eventOrEmpty.get();
                // update offset for the partition of event that was read
                latestOffsets.put(event.getPosition().getPartition(), event.getPosition());
                // put message to batch
                currentBatches.get(event.getPosition().getPartition()).add(event.getEvent());
                messagesRead++;
                bytesInMemory += event.getEvent().length;
                // if we read the message - reset keep alive counter for this partition
                keepAliveInARow.put(event.getPosition().getPartition(), 0);
            }
            // for each partition check if it's time to send the batch
            for (final String partition : latestOffsets.keySet()) {
                final long timeSinceBatchStart = currentTimeMillis() - batchStartTimes.get(partition);
                if (config.getBatchTimeout() * 1000 <= timeSinceBatchStart
                        || currentBatches.get(partition).size() >= config.getBatchLimit()) {
                    final List<byte[]> eventsToSend = currentBatches.get(partition);
                    sendBatch(latestOffsets.get(partition), eventsToSend);
                    if (!eventsToSend.isEmpty()) {
                        bytesInMemory -= eventsToSend.stream().mapToLong(v -> v.length).sum();
                        eventsToSend.clear();
                    } else {
                        // if we hit keep alive count limit - close the stream
                        keepAliveInARow.put(partition, keepAliveInARow.get(partition) + 1);
                    }
                    batchStartTimes.put(partition, currentTimeMillis());
                }
            }
            // Dump some data that is exceeding memory limits
            while (isMemoryLimitReached(bytesInMemory)) {
                final Map.Entry<String, List<byte[]>> heaviestPartition = currentBatches.entrySet().stream()
                        .max(Comparator.comparing(entry -> entry.getValue().stream()
                                .mapToLong(event -> event.length).sum()))
                        .get();
                sendBatch(latestOffsets.get(heaviestPartition.getKey()), heaviestPartition.getValue());
                final long freed = heaviestPartition.getValue().stream().mapToLong(v -> v.length).sum();
                LOG.warn("Memory limit reached for event type {}: {} bytes. Freed: {} bytes, {} messages",
                        config.getEtName(), bytesInMemory, freed, heaviestPartition.getValue().size());
                bytesInMemory -= freed;
                // Init new batch for subscription
                heaviestPartition.getValue().clear();
                batchStartTimes.put(heaviestPartition.getKey(), currentTimeMillis());
            }
            if (lastKpiEventSent + kpiFrequencyMs < System.currentTimeMillis()) {
                final long count = kpiData.getAndResetNumberOfEventsSent();
                final long bytes = kpiData.getAndResetBytesSent();
                publishKpi(config.getConsumingClient(), count, bytes);
                lastKpiEventSent = System.currentTimeMillis();
            }
            // check if we reached keepAliveInARow for all the partitions; if yes - then close stream
            if (config.getStreamKeepAliveLimit() != 0) {
                final boolean keepAliveLimitReachedForAllPartitions = keepAliveInARow.values().stream()
                        .allMatch(keepAlives -> keepAlives >= config.getStreamKeepAliveLimit());
                if (keepAliveLimitReachedForAllPartitions) {
                    break;
                }
            }
            // check if we reached the stream timeout or message count limit
            final long timeSinceStart = currentTimeMillis() - start;
            if (config.getStreamTimeout() != 0 && timeSinceStart >= config.getStreamTimeout() * 1000
                    || config.getStreamLimit() != 0 && messagesRead >= config.getStreamLimit()) {
                for (final String partition : latestOffsets.keySet()) {
                    if (currentBatches.get(partition).size() > 0) {
                        sendBatch(latestOffsets.get(partition), currentBatches.get(partition));
                    }
                }
                break;
            }
        }
    } catch (final IOException e) {
        LOG.info("I/O error occurred when streaming events (possibly client closed connection)", e);
    } catch (final IllegalStateException e) {
        LOG.info("Error occurred when streaming events (possibly server closed connection)", e);
    } catch (final KafkaException e) {
        LOG.error("Error occurred when polling events from kafka; consumer: {}, event-type: {}",
                config.getConsumingClient().getClientId(), config.getEtName(), e);
    } finally {
        publishKpi(config.getConsumingClient(), kpiData.getAndResetNumberOfEventsSent(),
                kpiData.getAndResetBytesSent());
    }
}
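As a standalone illustration of the flush condition in the loop above (a partition's batch is sent once its timeout elapses or it reaches the batch limit), here is a small sketch with hypothetical values; the class and method names are made up for the example.

public class BatchFlushSketch {

    // The flush predicate from streamEvents() above, isolated: send the
    // partition's batch once batchTimeout seconds have elapsed or the batch
    // has grown to batchLimit events.
    static boolean shouldFlush(final long batchStartMillis, final int batchSize,
                               final int batchTimeoutSeconds, final int batchLimit) {
        final long elapsed = System.currentTimeMillis() - batchStartMillis;
        return batchTimeoutSeconds * 1000L <= elapsed || batchSize >= batchLimit;
    }

    public static void main(final String[] args) {
        final long now = System.currentTimeMillis();
        // Hypothetical values: 30-second batch timeout, batch limit of 10 events.
        System.out.println(shouldFlush(now - 31_000, 3, 30, 10)); // true: timeout elapsed
        System.out.println(shouldFlush(now, 10, 30, 10));         // true: batch full
        System.out.println(shouldFlush(now, 3, 30, 10));          // false: keep accumulating
    }
}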