Use of org.zalando.nakadi.domain.ConsumedEvent in project nakadi by zalando.
The class MultiTimelineEventConsumer, method readEvents.
@Override
public List<ConsumedEvent> readEvents() {
    if (timelinesChanged.compareAndSet(true, false)) {
        try {
            // Timelines changed since the last read: rebuild consumer state before polling.
            onTimelinesChanged();
        } catch (final NakadiException | InvalidCursorException ex) {
            throw new NakadiRuntimeException(ex);
        }
    }
    final List<ConsumedEvent> result = poll();
    for (final ConsumedEvent event : result) {
        final EventTypePartition etp = event.getPosition().getEventTypePartition();
        // Track the latest consumed position per partition.
        latestOffsets.put(etp, event.getPosition());
        // If this event is at or past the last offset of the current timeline,
        // schedule a timeline switch for the next readEvents() call.
        final String border = borderOffsets.get(etp);
        final boolean timelineBorderReached = null != border
                && border.compareTo(event.getPosition().getOffset()) <= 0;
        if (timelineBorderReached) {
            timelinesChanged.set(true);
        }
    }
    return result;
}
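The border check above depends on Nakadi offsets being fixed-width, zero-padded strings, so a plain String.compareTo orders them chronologically. A minimal standalone sketch of that predicate (the class name and the sample offset values are hypothetical):

// Standalone sketch: zero-padded offset strings of equal width compare
// lexicographically in the same order as the numbers they encode.
public class BorderCheckSketch {
    public static void main(final String[] args) {
        final String border = "001-0001-000000000000000010";   // hypothetical: last offset of the expiring timeline
        final String consumed = "001-0001-000000000000000012"; // hypothetical: offset of the event just read
        // Same predicate as in readEvents() above:
        final boolean timelineBorderReached = border.compareTo(consumed) <= 0;
        System.out.println("timeline border reached: " + timelineBorderReached); // prints true
    }
}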
Use of org.zalando.nakadi.domain.ConsumedEvent in project nakadi by zalando.
The class EventStreamTest, method whenReadingEventsTheOrderIsCorrect.
@Test(timeout = 10000)
public void whenReadingEventsTheOrderIsCorrect() throws NakadiException, IOException, InterruptedException {
    final EventStreamConfig config = EventStreamConfig.builder()
            .withCursors(ImmutableList.of(NakadiCursor.of(TIMELINE, "0", "0")))
            .withBatchLimit(1)
            .withStreamLimit(4)
            .withConsumingClient(mock(Client.class))
            .build();
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    final int eventNum = 4;
    final LinkedList<ConsumedEvent> events = new LinkedList<>(IntStream.range(0, eventNum)
            .boxed()
            .map(index -> new ConsumedEvent(
                    ("event" + index).getBytes(UTF_8),
                    NakadiCursor.of(TIMELINE, "0", KafkaCursor.toNakadiOffset(index))))
            .collect(Collectors.toList()));
    final EventStream eventStream = new EventStream(
            predefinedConsumer(events), out, config, mock(BlacklistService.class), cursorConverter,
            BYTES_FLUSHED_METER, writerProvider, kpiPublisher, kpiEventType, kpiFrequencyMs);
    eventStream.streamEvents(new AtomicBoolean(true), () -> {
    });
    final String[] batches = out.toString().split(BATCH_SEPARATOR);
    assertThat(batches, arrayWithSize(eventNum));
    IntStream.range(0, eventNum).boxed().forEach(index -> assertThat(
            batches[index],
            sameJSONAs(jsonBatch("0", String.format("001-0000-%018d", index), Optional.of(nCopies(1, "event" + index))))));
}
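The predefinedConsumer(events) helper comes from the test fixture and is not shown here. A plausible sketch of such a helper, assuming it drains the given queue one event per readEvents() call (Mockito's mock/when/thenAnswer and java.util's Queue/Collections are assumed imports; the real fixture may differ):

private static NakadiKafkaConsumer predefinedConsumerSketch(final Queue<ConsumedEvent> events) {
    final NakadiKafkaConsumer consumer = mock(NakadiKafkaConsumer.class);
    when(consumer.readEvents()).thenAnswer(invocation -> {
        final ConsumedEvent next = events.poll();
        // One event per call until the queue is drained; afterwards empty lists,
        // so the stream sends keep-alives until it hits its stream limit of 4.
        return next == null ? Collections.<ConsumedEvent>emptyList() : Collections.singletonList(next);
    });
    return consumer;
}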
Use of org.zalando.nakadi.domain.ConsumedEvent in project nakadi by zalando.
The class EventStreamTest, method endlessDummyConsumerForPartition.
private static NakadiKafkaConsumer endlessDummyConsumerForPartition(final String partition) throws NakadiException {
    final NakadiKafkaConsumer nakadiKafkaConsumer = mock(NakadiKafkaConsumer.class);
    when(nakadiKafkaConsumer.readEvents())
            .thenReturn(Collections.singletonList(new ConsumedEvent(DUMMY, NakadiCursor.of(TIMELINE, partition, "0"))));
    return nakadiKafkaConsumer;
}
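For illustration only, a hypothetical caller inside a test method: the mock never runs dry, every readEvents() call yields the same single DUMMY event, so any loop over it must be bounded externally (a stream limit, a timeout, or a stop flag).

final NakadiKafkaConsumer consumer = endlessDummyConsumerForPartition("0");
for (int i = 0; i < 3; i++) {
    // Always exactly one event, always at offset "0" of partition "0".
    assertEquals(1, consumer.readEvents().size());
}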
Use of org.zalando.nakadi.domain.ConsumedEvent in project nakadi by zalando.
The class NakadiKafkaConsumerTest, method whenReadEventsThenGetRightEvents.
@Test
@SuppressWarnings("unchecked")
public void whenReadEventsThenGetRightEvents() {
    // ARRANGE //
    final byte[] event1 = randomString().getBytes();
    final byte[] event2 = randomString().getBytes();
    final int event1Offset = randomUInt();
    final int event2Offset = randomUInt();
    final ConsumerRecords<byte[], byte[]> consumerRecords = new ConsumerRecords<>(ImmutableMap.of(
            new TopicPartition(TOPIC, PARTITION),
            ImmutableList.of(
                    new ConsumerRecord<>(TOPIC, PARTITION, event1Offset, "k1".getBytes(), event1),
                    new ConsumerRecord<>(TOPIC, PARTITION, event2Offset, "k2".getBytes(), event2))));
    final Timeline timeline = buildTimeline(TOPIC, TOPIC, CREATED_AT);
    final ConsumerRecords<byte[], byte[]> emptyRecords = new ConsumerRecords<>(ImmutableMap.of());
    final KafkaConsumer<byte[], byte[]> kafkaConsumerMock = mock(KafkaConsumer.class);
    final ArgumentCaptor<Long> pollTimeoutCaptor = ArgumentCaptor.forClass(Long.class);
    when(kafkaConsumerMock.poll(pollTimeoutCaptor.capture())).thenReturn(consumerRecords, emptyRecords);
    // we mock KafkaConsumer anyway, so the cursors we pass are not really important
    final List<KafkaCursor> cursors = ImmutableList.of(kafkaCursor(TOPIC, PARTITION, 0));
    // ACT //
    final NakadiKafkaConsumer consumer =
            new NakadiKafkaConsumer(kafkaConsumerMock, cursors, createTpTimelineMap(), POLL_TIMEOUT);
    final List<ConsumedEvent> consumedEvents = consumer.readEvents();
    // ASSERT //
    assertThat("We should read exactly the two events we mocked", consumedEvents.size(), equalTo(2));
    assertThat("The event we read first should have the same data as the first mocked ConsumerRecord",
            consumedEvents.get(0),
            equalTo(new ConsumedEvent(event1, new KafkaCursor(TOPIC, PARTITION, event1Offset).toNakadiCursor(timeline))));
    assertThat("The event we read second should have the same data as the second mocked ConsumerRecord",
            consumedEvents.get(1),
            equalTo(new ConsumedEvent(event2, new KafkaCursor(TOPIC, PARTITION, event2Offset).toNakadiCursor(timeline))));
    assertThat("The Kafka poll should be called with the timeout we defined",
            pollTimeoutCaptor.getValue(), equalTo(POLL_TIMEOUT));
}
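The kafkaCursor and createTpTimelineMap helpers are fixture code not shown here. A plausible sketch, under the assumption that the consumer needs a Timeline per TopicPartition to convert Kafka offsets back into Nakadi cursors (the real fixture may be structured differently):

private static KafkaCursor kafkaCursor(final String topic, final int partition, final long offset) {
    return new KafkaCursor(topic, partition, offset);
}

private static Map<TopicPartition, Timeline> createTpTimelineMap() {
    // Hypothetical: reuse the same timeline the test builds for its assertions.
    return Collections.singletonMap(new TopicPartition(TOPIC, PARTITION), buildTimeline(TOPIC, TOPIC, CREATED_AT));
}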
Use of org.zalando.nakadi.domain.ConsumedEvent in project nakadi by zalando.
The class PartitionDataTest, method keepAliveCountShouldIncreaseOnEachEmptyCall.
@Test
public void keepAliveCountShouldIncreaseOnEachEmptyCall() {
    final PartitionData pd = new PartitionData(COMP, null, createCursor(100L), System.currentTimeMillis());
    for (int i = 0; i < 100; ++i) {
        pd.takeEventsToStream(currentTimeMillis(), 10, 0L);
        assertEquals(i + 1, pd.getKeepAliveInARow());
    }
    pd.addEvent(new ConsumedEvent("".getBytes(), createCursor(101L)));
    assertEquals(100, pd.getKeepAliveInARow());
    pd.takeEventsToStream(currentTimeMillis(), 10, 0L);
    assertEquals(0, pd.getKeepAliveInARow());
    pd.takeEventsToStream(currentTimeMillis(), 10, 0L);
    assertEquals(1, pd.getKeepAliveInARow());
}
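The contract this test pins down: every take that yields no events increments a keep-alive counter, adding an event alone does not touch it, and a take that streams real events resets it to zero. A self-contained toy model of that rule (names are hypothetical; this is not Nakadi's PartitionData):

final class KeepAliveCounterSketch {
    private int keepAliveInARow;
    private int pendingEvents;

    void addEvent() {
        pendingEvents++; // queuing an event does not reset the streak by itself
    }

    int takeEventsToStream() {
        if (pendingEvents == 0) {
            keepAliveInARow++; // empty take -> one more keep-alive in a row
            return 0;
        }
        keepAliveInARow = 0;   // streaming real data interrupts the keep-alive streak
        final int taken = pendingEvents;
        pendingEvents = 0;
        return taken;
    }

    int getKeepAliveInARow() {
        return keepAliveInARow;
    }
}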