Use of org.zalando.nakadi.domain.ConsumedEvent in project nakadi by zalando.
The class EventStreamTest, method testWriteStreamInfoWhenPresent.
@Test
public void testWriteStreamInfoWhenPresent() {
    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
    final SubscriptionCursor cursor =
            new SubscriptionCursor("11", "000000000000000012", "event-type", "token-id");
    final ArrayList<ConsumedEvent> events = Lists.newArrayList(
            new ConsumedEvent("{\"a\":\"b\"}".getBytes(), mock(NakadiCursor.class)));
    try {
        writerProvider.getWriter().writeSubscriptionBatch(baos, cursor, events, Optional.of("something"));
        final JSONObject batch = new JSONObject(baos.toString());
        final JSONObject cursorM = batch.getJSONObject("cursor");
        assertEquals("11", cursorM.getString("partition"));
        assertEquals("000000000000000012", cursorM.getString("offset"));
        assertEquals("event-type", cursorM.getString("event_type"));
        assertEquals("token-id", cursorM.getString("cursor_token"));
        final JSONArray eventsM = batch.getJSONArray("events");
        assertEquals(1, eventsM.length());
assertEquals("something", batch.getJSONObject("info").getString("debug"));
} catch (final IOException e) {
fail(e.getMessage());
}
}
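Reconstructed from the assertions above, the JSON batch written by writeSubscriptionBatch should look roughly like this (field order and whitespace may differ):

{
    "cursor": {
        "partition": "11",
        "offset": "000000000000000012",
        "event_type": "event-type",
        "cursor_token": "token-id"
    },
    "events": [{"a": "b"}],
    "info": {"debug": "something"}
}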
Use of org.zalando.nakadi.domain.ConsumedEvent in project nakadi by zalando.
The class EventStreamTest, method nCountDummyConsumerForPartition.
private static NakadiKafkaConsumer nCountDummyConsumerForPartition(
        final int eventNum, final String partition) throws NakadiException {
    final NakadiKafkaConsumer nakadiKafkaConsumer = mock(NakadiKafkaConsumer.class);
    final AtomicInteger eventsToCreate = new AtomicInteger(eventNum);
    when(nakadiKafkaConsumer.readEvents()).thenAnswer(invocation -> {
        if (eventsToCreate.get() > 0) {
            eventsToCreate.decrementAndGet();
            return Collections.singletonList(
                    new ConsumedEvent(DUMMY, NakadiCursor.of(TIMELINE, partition, "000000000000000000")));
        } else {
            return Collections.emptyList();
        }
    });
    return nakadiKafkaConsumer;
}
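A short usage sketch (illustrative only, not taken from the test class; assumes the caller declares throws NakadiException): draining the mock yields exactly eventNum events, one per readEvents() call, after which it returns empty lists.

final NakadiKafkaConsumer consumer = nCountDummyConsumerForPartition(5, "0");
int consumed = 0;
List<ConsumedEvent> batch = consumer.readEvents();
while (!batch.isEmpty()) {
    consumed += batch.size(); // each non-empty call returns a singleton list
    batch = consumer.readEvents();
}
assertEquals(5, consumed);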
Use of org.zalando.nakadi.domain.ConsumedEvent in project nakadi by zalando.
The class EventStreamTest, method whenReadFromMultiplePartitionsThenGroupedInBatchesAccordingToPartition.
@Test(timeout = 10000)
public void whenReadFromMultiplePartitionsThenGroupedInBatchesAccordingToPartition()
        throws NakadiException, IOException, InterruptedException {
    final EventStreamConfig config = EventStreamConfig.builder()
            .withCursors(ImmutableList.of(
                    NakadiCursor.of(TIMELINE, "0", "000000000000000000"),
                    NakadiCursor.of(TIMELINE, "1", "000000000000000000"),
                    NakadiCursor.of(TIMELINE, "2", "000000000000000000")))
            .withBatchLimit(2)
            .withStreamLimit(6)
            .withBatchTimeout(30)
            .withConsumingClient(mock(Client.class))
            .build();
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    final LinkedList<ConsumedEvent> events = new LinkedList<>();
    events.add(new ConsumedEvent(DUMMY, NakadiCursor.of(TIMELINE, "0", "000000000000000000")));
    events.add(new ConsumedEvent(DUMMY, NakadiCursor.of(TIMELINE, "1", "000000000000000000")));
    events.add(new ConsumedEvent(DUMMY, NakadiCursor.of(TIMELINE, "2", "000000000000000000")));
    events.add(new ConsumedEvent(DUMMY, NakadiCursor.of(TIMELINE, "0", "000000000000000000")));
    events.add(new ConsumedEvent(DUMMY, NakadiCursor.of(TIMELINE, "1", "000000000000000000")));
    events.add(new ConsumedEvent(DUMMY, NakadiCursor.of(TIMELINE, "2", "000000000000000000")));
    final EventStream eventStream = new EventStream(
            predefinedConsumer(events), out, config, mock(BlacklistService.class), cursorConverter,
            BYTES_FLUSHED_METER, writerProvider, kpiPublisher, kpiEventType, kpiFrequencyMs);
    eventStream.streamEvents(new AtomicBoolean(true), () -> {
    });
    final String[] batches = out.toString().split(BATCH_SEPARATOR);
    assertThat(batches, arrayWithSize(3));
    assertThat(batches[0], sameJSONAs(jsonBatch("0", "001-0000-000000000000000000", Optional.of(nCopies(2, new String(DUMMY))))));
    assertThat(batches[1], sameJSONAs(jsonBatch("1", "001-0000-000000000000000000", Optional.of(nCopies(2, new String(DUMMY))))));
    assertThat(batches[2], sameJSONAs(jsonBatch("2", "001-0000-000000000000000000", Optional.of(nCopies(2, new String(DUMMY))))));
}
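Reading off the matchers: six events interleaved across partitions 0, 1, and 2, streamed with a batch limit of 2 and a stream limit of 6, come out as exactly three batches, each holding both events of a single partition. Reconstructed from the jsonBatch helper's arguments (the exact field set is an assumption; DUMMY stands for the test's placeholder payload), each batch should look roughly like:

{"cursor": {"partition": "0", "offset": "001-0000-000000000000000000"}, "events": [DUMMY, DUMMY]}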
Use of org.zalando.nakadi.domain.ConsumedEvent in project nakadi by zalando.
The class PartitionDataTest, method normalOperationShouldNotReconfigureKafkaConsumer.
@Test
public void normalOperationShouldNotReconfigureKafkaConsumer() {
    final PartitionData pd = new PartitionData(COMP, null, createCursor(100L), System.currentTimeMillis());
    for (long i = 0; i < 100; ++i) {
        pd.addEvent(new ConsumedEvent(("test_" + i).getBytes(), createCursor(100L + i + 1)));
    }
    // Mark all 100 events as sent to the stream.
    pd.takeEventsToStream(currentTimeMillis(), 1000, 0L);
    assertEquals(100L, pd.getUnconfirmed());
    for (long i = 0; i < 10; ++i) {
        final PartitionData.CommitResult cr = pd.onCommitOffset(createCursor(110L + i * 10L));
        assertEquals(10L, cr.committedCount);
        assertFalse(cr.seekOnKafka);
        assertEquals(90L - i * 10L, pd.getUnconfirmed());
    }
}
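The loop's arithmetic, spelled out (assuming createCursor(n) denotes offset n): the 100 events occupy offsets 101 through 200, so each commit confirms exactly the next ten.

// Worked example of the commit steps asserted above:
// i = 0: onCommitOffset(110) confirms offsets 101..110 -> committedCount 10, unconfirmed 90
// i = 1: onCommitOffset(120) confirms offsets 111..120 -> committedCount 10, unconfirmed 80
// ...
// i = 9: onCommitOffset(200) confirms offsets 191..200 -> committedCount 10, unconfirmed 0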
Use of org.zalando.nakadi.domain.ConsumedEvent in project nakadi by zalando.
The class PartitionDataTest, method eventsShouldBeStreamedOnTimeout.
@Test
public void eventsShouldBeStreamedOnTimeout() {
    final long timeout = TimeUnit.SECONDS.toMillis(1);
    long currentTime = System.currentTimeMillis();
    final PartitionData pd = new PartitionData(COMP, null, createCursor(100L), currentTime);
    for (int i = 0; i < 100; ++i) {
        pd.addEvent(new ConsumedEvent("test".getBytes(), createCursor(i + 100L + 1)));
    }
    List<ConsumedEvent> data = pd.takeEventsToStream(currentTime, 1000, timeout);
    assertNull(data);
    assertEquals(0, pd.getKeepAliveInARow());
    currentTime += timeout + 1;
    data = pd.takeEventsToStream(currentTime, 1000, timeout);
    assertNotNull(data);
    assertEquals(100, data.size());
    for (int i = 100; i < 200; ++i) {
        pd.addEvent(new ConsumedEvent("test".getBytes(), createCursor(i + 100L + 1)));
    }
    data = pd.takeEventsToStream(currentTime, 1000, timeout);
    assertNull(data);
    assertEquals(0, pd.getKeepAliveInARow());
    currentTime += timeout + 1;
    data = pd.takeEventsToStream(currentTime, 1000, timeout);
    assertNotNull(data);
    assertEquals(100, data.size());
}
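The flush-on-timeout behavior exercised here suggests how a caller would poll PartitionData. The loop below is a sketch under assumptions: streaming, batchLimit, batchTimeoutMs, and streamToOutput are hypothetical names, and the second argument to takeEventsToStream is taken to be the maximum batch size.

// Illustrative polling loop: takeEventsToStream returns null while the
// batch timeout has not yet elapsed, then returns the accumulated events.
while (streaming.get()) {
    final List<ConsumedEvent> batch =
            pd.takeEventsToStream(System.currentTimeMillis(), batchLimit, batchTimeoutMs);
    if (batch != null) {
        streamToOutput(batch); // hypothetical sink for the flushed batch
    }
}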