Use of org.apache.kafka.clients.consumer.ConsumerRecords in project samza by apache.
From the class TestZkStreamProcessorBase, method verifyNumMessages.
/**
 * Consumes data from the topic until there are no new messages for a while
 * and asserts that the number of consumed messages is as expected.
 */
protected void verifyNumMessages(String topic, final Map<Integer, Boolean> expectedValues, int expectedNumMessages) {
    KafkaConsumer consumer = getKafkaConsumer();
    consumer.subscribe(Collections.singletonList(topic));
    Map<Integer, Boolean> map = new HashMap<>(expectedValues);
    int count = 0;
    int emptyPollCount = 0;
    while (count < expectedNumMessages && emptyPollCount < 5) {
        ConsumerRecords records = consumer.poll(5000);
        if (!records.isEmpty()) {
            Iterator<ConsumerRecord> iterator = records.iterator();
            while (iterator.hasNext()) {
                ConsumerRecord record = iterator.next();
                String val = new String((byte[]) record.value());
                LOG.info("Got value " + val + "; count = " + count + "; out of " + expectedNumMessages);
                Integer valI = Integer.valueOf(val);
                if (valI < BAD_MESSAGE_KEY) {
                    map.put(valI, true);
                    count++;
                }
            }
        } else {
            emptyPollCount++;
            LOG.warn("empty polls " + emptyPollCount);
        }
    }
    // Filter out numbers we did not get.
    long numFalse = map.values().stream().filter(v -> !v).count();
    Assert.assertEquals("didn't get this number of events ", 0, numFalse);
    Assert.assertEquals(expectedNumMessages, count);
}
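Here getKafkaConsumer() is a helper defined elsewhere in the test base class. A minimal sketch of what such a factory might look like follows; the broker address, group id, and byte-array deserializers are assumptions for illustration, not samza's actual test wiring.

private KafkaConsumer<byte[], byte[]> getKafkaConsumer() {
    Properties props = new Properties();
    // Assumed settings; the real base class points these at its test broker.
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "verify-num-messages");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    return new KafkaConsumer<>(props);
}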
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project flink by apache.
From the class Kafka09FetcherTest, method ensureOffsetsGetCommitted.
@Test
public void ensureOffsetsGetCommitted() throws Exception {
    // test data
    final KafkaTopicPartition testPartition1 = new KafkaTopicPartition("test", 42);
    final KafkaTopicPartition testPartition2 = new KafkaTopicPartition("another", 99);
    final Map<KafkaTopicPartition, Long> testCommitData1 = new HashMap<>();
    testCommitData1.put(testPartition1, 11L);
    testCommitData1.put(testPartition2, 18L);
    final Map<KafkaTopicPartition, Long> testCommitData2 = new HashMap<>();
    testCommitData2.put(testPartition1, 19L);
    testCommitData2.put(testPartition2, 28L);
    final BlockingQueue<Map<TopicPartition, OffsetAndMetadata>> commitStore = new LinkedBlockingQueue<>();

    // ----- the mock consumer with poll(), wakeup(), and commit(A)sync calls -----
    final MultiShotLatch blockerLatch = new MultiShotLatch();
    KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {
        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) throws InterruptedException {
            blockerLatch.await();
            return ConsumerRecords.empty();
        }
    });
    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) {
            blockerLatch.trigger();
            return null;
        }
    }).when(mockConsumer).wakeup();
    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) {
            @SuppressWarnings("unchecked")
            Map<TopicPartition, OffsetAndMetadata> offsets =
                (Map<TopicPartition, OffsetAndMetadata>) invocation.getArguments()[0];
            OffsetCommitCallback callback = (OffsetCommitCallback) invocation.getArguments()[1];
            commitStore.add(offsets);
            callback.onComplete(offsets, null);
            return null;
        }
    }).when(mockConsumer).commitAsync(
        Mockito.<Map<TopicPartition, OffsetAndMetadata>>any(), any(OffsetCommitCallback.class));

    // make sure the fetcher creates the mock consumer
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);

    // ----- create the test fetcher -----
    @SuppressWarnings("unchecked")
    SourceContext<String> sourceContext = mock(SourceContext.class);
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets =
        Collections.singletonMap(new KafkaTopicPartition("test", 42), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka09Fetcher<String> fetcher = new Kafka09Fetcher<>(
        sourceContext,
        partitionsWithInitialOffsets,
        null, /* periodic watermark extractor */
        null, /* punctuated watermark extractor */
        new TestProcessingTimeService(),
        10, /* watermark interval */
        this.getClass().getClassLoader(),
        "task_name",
        new UnregisteredMetricsGroup(),
        schema,
        new Properties(),
        0L,
        false);

    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {
        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();

    // ----- trigger the first offset commit -----
    fetcher.commitInternalOffsetsToKafka(testCommitData1);
    Map<TopicPartition, OffsetAndMetadata> result1 = commitStore.take();
    for (Entry<TopicPartition, OffsetAndMetadata> entry : result1.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(12L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(19L, entry.getValue().offset());
        }
    }

    // ----- trigger the second offset commit -----
    fetcher.commitInternalOffsetsToKafka(testCommitData2);
    Map<TopicPartition, OffsetAndMetadata> result2 = commitStore.take();
    for (Entry<TopicPartition, OffsetAndMetadata> entry : result2.entrySet()) {
        TopicPartition partition = entry.getKey();
        if (partition.topic().equals("test")) {
            assertEquals(42, partition.partition());
            assertEquals(20L, entry.getValue().offset());
        } else if (partition.topic().equals("another")) {
            assertEquals(99, partition.partition());
            assertEquals(29L, entry.getValue().offset());
        }
    }

    // ----- test done, wait till the fetcher is done for a clean shutdown -----
    fetcher.cancel();
    fetcherRunner.join();

    // check that there were no errors in the fetcher
    final Throwable caughtError = error.get();
    if (caughtError != null && !(caughtError instanceof Handover.ClosedException)) {
        throw new Exception("Exception in the fetcher", caughtError);
    }
}
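The +1 in the asserted offsets (11 committed as 12, 19 as 20) reflects Kafka's commit convention: the committed offset names the next record to read, not the last record processed. A minimal sketch of the same convention against a plain consumer (consumer setup and partition are assumed):

// Kafka convention: commit the offset of the NEXT record to consume, so a
// client that has fully processed offset n commits n + 1. Consumer setup assumed.
TopicPartition tp = new TopicPartition("test", 42);
long lastProcessedOffset = 11L;
consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(lastProcessedOffset + 1)));
// After a restart, poll() resumes at offset 12, right after the last processed record.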
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project kafka by apache.
From the class WorkerSinkTaskThreadedTest, method expectPolls.
// Note that this can only be called once per test currently.
private Capture<Collection<SinkRecord>> expectPolls(final long pollDelayMs) throws Exception {
    // Stub out all the consumer stream/iterator responses, which we just want to verify occur,
    // but don't care about the exact details here.
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andStubAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {
        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            // "Sleep" so time will progress
            time.sleep(pollDelayMs);
            ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(Collections.singletonMap(
                new TopicPartition(TOPIC, PARTITION),
                Arrays.asList(new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturned,
                    TIMESTAMP, TIMESTAMP_TYPE, 0L, 0, 0, RAW_KEY, RAW_VALUE))));
            recordsReturned++;
            return records;
        }
    });
    EasyMock.expect(keyConverter.toConnectData(TOPIC, RAW_KEY)).andReturn(new SchemaAndValue(KEY_SCHEMA, KEY)).anyTimes();
    EasyMock.expect(valueConverter.toConnectData(TOPIC, RAW_VALUE)).andReturn(new SchemaAndValue(VALUE_SCHEMA, VALUE)).anyTimes();
    final Capture<SinkRecord> recordCapture = EasyMock.newCapture();
    EasyMock.expect(transformationChain.apply(EasyMock.capture(recordCapture))).andAnswer(new IAnswer<SinkRecord>() {
        @Override
        public SinkRecord answer() {
            return recordCapture.getValue();
        }
    }).anyTimes();
    Capture<Collection<SinkRecord>> capturedRecords = EasyMock.newCapture(CaptureType.ALL);
    sinkTask.put(EasyMock.capture(capturedRecords));
    EasyMock.expectLastCall().anyTimes();
    return capturedRecords;
}
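The stub answer above builds a ConsumerRecords by hand, which is the usual way to feed fake data to code under test. A stripped-down sketch of that construction, independent of EasyMock (topic, partition, and payloads are placeholders):

// Build a fake ConsumerRecords for a unit test: one partition, one record.
TopicPartition tp = new TopicPartition("my-topic", 0);
ConsumerRecord<byte[], byte[]> record =
    new ConsumerRecord<>("my-topic", 0, 0L, "key".getBytes(), "value".getBytes());
ConsumerRecords<byte[], byte[]> records =
    new ConsumerRecords<>(Collections.singletonMap(tp, Collections.singletonList(record)));
for (ConsumerRecord<byte[], byte[]> r : records) {
    System.out.println(r.offset() + ": " + new String(r.value()));
}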
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project storm by apache.
From the class KafkaSpoutRebalanceTest, method emitOneMessagePerPartitionThenRevokeOnePartition.
// Returns messageIds in order of emission.
private List<KafkaSpoutMessageId> emitOneMessagePerPartitionThenRevokeOnePartition(
        KafkaSpout<String, String> spout, TopicPartition partitionThatWillBeRevoked, TopicPartition assignedPartition) {
    // Set up the spout with a mock consumer so we can get at the rebalance listener.
    spout.open(conf, contextMock, collectorMock);
    spout.activate();
    ArgumentCaptor<ConsumerRebalanceListener> rebalanceListenerCapture = ArgumentCaptor.forClass(ConsumerRebalanceListener.class);
    verify(consumerMock).subscribe(anyCollection(), rebalanceListenerCapture.capture());
    // Assign partitions to the spout.
    ConsumerRebalanceListener consumerRebalanceListener = rebalanceListenerCapture.getValue();
    List<TopicPartition> assignedPartitions = new ArrayList<>();
    assignedPartitions.add(partitionThatWillBeRevoked);
    assignedPartitions.add(assignedPartition);
    consumerRebalanceListener.onPartitionsAssigned(assignedPartitions);
    // Make the consumer return a single message for each partition.
    Map<TopicPartition, List<ConsumerRecord<String, String>>> firstPartitionRecords = new HashMap<>();
    firstPartitionRecords.put(partitionThatWillBeRevoked, Collections.singletonList(
        new ConsumerRecord<>(partitionThatWillBeRevoked.topic(), partitionThatWillBeRevoked.partition(), 0L, "key", "value")));
    Map<TopicPartition, List<ConsumerRecord<String, String>>> secondPartitionRecords = new HashMap<>();
    secondPartitionRecords.put(assignedPartition, Collections.singletonList(
        new ConsumerRecord<>(assignedPartition.topic(), assignedPartition.partition(), 0L, "key", "value")));
    when(consumerMock.poll(anyLong()))
        .thenReturn(new ConsumerRecords<>(firstPartitionRecords))
        .thenReturn(new ConsumerRecords<>(secondPartitionRecords))
        .thenReturn(new ConsumerRecords<>(Collections.emptyMap()));
    // Emit the messages.
    spout.nextTuple();
    ArgumentCaptor<KafkaSpoutMessageId> messageIdForRevokedPartition = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
    verify(collectorMock).emit(anyObject(), anyObject(), messageIdForRevokedPartition.capture());
    reset(collectorMock);
    spout.nextTuple();
    ArgumentCaptor<KafkaSpoutMessageId> messageIdForAssignedPartition = ArgumentCaptor.forClass(KafkaSpoutMessageId.class);
    verify(collectorMock).emit(anyObject(), anyObject(), messageIdForAssignedPartition.capture());
    // Now rebalance.
    consumerRebalanceListener.onPartitionsRevoked(assignedPartitions);
    consumerRebalanceListener.onPartitionsAssigned(Collections.singleton(assignedPartition));
    List<KafkaSpoutMessageId> emittedMessageIds = new ArrayList<>();
    emittedMessageIds.add(messageIdForRevokedPartition.getValue());
    emittedMessageIds.add(messageIdForAssignedPartition.getValue());
    return emittedMessageIds;
}
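The test invokes the captured ConsumerRebalanceListener by hand; in production code the listener is passed to subscribe() and the consumer invokes it during group rebalances. A minimal sketch (topic name and callback bodies are placeholders):

consumer.subscribe(Collections.singletonList("my-topic"), new ConsumerRebalanceListener() {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Typically: commit offsets for the partitions being taken away.
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // Typically: (re)initialize per-partition state for the new assignment.
    }
});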
Use of org.apache.kafka.clients.consumer.ConsumerRecords in project ignite by apache.
From the class IgniteSourceConnectorTest, method checkDataDelivered.
/**
 * Checks if events were delivered to Kafka server.
 *
 * @param expectedEventsCnt Expected events count.
 * @throws Exception If failed.
 */
private void checkDataDelivered(final int expectedEventsCnt) throws Exception {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBroker.getBrokerAddress());
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "test-grp");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
    props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 10000);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.ignite.stream.kafka.connect.serialization.CacheEventDeserializer");
    final KafkaConsumer<String, CacheEvent> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(TOPICS));
    final AtomicInteger evtCnt = new AtomicInteger();
    try {
        // Wait for expected events count.
        GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override
            public boolean apply() {
                ConsumerRecords<String, CacheEvent> records = consumer.poll(10);
                for (ConsumerRecord<String, CacheEvent> record : records) {
                    info("Record: " + record);
                    evtCnt.getAndIncrement();
                }
                return evtCnt.get() >= expectedEventsCnt;
            }
        }, 20_000);
        info("Waiting for unexpected records for 5 secs.");
        assertFalse(GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override
            public boolean apply() {
                ConsumerRecords<String, CacheEvent> records = consumer.poll(10);
                for (ConsumerRecord<String, CacheEvent> record : records) {
                    error("Unexpected record: " + record);
                    evtCnt.getAndIncrement();
                }
                return evtCnt.get() > expectedEventsCnt;
            }
        }, 5_000));
    } catch (WakeupException ignored) {
        // Ignore for shutdown.
    } finally {
        consumer.close();
        assertEquals(expectedEventsCnt, evtCnt.get());
    }
}
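The same wait-then-assert pattern can be written without Ignite's test helpers. A minimal sketch using only the consumer API (consumer construction, topic subscription, and the generic types are assumptions):

// Drain the topic until the expected number of records arrives or a deadline passes.
int expected = 10; // placeholder for the expected count
int drained = 0;
long deadline = System.currentTimeMillis() + 20_000;
while (drained < expected && System.currentTimeMillis() < deadline) {
    ConsumerRecords<String, String> records = consumer.poll(100);
    drained += records.count();
}
assertEquals(expected, drained);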