Use of org.apache.kafka.clients.consumer.ConsumerRecord in project mist by snuspl.
The class KafkaSource, method submitQuery.
/**
 * Submit a query fetching data from a Kafka source.
 * The query reads strings from a Kafka topic and sends them to a sink server.
 * @param configuration the configuration used to resolve the Kafka source address and to submit the query
 * @return result of the submission
 * @throws IOException
 * @throws InjectionException
 * @throws URISyntaxException
 */
public static APIQueryControlResult submitQuery(final Configuration configuration)
        throws IOException, InjectionException, URISyntaxException {
    final String sourceSocket = Tang.Factory.getTang().newInjector(configuration)
            .getNamedInstance(KafkaSourceAddress.class);
    final SourceConfiguration localKafkaSourceConf =
            MISTExampleUtils.getLocalKafkaSourceConf("KafkaSource", sourceSocket);
    final MISTQueryBuilder queryBuilder = new MISTQueryBuilder();
    queryBuilder.kafkaStream(localKafkaSourceConf)
            .map(consumerRecord -> ((ConsumerRecord) consumerRecord).value())
            .textSocketOutput(MISTExampleUtils.SINK_HOSTNAME, MISTExampleUtils.SINK_PORT);
    return MISTExampleUtils.submit(queryBuilder, configuration);
}
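The map() stage above casts each stream element back to ConsumerRecord and keeps only its value. A minimal standalone sketch of that per-record transformation (the topic name, key, and value here are hypothetical, not taken from the MIST example):

import java.util.function.Function;
import org.apache.kafka.clients.consumer.ConsumerRecord;

public class ExtractValueSketch {
    public static void main(String[] args) {
        // Same extraction as the map() lambda: cast the element and read the record value.
        Function<Object, Object> extract = element -> ((ConsumerRecord) element).value();
        ConsumerRecord<String, String> record =
                new ConsumerRecord<>("KafkaSource", 0, 0L, "key", "hello");
        System.out.println(extract.apply(record)); // prints "hello"
    }
}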
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project atlas by apache.
The class KafkaConsumerTest, method testReceive.
@Test
public void testReceive() throws Exception {
    Referenceable entity = getEntity(TRAIT_NAME);
    EntityUpdateRequest message = new EntityUpdateRequest("user1", entity);
    String json = AtlasType.toV1Json(new AtlasNotificationMessage<>(new MessageVersion("1.0.0"), message));

    // Build a single-record batch for the ATLAS_HOOK topic.
    TopicPartition tp = new TopicPartition("ATLAS_HOOK", 0);
    List<ConsumerRecord<String, String>> klist =
            Collections.singletonList(new ConsumerRecord<>("ATLAS_HOOK", 0, 0L, "mykey", json));
    Map<TopicPartition, List<ConsumerRecord<String, String>>> mp = Collections.singletonMap(tp, klist);
    ConsumerRecords<String, String> records = new ConsumerRecords<>(mp);

    // Stub the mocked consumer so that poll() returns the prepared batch.
    when(kafkaConsumer.poll(100)).thenReturn(records);
    kafkaConsumer.assign(Collections.singletonList(tp));

    // Consume through AtlasKafkaConsumer and verify the deserialized notification.
    AtlasKafkaConsumer consumer = new AtlasKafkaConsumer(NotificationType.HOOK, kafkaConsumer, false, 100L);
    List<AtlasKafkaMessage<HookNotification>> messageList = consumer.receive();
    assertTrue(messageList.size() > 0);

    HookNotification consumedMessage = messageList.get(0).getMessage();
    assertMessagesEqual(message, consumedMessage, entity);
}
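The test relies on kafkaConsumer being a Mockito mock created elsewhere in the test class (not shown here). A self-contained sketch of the same stubbing pattern, using a hypothetical topic and payload rather than the Atlas notification JSON:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class MockedPollSketch {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        KafkaConsumer<String, String> consumer = mock(KafkaConsumer.class);
        TopicPartition tp = new TopicPartition("SOME_TOPIC", 0);
        ConsumerRecords<String, String> records = new ConsumerRecords<>(Collections.singletonMap(tp,
                Collections.singletonList(new ConsumerRecord<>("SOME_TOPIC", 0, 0L, "key", "payload"))));
        // Any poll(100) call on the mock now returns the single prepared record.
        when(consumer.poll(100)).thenReturn(records);
        System.out.println(consumer.poll(100).count()); // prints 1
    }
}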
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project samza by apache.
The class StreamApplicationIntegrationTestHarness, method consumeMessages.
/**
 * Read messages from the provided topic until {@code threshold} messages have been read or until
 * {@link #numEmptyPolls} consecutive polls return no messages.
 *
 * The poll timeout is determined by {@link #POLL_TIMEOUT_MS} and the number of allowed empty polls
 * is determined by {@link #numEmptyPolls}.
 *
 * @param topic the topic to consume from
 * @param threshold the maximum number of messages to consume
 * @return the list of {@link ConsumerRecord}s, whose size is at most {@code threshold}
 */
public List<ConsumerRecord<String, String>> consumeMessages(String topic, int threshold) {
    int emptyPollCount = 0;
    List<ConsumerRecord<String, String>> recordList = new ArrayList<>();
    KafkaConsumer kafkaConsumer =
            topicConsumerMap.computeIfAbsent(topic, t -> new KafkaConsumer<>(createConsumerConfigs()));
    kafkaConsumer.subscribe(Collections.singletonList(topic));

    while (emptyPollCount < numEmptyPolls && recordList.size() < threshold) {
        ConsumerRecords<String, String> records = kafkaConsumer.poll(POLL_TIMEOUT_MS);
        LOG.info("Read {} messages from topic: {}", records.count(), topic);
        if (!records.isEmpty()) {
            Iterator<ConsumerRecord<String, String>> iterator = records.iterator();
            while (iterator.hasNext() && recordList.size() < threshold) {
                ConsumerRecord<String, String> record = iterator.next();
                LOG.info("Read key: {} val: {} from topic: {} on partition: {}",
                        record.key(), record.value(), record.topic(), record.partition());
                recordList.add(record);
                emptyPollCount = 0;
            }
        } else {
            emptyPollCount++;
        }
    }
    return recordList;
}
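A hypothetical call site inside a test that extends StreamApplicationIntegrationTestHarness (the topic name and count are placeholders), showing the usual pattern of draining records and collecting their values for assertions; TransactionalStateIntegrationTest below uses the helper the same way:

// Drain up to 3 records from "output-topic" and keep only the message payloads.
List<ConsumerRecord<String, String>> consumed = consumeMessages("output-topic", 3);
List<String> values = consumed.stream().map(ConsumerRecord::value).collect(Collectors.toList());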
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project samza by apache.
The class TestZkStreamProcessorBase, method verifyNumMessages.
/**
 * Consumes data from the topic until there are no new messages for a while
 * and asserts that the number of consumed messages is as expected.
 * @param topic the topic to consume from
 * @param expectedValues map from each expected message value to a flag, initially false, marked true once the value is seen
 * @param expectedNumMessages the total number of valid messages expected on the topic
 */
protected void verifyNumMessages(String topic, final Map<Integer, Boolean> expectedValues, int expectedNumMessages) {
    consumer.subscribe(Collections.singletonList(topic));

    Map<Integer, Boolean> map = new HashMap<>(expectedValues);
    int count = 0;
    int emptyPollCount = 0;

    while (count < expectedNumMessages && emptyPollCount < 5) {
        ConsumerRecords records = consumer.poll(5000);
        if (!records.isEmpty()) {
            Iterator<ConsumerRecord> iterator = records.iterator();
            while (iterator.hasNext()) {
                ConsumerRecord record = iterator.next();
                String val = new String((byte[]) record.value());
                LOG.info("Got value " + val + "; count = " + count + "; out of " + expectedNumMessages);
                Integer valI = Integer.valueOf(val);
                if (valI < BAD_MESSAGE_KEY) {
                    map.put(valI, true);
                    count++;
                }
            }
        } else {
            emptyPollCount++;
            LOG.warn("empty polls " + emptyPollCount);
        }
    }

    // filter out numbers we did not get
    long numFalse = map.values().stream().filter(v -> !v).count();
    Assert.assertEquals("didn't get this number of events ", 0, numFalse);
    Assert.assertEquals(expectedNumMessages, count);
}
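A hypothetical invocation from a subclass, assuming the test has already produced 20 well-formed messages whose values are the strings "0" through "19" (all below BAD_MESSAGE_KEY):

// Every expected value starts out unseen (false); verifyNumMessages marks each one true as it arrives.
Map<Integer, Boolean> expected = new HashMap<>();
for (int i = 0; i < 20; i++) {
    expected.put(i, false);
}
verifyNumMessages("test-topic", expected, 20);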
Use of org.apache.kafka.clients.consumer.ConsumerRecord in project samza by apache.
The class TransactionalStateIntegrationTest, method secondRun.
private void secondRun(String changelogTopic, List<String> expectedChangelogMessages,
        List<String> expectedInitialStoreContents, Map<String, String> overriddenConfigs) {
    // clear the local store directory
    if (!hostAffinity) {
        new FileUtil().rm(new File(LOGGED_STORE_BASE_DIR));
    }

    // produce the second batch of input messages
    List<String> inputMessages = Arrays.asList("4", "5", "5", ":shutdown");
    inputMessages.forEach(m -> produceMessage(INPUT_TOPIC, 0, m, m));

    // run the application
    RunApplicationContext context = runApplication(
            new MyStatefulApplication(INPUT_SYSTEM, INPUT_TOPIC, Collections.singletonMap(STORE_NAME, changelogTopic)),
            "myApp", overriddenConfigs);

    // wait for the application to finish
    context.getRunner().waitForFinish();

    // consume and verify any additional changelog messages
    List<ConsumerRecord<String, String>> changelogRecords =
            consumeMessages(changelogTopic, expectedChangelogMessages.size());
    List<String> changelogMessages =
            changelogRecords.stream().map(ConsumerRecord::value).collect(Collectors.toList());
    Assert.assertEquals(expectedChangelogMessages, changelogMessages);

    // verify the store contents during startup (this is after changelog verification to ensure init has completed)
    Assert.assertEquals(expectedInitialStoreContents, MyStatefulApplication.getInitialStoreContents().get(STORE_NAME));
}