use of org.apache.kafka.clients.consumer.ConsumerRecord in project samza by apache.
the class CheckpointVersionIntegrationTest method runStatefulApp.
private void runStatefulApp(List<String> inputMessages, List<String> expectedInputTopicMessages,
    List<String> expectedChangelogMessages, Map<String, String> configs) {
  // create input topic and produce the first batch of input messages
  createTopic(INPUT_TOPIC, 1);
  inputMessages.forEach(m -> produceMessage(INPUT_TOPIC, 0, m, m));

  // verify that the input messages were produced successfully
  if (inputMessages.size() > 0) {
    List<ConsumerRecord<String, String>> inputRecords = consumeMessages(INPUT_TOPIC, inputMessages.size());
    List<String> readInputMessages = inputRecords.stream().map(ConsumerRecord::value).collect(Collectors.toList());
    Assert.assertEquals(expectedInputTopicMessages, readInputMessages);
  }

  // run the application
  RunApplicationContext context = runApplication(
      new MyStatefulApplication(INPUT_SYSTEM, INPUT_TOPIC, Collections.singletonMap(STORE_NAME, CHANGELOG_TOPIC)),
      "myApp", configs);

  // wait for the application to finish
  context.getRunner().waitForFinish();

  // consume and verify the changelog messages
  if (expectedChangelogMessages.size() > 0) {
    List<ConsumerRecord<String, String>> changelogRecords = consumeMessages(CHANGELOG_TOPIC, expectedChangelogMessages.size());
    List<String> changelogMessages = changelogRecords.stream().map(ConsumerRecord::value).collect(Collectors.toList());
    Assert.assertEquals(expectedChangelogMessages, changelogMessages);
  }
  LOG.info("Finished initial run");
}
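The createTopic, produceMessage, and consumeMessages calls above come from Samza's Kafka integration test harness rather than the Kafka client itself. A minimal sketch of what a consumeMessages-style helper could look like with a plain KafkaConsumer is shown below; the method name pollUntilCount and the 30-second deadline are illustrative assumptions, not the harness implementation.

// Hypothetical stand-in for the harness's consumeMessages helper: keep polling
// the topic until the expected number of records has arrived or a deadline passes.
// Assumes imports: java.time.Duration, java.util.*, org.apache.kafka.clients.consumer.*
private List<ConsumerRecord<String, String>> pollUntilCount(KafkaConsumer<String, String> consumer, String topic, int expected) {
  consumer.subscribe(Collections.singletonList(topic));
  List<ConsumerRecord<String, String>> collected = new ArrayList<>();
  long deadline = System.currentTimeMillis() + 30_000; // give up after 30 seconds
  while (collected.size() < expected && System.currentTimeMillis() < deadline) {
    for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofMillis(500))) {
      collected.add(record);
    }
  }
  return collected;
}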
use of org.apache.kafka.clients.consumer.ConsumerRecord in project samza by apache.
the class TestStreamProcessor method verifyNumMessages.
/**
 * Consumes data from the topic until there are no new messages for a while
 * and asserts that the number of consumed messages is as expected.
 */
@SuppressWarnings("unchecked")
private void verifyNumMessages(KafkaConsumer consumer, String topic, int expectedNumMessages) {
  consumer.subscribe(Collections.singletonList(topic));
  int count = 0;
  int emptyPollCount = 0;
  while (count < expectedNumMessages && emptyPollCount < 5) {
    ConsumerRecords records = consumer.poll(5000);
    if (!records.isEmpty()) {
      for (ConsumerRecord record : (Iterable<ConsumerRecord>) records) {
        Assert.assertEquals(new String((byte[]) record.value()), String.valueOf(count));
        count++;
      }
    } else {
      emptyPollCount++;
    }
  }
  Assert.assertEquals(count, expectedNumMessages);
}
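Note that consumer.poll(5000) uses the poll(long) overload, which is deprecated since Kafka clients 2.0 in favor of poll(Duration). A rewrite of the same loop with the Duration overload could look like the sketch below; typing the consumer as KafkaConsumer<byte[], byte[]> is an assumption, since the original snippet uses a raw KafkaConsumer.

// Same verification loop using poll(Duration) (Kafka clients 2.0+).
while (count < expectedNumMessages && emptyPollCount < 5) {
  ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(5));
  if (records.isEmpty()) {
    emptyPollCount++;
  } else {
    for (ConsumerRecord<byte[], byte[]> record : records) {
      Assert.assertEquals(String.valueOf(count), new String(record.value()));
      count++;
    }
  }
}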
use of org.apache.kafka.clients.consumer.ConsumerRecord in project storm by apache.
the class KafkaTridentSpoutEmitterEmitTest method testTimeStampStrategyWhenTopologyIsRedeployed.
@Test
public void testTimeStampStrategyWhenTopologyIsRedeployed() {
  /*
   * The TIMESTAMP strategy should be applied if the emitter is new and the topology
   * has been redeployed (the storm id has changed). The offset should be reset to
   * the offset corresponding to startTimeStamp.
   */
  long preRestartEmittedOffset = 20;
  int preRestartEmittedRecords = 10;
  long timeStampStartOffset = 2L;
  long pollTimeout = 1L;
  KafkaTridentSpoutBatchMetadata preExecutorRestartLastMeta = new KafkaTridentSpoutBatchMetadata(
      preRestartEmittedOffset, preRestartEmittedOffset + preRestartEmittedRecords - 1, "Some older topology");

  KafkaConsumer<String, String> kafkaConsumer = Mockito.mock(KafkaConsumer.class);
  when(kafkaConsumer.assignment()).thenReturn(Collections.singleton(partition));
  OffsetAndTimestamp offsetAndTimestamp = new OffsetAndTimestamp(timeStampStartOffset, startTimeStamp);
  HashMap<TopicPartition, OffsetAndTimestamp> map = new HashMap<>();
  map.put(partition, offsetAndTimestamp);
  when(kafkaConsumer.offsetsForTimes(Collections.singletonMap(partition, startTimeStamp))).thenReturn(map);

  HashMap<TopicPartition, List<ConsumerRecord<String, String>>> topicPartitionMap = new HashMap<>();
  List<ConsumerRecord<String, String>> newRecords =
      SpoutWithMockedConsumerSetupHelper.createRecords(partition, timeStampStartOffset, recordsInKafka);
  topicPartitionMap.put(partition, newRecords);
  when(kafkaConsumer.poll(pollTimeout)).thenReturn(new ConsumerRecords<>(topicPartitionMap));

  KafkaTridentSpoutEmitter<String, String> emitter = createEmitter(kafkaConsumer, FirstPollOffsetStrategy.TIMESTAMP);
  TransactionAttempt txid = new TransactionAttempt(0L, 0);
  KafkaTridentSpoutTopicPartition kttp = new KafkaTridentSpoutTopicPartition(partition);
  Map<String, Object> meta = emitter.emitPartitionBatchNew(txid, collectorMock, kttp, preExecutorRestartLastMeta.toMap());

  verify(collectorMock, times(recordsInKafka)).emit(emitCaptor.capture());
  verify(kafkaConsumer, times(1)).seek(partition, timeStampStartOffset);
  List<List<Object>> emits = emitCaptor.getAllValues();
  assertThat(emits.get(0).get(0), is(timeStampStartOffset));
  KafkaTridentSpoutBatchMetadata deserializedMeta = KafkaTridentSpoutBatchMetadata.fromMap(meta);
  assertThat("The batch should start at the first offset for startTimestamp",
      deserializedMeta.getFirstOffset(), is(timeStampStartOffset));
}
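SpoutWithMockedConsumerSetupHelper.createRecords is a Storm test helper that fabricates consecutive ConsumerRecords for one partition so the mocked poll() has something to return. A rough equivalent might look like the sketch below; the key and value payloads are made up for illustration and are not taken from the real helper.

// Hypothetical equivalent of the createRecords helper: consecutive records for
// a TopicPartition starting at startOffset (key/value contents are illustrative).
private static List<ConsumerRecord<String, String>> createRecords(TopicPartition tp, long startOffset, int count) {
  List<ConsumerRecord<String, String>> records = new ArrayList<>();
  for (int i = 0; i < count; i++) {
    long offset = startOffset + i;
    records.add(new ConsumerRecord<>(tp.topic(), tp.partition(), offset, "key-" + offset, "value-" + offset));
  }
  return records;
}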
use of org.apache.kafka.clients.consumer.ConsumerRecord in project storm by apache.
the class DefaultRecordTranslatorTest method testBasic.
@Test
public void testBasic() {
  DefaultRecordTranslator<String, String> trans = new DefaultRecordTranslator<>();
  assertEquals(Arrays.asList("default"), trans.streams());
  assertEquals(new Fields("topic", "partition", "offset", "key", "value"), trans.getFieldsFor("default"));
  ConsumerRecord<String, String> cr = new ConsumerRecord<>("TOPIC", 100, 100, "THE KEY", "THE VALUE");
  assertEquals(Arrays.asList("TOPIC", 100, 100L, "THE KEY", "THE VALUE"), trans.apply(cr));
}
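The test shows that DefaultRecordTranslator emits one list per record on the "default" stream with the fields (topic, partition, offset, key, value); the expected offset is the long 100L because the ConsumerRecord constructor takes the offset as a long even though the argument was written as an int literal. In a topology, a translator is typically wired into the spout configuration; a sketch is below, where the broker address and topic name are placeholder values.

// Sketch: plugging a record translator into storm-kafka-client's KafkaSpoutConfig
// ("localhost:9092" and "my-topic" are placeholders).
KafkaSpoutConfig<String, String> spoutConfig = KafkaSpoutConfig
    .builder("localhost:9092", "my-topic")
    .setRecordTranslator(new DefaultRecordTranslator<>())
    .build();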
use of org.apache.kafka.clients.consumer.ConsumerRecord in project storm by apache.
the class ByTopicRecordTranslatorTest method testNullTranslation.
@Test
public void testNullTranslation() {
  ByTopicRecordTranslator<String, String> trans = new ByTopicRecordTranslator<>((r) -> null, new Fields("key"));
  ConsumerRecord<String, String> cr = new ConsumerRecord<>("TOPIC 1", 100, 100, "THE KEY", "THE VALUE");
  assertEquals(null, trans.apply(cr));
}
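A translator that returns null produces no tuple for that record, which the spout treats as a record to skip, so null translations can be used for filtering. A sketch of such a filtering translator is below; the field names and the empty-value rule are illustrative assumptions, not part of the test above.

// Sketch of a filtering translator: records with a null or empty value map to
// null and therefore produce no tuple (assumes org.apache.storm.tuple.Values).
ByTopicRecordTranslator<String, String> filtering = new ByTopicRecordTranslator<>(
    r -> (r.value() == null || r.value().isEmpty())
        ? null
        : new Values(r.topic(), r.key(), r.value()),
    new Fields("topic", "key", "value"));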