use of org.apache.kafka.clients.consumer.ConsumerRecord in project flink by apache.
the class Kafka09FetcherTest method testCancellationWhenEmitBlocks.
@Test
public void testCancellationWhenEmitBlocks() throws Exception {
    // ----- some test data -----
    final String topic = "test-topic";
    final int partition = 3;
    final byte[] payload = new byte[] { 1, 2, 3, 4 };
    final List<ConsumerRecord<byte[], byte[]>> records = Arrays.asList(
            new ConsumerRecord<byte[], byte[]>(topic, partition, 15, payload, payload),
            new ConsumerRecord<byte[], byte[]>(topic, partition, 16, payload, payload),
            new ConsumerRecord<byte[], byte[]>(topic, partition, 17, payload, payload));
    final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> data = new HashMap<>();
    data.put(new TopicPartition(topic, partition), records);
    final ConsumerRecords<byte[], byte[]> consumerRecords = new ConsumerRecords<>(data);
    // ----- the test consumer -----
    final KafkaConsumer<?, ?> mockConsumer = mock(KafkaConsumer.class);
    when(mockConsumer.poll(anyLong())).thenAnswer(new Answer<ConsumerRecords<?, ?>>() {

        @Override
        public ConsumerRecords<?, ?> answer(InvocationOnMock invocation) {
            return consumerRecords;
        }
    });
    whenNew(KafkaConsumer.class).withAnyArguments().thenReturn(mockConsumer);
    // ----- build a fetcher -----
    BlockingSourceContext<String> sourceContext = new BlockingSourceContext<>();
    Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets = Collections.singletonMap(
            new KafkaTopicPartition(topic, partition), KafkaTopicPartitionStateSentinel.GROUP_OFFSET);
    KeyedDeserializationSchema<String> schema = new KeyedDeserializationSchemaWrapper<>(new SimpleStringSchema());
    final Kafka09Fetcher<String> fetcher = new Kafka09Fetcher<>(
            sourceContext,
            partitionsWithInitialOffsets,
            null, /* periodic watermark extractor */
            null, /* punctuated watermark extractor */
            new TestProcessingTimeService(),
            10, /* watermark interval */
            this.getClass().getClassLoader(),
            "task_name",
            new UnregisteredMetricsGroup(),
            schema,
            new Properties(),
            0L,
            false);
    // ----- run the fetcher -----
    final AtomicReference<Throwable> error = new AtomicReference<>();
    final Thread fetcherRunner = new Thread("fetcher runner") {

        @Override
        public void run() {
            try {
                fetcher.runFetchLoop();
            } catch (Throwable t) {
                error.set(t);
            }
        }
    };
    fetcherRunner.start();
    // wait until the thread started to emit records to the source context
    sourceContext.waitTillHasBlocker();
    // now we try to cancel the fetcher, including the interruption usually done on the task thread
    // once it has finished, there must be no more thread blocked on the source context
    fetcher.cancel();
    fetcherRunner.interrupt();
    fetcherRunner.join();
    assertFalse("fetcher threads did not properly finish", sourceContext.isStillBlocking());
}
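BlockingSourceContext is a helper defined elsewhere in the test class. A minimal sketch of the blocking mechanics the test relies on, built from plain java.util.concurrent primitives (this is an assumption-laden sketch, not Flink's actual helper):

import java.util.concurrent.CountDownLatch;

// Hypothetical sketch: collect() parks the emitting thread, the test waits
// for that to happen, and an interrupt (from fetcher cancellation) releases it.
final class BlockingCollector<T> {

    private final CountDownLatch blockerArrived = new CountDownLatch(1);

    private volatile Thread blockedThread;

    public void collect(T element) {
        blockedThread = Thread.currentThread();
        blockerArrived.countDown(); // signal: a thread is now blocked in emit
        try {
            Thread.sleep(Long.MAX_VALUE); // block until interrupted
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // cancellation path
        } finally {
            blockedThread = null;
        }
    }

    public void waitTillHasBlocker() throws InterruptedException {
        blockerArrived.await();
    }

    public boolean isStillBlocking() {
        return blockedThread != null;
    }
}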
use of org.apache.kafka.clients.consumer.ConsumerRecord in project camel by apache.
the class KafkaEndpointTest method testCreatingKafkaExchangeSetsHeaders.
@Test
public void testCreatingKafkaExchangeSetsHeaders() throws URISyntaxException {
    KafkaEndpoint endpoint = new KafkaEndpoint("kafka:mytopic?brokers=localhost", new KafkaComponent());
    ConsumerRecord<String, String> record = new ConsumerRecord<String, String>("topic", 4, 56, "somekey", "");
    Exchange exchange = endpoint.createKafkaExchange(record);
    assertEquals("somekey", exchange.getIn().getHeader(KafkaConstants.KEY));
    assertEquals("topic", exchange.getIn().getHeader(KafkaConstants.TOPIC));
    assertEquals(4, exchange.getIn().getHeader(KafkaConstants.PARTITION));
    assertEquals(56L, exchange.getIn().getHeader(KafkaConstants.OFFSET));
}
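For context, the headers asserted above are the ones camel-kafka populates on each inbound message. A short sketch of reading them inside a route (the endpoint URI and log target are illustrative):

import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.kafka.KafkaConstants;

// Sketch: consuming from Kafka and reading the same headers the test checks.
public class KafkaHeaderRoute extends RouteBuilder {

    @Override
    public void configure() {
        from("kafka:mytopic?brokers=localhost:9092")
            .process(exchange -> {
                String key = exchange.getIn().getHeader(KafkaConstants.KEY, String.class);
                Integer partition = exchange.getIn().getHeader(KafkaConstants.PARTITION, Integer.class);
                Long offset = exchange.getIn().getHeader(KafkaConstants.OFFSET, Long.class);
                exchange.getIn().setBody(key + "/" + partition + "/" + offset);
            })
            .to("log:kafka-headers");
    }
}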
use of org.apache.kafka.clients.consumer.ConsumerRecord in project open-kilda by telstra.
the class Consumer method run.
@Override
public void run() {
    Properties kprops = new Properties();
    kprops.put("bootstrap.servers", kafka.url());
    kprops.put("group.id", groupId);
    // NB: only "true" is valid; no code to handle "false" yet.
    kprops.put("enable.auto.commit", "true");
    kprops.put("auto.commit.interval.ms", commitInterval);
    kprops.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    kprops.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(kprops);
    consumer.subscribe(Arrays.asList(topicName));
    Executors.newSingleThreadExecutor(new ThreadFactoryBuilder().setNameFormat("heartbeat.consumer").build()).execute(new Runnable() {

        @Override
        public void run() {
            try {
                while (true) {
                    logger.trace("==> Poll for records");
                    ConsumerRecords<String, String> records = consumer.poll(1000);
                    logger.trace("==> Number of records: " + records.count());
                    // consumer.seekToBeginning(records.partitions());
                    for (ConsumerRecord<String, String> record : records) {
                        logger.debug("==> ==> offset = {}, key = {}, value = {}", record.offset(), record.key(), record.value());
                    }
                    Thread.sleep(sleepTime);
                }
            } catch (InterruptedException e) {
                logger.info("Heartbeat Consumer Interrupted");
            } finally {
                consumer.close();
            }
        }
    });
}
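The loop above relies on thread interruption, but an interrupt during poll() surfaces as Kafka's unchecked InterruptException rather than the InterruptedException being caught here. The KafkaConsumer javadoc recommends consumer.wakeup() for shutting down a consumer from another thread; a sketch of that pattern (class and topic names are illustrative):

import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

// Sketch: another thread calls shutdown(), poll() throws WakeupException,
// and the loop exits cleanly instead of depending on interruption.
public class WakeupAwareConsumer implements Runnable {

    private final KafkaConsumer<String, String> consumer;

    private final AtomicBoolean closed = new AtomicBoolean(false);

    public WakeupAwareConsumer(Properties props) {
        this.consumer = new KafkaConsumer<>(props);
    }

    @Override
    public void run() {
        try {
            consumer.subscribe(Collections.singletonList("health.check")); // illustrative topic
            while (!closed.get()) {
                ConsumerRecords<String, String> records = consumer.poll(1000);
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset=%d key=%s value=%s%n", record.offset(), record.key(), record.value());
                }
            }
        } catch (WakeupException e) {
            // expected on shutdown; rethrow only if nobody asked us to close
            if (!closed.get()) {
                throw e;
            }
        } finally {
            consumer.close();
        }
    }

    public void shutdown() {
        closed.set(true);
        consumer.wakeup(); // makes a blocked poll() return immediately
    }
}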
use of org.apache.kafka.clients.consumer.ConsumerRecord in project nifi by apache.
the class ConsumerLease method writeData.
private void writeData(final ProcessSession session, ConsumerRecord<byte[], byte[]> record, final TopicPartition topicPartition) {
    FlowFile flowFile = session.create();
    final BundleTracker tracker = new BundleTracker(record, topicPartition, keyEncoding);
    tracker.incrementRecordCount(1);
    final byte[] value = record.value();
    if (value != null) {
        flowFile = session.write(flowFile, out -> {
            out.write(value);
        });
    }
    flowFile = session.putAllAttributes(flowFile, getAttributes(record));
    tracker.updateFlowFile(flowFile);
    populateAttributes(tracker);
    session.transfer(tracker.flowFile, REL_SUCCESS);
}
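getAttributes(record) and populateAttributes(tracker) are defined elsewhere in ConsumerLease. A hypothetical sketch of the kind of map the former might build from record metadata (the attribute keys here are assumptions, not necessarily NiFi's actual names):

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;

// Hypothetical helper: exposes the record's coordinates as FlowFile attributes.
private Map<String, String> getAttributes(final ConsumerRecord<byte[], byte[]> record) {
    final Map<String, String> attributes = new HashMap<>();
    attributes.put("kafka.topic", record.topic());
    attributes.put("kafka.partition", String.valueOf(record.partition()));
    attributes.put("kafka.offset", String.valueOf(record.offset()));
    return attributes;
}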
use of org.apache.kafka.clients.consumer.ConsumerRecord in project ksql by confluentinc.
the class ConsumerCollector method addSensor.
private void addSensor(String key, String metricNameString, MeasurableStat stat, List<TopicSensors.SensorMetric<ConsumerRecord>> sensors, boolean isError, Function<ConsumerRecord, Double> recordValue) {
    String name = "cons-" + key + "-" + metricNameString + "-" + id;
    MetricName metricName = new MetricName(metricNameString, "consumer-metrics", "consumer-" + name, ImmutableMap.of("key", key, "id", id));
    Sensor existingSensor = metrics.getSensor(name);
    Sensor sensor = metrics.sensor(name);
    // re-use the existing measurable stats to share between consumers
    if (existingSensor == null || metrics.metrics().get(metricName) == null) {
        sensor.add(metricName, stat);
    }
    KafkaMetric metric = metrics.metrics().get(metricName);
    sensors.add(new TopicSensors.SensorMetric<ConsumerRecord>(sensor, metric, time, isError) {

        void record(ConsumerRecord record) {
            sensor.record(recordValue.apply(record));
            super.record(record);
        }
    });
}
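A hedged example of how this helper might be invoked from elsewhere in ConsumerCollector (the key, metric name, and per-record weight of 1.0 are assumptions):

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.metrics.stats.Rate;

// Illustrative call: a per-topic rate sensor that counts each consumed record as 1.0.
List<TopicSensors.SensorMetric<ConsumerRecord>> sensors = new ArrayList<>();
addSensor("my-topic", "messages-per-sec", new Rate(), sensors, false, record -> 1.0);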