Use of org.apache.kafka.clients.consumer.Consumer in project brave by openzipkin.
In class TracingConsumerTest, the method should_create_only_one_consumer_span_per_topic:
@Test
public void should_create_only_one_consumer_span_per_topic() {
  Map<TopicPartition, Long> offsets = new HashMap<>();
  // 2 partitions in the same topic
  offsets.put(new TopicPartition(TEST_TOPIC, 0), 0L);
  offsets.put(new TopicPartition(TEST_TOPIC, 1), 0L);
  consumer.updateBeginningOffsets(offsets);
  consumer.assign(offsets.keySet());

  // add 250 records to each partition: 500 messages in total
  for (int i = 0; i < 250; i++) {
    consumer.addRecord(new ConsumerRecord<>(TEST_TOPIC, 0, i, TEST_KEY, TEST_VALUE));
    consumer.addRecord(new ConsumerRecord<>(TEST_TOPIC, 1, i, TEST_KEY, TEST_VALUE));
  }

  Consumer<String, String> tracingConsumer = kafkaTracing.consumer(consumer);
  tracingConsumer.poll(10);

  // only one consumer span is reported, tagged with the topic name
  assertThat(spans)
      .hasSize(1)
      .flatExtracting(s -> s.tags().entrySet())
      .containsOnly(entry("kafka.topic", "myTopic"));
}
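The test relies on fixture fields (consumer, kafkaTracing, spans) that are set up elsewhere in TracingConsumerTest. A minimal sketch of what that setup could look like, assuming brave's KafkaTracing API and Kafka's MockConsumer; the field names mirror the test, but the wiring here is an assumption, not the actual fixture:

// hypothetical fixture wiring, not the actual TracingConsumerTest setup
List<zipkin2.Span> spans = new ArrayList<>(); // collects finished spans for assertions
Tracing tracing = Tracing.newBuilder()
    .spanReporter(spans::add) // report spans into the in-memory list
    .build();
KafkaTracing kafkaTracing = KafkaTracing.create(tracing);
MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);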
Use of org.apache.kafka.clients.consumer.Consumer in project incubator-gobblin by apache.
In class KafkaSimpleStreamingSource, the method getKafkaConsumer:
public static Consumer getKafkaConsumer(Config config) {
  List<String> brokers = ConfigUtils.getStringList(config, ConfigurationKeys.KAFKA_BROKERS);
  Properties props = new Properties();
  props.put("bootstrap.servers", Joiner.on(",").join(brokers));
  props.put("group.id", ConfigUtils.getString(config, ConfigurationKeys.JOB_NAME_KEY, StringUtils.EMPTY));
  props.put("enable.auto.commit", "false");
  Preconditions.checkArgument(config.hasPath(TOPIC_KEY_DESERIALIZER));
  props.put("key.deserializer", config.getString(TOPIC_KEY_DESERIALIZER));
  Preconditions.checkArgument(config.hasPath(TOPIC_VALUE_DESERIALIZER));
  props.put("value.deserializer", config.getString(TOPIC_VALUE_DESERIALIZER));
  // pass along any config scoped under source.kafka.config;
  // one use case of this is to pass SSL configuration
  Config scopedConfig = ConfigUtils.getConfigOrEmpty(config, KAFKA_CONSUMER_CONFIG_PREFIX);
  props.putAll(ConfigUtils.configToProperties(scopedConfig));
  Consumer consumer = null;
  try {
    consumer = new KafkaConsumer<>(props);
  } catch (Exception e) {
    // pass the exception as the last argument with no placeholder,
    // so SLF4J logs the full stack trace rather than just e.toString()
    LOG.error("Exception when creating Kafka consumer", e);
    throw Throwables.propagate(e);
  }
  return consumer;
}
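A hypothetical invocation of this factory, assuming the config keys shown above; the broker address and deserializer class names are placeholders, and the broker list must be supplied in whatever form ConfigUtils.getStringList accepts:

// hypothetical usage; keys and values are illustrative only
Config config = ConfigFactory.parseMap(ImmutableMap.of(
    ConfigurationKeys.KAFKA_BROKERS, "localhost:9092",
    TOPIC_KEY_DESERIALIZER, "org.apache.kafka.common.serialization.StringDeserializer",
    TOPIC_VALUE_DESERIALIZER, "org.apache.kafka.common.serialization.ByteArrayDeserializer"));
Consumer consumer = KafkaSimpleStreamingSource.getKafkaConsumer(config);

Note that enable.auto.commit is hard-coded to "false", so the caller of this factory owns offset commits.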
Use of org.apache.kafka.clients.consumer.Consumer in project apache-kafka-on-k8s by banzaicloud.
In class AbstractTaskTest, the method shouldNotAttemptToLockIfNoStores:
@Test
public void shouldNotAttemptToLockIfNoStores() {
  final Consumer consumer = EasyMock.createNiceMock(Consumer.class);
  EasyMock.replay(stateDirectory);

  final AbstractTask task = createTask(consumer, Collections.<StateStore, String>emptyMap());
  task.registerStateStores();

  // should fail if lock is called
  EasyMock.verify(stateDirectory);
}
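The verification works because stateDirectory is replayed with no recorded expectations, so any interaction with it would fail the test. An illustrative sketch of that EasyMock pattern, standing apart from the test's own code:

// illustrative EasyMock pattern: a mock replayed with no expectations
// rejects every call, so verify() passes only if nothing touched it
StateDirectory stateDirectory = EasyMock.createMock(StateDirectory.class);
EasyMock.replay(stateDirectory);  // no expectations recorded
// any stateDirectory method call here would throw an AssertionError
EasyMock.verify(stateDirectory);  // passes: no interactions happened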
Use of org.apache.kafka.clients.consumer.Consumer in project apache-kafka-on-k8s by banzaicloud.
In class AbstractTaskTest, the method shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenAuthorizationException:
@Test(expected = ProcessorStateException.class)
public void shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenAuthorizationException() {
  final Consumer consumer = mockConsumer(new AuthorizationException("blah"));
  final AbstractTask task = createTask(consumer, Collections.<StateStore, String>emptyMap());
  task.updateOffsetLimits();
}
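A plausible shape for the mockConsumer(...) helper used above, assuming it returns a MockConsumer whose committed-offset lookup throws the supplied exception; this is an assumption about the test class, not confirmed code:

// hypothetical helper: throw the given exception when offsets are fetched,
// so updateOffsetLimits() surfaces it and the task wraps it appropriately
private Consumer mockConsumer(final RuntimeException toThrow) {
  return new MockConsumer(OffsetResetStrategy.EARLIEST) {
    @Override
    public OffsetAndMetadata committed(final TopicPartition partition) {
      throw toThrow;
    }
  };
}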
Use of org.apache.kafka.clients.consumer.Consumer in project core-ng-project by neowu.
In class KafkaMessageListenerThread, the method process:
private void process(Consumer<String, byte[]> consumer, ConsumerRecords<String, byte[]> kafkaRecords) {
    StopWatch watch = new StopWatch();
    int count = 0;
    int size = 0;
    try {
        // group records by topic, preserving arrival order
        Map<String, List<ConsumerRecord<String, byte[]>>> messages = Maps.newLinkedHashMap();
        for (ConsumerRecord<String, byte[]> record : kafkaRecords) {
            messages.computeIfAbsent(record.topic(), key -> Lists.newArrayList()).add(record);
            count++;
            size += record.value().length;
        }
        // dispatch each topic's records to its bulk handler if one is
        // registered, otherwise to its per-message handler
        for (Map.Entry<String, List<ConsumerRecord<String, byte[]>>> entry : messages.entrySet()) {
            String topic = entry.getKey();
            List<ConsumerRecord<String, byte[]>> records = entry.getValue();
            KafkaMessageListener.BulkMessageHandlerHolder<?> bulkHandlerHolder = bulkHandlerHolders.get(topic);
            if (bulkHandlerHolder != null) {
                handle(topic, bulkHandlerHolder, records, longProcessThreshold(batchLongProcessThresholdInNano, records.size(), count));
            } else {
                KafkaMessageListener.MessageHandlerHolder<?> handlerHolder = handlerHolders.get(topic);
                if (handlerHolder != null) {
                    handle(topic, handlerHolder, records, longProcessThreshold(batchLongProcessThresholdInNano, 1, count));
                }
            }
        }
    } finally {
        // commit once per poll, regardless of how handling went
        consumer.commitAsync();
        logger.info("process kafka records, count={}, size={}, elapsedTime={}", count, size, watch.elapsedTime());
    }
}
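The longProcessThreshold(...) helper is not shown. One plausible reading, given how it is called, is that it prorates the per-batch slow-processing threshold by the number of records a handler processes in one invocation: a bulk handler gets records.size() shares, a per-message handler gets 1. A sketch under that assumption, not core-ng's actual implementation:

// hypothetical proration of the batch threshold across handler invocations
private double longProcessThreshold(double batchThresholdInNano, int recordCount, int totalCount) {
    return batchThresholdInNano * recordCount / totalCount;
}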