Use of kafka.consumer.ConsumerConfig in project Pinot by LinkedIn.
The class KafkaHighLevelStreamProviderConfig, method getKafkaConsumerConfig.
public ConsumerConfig getKafkaConsumerConfig() {
    Properties props = new Properties();
    for (String key : defaultProps.keySet()) {
        props.put(key, defaultProps.get(key));
    }
    for (String key : kafkaConsumerProps.keySet()) {
        props.put(key, kafkaConsumerProps.get(key));
    }
    props.put("group.id", groupId);
    props.put("zookeeper.connect", zkString);
    return new ConsumerConfig(props);
}
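The config built here feeds Kafka's old high-level consumer, as the other examples on this page show. A minimal usage sketch, assuming a hypothetical streamProviderConfig instance of the class above:

// Sketch only: wire the config above into the old high-level consumer API.
ConsumerConfig config = streamProviderConfig.getKafkaConsumerConfig(); // streamProviderConfig is hypothetical
ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);
// ... createMessageStreams(...), consume, then release resources:
connector.shutdown();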
Use of kafka.consumer.ConsumerConfig in project Flink by Apache.
The class KafkaConsumerTestBase, method printTopic.
private static void printTopic(String topicName, int elements, DeserializationSchema<?> deserializer) throws IOException {
    // write the sequence to log for debugging purposes
    Properties newProps = new Properties(standardProps);
    newProps.setProperty("group.id", "topic-printer" + UUID.randomUUID().toString());
    newProps.setProperty("auto.offset.reset", "smallest");
    newProps.setProperty("zookeeper.connect", standardProps.getProperty("zookeeper.connect"));
    newProps.putAll(secureProps);
    ConsumerConfig printerConfig = new ConsumerConfig(newProps);
    printTopic(topicName, printerConfig, deserializer, elements);
}
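Note that new Properties(standardProps) installs standardProps as a defaults table rather than copying its entries: getProperty() falls back to the defaults, but the defaulted keys do not show up in keySet() or containsKey(), which may be why the snippet re-sets zookeeper.connect explicitly. A small self-contained illustration of that behavior:

import java.util.Properties;

public class PropertiesDefaultsDemo {
    public static void main(String[] args) {
        Properties defaults = new Properties();
        defaults.setProperty("zookeeper.connect", "localhost:2181");

        Properties props = new Properties(defaults); // defaults table, not a copy
        System.out.println(props.getProperty("zookeeper.connect")); // localhost:2181
        System.out.println(props.containsKey("zookeeper.connect")); // false
    }
}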
Use of kafka.consumer.ConsumerConfig in project open-kilda by Telstra.
The class SimpleKafkaTest, method buildConsumer.
private ConsumerIterator<String, String> buildConsumer(String topic) {
    Properties props = consumerProperties();
    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(topic, 1);
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, List<KafkaStream<String, String>>> consumers = consumerConnector.createMessageStreams(topicCountMap, new StringDecoder(null), new StringDecoder(null));
    KafkaStream<String, String> stream = consumers.get(topic).get(0);
    return stream.iterator();
}
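The iterator returned above yields kafka.message.MessageAndMetadata records. A minimal consumption sketch, assuming the buildConsumer method above (the topic name is a placeholder):

// Sketch: drain records from the iterator built above.
ConsumerIterator<String, String> it = buildConsumer("my-topic"); // placeholder topic
while (it.hasNext()) { // blocks for the next message unless consumer.timeout.ms is set
    MessageAndMetadata<String, String> record = it.next();
    System.out.printf("offset=%d key=%s value=%s%n", record.offset(), record.key(), record.message());
}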
Use of kafka.consumer.ConsumerConfig in project NiFi by Apache.
The class PutKafkaTest, method buildConsumer.
private ConsumerIterator<byte[], byte[]> buildConsumer(String topic) {
    Properties props = new Properties();
    props.put("zookeeper.connect", "0.0.0.0:" + kafkaLocal.getZookeeperPort());
    props.put("group.id", "test");
    props.put("consumer.timeout.ms", "5000");
    props.put("auto.offset.reset", "smallest");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<>(1);
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    return streams.get(0).iterator();
}
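With consumer.timeout.ms set to 5000, hasNext() throws kafka.consumer.ConsumerTimeoutException once no message arrives for five seconds, which gives tests a natural end-of-input signal. A hedged sketch of draining a topic under that timeout (the topic name and collection are illustrative):

// Sketch: collect messages until the 5 s consumer timeout fires.
List<byte[]> received = new ArrayList<>();
ConsumerIterator<byte[], byte[]> iter = buildConsumer("test-topic"); // placeholder topic
try {
    while (iter.hasNext()) {
        received.add(iter.next().message());
    }
} catch (ConsumerTimeoutException e) {
    // Expected once the topic is drained; treat as end of test input.
}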
Use of kafka.consumer.ConsumerConfig in project NiFi by Apache.
The class GetKafka, method createConsumers.
public void createConsumers(final ProcessContext context) {
    final String topic = context.getProperty(TOPIC).evaluateAttributeExpressions().getValue();
    final Properties props = new Properties();
    props.setProperty("zookeeper.connect", context.getProperty(ZOOKEEPER_CONNECTION_STRING).evaluateAttributeExpressions().getValue());
    props.setProperty("group.id", context.getProperty(GROUP_ID).evaluateAttributeExpressions().getValue());
    props.setProperty("client.id", context.getProperty(CLIENT_NAME).getValue());
    props.setProperty("auto.commit.interval.ms", String.valueOf(context.getProperty(ZOOKEEPER_COMMIT_DELAY).asTimePeriod(TimeUnit.MILLISECONDS)));
    props.setProperty("auto.offset.reset", context.getProperty(AUTO_OFFSET_RESET).getValue());
    props.setProperty("zookeeper.connection.timeout.ms", context.getProperty(ZOOKEEPER_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).toString());
    props.setProperty("socket.timeout.ms", context.getProperty(KAFKA_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).toString());
    for (final Entry<PropertyDescriptor, String> entry : context.getProperties().entrySet()) {
        PropertyDescriptor descriptor = entry.getKey();
        if (descriptor.isDynamic()) {
            if (props.containsKey(descriptor.getName())) {
                this.getLogger().warn("Overriding existing property '" + descriptor.getName() + "' which had a value of '"
                        + props.getProperty(descriptor.getName()) + "' with dynamically set value '" + entry.getValue() + "'.");
            }
            props.setProperty(descriptor.getName(), entry.getValue());
        }
    }
    /*
     * Unless the user sets it explicitly, set 'consumer.timeout.ms' to the
     * lowest possible value of 1 millisecond so that
     * consumerStream.hasNext() doesn't block. See
     * http://kafka.apache.org/documentation.html#configuration as well as
     * the comment in 'catch ConsumerTimeoutException' in onTrigger() for
     * the reasoning behind it.
     */
    if (!props.containsKey("consumer.timeout.ms")) {
        this.getLogger().info("Setting 'consumer.timeout.ms' to 1 millisecond to avoid blocking the consumer"
                + " when no events are present in the Kafka topic. If you wish to change this value,"
                + " set it as a dynamic property. If you wish to explicitly enable consumer blocking (at your own risk),"
                + " set its value to -1.");
        props.setProperty("consumer.timeout.ms", "1");
    }
    int partitionCount = KafkaUtils.retrievePartitionCountForTopic(
            context.getProperty(ZOOKEEPER_CONNECTION_STRING).evaluateAttributeExpressions().getValue(),
            context.getProperty(TOPIC).evaluateAttributeExpressions().getValue());
    final ConsumerConfig consumerConfig = new ConsumerConfig(props);
    consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    final Map<String, Integer> topicCountMap = new HashMap<>(1);
    int concurrentTaskToUse = context.getMaxConcurrentTasks();
    if (context.getMaxConcurrentTasks() < partitionCount) {
        this.getLogger().warn("The number of concurrent tasks '" + context.getMaxConcurrentTasks() + "' configured for this processor"
                + " is less than the number of partitions '" + partitionCount + "' for topic '"
                + context.getProperty(TOPIC).evaluateAttributeExpressions().getValue() + "'."
                + " Consider making it equal to the partition count for the most efficient event consumption.");
    } else if (context.getMaxConcurrentTasks() > partitionCount) {
        concurrentTaskToUse = partitionCount;
        this.getLogger().warn("The number of concurrent tasks '" + context.getMaxConcurrentTasks() + "' configured for this processor"
                + " is greater than the number of partitions '" + partitionCount + "' for topic '"
                + context.getProperty(TOPIC).evaluateAttributeExpressions().getValue() + "',"
                + " so the excess tasks would never see a message. To avoid that, the partition count '" + partitionCount
                + "' will be used to consume events.");
    }
    topicCountMap.put(topic, concurrentTaskToUse);
    final Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    final List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    this.streamIterators.clear();
    for (final KafkaStream<byte[], byte[]> stream : streams) {
        streamIterators.add(stream.iterator());
    }
    this.consumerStreamsReady.set(true);
}
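The block comment above refers to the ConsumerTimeoutException handling in onTrigger(), which is not reproduced on this page. A hedged sketch of the polling pattern the 1 ms timeout enables (streamIterators matches the field used above; everything else is illustrative, not the actual NiFi code):

// Illustrative sketch, not the actual GetKafka.onTrigger() body.
ConsumerIterator<byte[], byte[]> iterator = streamIterators.get(0); // one iterator per concurrent task
try {
    if (iterator.hasNext()) { // returns within ~1 ms because consumer.timeout.ms=1
        byte[] message = iterator.next().message();
        // ... write the message to a FlowFile and transfer it ...
    }
} catch (ConsumerTimeoutException e) {
    // No message within the timeout: yield and poll again on the next trigger.
}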