Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.
From the class AbstractTaskTest, the method shouldThrowWakeupExceptionOnInitializeOffsetsWhenWakeupException:
@Test(expected = WakeupException.class)
public void shouldThrowWakeupExceptionOnInitializeOffsetsWhenWakeupException() throws Exception {
    final Consumer consumer = mockConsumer(new WakeupException());
    final AbstractTask task = createTask(consumer);
    task.initializeOffsetLimits();
}
Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.
From the class AbstractTaskTest, the method shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenKafkaException:
@Test(expected = ProcessorStateException.class)
public void shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenKafkaException() throws Exception {
    final Consumer consumer = mockConsumer(new KafkaException("blah"));
    final AbstractTask task = createTask(consumer);
    task.initializeOffsetLimits();
}
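Both tests depend on helpers the excerpt omits: mockConsumer, which builds a consumer that fails on offset lookups, and createTask, which wraps it in a concrete AbstractTask. A plausible sketch of mockConsumer, assuming it stubs the offset lookup with org.apache.kafka.clients.consumer.MockConsumer:

// Hypothetical helper, not shown in the excerpt: a MockConsumer whose
// committed() lookup throws the supplied exception, which
// initializeOffsetLimits() is then expected to surface or wrap.
private Consumer mockConsumer(final RuntimeException toThrow) {
    return new MockConsumer(OffsetResetStrategy.EARLIEST) {
        @Override
        public OffsetAndMetadata committed(final TopicPartition partition) {
            throw toThrow;
        }
    };
}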
Use of org.apache.kafka.clients.consumer.Consumer in project metron by apache.
From the class KafkaServiceImpl, the method getSampleMessage:
@Override
public String getSampleMessage(final String topic) {
    String message = null;
    if (listTopics().contains(topic)) {
        try (Consumer<String, String> kafkaConsumer = kafkaConsumerFactory.createConsumer()) {
            // Manually assign every partition of the topic.
            kafkaConsumer.assign(kafkaConsumer.partitionsFor(topic).stream()
                .map(partitionInfo -> new TopicPartition(topic, partitionInfo.partition()))
                .collect(Collectors.toList()));
            // Rewind each non-empty partition by one record so the next poll
            // returns the most recent message.
            kafkaConsumer.assignment().stream()
                .filter(p -> (kafkaConsumer.position(p) - 1) >= 0)
                .forEach(p -> kafkaConsumer.seek(p, kafkaConsumer.position(p) - 1));
            final ConsumerRecords<String, String> records = kafkaConsumer.poll(KAFKA_CONSUMER_TIMEOUT);
            message = records.isEmpty() ? null : records.iterator().next().value();
            kafkaConsumer.unsubscribe();
        }
    }
    return message;
}
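The core pattern here is "read the latest record from a topic": assign all partitions, seek to one before the end, and poll once. A minimal standalone sketch of the same pattern, assuming string-serialized messages and using the newer Duration-based poll; the class name, method name, and broker address parameter are placeholders:

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class LatestMessageReader {
    public static String readLatest(final String bootstrapServers, final String topic) {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Manual assignment needs no group.id and no subscription.
            final List<TopicPartition> partitions = consumer.partitionsFor(topic).stream()
                .map(info -> new TopicPartition(topic, info.partition()))
                .collect(Collectors.toList());
            consumer.assign(partitions);
            // Start from the log end, then step back one record on each
            // non-empty partition.
            consumer.seekToEnd(partitions);
            for (final TopicPartition p : partitions) {
                final long end = consumer.position(p);
                if (end > 0) {
                    consumer.seek(p, end - 1);
                }
            }
            final ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            return records.isEmpty() ? null : records.iterator().next().value();
        }
    }
}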
Use of org.apache.kafka.clients.consumer.Consumer in project cruise-control by linkedin.
From the class KafkaSampleStore, the method loadSamples:
@Override
public void loadSamples(SampleLoader sampleLoader) {
    LOG.info("Starting loading samples.");
    long startMs = System.currentTimeMillis();
    AtomicLong numPartitionMetricSamples = new AtomicLong(0L);
    AtomicLong numBrokerMetricSamples = new AtomicLong(0L);
    AtomicLong totalSamples = new AtomicLong(0L);
    AtomicLong numLoadedSamples = new AtomicLong(0L);
    try {
        prepareConsumers();
        // One loader task per consumer; the shared AtomicLongs aggregate progress.
        for (KafkaConsumer<byte[], byte[]> consumer : _consumers) {
            _metricProcessorExecutor.submit(new MetricLoader(consumer, sampleLoader, numLoadedSamples,
                numPartitionMetricSamples, numBrokerMetricSamples, totalSamples));
        }
        // Block until the metric loading finishes.
        _metricProcessorExecutor.shutdown();
        _metricProcessorExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    } catch (Exception e) {
        LOG.error("Received exception when loading samples", e);
    } finally {
        _consumers.forEach(Consumer::close);
        try {
            _metricProcessorExecutor.awaitTermination(30000, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            LOG.warn("Interrupted while waiting for the metrics processor to shut down.");
        }
    }
    LOG.info("Sample loading finished. Loaded {} partition metric samples and {} broker metric samples in {} ms",
        numPartitionMetricSamples, numBrokerMetricSamples, System.currentTimeMillis() - startMs);
}
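The MetricLoader tasks submitted to the executor are not shown in this excerpt. A hypothetical skeleton of such a task, assuming each one drains its pre-assigned consumer up to the end offsets captured at startup and bumps a shared counter; the real cruise-control class also deserializes the raw bytes into partition and broker samples for the SampleLoader:

import java.time.Duration;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

// Hypothetical sketch of a MetricLoader-style task, not the real class.
class MetricLoaderSketch implements Runnable {
    private final KafkaConsumer<byte[], byte[]> consumer;
    private final AtomicLong numLoadedSamples;

    MetricLoaderSketch(KafkaConsumer<byte[], byte[]> consumer, AtomicLong numLoadedSamples) {
        this.consumer = consumer;
        this.numLoadedSamples = numLoadedSamples;
    }

    @Override
    public void run() {
        // Snapshot the end offsets once, then poll until every assigned
        // partition has reached them.
        final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(consumer.assignment());
        while (!caughtUp(endOffsets)) {
            final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<byte[], byte[]> record : records) {
                // The real loader would deserialize the sample here and hand
                // it to the SampleLoader.
                numLoadedSamples.incrementAndGet();
            }
        }
    }

    private boolean caughtUp(Map<TopicPartition, Long> endOffsets) {
        return endOffsets.entrySet().stream()
            .allMatch(e -> consumer.position(e.getKey()) >= e.getValue());
    }
}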
Use of org.apache.kafka.clients.consumer.Consumer in project apache-kafka-on-k8s by banzaicloud.
From the class RestoreIntegrationTest, the method createStateForRestoration:
private void createStateForRestoration() throws ExecutionException, InterruptedException {
    final Properties producerConfig = new Properties();
    producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    try (final KafkaProducer<Integer, Integer> producer =
             new KafkaProducer<>(producerConfig, new IntegerSerializer(), new IntegerSerializer())) {
        for (int i = 0; i < numberOfKeys; i++) {
            producer.send(new ProducerRecord<>(INPUT_STREAM, i, i));
        }
    }
    final Properties consumerConfig = new Properties();
    consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, applicationId);
    consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
    consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
    final Consumer<Integer, Integer> consumer = new KafkaConsumer<>(consumerConfig);
    final List<TopicPartition> partitions =
        Arrays.asList(new TopicPartition(INPUT_STREAM, 0), new TopicPartition(INPUT_STREAM, 1));
    consumer.assign(partitions);
    consumer.seekToEnd(partitions);
    // Commit offsets one past each partition's end offset under the
    // application's group id, so the input topic appears fully consumed.
    final Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    for (TopicPartition partition : partitions) {
        final long position = consumer.position(partition);
        offsets.put(partition, new OffsetAndMetadata(position + 1));
    }
    consumer.commitSync(offsets);
    consumer.close();
}
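To double-check what the snippet committed, the offsets can be read back through the same Consumer API. A minimal sketch, reusing consumerConfig and partitions from above; the single-argument committed(TopicPartition) shown here is the lookup variant from that Kafka era:

try (final Consumer<Integer, Integer> verifier = new KafkaConsumer<>(consumerConfig)) {
    for (final TopicPartition partition : partitions) {
        final OffsetAndMetadata committed = verifier.committed(partition);
        // Expect each partition's committed offset to be its end offset + 1.
        System.out.printf("%s committed at %d%n", partition, committed.offset());
    }
}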