use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project druid by druid-io.
the class KafkaSupervisor method getKafkaConsumer.
private KafkaConsumer<byte[], byte[]> getKafkaConsumer() {
  final Properties props = new Properties();
  props.setProperty("metadata.max.age.ms", "10000");
  props.setProperty("group.id", String.format("kafka-supervisor-%s", getRandomId()));
  props.putAll(ioConfig.getConsumerProperties());
  props.setProperty("enable.auto.commit", "false");
  ClassLoader currCtxCl = Thread.currentThread().getContextClassLoader();
  try {
    Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
    return new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer());
  } finally {
    Thread.currentThread().setContextClassLoader(currCtxCl);
  }
}
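Because the two ByteArrayDeserializer instances are passed directly to the constructor, the key.deserializer and value.deserializer properties do not need to appear in the Properties at all. A hedged usage sketch of such a consumer for metadata-only calls (the bootstrap address, group id, and topic below are placeholder assumptions, not Druid code):

Properties props = new Properties();
props.setProperty("bootstrap.servers", "localhost:9092"); // placeholder address
props.setProperty("group.id", "kafka-supervisor-example"); // placeholder group id
props.setProperty("enable.auto.commit", "false");
// Metadata-only usage: partitionsFor() needs no subscription and no poll().
try (KafkaConsumer<byte[], byte[]> consumer =
    new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
  for (PartitionInfo partitionInfo : consumer.partitionsFor("example-topic")) {
    System.out.println(partitionInfo.partition());
  }
}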
use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.
the class KafkaConsumerTest method testInvalidSocketSendBufferSize.
@Test(expected = KafkaException.class)
public void testInvalidSocketSendBufferSize() throws Exception {
  Map<String, Object> config = new HashMap<>();
  config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
  config.put(ConsumerConfig.SEND_BUFFER_CONFIG, -2);
  new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer());
}
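By contrast, -1 is the documented sentinel for "use the OS default" socket buffer size, so a configuration like the following sketch (the bootstrap address is a placeholder) passes validation; values below -1 fail it, which is what triggers the expected KafkaException above:

Map<String, Object> config = new HashMap<>();
config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
config.put(ConsumerConfig.SEND_BUFFER_CONFIG, -1);    // -1 => OS default send buffer
config.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, -1); // -1 => OS default receive buffer
// Construction only validates configuration; no broker connection is attempted yet.
try (KafkaConsumer<byte[], byte[]> consumer =
    new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
  // nothing to do; constructed successfully
}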
use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project drill by axbaretto.
the class KafkaGroupScan method init.
/**
* Computes work per topic partition, based on start and end offset of each
* corresponding topicPartition
*/
private void init() {
  partitionWorkList = Lists.newArrayList();
  Collection<DrillbitEndpoint> endpoints = kafkaStoragePlugin.getContext().getBits();
  Map<String, DrillbitEndpoint> endpointMap = Maps.newHashMap();
  for (DrillbitEndpoint endpoint : endpoints) {
    endpointMap.put(endpoint.getAddress(), endpoint);
  }
  Map<TopicPartition, Long> startOffsetsMap = Maps.newHashMap();
  Map<TopicPartition, Long> endOffsetsMap = Maps.newHashMap();
  List<PartitionInfo> topicPartitions = null;
  String topicName = kafkaScanSpec.getTopicName();
  try (KafkaConsumer<?, ?> kafkaConsumer = new KafkaConsumer<>(kafkaStoragePlugin.getConfig().getKafkaConsumerProps(),
      new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
    if (!kafkaConsumer.listTopics().keySet().contains(topicName)) {
      throw UserException.dataReadError().message("Table '%s' does not exist", topicName).build(logger);
    }
    kafkaConsumer.subscribe(Arrays.asList(topicName));
    // Per the KafkaConsumer JavaDoc, seekToBeginning/seekToEnd evaluate lazily:
    // the consumer seeks to the first/last offset in all partitions only when
    // poll(long) or position(TopicPartition) is called.
    kafkaConsumer.poll(0);
    Set<TopicPartition> assignments = kafkaConsumer.assignment();
    topicPartitions = kafkaConsumer.partitionsFor(topicName);
    // fetch start offsets for each TopicPartition
    kafkaConsumer.seekToBeginning(assignments);
    for (TopicPartition topicPartition : assignments) {
      startOffsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
    }
    // fetch end offsets for each TopicPartition
    kafkaConsumer.seekToEnd(assignments);
    for (TopicPartition topicPartition : assignments) {
      endOffsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
    }
  } catch (Exception e) {
    throw UserException.dataReadError(e).message("Failed to fetch start/end offsets of the topic %s", topicName)
        .addContext(e.getMessage()).build(logger);
  }
  // compute work for each endpoint
  for (PartitionInfo partitionInfo : topicPartitions) {
    TopicPartition topicPartition = new TopicPartition(topicName, partitionInfo.partition());
    long lastCommittedOffset = startOffsetsMap.get(topicPartition);
    long latestOffset = endOffsetsMap.get(topicPartition);
    logger.debug("Latest offset of {} is {}", topicPartition, latestOffset);
    logger.debug("Last committed offset of {} is {}", topicPartition, lastCommittedOffset);
    PartitionScanWork work = new PartitionScanWork(topicPartition, lastCommittedOffset, latestOffset);
    Node[] inSyncReplicas = partitionInfo.inSyncReplicas();
    for (Node isr : inSyncReplicas) {
      String host = isr.host();
      DrillbitEndpoint ep = endpointMap.get(host);
      if (ep != null) {
        work.getByteMap().add(ep, work.getTotalBytes());
      }
    }
    partitionWorkList.add(work);
  }
}
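On kafka-clients 0.10.1 and later, the same start/end offsets can be obtained without the subscribe/poll/seek/position dance by asking the brokers directly through beginningOffsets and endOffsets. A minimal sketch of that alternative (consumerProps, topicName, and logger are assumed to exist; this is not Drill code):

// Alternative offset lookup: no group membership or partition assignment required.
try (KafkaConsumer<byte[], byte[]> consumer =
    new KafkaConsumer<>(consumerProps, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
  List<TopicPartition> partitions = new ArrayList<>();
  for (PartitionInfo partitionInfo : consumer.partitionsFor(topicName)) {
    partitions.add(new TopicPartition(topicName, partitionInfo.partition()));
  }
  Map<TopicPartition, Long> startOffsets = consumer.beginningOffsets(partitions);
  Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
  logger.debug("start offsets: {}, end offsets: {}", startOffsets, endOffsets);
}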
use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project drill by axbaretto.
the class KafkaQueriesTest method fetchOffsets.
private Map<TopicPartition, Long> fetchOffsets(int flag) {
  KafkaConsumer<byte[], byte[]> kafkaConsumer = new KafkaConsumer<>(storagePluginConfig.getKafkaConsumerProps(),
      new ByteArrayDeserializer(), new ByteArrayDeserializer());
  Map<TopicPartition, Long> offsetsMap = Maps.newHashMap();
  kafkaConsumer.subscribe(Collections.singletonList(TestQueryConstants.JSON_TOPIC));
  // Per the KafkaConsumer JavaDoc, seekToBeginning/seekToEnd evaluate lazily:
  // the consumer seeks to the first/last offset in all partitions only when
  // poll(long) or position(TopicPartition) is called.
  kafkaConsumer.poll(0);
  Set<TopicPartition> assignments = kafkaConsumer.assignment();
  try {
    if (flag == -2) {
      // fetch start offsets for each TopicPartition
      kafkaConsumer.seekToBeginning(assignments);
      for (TopicPartition topicPartition : assignments) {
        offsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
      }
    } else if (flag == -1) {
      // fetch end offsets for each TopicPartition
      kafkaConsumer.seekToEnd(assignments);
      for (TopicPartition topicPartition : assignments) {
        offsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
      }
    } else {
      throw new RuntimeException(String.format("Unsupported flag %d", flag));
    }
  } finally {
    kafkaConsumer.close();
  }
  return offsetsMap;
}
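A hypothetical call site for this helper (not part of the original test); the -2/-1 flag values echo Kafka's conventional sentinels for earliest and latest offsets:

// -2 requests the beginning (earliest) offsets, -1 the end (latest) offsets.
Map<TopicPartition, Long> startOffsets = fetchOffsets(-2);
Map<TopicPartition, Long> endOffsets = fetchOffsets(-1);
assertEquals(startOffsets.keySet(), endOffsets.keySet());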
use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project apache-kafka-on-k8s by banzaicloud.
the class KafkaConsumerTest method testConstructorClose.
@Test
public void testConstructorClose() throws Exception {
  Properties props = new Properties();
  props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testConstructorClose");
  props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "invalid-23-8409-adsfsdj");
  props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
  final int oldInitCount = MockMetricsReporter.INIT_COUNT.get();
  final int oldCloseCount = MockMetricsReporter.CLOSE_COUNT.get();
  try {
    new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer());
    Assert.fail("should have caught an exception and returned");
  } catch (KafkaException e) {
    assertEquals(oldInitCount + 1, MockMetricsReporter.INIT_COUNT.get());
    assertEquals(oldCloseCount + 1, MockMetricsReporter.CLOSE_COUNT.get());
    assertEquals("Failed to construct kafka consumer", e.getMessage());
  }
}
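The pairing of the two counter assertions with the fixed message reflects the constructor's cleanup-on-failure behavior: when construction fails partway, already-created resources such as the metrics reporters are closed before a wrapped KafkaException is rethrown. A simplified sketch of that pattern, consistent with what the assertions imply (not the verbatim constructor code):

try {
  // ... instantiate metrics reporters (INIT_COUNT increments), fetcher, network client ...
} catch (Throwable t) {
  // Release whatever was already constructed (CLOSE_COUNT increments on the reporters),
  // then surface a single wrapped failure with the message the test asserts on.
  releasePartiallyConstructedResources(); // hypothetical name standing in for the real cleanup
  throw new KafkaException("Failed to construct kafka consumer", t);
}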