Use of kafka.javaapi.consumer.SimpleConsumer in project presto by prestodb.
The class KafkaSplitManager, method getSplits.
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout) {
    KafkaTableHandle kafkaTableHandle = convertLayout(layout).getTable();
    SimpleConsumer simpleConsumer = consumerManager.getConsumer(selectRandom(nodes));
    TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(ImmutableList.of(kafkaTableHandle.getTopicName()));
    TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest);
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    for (TopicMetadata metadata : topicMetadataResponse.topicsMetadata()) {
        for (PartitionMetadata part : metadata.partitionsMetadata()) {
            log.debug("Adding Partition %s/%s", metadata.topic(), part.partitionId());
            Broker leader = part.leader();
            if (leader == null) {
                // Leader election going on...
                log.warn("No leader for partition %s/%s found!", metadata.topic(), part.partitionId());
                continue;
            }
            HostAddress partitionLeader = HostAddress.fromParts(leader.host(), leader.port());
            SimpleConsumer leaderConsumer = consumerManager.getConsumer(partitionLeader);
            // Kafka contains a reverse list of "end - start" pairs for the splits
            long[] offsets = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId());
            for (int i = offsets.length - 1; i > 0; i--) {
                KafkaSplit split = new KafkaSplit(connectorId, metadata.topic(), kafkaTableHandle.getKeyDataFormat(), kafkaTableHandle.getMessageDataFormat(), part.partitionId(), offsets[i], offsets[i - 1], partitionLeader);
                splits.add(split);
            }
        }
    }
    return new FixedSplitSource(splits.build());
}
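The helper findAllOffsets is not part of the excerpt above. A minimal sketch of how such a lookup can be written against the same SimpleConsumer API, assuming kafka.javaapi.OffsetRequest/OffsetResponse, kafka.common.TopicAndPartition, kafka.api.PartitionOffsetRequestInfo, and Guava's ImmutableMap; the broker returns the offsets newest-first, which is why the loop above walks the array from the end. This is an illustration, not the project's actual helper.

// Hypothetical sketch: fetch every known offset for the partition from its
// leader, newest first, so adjacent entries form the "end - start" pairs
// consumed by the loop above.
private static long[] findAllOffsets(SimpleConsumer consumer, String topicName, int partitionId) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);
    // LatestTime() with Integer.MAX_VALUE asks for all available offsets, not just the newest one.
    PartitionOffsetRequestInfo requestInfo = new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE);
    OffsetRequest offsetRequest = new OffsetRequest(ImmutableMap.of(topicAndPartition, requestInfo), kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
    // Offsets come back in descending order; offsets[0] is the current end of the log.
    return offsetResponse.offsets(topicName, partitionId);
}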
Use of kafka.javaapi.consumer.SimpleConsumer in project jstorm by alibaba.
The class KafkaConsumer, method getOffset.
public long getOffset(String topic, int partition, long startOffsetTime) {
    SimpleConsumer simpleConsumer = findLeaderConsumer(partition);
    if (simpleConsumer == null) {
        LOG.error("Error consumer is null get offset from partition:" + partition);
        return -1;
    }
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
    OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), simpleConsumer.clientId());
    long[] offsets = simpleConsumer.getOffsetsBefore(request).offsets(topic, partition);
    if (offsets.length > 0) {
        return offsets[0];
    } else {
        return NO_OFFSET;
    }
}
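For reference, startOffsetTime accepts either a wall-clock timestamp in milliseconds or one of the two sentinel values defined by kafka.api.OffsetRequest. A hedged usage sketch; the kafkaConsumer instance, topic name "my-topic", and partition 0 are purely illustrative.

// EarliestTime() (-2) resolves to the oldest offset still available on the broker;
// LatestTime() (-1) resolves to the offset the next produced message will receive.
long earliest = kafkaConsumer.getOffset("my-topic", 0, kafka.api.OffsetRequest.EarliestTime());
long latest = kafkaConsumer.getOffset("my-topic", 0, kafka.api.OffsetRequest.LatestTime());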
Use of kafka.javaapi.consumer.SimpleConsumer in project jstorm by alibaba.
The class KafkaConsumer, method findLeader.
protected PartitionMetadata findLeader(int partition) {
    PartitionMetadata returnMetaData = null;
    int errors = 0;
    int size = brokerList.size();
    Host brokerHost = brokerList.get(brokerIndex);
    try {
        if (consumer == null) {
            consumer = new SimpleConsumer(brokerHost.getHost(), brokerHost.getPort(), config.socketTimeoutMs, config.socketReceiveBufferBytes, config.clientId);
        }
    } catch (Exception e) {
        LOG.warn(e.getMessage(), e);
        consumer = null;
    }
    int i = brokerIndex;
    loop: while (i < size && errors < size + 1) {
        Host host = brokerList.get(i);
        i = (i + 1) % size;
        // next index
        brokerIndex = i;
        try {
            if (consumer == null) {
                consumer = new SimpleConsumer(host.getHost(), host.getPort(), config.socketTimeoutMs, config.socketReceiveBufferBytes, config.clientId);
            }
            List<String> topics = Collections.singletonList(config.topic);
            TopicMetadataRequest req = new TopicMetadataRequest(topics);
            kafka.javaapi.TopicMetadataResponse resp = null;
            try {
                resp = consumer.send(req);
            } catch (Exception e) {
                errors += 1;
                LOG.error("findLeader error, broker:" + host.toString() + ", will change to next broker index:" + (i + 1) % size);
                if (consumer != null) {
                    consumer.close();
                    consumer = null;
                }
                continue;
            }
            List<TopicMetadata> metaData = resp.topicsMetadata();
            for (TopicMetadata item : metaData) {
                for (PartitionMetadata part : item.partitionsMetadata()) {
                    if (part.partitionId() == partition) {
                        returnMetaData = part;
                        break loop;
                    }
                }
            }
        } catch (Exception e) {
            LOG.error("Error communicating with Broker:" + host.toString() + ", find Leader for partition:" + partition);
        } finally {
            if (consumer != null) {
                consumer.close();
                consumer = null;
            }
        }
    }
    return returnMetaData;
}
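The getOffset excerpt above depends on findLeaderConsumer, which is not shown. A minimal sketch of how the PartitionMetadata returned by findLeader can be turned into a consumer connected to the leader broker, assuming the same config fields used above and the kafka.cluster.Broker accessors seen in the presto excerpt; this is an illustration, not the project's actual implementation.

// Hypothetical helper: open a SimpleConsumer against the partition leader
// discovered by findLeader(partition). Returns null when no leader is known
// (for example, while leader election is in progress).
protected SimpleConsumer findLeaderConsumer(int partition) {
    PartitionMetadata metadata = findLeader(partition);
    if (metadata == null || metadata.leader() == null) {
        return null;
    }
    Broker leader = metadata.leader();
    return new SimpleConsumer(leader.host(), leader.port(), config.socketTimeoutMs, config.socketReceiveBufferBytes, config.clientId);
}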
Use of kafka.javaapi.consumer.SimpleConsumer in project bagheera by mozilla-metrics.
The class ProducerTest, method countMessages.
private int countMessages() throws InvalidProtocolBufferException {
    SimpleConsumer consumer = new SimpleConsumer("localhost", KAFKA_BROKER_PORT, 100, 1024);
    long offset = 0L;
    int messageCount = 0;
    for (int i = 0; i < BATCH_SIZE; i++) {
        ByteBufferMessageSet messageSet = consumer.fetch(new FetchRequest(KAFKA_TOPIC, 0, offset, 1024));
        Iterator<MessageAndOffset> iterator = messageSet.iterator();
        MessageAndOffset msgAndOff;
        while (iterator.hasNext()) {
            messageCount++;
            msgAndOff = iterator.next();
            offset = msgAndOff.offset();
            Message message2 = msgAndOff.message();
            BagheeraMessage bmsg = BagheeraMessage.parseFrom(ByteString.copyFrom(message2.payload()));
            String payload = new String(bmsg.getPayload().toByteArray());
            System.out.println(String.format("Message %d @%d: %s", messageCount, offset, payload));
        }
    }
    consumer.close();
    return messageCount;
}
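Note that this excerpt uses the older pre-0.8 javaapi: the four-argument SimpleConsumer constructor (host, port, soTimeout, bufferSize) and the (topic, partition, offset, maxSize) FetchRequest, with MessageAndOffset.offset() fed back in as the next fetch position. A hedged usage sketch of how such a helper might be exercised, assuming org.junit.Test and Assert.assertEquals are imported; the test method and producer setup are illustrative, not the project's actual test.

// Hypothetical JUnit usage: after the producer under test has sent
// BATCH_SIZE messages to KAFKA_TOPIC, the consumer-side count should match.
@Test
public void producedMessagesAreCounted() throws Exception {
    // ... produce BATCH_SIZE messages to KAFKA_TOPIC and flush the producer ...
    assertEquals(BATCH_SIZE, countMessages());
}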
Use of kafka.javaapi.consumer.SimpleConsumer in project heron by twitter.
The class KafkaUtilsTest, method brokerIsDown.
@Test(expected = FailedFetchException.class)
public void brokerIsDown() throws Exception {
    int port = broker.getPort();
    broker.shutdown();
    SimpleConsumer aSimpleConsumer = new SimpleConsumer("localhost", port, 100, 1024, "testClient");
    try {
        KafkaUtils.fetchMessages(config, aSimpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), TEST_TOPIC, 0), OffsetRequest.LatestTime());
    } finally {
        aSimpleConsumer.close();
    }
}
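The test expects KafkaUtils.fetchMessages to surface the connection failure as a FailedFetchException. A simplified sketch of that failure path against the raw SimpleConsumer API, using kafka.api.FetchRequestBuilder and assuming a FailedFetchException constructor that wraps the underlying exception; it illustrates the pattern, not heron's actual implementation.

// Hypothetical sketch: a fetch against the shut-down broker fails at the
// socket level, and the helper re-throws the error as a FailedFetchException.
private static ByteBufferMessageSet fetchOrFail(SimpleConsumer consumer, String topic, int partition, long offset) {
    FetchRequest request = new FetchRequestBuilder()
            .clientId(consumer.clientId())
            .addFetch(topic, partition, offset, 1024)
            .build();
    try {
        return consumer.fetch(request).messageSet(topic, partition);
    } catch (Exception e) {
        throw new FailedFetchException(e);
    }
}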