Use of kafka.javaapi.consumer.SimpleConsumer in project elasticsearch-river-kafka by endgameinc.
The connect method of the KafkaClient class.
void connect(String zk, String broker, int port) {
    try {
        consumer = new SimpleConsumer(broker, port, 1000, 1024 * 1024 * 10);
        curator = CuratorFrameworkFactory.newClient(zk, 1000, 15000, new RetryNTimes(5, 2000));
        curator.start();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
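The snippet opens both connections but does not show the matching teardown. Below is a minimal sketch of a companion close method, assuming the same consumer and curator fields; this helper is illustrative and not part of the original KafkaClient.
void close() {
    if (consumer != null) {
        // SimpleConsumer.close() shuts down the blocking channel to the broker
        consumer.close();
        consumer = null;
    }
    if (curator != null) {
        // CuratorFramework implements Closeable; close() releases the ZooKeeper connection
        curator.close();
        curator = null;
    }
}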
Use of kafka.javaapi.consumer.SimpleConsumer in project elasticsearch-river-kafka by endgameinc.
The setUp method of the KafkaClientTest class.
@Override
protected void setUp() throws Exception {
    super.setUp();
    mockCurator = createMock(CuratorFramework.class);
    mockConsumer = createMock(SimpleConsumer.class);
    final CuratorFramework cur = mockCurator;
    final SimpleConsumer con = mockConsumer;
    client = new KafkaClient("zookeeper", "broker", 9092) {

        void connect(String zk, String broker, int port) {
            this.curator = cur;
            this.consumer = con;
        }
    };
}
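Because connect is overridden to inject the mocks, tests built on this fixture never touch ZooKeeper or a real broker. A minimal sketch of such a test follows, assuming the mocks come from EasyMock (createMock, replay, verify); it is not part of the original KafkaClientTest.
public void testConnectOverrideUsesOnlyMocks() {
    // switch both mocks to replay mode; no expectations were recorded,
    // so any unexpected call on them would fail the test
    replay(mockCurator, mockConsumer);
    // the constructor in setUp() already ran the overridden connect(),
    // which only assigned the mock fields
    verify(mockCurator, mockConsumer);
}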
Use of kafka.javaapi.consumer.SimpleConsumer in project jstorm by alibaba.
The findLeaderConsumer method of the KafkaConsumer class.
private SimpleConsumer findLeaderConsumer(int partition) {
    try {
        if (consumer != null) {
            return consumer;
        }
        PartitionMetadata metadata = findLeader(partition);
        if (metadata == null) {
            leaderBroker = null;
            consumer = null;
            return null;
        }
        leaderBroker = metadata.leader();
        consumer = new SimpleConsumer(leaderBroker.host(), leaderBroker.port(), config.socketTimeoutMs, config.socketReceiveBufferBytes, config.clientId);
        return consumer;
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
    }
    return null;
}
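The findLeader(partition) helper is not included in this snippet. With the Kafka 0.8 SimpleConsumer API, leader lookup is typically done through a topic metadata request; the sketch below is a rough illustration rather than the actual jstorm implementation, the metadataConsumer and topic parameters are assumptions, and the types are the kafka.javaapi metadata classes (TopicMetadataRequest, TopicMetadataResponse, TopicMetadata, PartitionMetadata).
private PartitionMetadata findLeader(SimpleConsumer metadataConsumer, String topic, int partition) {
    // ask any reachable broker for the topic's metadata
    TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList(topic));
    TopicMetadataResponse response = metadataConsumer.send(request);
    for (TopicMetadata topicMetadata : response.topicsMetadata()) {
        for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
            if (partitionMetadata.partitionId() == partition) {
                // leader() may be null while a leader election is in progress
                return partitionMetadata;
            }
        }
    }
    return null;
}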
Use of kafka.javaapi.consumer.SimpleConsumer in project jstorm by alibaba.
The fetchMessages method of the KafkaConsumer class.
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {
    String topic = config.topic;
    FetchRequest req = new FetchRequestBuilder().clientId(config.clientId).addFetch(topic, partition, offset, config.fetchMaxBytes).maxWait(config.fetchWaitMaxMs).build();
    FetchResponse fetchResponse = null;
    SimpleConsumer simpleConsumer = null;
    try {
        simpleConsumer = findLeaderConsumer(partition);
        if (simpleConsumer == null) {
            // LOG.error(message);
            return null;
        }
        fetchResponse = simpleConsumer.fetch(req);
    } catch (Exception e) {
        if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException || e instanceof UnresolvedAddressException) {
            LOG.warn("Network error when fetching messages:", e);
            if (simpleConsumer != null) {
                String host = simpleConsumer.host();
                int port = simpleConsumer.port();
                simpleConsumer = null;
                throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(), e);
            }
        } else {
            throw new RuntimeException(e);
        }
    }
    if (fetchResponse.hasError()) {
        short code = fetchResponse.errorCode(topic, partition);
        if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
            long startOffset = getOffset(topic, partition, config.startOffsetTime);
            offset = startOffset;
        }
        if (leaderBroker != null) {
            LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":" + leaderBroker.port() + "] partition[" + partition + "] error:" + code);
        }
        return null;
    } else {
        ByteBufferMessageSet msgs = fetchResponse.messageSet(topic, partition);
        return msgs;
    }
}
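The getOffset(topic, partition, startOffsetTime) call used for the out-of-range reset is not shown. With the 0.8 API it usually wraps an offset request against the leader; the sketch below is not the actual jstorm helper, clientId is passed in only for illustration, and startOffsetTime can be a timestamp or kafka.api.OffsetRequest.EarliestTime()/LatestTime().
private long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime, String clientId) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    // request a single offset whose timestamp is at or before startOffsetTime
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
            Collections.singletonMap(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1)),
            kafka.api.OffsetRequest.CurrentVersion(), clientId);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        return -1L; // let the caller decide how to recover
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets.length > 0 ? offsets[0] : -1L;
}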
Use of kafka.javaapi.consumer.SimpleConsumer in project flink by apache.
The run method of the SimpleConsumerThread class.
// ------------------------------------------------------------------------
// main work loop
// ------------------------------------------------------------------------
@Override
public void run() {
    LOG.info("Starting to fetch from {}", this.partitions);
    // set up the config values
    final String clientId = "flink-kafka-consumer-legacy-" + broker.id();
    try {
        // create the Kafka consumer that we actually use for fetching
        consumer = new SimpleConsumer(broker.host(), broker.port(), soTimeout, bufferSize, clientId);
        // replace earliest or latest starting offsets with actual offset values fetched from Kafka
        requestAndSetEarliestOrLatestOffsetsFromKafka(consumer, partitions);
        LOG.info("Starting to consume {} partitions with consumer thread {}", partitions.size(), getName());
        // Now, the actual work starts :-)
        int offsetOutOfRangeCount = 0;
        int reconnects = 0;
        while (running) {
            // ----------------------------------- partitions list maintenance ----------------------------
            // check queue for new partitions to read from:
            List<KafkaTopicPartitionState<TopicAndPartition>> newPartitions = newPartitionsQueue.pollBatch();
            if (newPartitions != null) {
                // found some new partitions for this thread's broker
                // the new partitions should already be assigned a starting offset
                checkAllPartitionsHaveDefinedStartingOffsets(newPartitions);
                // if the new partitions are to start from earliest or latest offsets,
                // we need to replace them with actual values from Kafka
                requestAndSetEarliestOrLatestOffsetsFromKafka(consumer, newPartitions);
                // add the new partitions (and check they are not already in there)
                for (KafkaTopicPartitionState<TopicAndPartition> newPartition : newPartitions) {
                    if (partitions.contains(newPartition)) {
                        throw new IllegalStateException("Adding partition " + newPartition + " to subscribed partitions even though it is already subscribed");
                    }
                    partitions.add(newPartition);
                }
                LOG.info("Adding {} new partitions to consumer thread {}", newPartitions.size(), getName());
                LOG.debug("Partitions list: {}", newPartitions);
            }
            if (partitions.size() == 0) {
                if (newPartitionsQueue.close()) {
                    // close succeeded. Closing thread
                    running = false;
                    LOG.info("Consumer thread {} does not have any partitions assigned anymore. Stopping thread.", getName());
                    // add the wake-up marker into the queue to make the main thread
                    // wake up immediately and terminate faster
                    unassignedPartitions.add(MARKER);
                    break;
                } else {
                    // go to top of loop again and get the new partitions
                    continue;
                }
            }
            // ----------------------------------- request / response with kafka ----------------------------
            FetchRequestBuilder frb = new FetchRequestBuilder();
            frb.clientId(clientId);
            frb.maxWait(maxWait);
            frb.minBytes(minBytes);
            for (KafkaTopicPartitionState<?> partition : partitions) {
                frb.addFetch(partition.getKafkaTopicPartition().getTopic(), partition.getKafkaTopicPartition().getPartition(),
                        partition.getOffset() + 1, // request the next record
                        fetchSize);
            }
            kafka.api.FetchRequest fetchRequest = frb.build();
            LOG.debug("Issuing fetch request {}", fetchRequest);
            FetchResponse fetchResponse;
            try {
                fetchResponse = consumer.fetch(fetchRequest);
            } catch (Throwable cce) {
                //noinspection ConstantConditions
                if (cce instanceof ClosedChannelException) {
                    LOG.warn("Fetch failed because of ClosedChannelException.");
                    LOG.debug("Full exception", cce);
                    // retry a few times, then return ALL partitions for new leader lookup
                    if (++reconnects >= reconnectLimit) {
                        LOG.warn("Unable to reach broker after {} retries. Returning all current partitions", reconnectLimit);
                        for (KafkaTopicPartitionState<TopicAndPartition> fp : this.partitions) {
                            unassignedPartitions.add(fp);
                        }
                        this.partitions.clear();
                        // jump to top of loop: will close thread or subscribe to new partitions
                        continue;
                    }
                    try {
                        consumer.close();
                    } catch (Throwable t) {
                        LOG.warn("Error while closing consumer connection", t);
                    }
                    // delay & retry
                    Thread.sleep(100);
                    consumer = new SimpleConsumer(broker.host(), broker.port(), soTimeout, bufferSize, clientId);
                    // retry
                    continue;
                } else {
                    throw cce;
                }
            }
            reconnects = 0;
            if (fetchResponse == null) {
                throw new IOException("Fetch from Kafka failed (request returned null)");
            }
            if (fetchResponse.hasError()) {
                String exception = "";
                List<KafkaTopicPartitionState<TopicAndPartition>> partitionsToGetOffsetsFor = new ArrayList<>();
                // iterate over partitions to get individual error codes
                Iterator<KafkaTopicPartitionState<TopicAndPartition>> partitionsIterator = partitions.iterator();
                boolean partitionsRemoved = false;
                while (partitionsIterator.hasNext()) {
                    final KafkaTopicPartitionState<TopicAndPartition> fp = partitionsIterator.next();
                    short code = fetchResponse.errorCode(fp.getTopic(), fp.getPartition());
                    if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                        // we were asked to read from an out-of-range-offset (maybe set wrong in Zookeeper)
                        // Kafka's high level consumer is resetting the offset according to 'auto.offset.reset'
                        partitionsToGetOffsetsFor.add(fp);
                    } else if (code == ErrorMapping.NotLeaderForPartitionCode() || code == ErrorMapping.LeaderNotAvailableCode() || code == ErrorMapping.BrokerNotAvailableCode() || code == ErrorMapping.UnknownCode()) {
                        // the broker we are connected to is not the leader for the partition.
                        LOG.warn("{} is not the leader of {}. Reassigning leader for partition", broker, fp);
                        LOG.debug("Error code = {}", code);
                        unassignedPartitions.add(fp);
                        // unsubscribe the partition ourselves
                        partitionsIterator.remove();
                        partitionsRemoved = true;
                    } else if (code != ErrorMapping.NoError()) {
                        exception += "\nException for " + fp.getTopic() + ":" + fp.getPartition() + ": " + StringUtils.stringifyException(ErrorMapping.exceptionFor(code));
                    }
                }
                if (partitionsToGetOffsetsFor.size() > 0) {
                    // safeguard against an infinite loop.
                    if (offsetOutOfRangeCount++ > 3) {
                        throw new RuntimeException("Found invalid offsets more than three times in partitions " + partitionsToGetOffsetsFor + " Exceptions: " + exception);
                    }
                    // get valid offsets for these partitions and try again.
                    LOG.warn("The following partitions had an invalid offset: {}", partitionsToGetOffsetsFor);
                    requestAndSetSpecificTimeOffsetsFromKafka(consumer, partitionsToGetOffsetsFor, invalidOffsetBehavior);
                    LOG.warn("The new partition offsets are {}", partitionsToGetOffsetsFor);
                    // jump back to create a new fetch request. The offset has not been touched.
                    continue;
                } else if (partitionsRemoved) {
                    // create new fetch request
                    continue;
                } else {
                    // partitions failed on an error
                    throw new IOException("Error while fetching from broker '" + broker + "': " + exception);
                }
            } else {
                // successful fetch, reset offsetOutOfRangeCount.
                offsetOutOfRangeCount = 0;
            }
            // ----------------------------------- process fetch response ----------------------------
            int messagesInFetch = 0;
            int deletedMessages = 0;
            Iterator<KafkaTopicPartitionState<TopicAndPartition>> partitionsIterator = partitions.iterator();
            partitionsLoop:
            while (partitionsIterator.hasNext()) {
                final KafkaTopicPartitionState<TopicAndPartition> currentPartition = partitionsIterator.next();
                final ByteBufferMessageSet messageSet = fetchResponse.messageSet(currentPartition.getTopic(), currentPartition.getPartition());
                for (MessageAndOffset msg : messageSet) {
                    if (running) {
                        messagesInFetch++;
                        final ByteBuffer payload = msg.message().payload();
                        final long offset = msg.offset();
                        if (offset <= currentPartition.getOffset()) {
                            // we have seen this message already
                            LOG.info("Skipping message with offset " + msg.offset() + " because we have seen messages until (including) " + currentPartition.getOffset() + " from topic/partition " + currentPartition.getTopic() + '/' + currentPartition.getPartition() + " already");
                            continue;
                        }
                        // If the message value is null, this represents a delete command for the message key.
                        // Log this and pass it on to the client who might want to also receive delete messages.
                        byte[] valueBytes;
                        if (payload == null) {
                            deletedMessages++;
                            valueBytes = null;
                        } else {
                            valueBytes = new byte[payload.remaining()];
                            payload.get(valueBytes);
                        }
                        // put key into byte array
                        byte[] keyBytes = null;
                        int keySize = msg.message().keySize();
                        if (keySize >= 0) {
                            // message().hasKey() is doing the same. We save one int deserialization
                            ByteBuffer keyPayload = msg.message().key();
                            keyBytes = new byte[keySize];
                            keyPayload.get(keyBytes);
                        }
                        final T value = deserializer.deserialize(keyBytes, valueBytes, currentPartition.getTopic(), currentPartition.getPartition(), offset);
                        if (deserializer.isEndOfStream(value)) {
                            // remove partition from subscribed partitions.
                            partitionsIterator.remove();
                            continue partitionsLoop;
                        }
                        owner.emitRecord(value, currentPartition, offset);
                    } else {
                        // no longer running
                        return;
                    }
                }
            }
            LOG.debug("This fetch contained {} messages ({} deleted messages)", messagesInFetch, deletedMessages);
        }
        if (!newPartitionsQueue.close()) {
            throw new Exception("Bug: Cleanly leaving fetcher thread without having a closed queue.");
        }
    } catch (Throwable t) {
        // report to the fetcher's error handler
        errorHandler.reportError(t);
    } finally {
        if (consumer != null) {
            // closing the consumer should not fail the program
            try {
                consumer.close();
            } catch (Throwable t) {
                LOG.error("Error while closing the Kafka simple consumer", t);
            }
        }
    }
}
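The checkAllPartitionsHaveDefinedStartingOffsets guard called at the top of the loop is not reproduced here. The following is a minimal sketch of what such a guard can look like; it assumes an isOffsetDefined()-style accessor on KafkaTopicPartitionState and is not necessarily the actual Flink code.
private static void checkAllPartitionsHaveDefinedStartingOffsets(List<KafkaTopicPartitionState<TopicAndPartition>> partitions) {
    for (KafkaTopicPartitionState<TopicAndPartition> part : partitions) {
        // every partition handed to this thread must already carry a starting offset
        // (a concrete value, or an earliest/latest sentinel that the loop above resolves)
        if (!part.isOffsetDefined()) {
            throw new IllegalArgumentException("SimpleConsumerThread received a partition with an undefined starting offset");
        }
    }
}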