Use of kafka.javaapi.message.ByteBufferMessageSet in project elasticsearch-river-kafka by endgameinc.
The class KafkaClientTest, method testFetch.
public void testFetch() {
expect(mockConsumer.fetch(anyObject(FetchRequest.class))).andReturn(new ByteBufferMessageSet(Collections.EMPTY_LIST));
replay(mockConsumer, mockCurator);
client.fetch("my_topic", 0, 1717, 1024);
verify(mockConsumer, mockCurator);
}
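The mocked fetch above returns an empty message set, which is enough to verify the interaction. If a test needs actual records, kafka.javaapi.message.ByteBufferMessageSet can be constructed directly from kafka.message.Message instances. A minimal sketch; the class name and payloads are illustrative and not part of the project:

import java.util.Arrays;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.Message;
import kafka.message.MessageAndOffset;

public class ByteBufferMessageSetStubExample {

    // build a small, uncompressed message set: one unkeyed and one keyed message
    public static ByteBufferMessageSet nonEmptyStub() {
        return new ByteBufferMessageSet(Arrays.asList(
                new Message("value-1".getBytes()),
                new Message("value-2".getBytes(), "key-2".getBytes())));
    }

    public static void main(String[] args) {
        // iterate the stub the same way production code iterates a real fetch result
        for (MessageAndOffset mao : nonEmptyStub()) {
            System.out.println("offset=" + mao.offset() + ", payloadSize=" + mao.message().payloadSize());
        }
    }
}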
Use of kafka.javaapi.message.ByteBufferMessageSet in project jstorm by alibaba.
The class KafkaConsumer, method fetchMessages.
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {
String topic = config.topic;
FetchRequest req = new FetchRequestBuilder().clientId(config.clientId).addFetch(topic, partition, offset, config.fetchMaxBytes).maxWait(config.fetchWaitMaxMs).build();
FetchResponse fetchResponse = null;
SimpleConsumer simpleConsumer = null;
try {
simpleConsumer = findLeaderConsumer(partition);
if (simpleConsumer == null) {
// no leader consumer found for this partition; nothing to fetch
return null;
}
fetchResponse = simpleConsumer.fetch(req);
} catch (Exception e) {
if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException || e instanceof UnresolvedAddressException) {
LOG.warn("Network error when fetching messages:", e);
if (simpleConsumer != null) {
String host = simpleConsumer.host();
int port = simpleConsumer.port();
simpleConsumer = null;
throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(), e);
}
} else {
throw new RuntimeException(e);
}
}
if (fetchResponse.hasError()) {
short code = fetchResponse.errorCode(topic, partition);
if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
long startOffset = getOffset(topic, partition, config.startOffsetTime);
offset = startOffset;
}
if (leaderBroker != null) {
LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":" + leaderBroker.port() + "] partition[" + partition + "] error:" + code);
}
return null;
} else {
ByteBufferMessageSet msgs = fetchResponse.messageSet(topic, partition);
return msgs;
}
}
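A typical caller drives fetchMessages in a polling loop, copies each payload out of its ByteBuffer, and advances the offset with MessageAndOffset.nextOffset(). A minimal sketch of one such poll, assuming a configured KafkaConsumer instance and a hypothetical handle() callback:

// a single poll against one partition; "consumer" is the jstorm KafkaConsumer shown above,
// "handle" is a hypothetical record callback
long pollOnce(KafkaConsumer consumer, int partition, long offset) throws IOException {
    ByteBufferMessageSet msgs = consumer.fetchMessages(partition, offset);
    if (msgs == null) {
        // no leader found or the fetch failed; the caller may retry after a backoff
        return offset;
    }
    for (MessageAndOffset mao : msgs) {
        offset = mao.nextOffset(); // the offset to request on the next fetch
        ByteBuffer payload = mao.message().payload();
        if (payload == null) {
            continue; // tombstone: a null value marks a delete for the key
        }
        byte[] bytes = new byte[payload.remaining()];
        payload.get(bytes);
        handle(bytes);
    }
    return offset;
}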
Use of kafka.javaapi.message.ByteBufferMessageSet in project flink by apache.
The class SimpleConsumerThread, method run.
// ------------------------------------------------------------------------
// main work loop
// ------------------------------------------------------------------------
@Override
public void run() {
LOG.info("Starting to fetch from {}", this.partitions);
// set up the config values
final String clientId = "flink-kafka-consumer-legacy-" + broker.id();
try {
// create the Kafka consumer that we actually use for fetching
consumer = new SimpleConsumer(broker.host(), broker.port(), soTimeout, bufferSize, clientId);
// replace earliest or latest starting offsets with actual offset values fetched from Kafka
requestAndSetEarliestOrLatestOffsetsFromKafka(consumer, partitions);
LOG.info("Starting to consume {} partitions with consumer thread {}", partitions.size(), getName());
// Now, the actual work starts :-)
int offsetOutOfRangeCount = 0;
int reconnects = 0;
while (running) {
// ----------------------------------- partitions list maintenance ----------------------------
// check queue for new partitions to read from:
List<KafkaTopicPartitionState<TopicAndPartition>> newPartitions = newPartitionsQueue.pollBatch();
if (newPartitions != null) {
// found some new partitions for this thread's broker
// the new partitions should already be assigned a starting offset
checkAllPartitionsHaveDefinedStartingOffsets(newPartitions);
// if the new partitions are to start from earliest or latest offsets,
// we need to replace them with actual values from Kafka
requestAndSetEarliestOrLatestOffsetsFromKafka(consumer, newPartitions);
// add the new partitions (and check they are not already in there)
for (KafkaTopicPartitionState<TopicAndPartition> newPartition : newPartitions) {
if (partitions.contains(newPartition)) {
throw new IllegalStateException("Adding partition " + newPartition + " to subscribed partitions even though it is already subscribed");
}
partitions.add(newPartition);
}
LOG.info("Adding {} new partitions to consumer thread {}", newPartitions.size(), getName());
LOG.debug("Partitions list: {}", newPartitions);
}
if (partitions.size() == 0) {
if (newPartitionsQueue.close()) {
// close succeeded. Closing thread
running = false;
LOG.info("Consumer thread {} does not have any partitions assigned anymore. Stopping thread.", getName());
// add the wake-up marker into the queue to make the main thread
// wake up immediately and terminate faster
unassignedPartitions.add(MARKER);
break;
} else {
// go to top of loop again and get the new partitions
continue;
}
}
// ----------------------------------- request / response with kafka ----------------------------
FetchRequestBuilder frb = new FetchRequestBuilder();
frb.clientId(clientId);
frb.maxWait(maxWait);
frb.minBytes(minBytes);
for (KafkaTopicPartitionState<?> partition : partitions) {
// request the next record
frb.addFetch(partition.getKafkaTopicPartition().getTopic(), partition.getKafkaTopicPartition().getPartition(), partition.getOffset() + 1, fetchSize);
}
kafka.api.FetchRequest fetchRequest = frb.build();
LOG.debug("Issuing fetch request {}", fetchRequest);
FetchResponse fetchResponse;
try {
fetchResponse = consumer.fetch(fetchRequest);
} catch (Throwable cce) {
//noinspection ConstantConditions
if (cce instanceof ClosedChannelException) {
LOG.warn("Fetch failed because of ClosedChannelException.");
LOG.debug("Full exception", cce);
// retry a few times, then return ALL partitions for new leader lookup
if (++reconnects >= reconnectLimit) {
LOG.warn("Unable to reach broker after {} retries. Returning all current partitions", reconnectLimit);
for (KafkaTopicPartitionState<TopicAndPartition> fp : this.partitions) {
unassignedPartitions.add(fp);
}
this.partitions.clear();
// jump to top of loop: will close thread or subscribe to new partitions
continue;
}
try {
consumer.close();
} catch (Throwable t) {
LOG.warn("Error while closing consumer connection", t);
}
// delay & retry
Thread.sleep(100);
consumer = new SimpleConsumer(broker.host(), broker.port(), soTimeout, bufferSize, clientId);
// retry
continue;
} else {
throw cce;
}
}
reconnects = 0;
if (fetchResponse == null) {
throw new IOException("Fetch from Kafka failed (request returned null)");
}
if (fetchResponse.hasError()) {
String exception = "";
List<KafkaTopicPartitionState<TopicAndPartition>> partitionsToGetOffsetsFor = new ArrayList<>();
// iterate over partitions to get individual error codes
Iterator<KafkaTopicPartitionState<TopicAndPartition>> partitionsIterator = partitions.iterator();
boolean partitionsRemoved = false;
while (partitionsIterator.hasNext()) {
final KafkaTopicPartitionState<TopicAndPartition> fp = partitionsIterator.next();
short code = fetchResponse.errorCode(fp.getTopic(), fp.getPartition());
if (code == ErrorMapping.OffsetOutOfRangeCode()) {
// we were asked to read from an out-of-range-offset (maybe set wrong in Zookeeper)
// Kafka's high level consumer is resetting the offset according to 'auto.offset.reset'
partitionsToGetOffsetsFor.add(fp);
} else if (code == ErrorMapping.NotLeaderForPartitionCode() || code == ErrorMapping.LeaderNotAvailableCode() || code == ErrorMapping.BrokerNotAvailableCode() || code == ErrorMapping.UnknownCode()) {
// the broker we are connected to is not the leader for the partition.
LOG.warn("{} is not the leader of {}. Reassigning leader for partition", broker, fp);
LOG.debug("Error code = {}", code);
unassignedPartitions.add(fp);
// unsubscribe the partition ourselves
partitionsIterator.remove();
partitionsRemoved = true;
} else if (code != ErrorMapping.NoError()) {
exception += "\nException for " + fp.getTopic() + ":" + fp.getPartition() + ": " + StringUtils.stringifyException(ErrorMapping.exceptionFor(code));
}
}
if (partitionsToGetOffsetsFor.size() > 0) {
// safeguard against an infinite loop.
if (offsetOutOfRangeCount++ > 3) {
throw new RuntimeException("Found invalid offsets more than three times in partitions " + partitionsToGetOffsetsFor + " Exceptions: " + exception);
}
// get valid offsets for these partitions and try again.
LOG.warn("The following partitions had an invalid offset: {}", partitionsToGetOffsetsFor);
requestAndSetSpecificTimeOffsetsFromKafka(consumer, partitionsToGetOffsetsFor, invalidOffsetBehavior);
LOG.warn("The new partition offsets are {}", partitionsToGetOffsetsFor);
// jump back to create a new fetch request. The offset has not been touched.
continue;
} else if (partitionsRemoved) {
// create new fetch request
continue;
} else {
// partitions failed on an error
throw new IOException("Error while fetching from broker '" + broker + "': " + exception);
}
} else {
// successful fetch, reset offsetOutOfRangeCount.
offsetOutOfRangeCount = 0;
}
// ----------------------------------- process fetch response ----------------------------
int messagesInFetch = 0;
int deletedMessages = 0;
Iterator<KafkaTopicPartitionState<TopicAndPartition>> partitionsIterator = partitions.iterator();
partitionsLoop: while (partitionsIterator.hasNext()) {
final KafkaTopicPartitionState<TopicAndPartition> currentPartition = partitionsIterator.next();
final ByteBufferMessageSet messageSet = fetchResponse.messageSet(currentPartition.getTopic(), currentPartition.getPartition());
for (MessageAndOffset msg : messageSet) {
if (running) {
messagesInFetch++;
final ByteBuffer payload = msg.message().payload();
final long offset = msg.offset();
if (offset <= currentPartition.getOffset()) {
// we have seen this message already
LOG.info("Skipping message with offset " + msg.offset() + " because we have seen messages until (including) " + currentPartition.getOffset() + " from topic/partition " + currentPartition.getTopic() + '/' + currentPartition.getPartition() + " already");
continue;
}
// If the message value is null, this represents a delete command for the message key.
// Log this and pass it on to the client who might want to also receive delete messages.
byte[] valueBytes;
if (payload == null) {
deletedMessages++;
valueBytes = null;
} else {
valueBytes = new byte[payload.remaining()];
payload.get(valueBytes);
}
// put key into byte array
byte[] keyBytes = null;
int keySize = msg.message().keySize();
if (keySize >= 0) {
// message().hasKey() would do the same check; reading keySize() directly saves one int deserialization
ByteBuffer keyPayload = msg.message().key();
keyBytes = new byte[keySize];
keyPayload.get(keyBytes);
}
final T value = deserializer.deserialize(keyBytes, valueBytes, currentPartition.getTopic(), currentPartition.getPartition(), offset);
if (deserializer.isEndOfStream(value)) {
// remove partition from subscribed partitions.
partitionsIterator.remove();
continue partitionsLoop;
}
owner.emitRecord(value, currentPartition, offset);
} else {
// no longer running
return;
}
}
}
LOG.debug("This fetch contained {} messages ({} deleted messages)", messagesInFetch, deletedMessages);
}
if (!newPartitionsQueue.close()) {
throw new Exception("Bug: Cleanly leaving fetcher thread without having a closed queue.");
}
} catch (Throwable t) {
// report to the fetcher's error handler
errorHandler.reportError(t);
} finally {
if (consumer != null) {
// closing the consumer should not fail the program
try {
consumer.close();
} catch (Throwable t) {
LOG.error("Error while closing the Kafka simple consumer", t);
}
}
}
}
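The helper requestAndSetEarliestOrLatestOffsetsFromKafka used above resolves the earliest/latest sentinels to concrete offsets before fetching. The snippet does not include it, but the underlying lookup against the legacy SimpleConsumer API follows a standard pattern; a minimal, hedged sketch, not Flink's exact helper:

// a minimal sketch of an earliest/latest offset lookup with the kafka.javaapi classes;
// whichTime is kafka.api.OffsetRequest.EarliestTime() or kafka.api.OffsetRequest.LatestTime()
long lookupOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String clientId) throws IOException {
    TopicAndPartition tp = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    requestInfo.put(tp, new PartitionOffsetRequestInfo(whichTime, 1)); // ask for a single offset
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);
    kafka.javaapi.OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        throw new IOException("Offset lookup failed with error code " + response.errorCode(topic, partition));
    }
    return response.offsets(topic, partition)[0];
}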
Use of kafka.javaapi.message.ByteBufferMessageSet in project heron by twitter.
The class KafkaUtilsTest, method generateTuplesWithKeyAndKeyValueScheme.
@Test
public void generateTuplesWithKeyAndKeyValueScheme() {
config.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());
config.useStartOffsetTimeIfOffsetOutOfRange = false;
String value = "value";
String key = "key";
createTopicAndSendMessage(key, value);
ByteBufferMessageSet messageAndOffsets = getLastMessage();
for (MessageAndOffset msg : messageAndOffsets) {
Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message(), config.topic);
assertEquals(ImmutableMap.of(key, value), lists.iterator().next().get(0));
}
}
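The assertion relies on the string key-value scheme turning the message's key and payload into a single-entry map. A rough equivalent of that decoding step, assuming UTF-8 string keys and values as sent by the test; this is an illustration, not the project's StringKeyValueScheme implementation:

// decode a keyed Kafka 0.8 Message into the map shape the test asserts on
static ImmutableMap<String, String> toKeyValue(Message message) {
    ByteBuffer keyBuffer = message.key();         // assumed non-null here; the test always sends a key
    ByteBuffer payloadBuffer = message.payload();
    byte[] keyBytes = new byte[keyBuffer.remaining()];
    keyBuffer.get(keyBytes);
    byte[] valueBytes = new byte[payloadBuffer.remaining()];
    payloadBuffer.get(valueBytes);
    return ImmutableMap.of(new String(keyBytes, StandardCharsets.UTF_8), new String(valueBytes, StandardCharsets.UTF_8));
}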
Use of kafka.javaapi.message.ByteBufferMessageSet in project heron by twitter.
The class TestUtils, method verifyMessage.
public static boolean verifyMessage(String key, String message, KafkaTestBroker broker, SimpleConsumer simpleConsumer) {
long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, TestUtils.TOPIC, 0, OffsetRequest.LatestTime()) - 1;
ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(TestUtils.getKafkaConfig(broker), simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), TestUtils.TOPIC, 0), lastMessageOffset);
MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
Message kafkaMessage = messageAndOffset.message();
ByteBuffer messageKeyBuffer = kafkaMessage.key();
String keyString = null;
String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
if (messageKeyBuffer != null) {
keyString = new String(Utils.toByteArray(messageKeyBuffer));
}
assertEquals(key, keyString);
assertEquals(message, messageString);
return true;
}
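verifyMessage fetches back whatever the test previously produced. For completeness, a keyed message can be sent to the embedded broker with the legacy 0.8 producer API; a minimal sketch, where the broker address is a placeholder (the real tests take it from broker.getBrokerConnectionString()):

// producing a keyed String message with the legacy kafka.javaapi.producer API
Properties props = new Properties();
props.put("metadata.broker.list", "localhost:9092"); // placeholder; use broker.getBrokerConnectionString() in the real test
props.put("serializer.class", "kafka.serializer.StringEncoder");
props.put("key.serializer.class", "kafka.serializer.StringEncoder");
props.put("request.required.acks", "1");

Producer<String, String> producer = new Producer<>(new ProducerConfig(props));
producer.send(new KeyedMessage<>(TestUtils.TOPIC, "key", "message"));
producer.close();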