Use of kafka.message.MessageAndOffset in project cdap by caskdata:
class KafkaLogProcessorPipeline, method processMessages.
/**
 * Process messages fetched from a given partition.
 */
private boolean processMessages(String topic, int partition,
                                Future<Iterable<MessageAndOffset>> future)
  throws InterruptedException, KafkaException, IOException {
  Iterable<MessageAndOffset> messages;
  try {
    messages = future.get();
  } catch (ExecutionException e) {
    try {
      throw e.getCause();
    } catch (OffsetOutOfRangeException cause) {
      // This shouldn't happen under normal circumstances.
      // If it does, it is usually caused by a race between Kafka log rotation and the fetch here,
      // hence simply fetching from the beginning again should be fine.
      offsets.put(partition, getLastOffset(partition, kafka.api.OffsetRequest.EarliestTime()));
      return false;
    } catch (KafkaException | IOException cause) {
      throw cause;
    } catch (Throwable t) {
      // For other types of exceptions, just throw an IOException. It will be handled by the caller.
      throw new IOException(t);
    }
  }

  boolean processed = false;
  for (MessageAndOffset message : messages) {
    if (eventQueue.getEventSize() >= config.getMaxBufferSize()) {
      // Log a message. If this happens too often, it indicates that more memory is needed for log processing.
      OUTAGE_LOG.info("Maximum queue size {} reached for pipeline {}.", config.getMaxBufferSize(), name);
      // If nothing could be appended (due to an error), break the loop so that no new event is added.
      // Since the offset is not updated, the same set of messages will be fetched again in the next iteration.
      int eventsAppended = appendEvents(System.currentTimeMillis(), true);
      if (eventsAppended <= 0) {
        break;
      }
      unSyncedEvents += eventsAppended;
    }

    try {
      metricsContext.increment("kafka.bytes.read", message.message().payloadSize());
      ILoggingEvent loggingEvent = serializer.fromBytes(message.message().payload());
      // Use the message payload size as the size estimate of the logging event.
      // Although it's not the same as the in-memory object size, the two should differ only by a
      // roughly constant factor, hence it is proportional to the actual object size.
      eventQueue.add(loggingEvent, loggingEvent.getTimeStamp(), message.message().payloadSize(), partition,
                     new OffsetTime(message.nextOffset(), loggingEvent.getTimeStamp()));
    } catch (IOException e) {
      // This shouldn't happen. In case it does (e.g. someone published garbage), just skip the message.
      LOG.trace("Failed to decode logging event from {}:{} at offset {}. Skipping it.",
                topic, partition, message.offset(), e);
    }
    processed = true;
    offsets.put(partition, message.nextOffset());
  }
  return processed;
}
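
For context, the Iterable&lt;MessageAndOffset&gt; handed to processMessages above is the result of a Kafka fetch. Below is a minimal, self-contained sketch of producing such an iterable with the old Kafka 0.8 SimpleConsumer API; the broker address, client id, topic, partition and fetch size are placeholder assumptions, and this is not the actual fetch code used in cdap.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.MessageAndOffset;

// Sketch only: broker, client id, topic, partition and fetch size are made-up values.
public final class FetchSketch {
  public static void main(String[] args) {
    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 10000, 64 * 1024, "fetch-sketch");
    try {
      String topic = "logs";
      int partition = 0;
      long offset = 0L;

      FetchRequest request = new FetchRequestBuilder()
        .clientId("fetch-sketch")
        .addFetch(topic, partition, offset, 1024 * 1024)
        .build();
      FetchResponse response = consumer.fetch(request);

      // The message set is an Iterable<MessageAndOffset>, the same shape that processMessages consumes.
      ByteBufferMessageSet messages = response.messageSet(topic, partition);
      for (MessageAndOffset messageAndOffset : messages) {
        // Copy the payload ByteBuffer into a byte[] before using it.
        ByteBuffer payload = messageAndOffset.message().payload();
        byte[] bytes = new byte[payload.remaining()];
        payload.get(bytes);
        System.out.println(messageAndOffset.offset() + ": " + new String(bytes, StandardCharsets.UTF_8));
        // nextOffset() is the position to resume fetching from, which is what processMessages
        // stores in its offsets map after handling a message.
        offset = messageAndOffset.nextOffset();
      }
    } finally {
      consumer.close();
    }
  }
}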
Use of kafka.message.MessageAndOffset in project jstorm by alibaba:
class PartitionConsumer, method fillMessages.
private void fillMessages() {
  ByteBufferMessageSet msgs;
  try {
    long start = System.currentTimeMillis();
    msgs = consumer.fetchMessages(partition, emittingOffset + 1);
    if (msgs == null) {
      LOG.error("fetch null message from offset {}", emittingOffset);
      return;
    }

    int count = 0;
    for (MessageAndOffset msg : msgs) {
      count += 1;
      emittingMessages.add(msg);
      emittingOffset = msg.offset();
      pendingOffsets.add(emittingOffset);
      LOG.debug("fillmessage fetched a message:{}, offset:{}", msg.message().toString(), msg.offset());
    }
    long end = System.currentTimeMillis();
    LOG.info("fetch message from partition:" + partition + ", offset:" + emittingOffset
             + ", size:" + msgs.sizeInBytes() + ", count:" + count + ", time:" + (end - start));
  } catch (Exception e) {
    e.printStackTrace();
    LOG.error(e.getMessage(), e);
  }
}
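
Note that this loop buffers whole MessageAndOffset objects in emittingMessages and advances emittingOffset to msg.offset(), so the next fetch asks for emittingOffset + 1, whereas the cdap example above uses nextOffset() directly. When the spout later emits a buffered message, the payload ByteBuffer still has to be copied into a byte[]. A minimal sketch of that step, not taken from the jstorm code, is:

import java.nio.ByteBuffer;
import kafka.message.MessageAndOffset;

// Sketch only: a hypothetical helper for extracting the payload bytes of a buffered MessageAndOffset.
public final class PayloadExtractor {
  private PayloadExtractor() { }

  public static byte[] toBytes(MessageAndOffset msg) {
    // payload() returns a ByteBuffer view of the message value; copy it into a byte[] for emitting.
    ByteBuffer payload = msg.message().payload();
    byte[] bytes = new byte[payload.remaining()];
    payload.get(bytes);
    return bytes;
  }
}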