use of kafka.message.MessageAndOffset in project bagheera by mozilla-metrics.
the class ProducerTest method countMessages.
private int countMessages() throws InvalidProtocolBufferException {
    SimpleConsumer consumer = new SimpleConsumer("localhost", KAFKA_BROKER_PORT, 100, 1024);
    long offset = 0L;
    int messageCount = 0;
    for (int i = 0; i < BATCH_SIZE; i++) {
        ByteBufferMessageSet messageSet = consumer.fetch(new FetchRequest(KAFKA_TOPIC, 0, offset, 1024));
        Iterator<MessageAndOffset> iterator = messageSet.iterator();
        MessageAndOffset msgAndOff;
        while (iterator.hasNext()) {
            messageCount++;
            msgAndOff = iterator.next();
            offset = msgAndOff.offset();
            Message message2 = msgAndOff.message();
            BagheeraMessage bmsg = BagheeraMessage.parseFrom(ByteString.copyFrom(message2.payload()));
            String payload = new String(bmsg.getPayload().toByteArray());
            System.out.println(String.format("Message %d @%d: %s", messageCount, offset, payload));
        }
    }
    consumer.close();
    return messageCount;
}
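Message.payload() hands back a ByteBuffer whose position and limit frame the message body, so copying the bytes out via remaining()/get() is the safe pattern. A minimal sketch of such a helper, assuming the Kafka 0.8-era kafka.message API; payloadBytes is a hypothetical name, not part of the project above:

import java.nio.ByteBuffer;
import kafka.message.MessageAndOffset;

final class PayloadUtil {
    // Copy the message body out of the backing ByteBuffer without
    // assuming the buffer starts at position 0. Note that get(...)
    // advances the buffer's position.
    static byte[] payloadBytes(MessageAndOffset messageAndOffset) {
        ByteBuffer buffer = messageAndOffset.message().payload();
        byte[] bytes = new byte[buffer.remaining()];
        buffer.get(bytes);
        return bytes;
    }
}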
use of kafka.message.MessageAndOffset in project voltdb by VoltDB.
the class KafkaTopicPartitionImporter method accept.
@Override
protected void accept() {
    info(null, "Starting partition fetcher for " + m_topicAndPartition);
    long submitCount = 0;
    PendingWorkTracker callbackTracker = new PendingWorkTracker();
    Formatter formatter = m_config.getFormatterBuilder().create();
    try {
        // Start with the starting leader.
        resetLeader();
        int sleepCounter = 1;
        while (shouldRun()) {
            if (m_currentOffset.get() < 0) {
                getOffsetCoordinator();
                if (m_offsetManager.get() == null) {
                    sleepCounter = backoffSleep(sleepCounter);
                    continue;
                }
                long lastOffset = getLastOffset();
                if (lastOffset == -1) {
                    sleepCounter = backoffSleep(sleepCounter);
                    continue;
                }
                m_gapTracker.resetTo(lastOffset);
                m_lastCommittedOffset = lastOffset;
                m_currentOffset.set(lastOffset);
                if (m_currentOffset.get() < 0) {
                    // If we don't know the offset, get it; back off if we fail.
                    sleepCounter = backoffSleep(sleepCounter);
                    info(null, "No valid offset found for " + m_topicAndPartition);
                    continue;
                }
                info(null, "Starting offset for " + m_topicAndPartition + " is " + m_currentOffset.get());
            }
            long currentFetchCount = 0;
            // Build a fetch request if we have a valid offset and not too many are pending.
            FetchRequest req = m_fetchRequestBuilder.addFetch(m_topicAndPartition.topic(), m_topicAndPartition.partition(), m_currentOffset.get(), m_config.getFetchSize()).build();
            FetchResponse fetchResponse = null;
            try {
                fetchResponse = m_consumer.fetch(req);
                if (fetchResponse == null) {
                    sleepCounter = backoffSleep(sleepCounter);
                    continue;
                }
            } catch (Exception ex) {
                rateLimitedLog(Level.WARN, ex, "Failed to fetch from " + m_topicAndPartition);
                // See if it's a network error and find a new leader for this partition.
                if (ex instanceof IOException) {
                    resetLeader();
                    // Finding the leader in resetLeader sleeps and backs off.
                    continue;
                }
                sleepCounter = backoffSleep(sleepCounter);
                continue;
            }
            if (fetchResponse.hasError()) {
                // Something went wrong!
                short code = fetchResponse.errorCode(m_topicAndPartition.topic(), m_topicAndPartition.partition());
                warn(ErrorMapping.exceptionFor(code), "Failed to fetch messages for %s", m_topicAndPartition);
                sleepCounter = backoffSleep(sleepCounter);
                if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                    // We asked for an invalid offset. For the simple case, ask for the last element to reset.
                    info(null, "Invalid offset requested for " + m_topicAndPartition);
                    getOffsetCoordinator();
                    m_currentOffset.set(-1L);
                    continue;
                }
                resetLeader();
                continue;
            }
            sleepCounter = 1;
            for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(m_topicAndPartition.topic(), m_topicAndPartition.partition())) {
                // You may be catching up, so don't sleep.
                currentFetchCount++;
                long currentOffset = messageAndOffset.offset();
                // If currentOffset is lower, we have already pushed it; also check the pending queue.
                if (currentOffset < m_currentOffset.get()) {
                    continue;
                }
                ByteBuffer payload = messageAndOffset.message().payload();
                Object[] params = null;
                try {
                    m_gapTracker.submit(messageAndOffset.nextOffset());
                    params = formatter.transform(payload);
                    Invocation invocation = new Invocation(m_config.getProcedure(), params);
                    TopicPartitionInvocationCallback cb = new TopicPartitionInvocationCallback(messageAndOffset.offset(), messageAndOffset.nextOffset(), callbackTracker, m_gapTracker, m_dead, m_pauseOffset);
                    if (!noTransaction) {
                        if (callProcedure(invocation, cb)) {
                            callbackTracker.produceWork();
                        } else {
                            if (isDebugEnabled()) {
                                debug(null, "Failed to process Invocation possibly bad data: " + Arrays.toString(params));
                            }
                            m_gapTracker.commit(messageAndOffset.nextOffset());
                        }
                    }
                } catch (FormatException e) {
                    rateLimitedLog(Level.WARN, e, "Failed to transform data: %s", Arrays.toString(params));
                    m_gapTracker.commit(messageAndOffset.nextOffset());
                }
                submitCount++;
                m_currentOffset.set(messageAndOffset.nextOffset());
                if (!shouldRun()) {
                    break;
                }
            }
            if (!shouldRun()) {
                break;
            }
            // Wait to fetch more if we read nothing last time.
            if (currentFetchCount == 0) {
                try {
                    Thread.sleep(m_waitSleepMs);
                } catch (InterruptedException ie) {
                }
            }
            if (shouldCommit()) {
                commitOffset(false);
            }
        }
    } catch (Exception ex) {
        error(ex, "Failed to start topic partition fetcher for " + m_topicAndPartition);
    } finally {
        final boolean usePausedOffset = m_pauseOffset.get() != -1;
        boolean skipCommit = false;
        if (usePausedOffset) {
            // The paused offset is not guaranteed reliable until all callbacks have been called.
            if (!callbackTracker.waitForWorkToFinish()) {
                if (m_pauseOffset.get() < m_lastCommittedOffset) {
                    warn(null, "Committing paused offset even though a timeout occurred waiting for pending stored procedures to finish.");
                } else {
                    warn(null, "Refusing to commit paused offset because a timeout occurred waiting for pending stored procedures to finish.");
                    skipCommit = true;
                }
            }
        }
        if (!skipCommit) {
            // Force a commit. The paused offset will be re-acquired if needed.
            commitOffset(usePausedOffset);
        }
        KafkaStreamImporterConfig.closeConsumer(m_consumer);
        m_consumer = null;
        BlockingChannel channel = m_offsetManager.getAndSet(null);
        if (channel != null) {
            try {
                channel.disconnect();
            } catch (Exception ignoreIt) {
            }
        }
    }
    m_dead.compareAndSet(false, true);
    info(null, "Partition fetcher stopped for " + m_topicAndPartition + " Last commit point is: " + m_lastCommittedOffset + " Callback Rcvd: " + callbackTracker.getCallbackCount() + " Submitted: " + submitCount);
}
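backoffSleep(...) is not shown in this excerpt. A minimal sketch consistent with how it is called above, where the counter grows with consecutive failures and is reset to 1 on success; the actual VoltDB implementation may differ:

// Hypothetical backoff helper: sleep longer after each consecutive
// failure, capping the counter so the wait never grows unbounded.
private int backoffSleep(int fetchFailedCount) {
    try {
        Thread.sleep(1000L * fetchFailedCount++);
        if (fetchFailedCount > 10) {
            fetchFailedCount = 1;
        }
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
    }
    return fetchFailedCount;
}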
use of kafka.message.MessageAndOffset in project apex-malhar by apache.
the class AbstractExactlyOnceKafkaOutputOperator method initializeLastProcessingOffset.
private void initializeLastProcessingOffset() {
    // Read the last received kafka message.
    TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String) getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());
    if (tm == null) {
        throw new RuntimeException("Failed to retrieve topic metadata");
    }
    partitionNum = tm.partitionsMetadata().size();
    lastMsgs = new HashMap<Integer, Pair<byte[], byte[]>>(partitionNum);
    for (PartitionMetadata pm : tm.partitionsMetadata()) {
        String leadBroker = pm.leader().host();
        int port = pm.leader().port();
        String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
        SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
        long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName);
        FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();
        FetchResponse fetchResponse = consumer.fetch(req);
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {
            Message m = messageAndOffset.message();
            ByteBuffer payload = m.payload();
            ByteBuffer key = m.key();
            byte[] valueBytes = new byte[payload.limit()];
            byte[] keyBytes = new byte[key.limit()];
            payload.get(valueBytes);
            key.get(keyBytes);
            lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
        }
    }
}
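KafkaMetadataUtil.getLastOffset(...) follows the classic Kafka 0.8 SimpleConsumer pattern: issue an OffsetRequest asking for at most one offset before a given time. A sketch under that assumption, using the stock kafka.javaapi types rather than apex-malhar's wrapper; names and error handling here are illustrative, not the project's actual implementation:

import java.util.HashMap;
import java.util.Map;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
                                 long whichTime, String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    // Ask for at most one offset before 'whichTime' (e.g. LatestTime()).
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
        requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        return -1;  // caller decides how to retry
    }
    return response.offsets(topic, partition)[0];
}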
use of kafka.message.MessageAndOffset in project apex-malhar by apache.
the class AbstractKafkaInputOperator method replay.
protected void replay(long windowId) {
    try {
        @SuppressWarnings("unchecked")
        Map<KafkaPartition, MutablePair<Long, Integer>> recoveredData = (Map<KafkaPartition, MutablePair<Long, Integer>>) windowDataManager.retrieve(windowId);
        if (recoveredData != null) {
            Map<String, List<PartitionMetadata>> pms = KafkaMetadataUtil.getPartitionsForTopic(getConsumer().brokers, getConsumer().topic);
            if (pms != null) {
                SimpleKafkaConsumer cons = (SimpleKafkaConsumer) getConsumer();
                // Add all partition requests together in one fetch request.
                FetchRequestBuilder frb = new FetchRequestBuilder().clientId(cons.getClientId());
                for (Map.Entry<KafkaPartition, MutablePair<Long, Integer>> rc : recoveredData.entrySet()) {
                    KafkaPartition kp = rc.getKey();
                    List<PartitionMetadata> pmsVal = pms.get(kp.getClusterId());
                    Iterator<PartitionMetadata> pmIterator = pmsVal.iterator();
                    PartitionMetadata pm = pmIterator.next();
                    while (pm.partitionId() != kp.getPartitionId()) {
                        if (!pmIterator.hasNext()) {
                            break;
                        }
                        pm = pmIterator.next();
                    }
                    if (pm.partitionId() != kp.getPartitionId()) {
                        continue;
                    }
                    Broker bk = pm.leader();
                    frb.addFetch(consumer.topic, rc.getKey().getPartitionId(), rc.getValue().left, cons.getBufferSize());
                    FetchRequest req = frb.build();
                    SimpleConsumer ksc = new SimpleConsumer(bk.host(), bk.port(), cons.getTimeout(), cons.getBufferSize(), cons.getClientId());
                    FetchResponse fetchResponse = ksc.fetch(req);
                    Integer count = 0;
                    for (MessageAndOffset msg : fetchResponse.messageSet(consumer.topic, kp.getPartitionId())) {
                        KafkaConsumer.KafkaMessage kafkaMessage = new KafkaConsumer.KafkaMessage(kp, msg.message(), msg.offset());
                        emitTuple(kafkaMessage);
                        offsetStats.put(kp, msg.offset());
                        count = count + 1;
                        if (count.equals(rc.getValue().right)) {
                            break;
                        }
                    }
                }
            }
        }
        if (windowId == windowDataManager.getLargestCompletedWindow()) {
            // Start the consumer at the largest recovery window.
            SimpleKafkaConsumer cons = (SimpleKafkaConsumer) getConsumer();
            // Set the offset positions on the consumer.
            Map<KafkaPartition, Long> currentOffsets = new HashMap<KafkaPartition, Long>(cons.getCurrentOffsets());
            // Increment the offsets.
            for (Map.Entry<KafkaPartition, Long> e : offsetStats.entrySet()) {
                currentOffsets.put(e.getKey(), e.getValue() + 1);
            }
            cons.resetOffset(currentOffsets);
            cons.start();
        }
    } catch (IOException e) {
        throw new RuntimeException("replay", e);
    }
}
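The iterator loop that locates the leader metadata for a partition is a linear search; a hypothetical helper expressing the same lookup more directly (findPartition is not part of apex-malhar):

import java.util.List;
import kafka.javaapi.PartitionMetadata;

// Return the metadata entry whose partitionId matches,
// or null if the partition is not present in the list.
private static PartitionMetadata findPartition(List<PartitionMetadata> partitions, int partitionId) {
    for (PartitionMetadata pm : partitions) {
        if (pm.partitionId() == partitionId) {
            return pm;
        }
    }
    return null;
}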
use of kafka.message.MessageAndOffset in project cdap by caskdata.
the class KafkaConsumer method fetchMessages.
/**
 * Fetches Kafka messages starting from an offset.
 *
 * @param offset message offset to start from.
 * @param callback callback to handle the messages fetched.
 * @return number of messages fetched.
 */
public int fetchMessages(long offset, Callback callback) throws OffsetOutOfRangeException {
    ByteBufferMessageSet messageSet = fetchMessageSet(offset);
    int msgCount = 0;
    for (MessageAndOffset msg : messageSet) {
        ++msgCount;
        callback.handle(msg.offset(), msg.message().payload());
    }
    return msgCount;
}
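A hypothetical caller, assuming Callback is a single-method interface with the handle(long, ByteBuffer) signature used above; kafkaConsumer is an illustrative variable name, not from the project:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

int fetched = kafkaConsumer.fetchMessages(0L, new Callback() {
    @Override
    public void handle(long offset, ByteBuffer payload) {
        // Copy the payload out of the buffer before it is reused.
        byte[] bytes = new byte[payload.remaining()];
        payload.get(bytes);
        System.out.println(offset + ": " + new String(bytes, StandardCharsets.UTF_8));
    }
});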