Use of kafka.message.MessageAndOffset in project storm by apache.

The class KafkaUtilsTest, method runGetValueOnlyTuplesTest:

private void runGetValueOnlyTuplesTest() {
    String value = "value";
    // Send a message with a null key so only the value is round-tripped.
    createTopicAndSendMessage(null, value);
    ByteBufferMessageSet messageAndOffsets = getLastMessage();
    for (MessageAndOffset msg : messageAndOffsets) {
        Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message(), config.topic);
        // The first field of the generated tuple should be the raw value.
        assertEquals(value, lists.iterator().next().get(0));
    }
}
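generateTuples delegates deserialization to the scheme configured on the KafkaConfig. A minimal sketch of the kind of setup this test relies on, assuming a brokerHosts instance is already available (the variable name and topic are illustrative, not from the test):

KafkaConfig config = new KafkaConfig(brokerHosts, "test-topic");
// StringScheme deserializes each message payload into a single string field,
// which is why the test can read the value back at tuple position 0.
config.scheme = new SchemeAsMultiScheme(new StringScheme());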
Use of kafka.message.MessageAndOffset in project storm by apache.

The class KafkaBoltTest, method mockSingleMessage:

private static ByteBufferMessageSet mockSingleMessage(byte[] key, byte[] message) {
    ByteBufferMessageSet sets = mock(ByteBufferMessageSet.class);
    MessageAndOffset msg = mock(MessageAndOffset.class);
    final List<MessageAndOffset> msgs = ImmutableList.of(msg);
    doReturn(msgs.iterator()).when(sets).iterator();
    // Stub a Kafka message whose key and payload wrap the given byte arrays.
    Message kafkaMessage = mock(Message.class);
    doReturn(ByteBuffer.wrap(key)).when(kafkaMessage).key();
    doReturn(ByteBuffer.wrap(message)).when(kafkaMessage).payload();
    doReturn(kafkaMessage).when(msg).message();
    return sets;
}
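A hypothetical way a test body could consume this mock (not taken from the test class itself). Note that doReturn(msgs.iterator()) stubs one concrete iterator, so the mocked set can only be iterated once; a second for-each over it would see an exhausted iterator:

ByteBufferMessageSet set = mockSingleMessage("k".getBytes(), "v".getBytes());
for (MessageAndOffset m : set) {
    // Reads back the buffers wrapped in mockSingleMessage.
    ByteBuffer keyBuffer = m.message().key();
    ByteBuffer payloadBuffer = m.message().payload();
}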
Use of kafka.message.MessageAndOffset in project storm by apache.

The class KafkaBoltTest, method verifyMessage:

private boolean verifyMessage(String key, String message) {
    // LatestTime() resolves to the offset after the last message, so subtract one.
    long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, kafkaConfig.topic, 0, OffsetRequest.LatestTime()) - 1;
    ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(kafkaConfig, simpleConsumer, new Partition(Broker.fromString(broker.getBrokerConnectionString()), kafkaConfig.topic, 0), lastMessageOffset);
    MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
    Message kafkaMessage = messageAndOffset.message();
    ByteBuffer messageKeyBuffer = kafkaMessage.key();
    String keyString = null;
    String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
    if (messageKeyBuffer != null) {
        keyString = new String(Utils.toByteArray(messageKeyBuffer));
    }
    assertEquals(key, keyString);
    assertEquals(message, messageString);
    return true;
}
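A hypothetical call site, assuming the bolt has already flushed a (key, message) pair to the topic and that a tuple-building helper exists (generateTestTuple is an assumed name, not confirmed by this excerpt):

// Execute the bolt with a keyed tuple, then confirm the same pair is
// readable back from the last offset of the topic.
bolt.execute(generateTestTuple("the-key", "the-message"));
assertTrue(verifyMessage("the-key", "the-message"));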
Use of kafka.message.MessageAndOffset in project pinot by linkedin.

The class LLRealtimeSegmentDataManager, method consumeLoop:

protected boolean consumeLoop() throws Exception {
    _fieldExtractor.resetCounters();
    // No upper limit on the Kafka offset
    final long _endOffset = Long.MAX_VALUE;
    segmentLogger.info("Starting consumption loop start offset {}, finalOffset {}", _currentOffset, _finalOffset);
    while (!_shouldStop && !endCriteriaReached()) {
        // Consume for the next _kafkaReadTime ms, or until we reach the final
        // offset, whichever happens first; _currentOffset is updated when the
        // fetched messages are processed.
        Iterable<MessageAndOffset> messagesAndOffsets = null;
        Long highWatermark = null;
        try {
            Pair<Iterable<MessageAndOffset>, Long> messagesAndWatermark = _consumerWrapper.fetchMessagesAndHighWatermark(_currentOffset, _endOffset, _kafkaStreamMetadata.getKafkaFetchTimeoutMillis());
            consecutiveErrorCount = 0;
            messagesAndOffsets = messagesAndWatermark.getLeft();
            highWatermark = messagesAndWatermark.getRight();
        } catch (TimeoutException e) {
            handleTransientKafkaErrors(e);
            continue;
        } catch (SimpleConsumerWrapper.TransientConsumerException e) {
            handleTransientKafkaErrors(e);
            continue;
        } catch (SimpleConsumerWrapper.PermanentConsumerException e) {
            segmentLogger.warn("Kafka permanent exception when fetching messages, stopping consumption", e);
            throw e;
        } catch (Exception e) {
            // Unknown exception from Kafka; treat it as transient. One such
            // exception seen so far is java.net.SocketTimeoutException.
            handleTransientKafkaErrors(e);
            continue;
        }
        processKafkaEvents(messagesAndOffsets, highWatermark);
    }
    _serverMetrics.addMeteredTableValue(_metricKeyName, ServerMeter.ROWS_WITH_ERRORS, (long) _fieldExtractor.getTotalErrors());
    _serverMetrics.addMeteredTableValue(_metricKeyName, ServerMeter.ROWS_NEEDING_CONVERSIONS, (long) _fieldExtractor.getTotalConversions());
    _serverMetrics.addMeteredTableValue(_metricKeyName, ServerMeter.ROWS_WITH_NULL_VALUES, (long) _fieldExtractor.getTotalNulls());
    _serverMetrics.addMeteredTableValue(_metricKeyName, ServerMeter.COLUMNS_WITH_NULL_VALUES, (long) _fieldExtractor.getTotalNullCols());
    return true;
}
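handleTransientKafkaErrors is not shown in this excerpt. A hedged sketch of what such a handler typically does, assuming the consecutiveErrorCount field seen above and an illustrative maximum-retry constant; the actual Pinot implementation may differ:

private void handleTransientKafkaErrors(Exception e) throws Exception {
    consecutiveErrorCount++;
    if (consecutiveErrorCount > MAX_CONSECUTIVE_ERRORS) { // illustrative constant
        segmentLogger.warn("Too many consecutive transient Kafka errors, giving up", e);
        throw e;
    }
    segmentLogger.info("Transient Kafka error (consecutive count {}), backing off before retry", consecutiveErrorCount, e);
    // Linear backoff before the consume loop retries the fetch.
    Uninterruptibles.sleepUninterruptibly(100L * consecutiveErrorCount, TimeUnit.MILLISECONDS);
}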
Use of kafka.message.MessageAndOffset in project pinot by linkedin.

The class SimpleConsumerWrapper, method fetchMessagesAndHighWatermark:

/**
 * Fetches messages and the per-partition high watermark from Kafka between the specified offsets.
 *
 * @param startOffset The offset of the first message desired, inclusive
 * @param endOffset The offset of the last message desired, exclusive, or {@link Long#MAX_VALUE} for no end offset
 * @param timeoutMillis Timeout in milliseconds
 * @throws java.util.concurrent.TimeoutException If the operation could not be completed within {@code timeoutMillis}
 *         milliseconds
 * @return An iterable containing messages fetched from Kafka and their offsets, as well as the high watermark for
 *         this partition
 */
public synchronized Pair<Iterable<MessageAndOffset>, Long> fetchMessagesAndHighWatermark(long startOffset, long endOffset, int timeoutMillis) throws java.util.concurrent.TimeoutException {
    Preconditions.checkState(!_metadataOnlyConsumer, "Cannot fetch messages from a metadata-only SimpleConsumerWrapper");
    // Ensure that we're connected to the partition leader
    // TODO Improve error handling
    final long connectEndTime = System.currentTimeMillis() + _connectTimeoutMillis;
    while (_currentState.getStateValue() != ConsumerState.CONNECTED_TO_PARTITION_LEADER && System.currentTimeMillis() < connectEndTime) {
        _currentState.process();
    }
    if (_currentState.getStateValue() != ConsumerState.CONNECTED_TO_PARTITION_LEADER && connectEndTime <= System.currentTimeMillis()) {
        throw new java.util.concurrent.TimeoutException();
    }
    FetchResponse fetchResponse = _simpleConsumer.fetch(new FetchRequestBuilder().minBytes(100000).maxWait(timeoutMillis).addFetch(_topic, _partition, startOffset, 500000).build());
    if (!fetchResponse.hasError()) {
        // Kafka batches can straddle the requested offsets, so filter the
        // returned messages down to [startOffset, endOffset).
        final Iterable<MessageAndOffset> messageAndOffsetIterable = buildOffsetFilteringIterable(fetchResponse.messageSet(_topic, _partition), startOffset, endOffset);
        return Pair.of(messageAndOffsetIterable, fetchResponse.highWatermark(_topic, _partition));
    } else {
        throw exceptionForKafkaErrorCode(fetchResponse.errorCode(_topic, _partition));
    }
}
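buildOffsetFilteringIterable is not shown above. A minimal sketch of the filtering it implies, using Guava's Iterables.filter; the predicate details are an assumption based on the Javadoc contract, not the verbatim Pinot code:

private Iterable<MessageAndOffset> buildOffsetFilteringIterable(final Iterable<MessageAndOffset> messageAndOffsets, final long startOffset, final long endOffset) {
    return Iterables.filter(messageAndOffsets, new Predicate<MessageAndOffset>() {
        @Override
        public boolean apply(MessageAndOffset input) {
            // Keep only valid messages whose offset falls in [startOffset, endOffset).
            return input.offset() >= startOffset && input.offset() < endOffset && input.message().isValid();
        }
    });
}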