use of kafka.message.MessageAndOffset in project heron by twitter.
the class PartitionManager method next.
// Returns NO_EMITTED if nothing could be emitted from the current batch.
public EmitState next(SpoutOutputCollector collector) {
  if (waitingToEmit.isEmpty()) {
    fill();
  }
  while (true) {
    MessageAndOffset toEmit = waitingToEmit.pollFirst();
    if (toEmit == null) {
      return EmitState.NO_EMITTED;
    }
    Iterable<List<Object>> tups;
    if (spoutConfig.scheme instanceof MessageMetadataSchemeAsMultiScheme) {
      tups = KafkaUtils.generateTuples((MessageMetadataSchemeAsMultiScheme) spoutConfig.scheme,
          toEmit.message(), partition, toEmit.offset());
    } else {
      tups = KafkaUtils.generateTuples(spoutConfig, toEmit.message(), partition.topic);
    }
    if ((tups != null) && tups.iterator().hasNext()) {
      if (!Strings.isNullOrEmpty(spoutConfig.outputStreamId)) {
        for (List<Object> tup : tups) {
          collector.emit(spoutConfig.outputStreamId, tup, new KafkaMessageId(partition, toEmit.offset()));
        }
      } else {
        for (List<Object> tup : tups) {
          collector.emit(tup, new KafkaMessageId(partition, toEmit.offset()));
        }
      }
      break;
    } else {
      // The message produced no tuples; ack it immediately so it is not replayed.
      ack(toEmit.offset());
    }
  }
  if (!waitingToEmit.isEmpty()) {
    return EmitState.EMITTED_MORE_LEFT;
  } else {
    return EmitState.EMITTED_END;
  }
}
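For context, the three EmitState values used above come straight from the code (NO_EMITTED, EMITTED_MORE_LEFT, EMITTED_END); here is a minimal sketch of that enum plus a hypothetical caller loop, where the 'manager' and 'collector' fields are assumptions for illustration:

  // Minimal sketch of the EmitState contract used by next() above.
  public enum EmitState {
    EMITTED_MORE_LEFT, // a tuple was emitted and more messages remain in the current batch
    EMITTED_END,       // a tuple was emitted and the current batch is now drained
    NO_EMITTED         // nothing was emitted, even after fill()
  }

  // Hypothetical driver, assuming a PartitionManager field 'manager' and a collector:
  // keep polling one partition until its batch is exhausted, then rotate.
  EmitState state = manager.next(collector);
  if (state != EmitState.EMITTED_MORE_LEFT) {
    // rotate to the next partition, or back off until more data can be fetched
  }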
use of kafka.message.MessageAndOffset in project heron by twitter.
the class KafkaUtilsTest method generateTuplesWithMessageAndMetadataScheme.
@Test
public void generateTuplesWithMessageAndMetadataScheme() {
  String value = "value";
  Partition mockPartition = Mockito.mock(Partition.class);
  mockPartition.partition = 0;
  long offset = 0L;
  MessageMetadataSchemeAsMultiScheme scheme =
      new MessageMetadataSchemeAsMultiScheme(new StringMessageAndMetadataScheme());
  createTopicAndSendMessage(null, value);
  ByteBufferMessageSet messageAndOffsets = getLastMessage();
  for (MessageAndOffset msg : messageAndOffsets) {
    Iterable<List<Object>> lists = KafkaUtils.generateTuples(scheme, msg.message(), mockPartition, offset);
    List<Object> values = lists.iterator().next();
    assertEquals("Message is incorrect", value, values.get(0));
    assertEquals("Partition is incorrect", mockPartition.partition, values.get(1));
    assertEquals("Offset is incorrect", offset, values.get(2));
  }
}
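The test pins down the tuple layout: (message, partition id, offset). A sketch of a scheme shape that would satisfy those assertions follows; the deserializeMessageWithMetadata hook mirrors storm-kafka's MessageMetadataScheme and is an assumption about heron's fork, not confirmed by this snippet.

  import java.nio.ByteBuffer;
  import java.util.Arrays;
  import java.util.Collections;
  import java.util.List;

  // Sketch only: produces one tuple per message in the asserted (value, partition, offset) order.
  public class StringMessageAndMetadataScheme extends StringScheme implements MessageMetadataScheme {
    @Override
    public Iterable<List<Object>> deserializeMessageWithMetadata(ByteBuffer message,
        Partition partition, long offset) {
      String value = StringScheme.deserializeString(message); // payload as string
      return Collections.singletonList(Arrays.<Object>asList(value, partition.partition, offset));
    }
  }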
use of kafka.message.MessageAndOffset in project heron by twitter.
the class KafkaUtilsTest method generateTuplesWithValueAndStringMultiSchemeWithTopic.
@Test
public void generateTuplesWithValueAndStringMultiSchemeWithTopic() {
  config.scheme = new StringMultiSchemeWithTopic();
  String value = "value";
  createTopicAndSendMessage(value);
  ByteBufferMessageSet messageAndOffsets = getLastMessage();
  for (MessageAndOffset msg : messageAndOffsets) {
    Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message(), config.topic);
    List<Object> list = lists.iterator().next();
    assertEquals(value, list.get(0));
    assertEquals(config.topic, list.get(1));
  }
}
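For the two assertions to hold, the topic-aware scheme must append the topic name after the payload string. A sketch of that deserialization step, where the deserializeWithTopic name follows storm-kafka's StringMultiSchemeWithTopic and is assumed rather than confirmed here:

  // Sketch: one tuple per message, laid out as (value, topic) to match the assertions above.
  public Iterable<List<Object>> deserializeWithTopic(String topic, ByteBuffer bytes) {
    String value = StringScheme.deserializeString(bytes); // payload as a UTF-8 string
    return Collections.singletonList(Arrays.<Object>asList(value, topic));
  }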
use of kafka.message.MessageAndOffset in project heron by twitter.
the class KafkaUtilsTest method generateTuplesWithValueSchemeAndKeyValueMessage.
@Test
public void generateTuplesWithValueSchemeAndKeyValueMessage() {
  config.scheme = new SchemeAsMultiScheme(new StringScheme());
  String value = "value";
  String key = "key";
  createTopicAndSendMessage(key, value);
  ByteBufferMessageSet messageAndOffsets = getLastMessage();
  for (MessageAndOffset msg : messageAndOffsets) {
    Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message(), config.topic);
    assertEquals(value, lists.iterator().next().get(0));
  }
}
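Note the contrast with the previous tests: a plain SchemeAsMultiScheme(new StringScheme()) deserializes only the message value, so the key written by createTopicAndSendMessage(key, value) never reaches the tuple, and the test checks only get(0). A minimal sketch of that value-only path, assuming storm-kafka's static StringScheme.deserializeString helper:

  // Value-only deserialization: the key of the key/value Kafka message is dropped.
  ByteBuffer payload = msg.message().payload(); // value bytes; msg.message().key() is ignored
  List<Object> tuple = Collections.singletonList(StringScheme.deserializeString(payload));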
use of kafka.message.MessageAndOffset in project graylog2-server by Graylog2.
the class KafkaJournal method read.
public List<JournalReadEntry> read(long readOffset, long requestedMaximumCount) {
  // Always read at least one!
  final long maximumCount = Math.max(1, requestedMaximumCount);
  long maxOffset = readOffset + maximumCount;
  if (shuttingDown) {
    return Collections.emptyList();
  }
  final List<JournalReadEntry> messages = new ArrayList<>(Ints.saturatedCast(maximumCount));
  try (Timer.Context ignored = readTime.time()) {
    final long logStartOffset = getLogStartOffset();
    if (readOffset < logStartOffset) {
      LOG.info("Read offset {} before start of log at {}, starting to read from the beginning of the journal.",
          readOffset, logStartOffset);
      readOffset = logStartOffset;
      maxOffset = readOffset + maximumCount;
    }
    LOG.debug("Requesting to read a maximum of {} messages (or 5MB) from the journal, offset interval [{}, {})",
        maximumCount, readOffset, maxOffset);
    // TODO benchmark and make read-ahead strategy configurable for performance tuning
    final MessageSet messageSet =
        kafkaLog.read(readOffset, 5 * 1024 * 1024, Option.<Object>apply(maxOffset)).messageSet();
    final Iterator<MessageAndOffset> iterator = messageSet.iterator();
    long firstOffset = Long.MIN_VALUE;
    long lastOffset = Long.MIN_VALUE;
    long totalBytes = 0;
    while (iterator.hasNext()) {
      final MessageAndOffset messageAndOffset = iterator.next();
      if (firstOffset == Long.MIN_VALUE) {
        firstOffset = messageAndOffset.offset();
      }
      // always remember the last seen offset for debug purposes below
      lastOffset = messageAndOffset.offset();
      final byte[] payloadBytes = ByteBufferUtils.readBytes(messageAndOffset.message().payload());
      if (LOG.isTraceEnabled()) {
        final byte[] keyBytes = ByteBufferUtils.readBytes(messageAndOffset.message().key());
        LOG.trace("Read message {} contains {}", bytesToHex(keyBytes), bytesToHex(payloadBytes));
      }
      totalBytes += payloadBytes.length;
      messages.add(new JournalReadEntry(payloadBytes, messageAndOffset.offset()));
      // remember where to read from
      nextReadOffset = messageAndOffset.nextOffset();
    }
    if (messages.isEmpty()) {
      LOG.debug("No messages available to read for offset interval [{}, {}).", readOffset, maxOffset);
    } else {
      LOG.debug("Read {} messages, total payload size {}, from journal, offset interval [{}, {}], requested read at {}",
          messages.size(), totalBytes, firstOffset, lastOffset, readOffset);
    }
  } catch (OffsetOutOfRangeException e) {
    // This is fine, the reader tries to read faster than the writer committed data. Next read will get the data.
    LOG.debug("Offset out of range, no messages available starting at offset {}", readOffset);
  } catch (Exception e) {
    // sigh.
    if (shuttingDown) {
      LOG.debug("Caught exception during shutdown, ignoring it because we might have been blocked on a read.");
      return Collections.emptyList();
    }
    //noinspection ConstantConditions
    if (e instanceof ClosedByInterruptException) {
      LOG.debug("Interrupted while reading from journal, during shutdown this is harmless and ignored.", e);
    } else {
      throw e;
    }
  }
  readMessages.mark(messages.size());
  return messages;
}
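Because each JournalReadEntry carries the offset it was read at, a caller can page through the journal by resuming just past the last delivered entry. A hypothetical drain loop over the method above; 'journal', the starting offset, and process() are assumptions for illustration, and getOffset() is assumed to expose the offset stored in each JournalReadEntry:

  long offset = 0L; // e.g. the last acknowledged offset + 1
  while (true) {
    final List<JournalReadEntry> batch = journal.read(offset, 1000);
    if (batch.isEmpty()) {
      break; // nothing more committed yet; a real caller would back off and retry
    }
    for (JournalReadEntry entry : batch) {
      process(entry); // placeholder for real message handling
    }
    offset = batch.get(batch.size() - 1).getOffset() + 1; // resume after the last entry
  }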