Use of kafka.message.MessageAndOffset in the Apache Metron project:
the readMessages method of the KafkaComponent class.
/**
 * Reads all messages currently available in partition 0 of the given topic,
 * starting from offset 0, using the legacy Kafka SimpleConsumer API.
 *
 * @param topic the Kafka topic to read from
 * @return the raw payload bytes of every fetched message, in offset order
 */
public List<byte[]> readMessages(String topic) {
  // Broker address/port and buffer sizes are fixed for the in-memory test component.
  SimpleConsumer consumer = new SimpleConsumer("localhost", 6667, 100000, 64 * 1024, "consumer");
  try {
    // Fetch from partition 0, offset 0, up to 100000 bytes.
    FetchRequest req = new FetchRequestBuilder().clientId("consumer").addFetch(topic, 0, 0, 100000).build();
    FetchResponse fetchResponse = consumer.fetch(req);
    Iterator<MessageAndOffset> results = fetchResponse.messageSet(topic, 0).iterator();
    List<byte[]> messages = new ArrayList<>();
    while (results.hasNext()) {
      ByteBuffer payload = results.next().message().payload();
      // Copy the payload out of the (non-thread-safe) fetch buffer.
      byte[] bytes = new byte[payload.limit()];
      payload.get(bytes);
      messages.add(bytes);
    }
    return messages;
  } finally {
    // Always release the broker socket, even when fetch or iteration throws.
    consumer.close();
  }
}
Use of kafka.message.MessageAndOffset in the Apache apex-malhar project:
the run method of the KafkaSimpleConsumer class.
/**
 * Continuously fetches messages from partition 1 of topic "topic1" and logs
 * each payload, advancing the fetch offset past every consumed message.
 * Runs until {@code isAlive} is cleared.
 */
@Override
public void run() {
  long offset = 0;
  while (isAlive) {
    // Fetch request for topic "topic1", partition 1, current offset, fetch size 1MB.
    FetchRequest fetchRequest = new FetchRequestBuilder().clientId("default_client").addFetch("topic1", 1, offset, 1000000).build();
    // Get the message set from the consumer and log the messages.
    ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 1);
    Iterator<MessageAndOffset> itr = messages.iterator();
    while (itr.hasNext() && isAlive) {
      MessageAndOffset msg = itr.next();
      // Advance past the consumed message; msg.offset() alone would make the
      // next fetch re-read this message forever.
      offset = msg.nextOffset();
      logger.debug("consumed: {} offset: {}", byteBufferToString(msg.message().payload()).toString(), offset);
      receiveCount++;
    }
  }
}
Use of kafka.message.MessageAndOffset in the Apache incubator-gobblin project:
the getMockMessageAndOffset method of the KafkaDeserializerExtractorTest class.
/**
 * Builds a mocked Kafka 0.8 record whose underlying message carries the
 * supplied payload buffer.
 *
 * @param payload the bytes the mocked message should expose via payload()
 * @return a Kafka08ConsumerRecord wrapping the mocked MessageAndOffset
 */
private ByteArrayBasedKafkaRecord getMockMessageAndOffset(ByteBuffer payload) {
  Message message = mock(Message.class);
  when(message.payload()).thenReturn(payload);
  MessageAndOffset messageAndOffset = mock(MessageAndOffset.class);
  when(messageAndOffset.message()).thenReturn(message);
  return new Kafka08ConsumerRecord(messageAndOffset);
}
Use of kafka.message.MessageAndOffset in the apache-kafka-on-k8s project by Banzai Cloud:
the printMessages method of the SimpleConsumerDemo class.
/**
 * Prints the UTF-8 decoded payload of every message in the set to stdout.
 *
 * @param messageSet the fetched messages to print
 * @throws UnsupportedEncodingException retained for source compatibility with
 *     existing callers; no longer actually thrown
 */
private static void printMessages(ByteBufferMessageSet messageSet) throws UnsupportedEncodingException {
  for (MessageAndOffset messageAndOffset : messageSet) {
    ByteBuffer payload = messageAndOffset.message().payload();
    // remaining() (not limit()) is correct even if the buffer's position is non-zero.
    byte[] bytes = new byte[payload.remaining()];
    payload.get(bytes);
    // Charset constant avoids the per-call charset-name lookup of new String(bytes, "UTF-8").
    System.out.println(new String(bytes, java.nio.charset.StandardCharsets.UTF_8));
  }
}
Use of kafka.message.MessageAndOffset in the CDAP project by Cask Data:
the run method of the KafkaLogProcessorPipeline class.
@Override
protected void run() {
// Remember the running thread so stop() can interrupt the sleep below.
runThread = Thread.currentThread();
try {
initializeOffsets();
LOG.info("Kafka offsets initialize for pipeline {} as {}", name, offsets);
// One in-flight fetch future per partition, keyed by partition id.
Map<Integer, Future<Iterable<MessageAndOffset>>> futures = new HashMap<>();
String topic = config.getTopic();
lastCheckpointTime = System.currentTimeMillis();
// Main poll loop: fetch from all partitions, process, append, checkpoint.
while (!stopped) {
boolean hasMessageProcessed = false;
for (Map.Entry<Integer, Future<Iterable<MessageAndOffset>>> entry : fetchAll(offsets, futures).entrySet()) {
int partition = entry.getKey();
try {
if (processMessages(topic, partition, entry.getValue())) {
hasMessageProcessed = true;
}
} catch (IOException | KafkaException e) {
// Per-partition failures are logged and retried on the next loop iteration;
// other partitions keep making progress.
OUTAGE_LOG.warn("Failed to fetch or process messages from {}:{}. Will be retried in next iteration.", topic, partition, e);
}
}
long now = System.currentTimeMillis();
// Flush buffered events (non-forced) and track how many remain unsynced.
unSyncedEvents += appendEvents(now, false);
long nextCheckpointDelay = trySyncAndPersistCheckpoints(now);
// Sleep until the earliest event in the buffer is time to be written out.
if (!hasMessageProcessed) {
long sleepMillis = config.getEventDelayMillis();
if (!eventQueue.isEmpty()) {
// Earliest buffered event's timestamp determines how long we may wait.
sleepMillis += eventQueue.first().getTimeStamp() - now;
}
// Never sleep past the next checkpoint deadline.
sleepMillis = Math.min(sleepMillis, nextCheckpointDelay);
if (sleepMillis > 0) {
TimeUnit.MILLISECONDS.sleep(sleepMillis);
}
}
}
} catch (InterruptedException e) {
// Interruption means stopping the service.
// NOTE(review): the interrupt status is not restored here; that appears
// deliberate (the loop exits and run() returns), but confirm no caller
// above relies on the thread's interrupted flag.
}
}
Aggregations