Example 11 with Message

Use of kafka.message.Message in project avro-kafka-storm by ransilberman.

The class MainTest, method testGenericRecord.

@Test
public void testGenericRecord() throws IOException, InterruptedException {
    Schema.Parser parser = new Schema.Parser();
    Schema schema = parser.parse(getClass().getResourceAsStream("LPEvent.avsc"));
    GenericRecord datum = new GenericData.Record(schema);
    datum.put("revision", 1L);
    datum.put("siteId", "28280110");
    datum.put("eventType", "PLine");
    datum.put("timeStamp", System.currentTimeMillis());
    datum.put("sessionId", "123456II");
    Map<String, Schema> unions = new HashMap<String, Schema>();
    List<Schema> typeList = schema.getField("subrecord").schema().getTypes();
    for (Schema sch : typeList) {
        unions.put(sch.getName(), sch);
    }
    GenericRecord plineDatum = new GenericData.Record(unions.get("pline"));
    plineDatum.put("text", "How can I help you?");
    plineDatum.put("lineType", 1);
    plineDatum.put("repId", "REPID12345");
    datum.put("subrecord", plineDatum);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    DatumWriter<GenericRecord> writer = new GenericDatumWriter<GenericRecord>(schema);
    Encoder encoder = EncoderFactory.get().binaryEncoder(out, null);
    writer.write(datum, encoder);
    encoder.flush();
    out.close();
    Message message = new Message(out.toByteArray());
    Properties props = new Properties();
    props.put("zk.connect", zkConnection);
    Producer<Message, Message> producer = new kafka.javaapi.producer.Producer<Message, Message>(new ProducerConfig(props));
    producer.send(new ProducerData<Message, Message>(topic, message));
}
Also used : Message(kafka.message.Message) HashMap(java.util.HashMap) Schema(org.apache.avro.Schema) ByteArrayOutputStream(java.io.ByteArrayOutputStream) GenericDatumWriter(org.apache.avro.generic.GenericDatumWriter) Properties(java.util.Properties) Producer(kafka.javaapi.producer.Producer) Encoder(org.apache.avro.io.Encoder) ProducerConfig(kafka.producer.ProducerConfig) GenericRecord(org.apache.avro.generic.GenericRecord) Test(org.junit.Test)
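
The example above only covers the producer side. A minimal sketch of the matching consumer-side decode, assuming the same LPEvent.avsc writer schema is available; the decode helper below is illustrative and not part of the project:

import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DecoderFactory;

// Turn the raw bytes of a consumed Kafka message back into a GenericRecord.
// 'schema' must be the same writer schema the producer serialized with.
public static GenericRecord decode(byte[] payload, Schema schema) throws IOException {
    DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>(schema);
    BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(payload, null);
    return reader.read(null, decoder);
}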

Example 12 with Message

Use of kafka.message.Message in project bagheera by mozilla-metrics.

The class ProducerTest, method countMessages.

private int countMessages() throws InvalidProtocolBufferException {
    SimpleConsumer consumer = new SimpleConsumer("localhost", KAFKA_BROKER_PORT, 100, 1024);
    long offset = 0L;
    int messageCount = 0;
    for (int i = 0; i < BATCH_SIZE; i++) {
        ByteBufferMessageSet messageSet = consumer.fetch(new FetchRequest(KAFKA_TOPIC, 0, offset, 1024));
        Iterator<MessageAndOffset> iterator = messageSet.iterator();
        MessageAndOffset msgAndOff;
        while (iterator.hasNext()) {
            messageCount++;
            msgAndOff = iterator.next();
            offset = msgAndOff.offset();
            Message message2 = msgAndOff.message();
            BagheeraMessage bmsg = BagheeraMessage.parseFrom(ByteString.copyFrom(message2.payload()));
            String payload = new String(bmsg.getPayload().toByteArray());
            System.out.println(String.format("Message %d @%d: %s", messageCount, offset, payload));
        }
    }
    consumer.close();
    return messageCount;
}
Also used : BagheeraMessage(com.mozilla.bagheera.BagheeraProto.BagheeraMessage) Message(kafka.message.Message) FetchRequest(kafka.api.FetchRequest) MessageAndOffset(kafka.message.MessageAndOffset) ByteString(com.google.protobuf.ByteString) ByteBufferMessageSet(kafka.javaapi.message.ByteBufferMessageSet) SimpleConsumer(kafka.javaapi.consumer.SimpleConsumer)
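
The ByteString.copyFrom(message2.payload()) call above reads the payload directly from its ByteBuffer. Without protobuf in the picture, the bytes have to be copied out manually; a minimal sketch, assuming the 0.7-era kafka.message.Message API where payload() returns a java.nio.ByteBuffer:

import java.nio.ByteBuffer;
import kafka.message.Message;

// Copy a message payload out of its backing ByteBuffer into a byte array.
// Working on a duplicate leaves the original buffer's position untouched.
public static byte[] payloadBytes(Message message) {
    ByteBuffer buffer = message.payload().duplicate();
    byte[] bytes = new byte[buffer.remaining()];
    buffer.get(bytes);
    return bytes;
}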

Example 13 with Message

Use of kafka.message.Message in project graylog2-server by Graylog2.

The class KafkaJournal, method write.

/**
     * Writes the list of entries to the journal.
     *
     * @param entries journal entries to be written
     * @return the last position written to in the journal
     */
@Override
public long write(List<Entry> entries) {
    try (Timer.Context ignored = writeTime.time()) {
        long payloadSize = 0L;
        long messageSetSize = 0L;
        long lastWriteOffset = 0L;
        final List<Message> messages = new ArrayList<>(entries.size());
        for (final Entry entry : entries) {
            final byte[] messageBytes = entry.getMessageBytes();
            final byte[] idBytes = entry.getIdBytes();
            payloadSize += messageBytes.length;
            final Message newMessage = new Message(messageBytes, idBytes);
            // Calculate the size of the new message in the message set by including the overhead for the log entry.
            final int newMessageSize = MessageSet.entrySize(newMessage);
            if (newMessageSize > maxMessageSize) {
                writeDiscardedMessages.mark();
                LOG.warn("Message with ID <{}> is too large to store in journal, skipping! (size: {} bytes / max: {} bytes)", new String(idBytes, StandardCharsets.UTF_8), newMessageSize, maxMessageSize);
                payloadSize = 0;
                continue;
            }
            // If adding this message would overflow the maximum segment size, flush the current
            // list of messages to avoid a MessageSetSizeTooLargeException.
            if ((messageSetSize + newMessageSize) > maxSegmentSize) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Flushing {} bytes message set with {} messages to avoid overflowing segment with max size of {} bytes", messageSetSize, messages.size(), maxSegmentSize);
                }
                lastWriteOffset = flushMessages(messages, payloadSize);
                // Reset the messages list and size counters to start a new batch.
                messages.clear();
                messageSetSize = 0;
                payloadSize = 0;
            }
            messages.add(newMessage);
            messageSetSize += newMessageSize;
            if (LOG.isTraceEnabled()) {
                LOG.trace("Message {} contains bytes {}", bytesToHex(idBytes), bytesToHex(messageBytes));
            }
        }
        // Flush the rest of the messages.
        if (messages.size() > 0) {
            lastWriteOffset = flushMessages(messages, payloadSize);
        }
        return lastWriteOffset;
    }
}
Also used : HdrTimer(org.graylog2.shared.metrics.HdrTimer) Timer(com.codahale.metrics.Timer) Message(kafka.message.Message) ArrayList(java.util.ArrayList)
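
The core of write() is a size-bounded batching loop: accumulate messages until the next one would overflow the segment budget, flush, and start a new batch. A standalone sketch of that pattern, with a print stub standing in for the journal-specific flushMessages() call; all names here are illustrative:

import java.util.ArrayList;
import java.util.List;

public class BatchingSketch {

    // Stub flush; KafkaJournal would call flushMessages(messages, payloadSize) here.
    static void flush(List<byte[]> batch) {
        System.out.println("flushing " + batch.size() + " messages");
    }

    // Accumulate payloads until the next one would exceed the budget, then flush.
    static void writeBatched(List<byte[]> payloads, long maxBatchSize) {
        List<byte[]> batch = new ArrayList<byte[]>();
        long batchSize = 0L;
        for (byte[] payload : payloads) {
            if (!batch.isEmpty() && batchSize + payload.length > maxBatchSize) {
                flush(batch);
                batch.clear();
                batchSize = 0L;
            }
            batch.add(payload);
            batchSize += payload.length;
        }
        if (!batch.isEmpty()) {
            // Flush the final partial batch, mirroring the tail flush in write().
            flush(batch);
        }
    }
}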

Aggregations

Message (kafka.message.Message): 13
ByteBufferMessageSet (kafka.javaapi.message.ByteBufferMessageSet): 5
MessageAndOffset (kafka.message.MessageAndOffset): 5
ByteBuffer (java.nio.ByteBuffer): 3
BagheeraMessage (com.mozilla.bagheera.BagheeraProto.BagheeraMessage): 2
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 2
ArrayList (java.util.ArrayList): 2
Properties (java.util.Properties): 2
Producer (kafka.javaapi.producer.Producer): 2
ProducerConfig (kafka.producer.ProducerConfig): 2
Schema (org.apache.avro.Schema): 2
Encoder (org.apache.avro.io.Encoder): 2
BulkRequestBuilder (org.elasticsearch.action.bulk.BulkRequestBuilder): 2
Test (org.junit.Test): 2
Timer (com.codahale.metrics.Timer): 1
ByteString (com.google.protobuf.ByteString): 1
InvalidProtocolBufferException (com.google.protobuf.InvalidProtocolBufferException): 1
KeyValueSink (com.mozilla.bagheera.sink.KeyValueSink): 1
IOException (java.io.IOException): 1
UnsupportedEncodingException (java.io.UnsupportedEncodingException): 1