Use of org.graylog2.plugin.journal.RawMessage in project graylog2-server by Graylog2.
From class KafkaTransport, method doLaunch:
@Override
public void doLaunch(final MessageInput input) throws MisfireException {
    serverStatus.awaitRunning(new Runnable() {
        @Override
        public void run() {
            lifecycleStateChange(Lifecycle.RUNNING);
        }
    });
    // Listen for lifecycle changes.
    serverEventBus.register(this);

    final Properties props = new Properties();
    props.put("group.id", GROUP_ID);
    props.put("client.id", "gl2-" + nodeId + "-" + input.getId());
    props.put("fetch.min.bytes", String.valueOf(configuration.getInt(CK_FETCH_MIN_BYTES)));
    props.put("fetch.wait.max.ms", String.valueOf(configuration.getInt(CK_FETCH_WAIT_MAX)));
    props.put("zookeeper.connect", configuration.getString(CK_ZOOKEEPER));
    // The default auto-commit interval is 60 seconds. Reduce it to 1 second to minimize
    // message duplication if something breaks.
    props.put("auto.commit.interval.ms", "1000");
    // Set a consumer timeout to avoid blocking on the consumer iterator.
    props.put("consumer.timeout.ms", "1000");

    final int numThreads = configuration.getInt(CK_THREADS);
    final ConsumerConfig consumerConfig = new ConsumerConfig(props);
    cc = Consumer.createJavaConsumerConnector(consumerConfig);
    final TopicFilter filter = new Whitelist(configuration.getString(CK_TOPIC_FILTER));
    final List<KafkaStream<byte[], byte[]>> streams = cc.createMessageStreamsByFilter(filter, numThreads);
    final ExecutorService executor = executorService(numThreads);

    // This latch is used during shutdown to first stop all submitted jobs before committing
    // the offsets back to ZooKeeper and then shutting down the connection. This avoids
    // yanking the connection away from the consumer runnables.
    stopLatch = new CountDownLatch(streams.size());
    for (final KafkaStream<byte[], byte[]> stream : streams) {
        executor.submit(new Runnable() {
            @Override
            public void run() {
                final ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();
                boolean retry;
                do {
                    retry = false;
                    try {
                        // noinspection WhileLoopReplaceableByForEach
                        while (consumerIterator.hasNext()) {
                            if (paused) {
                                // We try not to spin here, so we wait until the lifecycle goes back to running.
                                LOG.debug("Message processing is paused, blocking until message processing is turned back on.");
                                Uninterruptibles.awaitUninterruptibly(pausedLatch);
                            }
                            // Check for being stopped before actually getting the message,
                            // otherwise we could end up losing that message.
                            if (stopped) {
                                break;
                            }
                            if (isThrottled()) {
                                blockUntilUnthrottled();
                            }
                            // Fetch the message. This immediately marks it as processed, which gets
                            // tricky if an exception is thrown while handling it below.
                            final MessageAndMetadata<byte[], byte[]> message = consumerIterator.next();
                            final byte[] bytes = message.message();
                            // It is possible that the message is null.
                            if (bytes == null) {
                                continue;
                            }
                            totalBytesRead.addAndGet(bytes.length);
                            lastSecBytesReadTmp.addAndGet(bytes.length);
                            final RawMessage rawMessage = new RawMessage(bytes);
                            // TODO implement throttling
                            input.processRawMessage(rawMessage);
                        }
                    } catch (ConsumerTimeoutException e) {
                        // Happens when there is nothing to consume; retry to check again.
                        retry = true;
                    } catch (Exception e) {
                        LOG.error("Kafka consumer error, stopping consumer thread.", e);
                    }
                } while (retry && !stopped);
                // Explicitly commit our offsets when stopping.
                // This might trigger a couple of times, but it won't hurt.
                cc.commitOffsets();
                stopLatch.countDown();
            }
        });
    }
    scheduler.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            lastSecBytesRead.set(lastSecBytesReadTmp.getAndSet(0));
        }
    }, 1, 1, TimeUnit.SECONDS);
}
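The two AtomicLong counters and the scheduled task at the end implement a simple per-second throughput gauge: one counter accumulates bytes, and once per second its value is atomically moved into a "last second" gauge. Below is a minimal, self-contained sketch of that pattern; the class and method names are illustrative, not Graylog API.

import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class ThroughputGauge {
    private final AtomicLong lastSecBytesReadTmp = new AtomicLong();
    private final AtomicLong lastSecBytesRead = new AtomicLong();

    public void start(ScheduledExecutorService scheduler) {
        // getAndSet(0) atomically reads and resets the accumulator, so no
        // bytes are counted twice or lost between ticks.
        scheduler.scheduleAtFixedRate(
                () -> lastSecBytesRead.set(lastSecBytesReadTmp.getAndSet(0)),
                1, 1, TimeUnit.SECONDS);
    }

    public void record(int byteCount) {
        lastSecBytesReadTmp.addAndGet(byteCount);
    }

    public long bytesPerSecond() {
        return lastSecBytesRead.get();
    }
}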
Use of org.graylog2.plugin.journal.RawMessage in project graylog2-server by Graylog2.
From class JournalReader, method run:
@Override
protected void run() throws Exception {
    try {
        requestedReadCount = metricRegistry.register(name(this.getClass(), "requestedReadCount"),
                new HdrHistogram(processBuffer.getRingBufferSize() + 1, 3));
    } catch (IllegalArgumentException e) {
        log.warn("Metric already exists", e);
        throw e;
    }
    while (isRunning()) {
        // TODO interfere with reading if we are not 100% certain we should be reading, see #listenForLifecycleChanges
        if (!shouldBeReading) {
            Uninterruptibles.sleepUninterruptibly(100, MILLISECONDS);
            // don't read immediately, but check if we should be shutting down.
            continue;
        }
        // approximate count to read from the journal to backfill the processing chain
        final long remainingCapacity = processBuffer.getRemainingCapacity();
        requestedReadCount.update(remainingCapacity);
        final List<Journal.JournalReadEntry> encodedRawMessages = journal.read(remainingCapacity);
        if (encodedRawMessages.isEmpty()) {
            log.debug("No messages to read from Journal, waiting until the writer adds more messages.");
            // block until something is written to the journal again
            try {
                readBlocked.inc();
                journalFilled.acquire();
            } catch (InterruptedException ignored) {
                // this can happen when we are blocked but the system wants to shut down. We don't have to do anything in that case.
                continue;
            }
            log.debug("Messages have been written to Journal, continuing to read.");
            // we don't care how many messages were inserted in the meantime, we'll read all of them eventually
            journalFilled.drainPermits();
        } else {
            readMessages.mark(encodedRawMessages.size());
            log.debug("Processing {} messages from journal.", encodedRawMessages.size());
            for (final Journal.JournalReadEntry encodedRawMessage : encodedRawMessages) {
                final RawMessage rawMessage = RawMessage.decode(encodedRawMessage.getPayload(), encodedRawMessage.getOffset());
                if (rawMessage == null) {
                    // never insert null objects into the ringbuffer, as that is useless
                    log.error("Found null raw message!");
                    journal.markJournalOffsetCommitted(encodedRawMessage.getOffset());
                    continue;
                }
                processBuffer.insertBlocking(rawMessage);
            }
        }
    }
    log.info("Stopping.");
}
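The journalFilled semaphore above implements a coalescing wakeup: the writer releases one permit per append, the reader blocks on acquire() when the journal is empty, and after waking it drains all remaining permits so a burst of N appends wakes it at most once. A minimal sketch of the pattern, with hypothetical names:

import java.util.concurrent.Semaphore;

public class CoalescingSignal {
    private final Semaphore filled = new Semaphore(0);

    // Writer side: called once per journal append.
    public void signalWrite() {
        filled.release();
    }

    // Reader side: blocks until at least one write happened, then collapses
    // any further pending signals into this single wakeup.
    public void awaitWrites() throws InterruptedException {
        filled.acquire();
        filled.drainPermits();
    }
}

Draining is safe here because the reader does not count messages via permits; it simply re-reads everything available from the journal on each pass.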
Use of org.graylog2.plugin.journal.RawMessage in project graylog2-server by Graylog2.
From class JournalDecode, method runCommand:
@Override
protected void runCommand() {
    Range<Long> range;
    try {
        final List<String> offsets = Splitter.on("..").limit(2).splitToList(rangeArg);
        if (offsets.size() == 1) {
            range = Range.singleton(Long.valueOf(offsets.get(0)));
        } else if (offsets.size() == 2) {
            final String first = offsets.get(0);
            final String second = offsets.get(1);
            if (first.isEmpty()) {
                range = Range.atMost(Long.valueOf(second));
            } else if (second.isEmpty()) {
                range = Range.atLeast(Long.valueOf(first));
            } else {
                range = Range.closed(Long.valueOf(first), Long.valueOf(second));
            }
        } else {
            throw new RuntimeException();
        }
    } catch (Exception e) {
        System.err.println("Malformed offset range: " + rangeArg);
        return;
    }
    final Map<String, Codec.Factory<? extends Codec>> codecFactory =
            injector.getInstance(Key.get(new TypeLiteral<Map<String, Codec.Factory<? extends Codec>>>() {
            }));
    // Open-ended ranges ("..100" or "100..") have no lower or upper endpoint,
    // so guard the lookups instead of calling lowerEndpoint()/upperEndpoint() unconditionally.
    final long readOffset = range.hasLowerBound() ? range.lowerEndpoint() : 0L;
    final long count = range.hasUpperBound() ? range.upperEndpoint() - readOffset + 1 : Long.MAX_VALUE;
    final List<Journal.JournalReadEntry> entries = journal.read(readOffset, count);
    for (final Journal.JournalReadEntry entry : entries) {
        final RawMessage raw = RawMessage.decode(entry.getPayload(), entry.getOffset());
        if (raw == null) {
            // MessageFormatter.format(...) returns a FormattingTuple; getMessage() yields the formatted string.
            System.err.println(MessageFormatter.format(
                    "Journal entry at offset {} failed to decode", entry.getOffset()).getMessage());
            continue;
        }
        final Codec codec = codecFactory.get(raw.getCodecName()).create(raw.getCodecConfig());
        final Message message = codec.decode(raw);
        if (message == null) {
            // arrayFormat is the variant that takes an Object[] of arguments.
            System.err.println(MessageFormatter.arrayFormat(
                    "Could not use codec {} to decode raw message id {} at offset {}",
                    new Object[] { raw.getCodecName(), raw.getId(), entry.getOffset() }).getMessage());
        } else {
            message.setMessageQueueId(raw.getMessageQueueId());
        }
        final ResolvableInetSocketAddress remoteAddress = raw.getRemoteAddress();
        final String remote = remoteAddress == null ? "unknown address" : remoteAddress.getInetSocketAddress().toString();
        final StringBuilder sb = new StringBuilder();
        sb.append("Message ").append(raw.getId()).append('\n')
          .append(" at ").append(raw.getTimestamp()).append('\n')
          .append(" in format ").append(raw.getCodecName()).append('\n')
          .append(" at offset ").append(raw.getMessageQueueId()).append('\n')
          .append(" received from remote address ").append(remote).append('\n')
          .append(" (source field: ").append(message == null ? "unparsed" : message.getSource()).append(')').append('\n');
        if (message != null) {
            sb.append(" contains ").append(message.getFieldNames().size()).append(" fields.");
        } else {
            sb.append("The message could not be parsed by the given codec.");
        }
        System.out.println(sb);
    }
}
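The accepted range syntax is easier to see in isolation. This small standalone sketch (class name illustrative) shows how Splitter.on("..").limit(2) splits each form; expected output is in the trailing comment.

import com.google.common.base.Splitter;
import java.util.List;

public class RangeSyntaxDemo {
    public static void main(String[] args) {
        // Guava's Splitter keeps empty leading/trailing pieces by default,
        // which is what makes the open-ended forms distinguishable.
        for (String arg : new String[] { "42", "10..20", "..20", "10.." }) {
            List<String> parts = Splitter.on("..").limit(2).splitToList(arg);
            System.out.println(arg + " -> " + parts);
        }
        // 42 -> [42]          (singleton)
        // 10..20 -> [10, 20]  (closed range)
        // ..20 -> [, 20]      (atMost)
        // 10.. -> [10, ]      (atLeast)
    }
}

The guarded hasLowerBound()/hasUpperBound() lookups in the command above exist precisely to handle the last two forms.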
Use of org.graylog2.plugin.journal.RawMessage in project graylog2-server by Graylog2.
From class GelfCodec, method decode:
@Nullable
@Override
public Message decode(@Nonnull final RawMessage rawMessage) {
    final GELFMessage gelfMessage = new GELFMessage(rawMessage.getPayload(), rawMessage.getRemoteAddress());
    final String json = gelfMessage.getJSON(decompressSizeLimit);
    final JsonNode node;
    try {
        node = objectMapper.readTree(json);
        if (node == null) {
            throw new IOException("null result");
        }
    } catch (final Exception e) {
        log.error("Could not parse JSON, first 400 characters: " + StringUtils.abbreviate(json, 403), e);
        throw new IllegalStateException("JSON is null/could not be parsed (invalid JSON)", e);
    }
    try {
        validateGELFMessage(node, rawMessage.getId(), rawMessage.getRemoteAddress());
    } catch (IllegalArgumentException e) {
        log.trace("Invalid GELF message <{}>", node);
        throw e;
    }
    // Timestamp.
    final double messageTimestamp = timestampValue(node);
    final DateTime timestamp;
    if (messageTimestamp <= 0) {
        timestamp = rawMessage.getTimestamp();
    } else {
        // we treat this as a unix timestamp
        timestamp = Tools.dateTimeFromDouble(messageTimestamp);
    }
    final Message message = new Message(stringValue(node, "short_message"), stringValue(node, "host"), timestamp);
    message.addField(Message.FIELD_FULL_MESSAGE, stringValue(node, "full_message"));
    final String file = stringValue(node, "file");
    if (file != null && !file.isEmpty()) {
        message.addField("file", file);
    }
    final long line = longValue(node, "line");
    if (line > -1) {
        message.addField("line", line);
    }
    // Level is set by server if not specified by client.
    final int level = intValue(node, "level");
    if (level > -1) {
        message.addField("level", level);
    }
    // Facility is set by server if not specified by client.
    final String facility = stringValue(node, "facility");
    if (facility != null && !facility.isEmpty()) {
        message.addField("facility", facility);
    }
    // Add additional data if there is some.
    final Iterator<Map.Entry<String, JsonNode>> fields = node.fields();
    while (fields.hasNext()) {
        final Map.Entry<String, JsonNode> entry = fields.next();
        String key = entry.getKey();
        // Do not index useless GELF "version" field.
        if ("version".equals(key)) {
            continue;
        }
        // Don't include GELF syntax underscore in message field key.
        if (key.startsWith("_") && key.length() > 1) {
            key = key.substring(1);
        }
        // We already set short_message and host as message and source. Do not add them as fields again.
        if ("short_message".equals(key) || "host".equals(key)) {
            continue;
        }
        // Skip standard or already set fields.
        if (message.getField(key) != null || (Message.RESERVED_FIELDS.contains(key) && !Message.RESERVED_SETTABLE_FIELDS.contains(key))) {
            continue;
        }
        // Convert JSON containers to Strings, and pick a suitable number representation.
        final JsonNode value = entry.getValue();
        final Object fieldValue;
        if (value.isContainerNode()) {
            fieldValue = value.toString();
        } else if (value.isFloatingPointNumber()) {
            fieldValue = value.asDouble();
        } else if (value.isIntegralNumber()) {
            fieldValue = value.asLong();
        } else if (value.isNull()) {
            log.debug("Field [{}] is NULL. Skipping.", key);
            continue;
        } else if (value.isTextual()) {
            fieldValue = value.asText();
        } else {
            log.debug("Field [{}] has unknown value type. Skipping.", key);
            continue;
        }
        message.addField(key, fieldValue);
    }
    return message;
}
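The additional-field loop at the end converts each JSON value by type: containers become their String representation, floating-point numbers become doubles, integral numbers become longs, and text stays text. A standalone sketch of that dispatch against a minimal GELF 1.1 payload; the sample document and class name are illustrative, and field skipping is omitted for brevity.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Iterator;
import java.util.Map;

public class GelfFieldMappingDemo {
    public static void main(String[] args) throws Exception {
        // Minimal GELF 1.1 payload; "_"-prefixed keys are additional fields.
        String json = "{\"version\":\"1.1\",\"host\":\"example.org\","
                + "\"short_message\":\"A short message\",\"level\":5,"
                + "\"_user_id\":9001,\"_ratio\":0.75,\"_tag\":\"demo\"}";
        JsonNode node = new ObjectMapper().readTree(json);
        Iterator<Map.Entry<String, JsonNode>> fields = node.fields();
        while (fields.hasNext()) {
            Map.Entry<String, JsonNode> e = fields.next();
            JsonNode v = e.getValue();
            // Same type dispatch as the codec above.
            Object out = v.isContainerNode() ? v.toString()
                    : v.isFloatingPointNumber() ? v.asDouble()
                    : v.isIntegralNumber() ? v.asLong()
                    : v.asText();
            System.out.println(e.getKey() + " -> " + out
                    + " (" + out.getClass().getSimpleName() + ")");
        }
    }
}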
Use of org.graylog2.plugin.journal.RawMessage in project graylog2-server by Graylog2.
From class AmqpConsumer, method run:
public void run() throws IOException {
    if (!isConnected()) {
        connect();
    }
    for (int i = 0; i < parallelQueues; i++) {
        final String queueName = String.format(Locale.ENGLISH, queue, i);
        channel.queueDeclare(queueName, true, false, false, null);
        if (exchangeBind) {
            channel.queueBind(queueName, exchange, routingKey);
        }
        channel.basicConsume(queueName, false, new DefaultConsumer(channel) {
            @Override
            public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
                long deliveryTag = envelope.getDeliveryTag();
                try {
                    totalBytesRead.addAndGet(body.length);
                    lastSecBytesReadTmp.addAndGet(body.length);
                    final RawMessage rawMessage = new RawMessage(body);
                    // TODO figure out if we want to unsubscribe after a certain time, or if simply blocking is enough here
                    if (amqpTransport.isThrottled()) {
                        amqpTransport.blockUntilUnthrottled();
                    }
                    sourceInput.processRawMessage(rawMessage);
                    channel.basicAck(deliveryTag, false);
                } catch (Exception e) {
                    LOG.error("Error while trying to process AMQP message", e);
                    if (channel.isOpen()) {
                        channel.basicNack(deliveryTag, false, requeueInvalid);
                        if (LOG.isDebugEnabled()) {
                            if (requeueInvalid) {
                                LOG.debug("Re-queue message with delivery tag {}", deliveryTag);
                            } else {
                                LOG.debug("Message with delivery tag {} not re-queued", deliveryTag);
                            }
                        }
                    }
                }
            }
        });
    }
}
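Note that basicConsume is called with autoAck=false, so the consumer settles each delivery manually: a successful processRawMessage is followed by basicAck, while failures are nacked, with requeueInvalid deciding whether the broker redelivers the message or drops/dead-letters it. A minimal sketch of that settle logic, with hypothetical names:

import com.rabbitmq.client.Channel;
import java.io.IOException;

public final class AckPolicy {
    private AckPolicy() {
    }

    // Mirrors the error path above: ack on success; on failure nack a single
    // delivery (multiple=false) and let requeueOnFailure decide redelivery.
    public static void settle(Channel channel, long deliveryTag,
                              boolean success, boolean requeueOnFailure) throws IOException {
        if (success) {
            channel.basicAck(deliveryTag, false);
        } else if (channel.isOpen()) {
            channel.basicNack(deliveryTag, false, requeueOnFailure);
        }
    }
}

Checking channel.isOpen() before the nack matters because a processing error may coincide with a dropped connection, and settling on a closed channel would throw again.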