Usage of org.apache.nifi.processors.standard.syslog.SyslogParser in the Apache NiFi project.
Class ListenSyslog, method onTrigger:
/**
 * Drains up to Max Batch Size raw syslog messages from the internal queue, appends their
 * bytes (separated by the configured demarcator) into one FlowFile per sender, and transfers
 * each non-empty FlowFile to REL_SUCCESS with protocol/port/sender attributes and a
 * provenance receive event. When Parse Messages is enabled, each message is parsed and its
 * syslog fields are added as FlowFile attributes; a message that fails to parse is routed by
 * itself to REL_INVALID and batching stops for this trigger.
 *
 * @param context provides processor property values
 * @param session used to create, modify, and transfer FlowFiles
 * @throws ProcessException if an unrecoverable processing error occurs
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
// poll the queue with a small timeout to avoid unnecessarily yielding below
RawSyslogEvent rawSyslogEvent = getMessage(true, true, session);
// if no data is available there is nothing to do; getMessage() handles yielding /
// throttling even when no data is available
if (rawSyslogEvent == null) {
return;
}
final int maxBatchSize = context.getProperty(MAX_BATCH_SIZE).asInteger();
final String port = context.getProperty(PORT).evaluateAttributeExpressions().getValue();
final String protocol = context.getProperty(PROTOCOL).getValue();
// attributes common to every FlowFile produced by this trigger
final Map<String, String> defaultAttributes = new HashMap<>(4);
defaultAttributes.put(SyslogAttributes.PROTOCOL.key(), protocol);
defaultAttributes.put(SyslogAttributes.PORT.key(), port);
defaultAttributes.put(CoreAttributes.MIME_TYPE.key(), "text/plain");
// expected attribute count for a parsed message: every syslog field plus protocol and port
final int numAttributes = SyslogAttributes.values().length + 2;
final boolean shouldParse = context.getProperty(PARSE_MESSAGES).asBoolean();
// content is accumulated per sender so that one FlowFile holds one sender's messages
final Map<String, FlowFile> flowFilePerSender = new HashMap<>();
final SyslogParser parser = getParser();
for (int i = 0; i < maxBatchSize; i++) {
SyslogEvent event = null;
// If this is our first iteration, we have already polled our queues. Otherwise, poll on each iteration.
if (i > 0) {
rawSyslogEvent = getMessage(true, false, session);
if (rawSyslogEvent == null) {
// queue drained; stop batching early
break;
}
}
final String sender = rawSyslogEvent.getSender();
// reuse (or lazily create) the FlowFile that accumulates this sender's messages
FlowFile flowFile = flowFilePerSender.computeIfAbsent(sender, k -> session.create());
if (shouldParse) {
boolean valid = true;
try {
event = parser.parseEvent(rawSyslogEvent.getData(), sender);
} catch (final ProcessException pe) {
getLogger().warn("Failed to parse Syslog event; routing to invalid");
valid = false;
}
// If the event is invalid, write the raw bytes to a dedicated FlowFile routed to 'invalid'
// rather than reusing the per-sender 'flowFile',
// because the 'flowFile' object may already have data written to it.
if (!valid || event == null || !event.isValid()) {
FlowFile invalidFlowFile = session.create();
invalidFlowFile = session.putAllAttributes(invalidFlowFile, defaultAttributes);
if (sender != null) {
invalidFlowFile = session.putAttribute(invalidFlowFile, SyslogAttributes.SENDER.key(), sender);
}
try {
final byte[] rawBytes = rawSyslogEvent.getData();
invalidFlowFile = session.write(invalidFlowFile, new OutputStreamCallback() {
@Override
public void process(final OutputStream out) throws IOException {
out.write(rawBytes);
}
});
} catch (final Exception e) {
// NOTE(review): only the Throwable is passed for the '{}' placeholder, so the stack
// trace formatting depends on the logger implementation -- confirm against ComponentLog semantics.
getLogger().error("Failed to write contents of Syslog message to FlowFile due to {}; will re-queue message and try again", e);
errorEvents.offer(rawSyslogEvent);
session.remove(invalidFlowFile);
break;
}
session.transfer(invalidFlowFile, REL_INVALID);
// stop the batch after an invalid message; remaining queued messages wait for the next trigger
break;
}
getLogger().trace(event.getFullMessage());
// promote each parsed syslog field to a FlowFile attribute
final Map<String, String> attributes = new HashMap<>(numAttributes);
attributes.put(SyslogAttributes.PRIORITY.key(), event.getPriority());
attributes.put(SyslogAttributes.SEVERITY.key(), event.getSeverity());
attributes.put(SyslogAttributes.FACILITY.key(), event.getFacility());
attributes.put(SyslogAttributes.VERSION.key(), event.getVersion());
attributes.put(SyslogAttributes.TIMESTAMP.key(), event.getTimeStamp());
attributes.put(SyslogAttributes.HOSTNAME.key(), event.getHostName());
attributes.put(SyslogAttributes.BODY.key(), event.getMsgBody());
attributes.put(SyslogAttributes.VALID.key(), String.valueOf(event.isValid()));
flowFile = session.putAllAttributes(flowFile, attributes);
}
// a demarcator is needed only when the FlowFile may already contain a previous message
final boolean writeDemarcator = (i > 0);
try {
// write the raw bytes of the message as the FlowFile content, preferring the parsed
// event's raw form when one was produced
final byte[] rawMessage = (event == null) ? rawSyslogEvent.getData() : event.getRawMessage();
flowFile = session.append(flowFile, new OutputStreamCallback() {
@Override
public void process(final OutputStream out) throws IOException {
if (writeDemarcator) {
out.write(messageDemarcatorBytes);
}
out.write(rawMessage);
}
});
} catch (final Exception e) {
getLogger().error("Failed to write contents of Syslog message to FlowFile due to {}; will re-queue message and try again", e);
errorEvents.offer(rawSyslogEvent);
break;
}
flowFilePerSender.put(sender, flowFile);
}
// finalize each per-sender FlowFile: attach attributes, transfer, and report provenance
for (final Map.Entry<String, FlowFile> entry : flowFilePerSender.entrySet()) {
final String sender = entry.getKey();
FlowFile flowFile = entry.getValue();
if (flowFile.getSize() == 0L) {
session.remove(flowFile);
getLogger().debug("No data written to FlowFile from Sender {}; removing FlowFile", new Object[] { sender });
continue;
}
final Map<String, String> newAttributes = new HashMap<>(defaultAttributes.size() + 1);
newAttributes.putAll(defaultAttributes);
newAttributes.put(SyslogAttributes.SENDER.key(), sender);
flowFile = session.putAllAttributes(flowFile, newAttributes);
getLogger().debug("Transferring {} to success", new Object[] { flowFile });
session.transfer(flowFile, REL_SUCCESS);
session.adjustCounter("FlowFiles Transferred to Success", 1L, false);
// sender appears to be InetAddress-style (e.g. "/1.2.3.4"); strip the leading slash
// for the transit URI -- TODO confirm sender format against the channel dispatcher
final String senderHost = sender.startsWith("/") && sender.length() > 1 ? sender.substring(1) : sender;
final String transitUri = new StringBuilder().append(protocol.toLowerCase()).append("://").append(senderHost).append(":").append(port).toString();
session.getProvenanceReporter().receive(flowFile, transitUri);
}
}
Usage of org.apache.nifi.processors.standard.syslog.SyslogParser in the Apache NiFi project.
Class ListenSyslog, method onScheduled:
/**
 * Prepares the processor for scheduling: reads configuration properties, allocates the
 * per-connection buffer pool and the bounded message queue, creates the SyslogParser, binds
 * a UDP or TCP channel dispatcher to the configured port (optionally on a specific network
 * interface), and starts the daemon reader thread.
 *
 * @param context provides processor property values
 * @throws IOException if the network interface cannot be resolved, has no addresses, or the
 *         channel dispatcher fails to bind
 */
@OnScheduled
public void onScheduled(final ProcessContext context) throws IOException {
    final int port = context.getProperty(PORT).evaluateAttributeExpressions().asInteger();
    final int bufferSize = context.getProperty(RECV_BUFFER_SIZE).asDataSize(DataUnit.B).intValue();
    final int maxChannelBufferSize = context.getProperty(MAX_SOCKET_BUFFER_SIZE).asDataSize(DataUnit.B).intValue();
    final int maxMessageQueueSize = context.getProperty(MAX_MESSAGE_QUEUE_SIZE).asInteger();
    final String protocol = context.getProperty(PROTOCOL).getValue();
    final String nicIPAddressStr = context.getProperty(NETWORK_INTF_NAME).evaluateAttributeExpressions().getValue();

    // Resolve the charset once; it is used for the demarcator, the parser, and the dispatcher.
    final Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions().getValue());

    // The delimiter property is entered with escaped sequences; translate them to real control characters.
    final String msgDemarcator = context.getProperty(MESSAGE_DELIMITER).getValue().replace("\\n", "\n").replace("\\r", "\r").replace("\\t", "\t");
    messageDemarcatorBytes = msgDemarcator.getBytes(charset);

    // UDP is connectionless, so a single "connection" (and a single buffer) suffices.
    final int maxConnections;
    if (UDP_VALUE.getValue().equals(protocol)) {
        maxConnections = 1;
    } else {
        maxConnections = context.getProperty(MAX_CONNECTIONS).asLong().intValue();
    }

    bufferPool = new LinkedBlockingQueue<>(maxConnections);
    for (int i = 0; i < maxConnections; i++) {
        bufferPool.offer(ByteBuffer.allocate(bufferSize));
    }

    parser = new SyslogParser(charset);
    syslogEvents = new LinkedBlockingQueue<>(maxMessageQueueSize);

    InetAddress nicIPAddress = null;
    if (!StringUtils.isEmpty(nicIPAddressStr)) {
        final NetworkInterface netIF = NetworkInterface.getByName(nicIPAddressStr);
        // Fail fast with a descriptive error instead of an NPE / NoSuchElementException when
        // the configured interface name is unknown or the interface has no addresses bound.
        if (netIF == null) {
            throw new IOException("Unable to find network interface named " + nicIPAddressStr);
        }
        final java.util.Enumeration<InetAddress> nicAddresses = netIF.getInetAddresses();
        if (!nicAddresses.hasMoreElements()) {
            throw new IOException("Network interface " + nicIPAddressStr + " has no IP addresses assigned");
        }
        nicIPAddress = nicAddresses.nextElement();
    }

    // create either a UDP or TCP reader and call open() to bind to the given port
    final SSLContextService sslContextService = context.getProperty(SSL_CONTEXT_SERVICE).asControllerService(SSLContextService.class);
    channelDispatcher = createChannelReader(context, protocol, bufferPool, syslogEvents, maxConnections, sslContextService, charset);
    channelDispatcher.open(nicIPAddress, port, maxChannelBufferSize);

    final Thread readerThread = new Thread(channelDispatcher);
    readerThread.setName("ListenSyslog [" + getIdentifier() + "]");
    readerThread.setDaemon(true);
    readerThread.start();
}
Usage of org.apache.nifi.processors.standard.syslog.SyslogParser in the Apache NiFi project.
Class ParseSyslog, method onTrigger:
/**
 * Parses the content of an incoming FlowFile as a single syslog message. On success the
 * parsed fields (priority, severity, facility, version, timestamp, hostname, body) are added
 * as FlowFile attributes and the FlowFile goes to REL_SUCCESS; if the content cannot be
 * parsed or does not match a supported RFC format it goes to REL_FAILURE.
 *
 * @param context provides processor property values
 * @param session used to read, modify, and transfer the FlowFile
 * @throws ProcessException if an unrecoverable processing error occurs
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    // Reuse the existing parser unless the configured charset has changed since last time.
    final String charsetName = context.getProperty(CHARSET).getValue();
    if (parser == null || !parser.getCharsetName().equals(charsetName)) {
        parser = new SyslogParser(Charset.forName(charsetName));
    }

    // Buffer the entire FlowFile content so it can be handed to the parser in one call.
    final byte[] messageBytes = new byte[(int) flowFile.getSize()];
    session.read(flowFile, in -> StreamUtils.fillBuffer(in, messageBytes));

    SyslogEvent syslogEvent;
    try {
        syslogEvent = parser.parseEvent(messageBytes, null);
    } catch (final ProcessException pe) {
        getLogger().error("Failed to parse {} as a Syslog message due to {}; routing to failure", new Object[] { flowFile, pe });
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    if (syslogEvent == null || !syslogEvent.isValid()) {
        getLogger().error("Failed to parse {} as a Syslog message: it does not conform to any of the RFC formats supported; routing to failure", new Object[] { flowFile });
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    // Promote each parsed syslog field to a FlowFile attribute.
    final Map<String, String> parsedAttributes = new HashMap<>(8);
    parsedAttributes.put(SyslogAttributes.PRIORITY.key(), syslogEvent.getPriority());
    parsedAttributes.put(SyslogAttributes.SEVERITY.key(), syslogEvent.getSeverity());
    parsedAttributes.put(SyslogAttributes.FACILITY.key(), syslogEvent.getFacility());
    parsedAttributes.put(SyslogAttributes.VERSION.key(), syslogEvent.getVersion());
    parsedAttributes.put(SyslogAttributes.TIMESTAMP.key(), syslogEvent.getTimeStamp());
    parsedAttributes.put(SyslogAttributes.HOSTNAME.key(), syslogEvent.getHostName());
    parsedAttributes.put(SyslogAttributes.BODY.key(), syslogEvent.getMsgBody());

    flowFile = session.putAllAttributes(flowFile, parsedAttributes);
    session.transfer(flowFile, REL_SUCCESS);
}
Aggregations