Example usage of org.apache.nifi.processor.io.InputStreamCallback in the Apache NiFi project.
Source: class PutEmail, method onTrigger.
// Sends the incoming FlowFile as an email. All mail settings (addresses, subject,
// content type, headers) are processor properties evaluated against the FlowFile's
// attributes; the FlowFile content may optionally be attached as a MIME part.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
final FlowFile flowFile = session.get();
if (flowFile == null) {
// No FlowFile available in the input queue; nothing to do this trigger.
return;
}
// Build a fresh mail session per FlowFile so Expression-Language-driven SMTP
// properties can vary from file to file.
final Properties properties = this.getMailPropertiesFromFlowFile(context, flowFile);
final Session mailSession = this.createMailSession(properties);
final Message message = new MimeMessage(mailSession);
final ComponentLog logger = getLogger();
try {
message.addFrom(toInetAddresses(context, flowFile, FROM));
message.setRecipients(RecipientType.TO, toInetAddresses(context, flowFile, TO));
message.setRecipients(RecipientType.CC, toInetAddresses(context, flowFile, CC));
message.setRecipients(RecipientType.BCC, toInetAddresses(context, flowFile, BCC));
message.setHeader("X-Mailer", context.getProperty(HEADER_XMAILER).evaluateAttributeExpressions(flowFile).getValue());
message.setSubject(context.getProperty(SUBJECT).evaluateAttributeExpressions(flowFile).getValue());
String messageText = getMessage(flowFile, context, session);
String contentType = context.getProperty(CONTENT_TYPE).evaluateAttributeExpressions(flowFile).getValue();
message.setContent(messageText, contentType);
message.setSentDate(new Date());
if (context.getProperty(ATTACH_FILE).asBoolean()) {
// Attachment requested: build a multipart message consisting of the text body
// (pre-encoded as base64) plus the raw FlowFile content.
final MimeBodyPart mimeText = new PreencodedMimeBodyPart("base64");
mimeText.setDataHandler(new DataHandler(new ByteArrayDataSource(Base64.encodeBase64(messageText.getBytes("UTF-8")), contentType + "; charset=\"utf-8\"")));
final MimeBodyPart mimeFile = new MimeBodyPart();
// NOTE(review): ByteArrayDataSource appears to buffer the whole content stream
// in memory, so very large FlowFiles would be held on the heap — confirm.
session.read(flowFile, new InputStreamCallback() {
@Override
public void process(final InputStream stream) throws IOException {
try {
mimeFile.setDataHandler(new DataHandler(new ByteArrayDataSource(stream, "application/octet-stream")));
} catch (final Exception e) {
// Re-wrap so a checked mail exception can cross the IOException-only
// callback boundary; unwrapped and reported by the outer catch.
throw new IOException(e);
}
}
});
mimeFile.setFileName(flowFile.getAttribute(CoreAttributes.FILENAME.key()));
MimeMultipart multipart = new MimeMultipart();
multipart.addBodyPart(mimeText);
multipart.addBodyPart(mimeFile);
// Replaces the plain-text content set above with the multipart body.
message.setContent(multipart);
}
send(message);
// Provenance records only the first recipient address.
session.getProvenanceReporter().send(flowFile, "mailto:" + message.getAllRecipients()[0].toString());
session.transfer(flowFile, REL_SUCCESS);
logger.info("Sent email as a result of receiving {}", new Object[] { flowFile });
} catch (final ProcessException | MessagingException | IOException e) {
// Yield so the processor backs off before its next trigger, then route to failure.
context.yield();
logger.error("Failed to send email for {}: {}; routing to failure", new Object[] { flowFile, e.getMessage() }, e);
session.transfer(flowFile, REL_FAILURE);
}
}
Example usage of org.apache.nifi.processor.io.InputStreamCallback in the Apache NiFi project.
Source: class PutUDP, method readContent.
/**
 * Buffers the entire content of the given FlowFile into memory.
 *
 * @param session the process session used to access the FlowFile's content
 * @param flowFile the FlowFile whose content is to be read
 * @return the complete FlowFile content as a byte array
 */
protected byte[] readContent(final ProcessSession session, final FlowFile flowFile) {
    // Pre-size to the known content length (+1 so an exact-size fit never triggers a grow).
    final ByteArrayOutputStream contentBuffer = new ByteArrayOutputStream((int) flowFile.getSize() + 1);
    // InputStreamCallback is a functional interface, so a lambda replaces the anonymous class.
    session.read(flowFile, in -> StreamUtils.copy(in, contentBuffer));
    return contentBuffer.toByteArray();
}
Example usage of org.apache.nifi.processor.io.InputStreamCallback in the Apache NiFi project.
Source: class RouteOnContent, method onTrigger.
// Buffers up to BUFFER_SIZE bytes of each FlowFile's content, decodes it with the
// configured character set, and matches it against every dynamic-property regular
// expression. A FlowFile matching several properties is cloned so each matching
// relationship receives a copy; a FlowFile matching none goes to 'unmatched'.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
final List<FlowFile> flowFiles = session.get(1);
if (flowFiles.isEmpty()) {
return;
}
// Regex-quote attribute values substituted into the dynamic-property expressions
// so attribute text is matched literally rather than interpreted as a pattern.
final AttributeValueDecorator quoteDecorator = new AttributeValueDecorator() {
@Override
public String decorate(final String attributeValue) {
return (attributeValue == null) ? null : Pattern.quote(attributeValue);
}
};
final Map<FlowFile, Set<Relationship>> flowFileDestinationMap = new HashMap<>();
final ComponentLog logger = getLogger();
final Charset charset = Charset.forName(context.getProperty(CHARACTER_SET).getValue());
// Shared read buffer: only the first BUFFER_SIZE bytes of content are examined.
final byte[] buffer = new byte[context.getProperty(BUFFER_SIZE).asDataSize(DataUnit.B).intValue()];
for (final FlowFile flowFile : flowFiles) {
final Set<Relationship> destinations = new HashSet<>();
flowFileDestinationMap.put(flowFile, destinations);
// Actual byte count read by the callback (may be less than the buffer size).
final AtomicInteger bufferedByteCount = new AtomicInteger(0);
session.read(flowFile, new InputStreamCallback() {
@Override
public void process(final InputStream in) throws IOException {
bufferedByteCount.set(StreamUtils.fillBuffer(in, buffer, false));
}
});
final String contentString = new String(buffer, 0, bufferedByteCount.get(), charset);
// Each dynamic property defines a regex and a relationship named after the property.
for (final PropertyDescriptor descriptor : context.getProperties().keySet()) {
if (!descriptor.isDynamic()) {
continue;
}
final String regex = context.getProperty(descriptor).evaluateAttributeExpressions(flowFile, quoteDecorator).getValue();
final Pattern pattern = Pattern.compile(regex);
final boolean matches;
// MATCH_ALL requires the entire buffered content to match the pattern;
// otherwise a substring match (find) is sufficient.
if (context.getProperty(MATCH_REQUIREMENT).getValue().equalsIgnoreCase(MATCH_ALL)) {
matches = pattern.matcher(contentString).matches();
} else {
matches = pattern.matcher(contentString).find();
}
if (matches) {
final Relationship relationship = new Relationship.Builder().name(descriptor.getName()).build();
destinations.add(relationship);
}
}
}
// Transfer phase: route each FlowFile, cloning when multiple relationships matched.
for (final Map.Entry<FlowFile, Set<Relationship>> entry : flowFileDestinationMap.entrySet()) {
FlowFile flowFile = entry.getKey();
final Set<Relationship> destinations = entry.getValue();
if (destinations.isEmpty()) {
flowFile = session.putAttribute(flowFile, ROUTE_ATTRIBUTE_KEY, REL_NO_MATCH.getName());
session.transfer(flowFile, REL_NO_MATCH);
session.getProvenanceReporter().route(flowFile, REL_NO_MATCH);
logger.info("Routing {} to 'unmatched'", new Object[] { flowFile });
} else {
// The original FlowFile goes to the first matched relationship; each
// remaining relationship receives a clone.
final Relationship firstRelationship = destinations.iterator().next();
destinations.remove(firstRelationship);
for (final Relationship relationship : destinations) {
FlowFile clone = session.clone(flowFile);
clone = session.putAttribute(clone, ROUTE_ATTRIBUTE_KEY, relationship.getName());
session.getProvenanceReporter().route(clone, relationship);
session.transfer(clone, relationship);
logger.info("Cloning {} to {} and routing clone to {}", new Object[] { flowFile, clone, relationship });
}
flowFile = session.putAttribute(flowFile, ROUTE_ATTRIBUTE_KEY, firstRelationship.getName());
session.getProvenanceReporter().route(flowFile, firstRelationship);
session.transfer(flowFile, firstRelationship);
logger.info("Routing {} to {}", new Object[] { flowFile, firstRelationship });
}
}
}
Example usage of org.apache.nifi.processor.io.InputStreamCallback in the Apache NiFi project.
Source: class RouteText, method onTrigger.
// Splits the incoming FlowFile's content line-by-line, evaluates each line against
// the configured match properties, and appends each line to one or more outgoing
// FlowFiles keyed by (relationship, grouping value). The incoming FlowFile is
// always routed to the 'original' relationship at the end.
@Override
@SuppressWarnings({ "unchecked", "rawtypes" })
public void onTrigger(final ProcessContext context, final ProcessSession session) {
final FlowFile originalFlowFile = session.get();
if (originalFlowFile == null) {
return;
}
final ComponentLog logger = getLogger();
final Charset charset = Charset.forName(context.getProperty(CHARACTER_SET).getValue());
final boolean trim = context.getProperty(TRIM_WHITESPACE).asBoolean();
final String routeStrategy = context.getProperty(ROUTE_STRATEGY).getValue();
final String matchStrategy = context.getProperty(MATCH_STRATEGY).getValue();
final boolean ignoreCase = context.getProperty(IGNORE_CASE).asBoolean();
final boolean compileRegex = matchStrategy.equals(matchesRegularExpressionValue) || matchStrategy.equals(containsRegularExpressionValue);
final boolean usePropValue = matchStrategy.equals(satisfiesExpression);
// Build up a Map of Relationship to object, where the object is the
// thing that each line is compared against
final Map<Relationship, Object> propValueMap;
final Map<Relationship, PropertyValue> propMap = this.propertyMap;
if (usePropValue) {
// If we are using an Expression Language we want a Map where the value is the
// PropertyValue, so we can just use the 'propMap' - no need to copy it.
propValueMap = (Map) propMap;
} else {
// Otherwise pre-evaluate each property once per FlowFile: to a compiled
// Pattern for the regex strategies, or a plain String for the others.
propValueMap = new HashMap<>(propMap.size());
for (final Map.Entry<Relationship, PropertyValue> entry : propMap.entrySet()) {
final String value = entry.getValue().evaluateAttributeExpressions(originalFlowFile).getValue();
propValueMap.put(entry.getKey(), compileRegex ? cachedCompiledPattern(value, ignoreCase) : value);
}
}
// relationship -> (grouping value -> FlowFile being accumulated for that group)
final Map<Relationship, Map<Group, FlowFile>> flowFileMap = new HashMap<>();
final Pattern groupPattern = groupingRegex;
session.read(originalFlowFile, new InputStreamCallback() {
@Override
public void process(final InputStream in) throws IOException {
// NLKBufferedReader keeps line terminators on the returned lines so they can
// be written back out unchanged when a line is appended to an output FlowFile.
try (final Reader inReader = new InputStreamReader(in, charset);
final NLKBufferedReader reader = new NLKBufferedReader(inReader)) {
// Per-line variables ('line', 'lineNo') passed to the match evaluation.
final Map<String, String> variables = new HashMap<>(2);
int lineCount = 0;
String line;
while ((line = reader.readLine()) != null) {
final String matchLine;
if (trim) {
matchLine = line.trim();
} else {
// Always trim off the new-line and carriage return characters before evaluating the line.
// The NLKBufferedReader maintains these characters so that when we write the line out we can maintain
// these characters. However, we don't actually want to match against these characters.
final String lineWithoutEndings;
final int indexOfCR = line.indexOf("\r");
final int indexOfNL = line.indexOf("\n");
if (indexOfCR > 0 && indexOfNL > 0) {
lineWithoutEndings = line.substring(0, Math.min(indexOfCR, indexOfNL));
} else if (indexOfCR > 0) {
lineWithoutEndings = line.substring(0, indexOfCR);
} else if (indexOfNL > 0) {
lineWithoutEndings = line.substring(0, indexOfNL);
} else {
lineWithoutEndings = line;
}
matchLine = lineWithoutEndings;
}
variables.put("line", line);
variables.put("lineNo", String.valueOf(++lineCount));
int propertiesThatMatchedLine = 0;
for (final Map.Entry<Relationship, Object> entry : propValueMap.entrySet()) {
boolean lineMatchesProperty = lineMatches(matchLine, entry.getValue(), matchStrategy, ignoreCase, originalFlowFile, variables);
if (lineMatchesProperty) {
propertiesThatMatchedLine++;
}
if (lineMatchesProperty && ROUTE_TO_MATCHING_PROPERTY_NAME.getValue().equals(routeStrategy)) {
// route each individual line to each Relationship that matches. This one matches.
final Relationship relationship = entry.getKey();
final Group group = getGroup(matchLine, groupPattern);
appendLine(session, flowFileMap, relationship, originalFlowFile, line, charset, group);
continue;
}
// break as soon as possible to avoid calculating things we don't need to calculate.
if (lineMatchesProperty && ROUTE_TO_MATCHED_WHEN_ANY_PROPERTY_MATCHES.getValue().equals(routeStrategy)) {
break;
}
if (!lineMatchesProperty && ROUTE_TO_MATCHED_WHEN_ALL_PROPERTIES_MATCH.getValue().equals(routeStrategy)) {
break;
}
}
// Decide which single relationship (if any) receives this line, based on the
// route strategy and the number of properties that matched.
final Relationship relationship;
if (ROUTE_TO_MATCHING_PROPERTY_NAME.getValue().equals(routeStrategy) && propertiesThatMatchedLine > 0) {
// Set relationship to null so that we do not append the line to each FlowFile again. #appendLine is called
// above within the loop, as the line may need to go to multiple different FlowFiles.
relationship = null;
} else if (ROUTE_TO_MATCHED_WHEN_ANY_PROPERTY_MATCHES.getValue().equals(routeStrategy) && propertiesThatMatchedLine > 0) {
relationship = REL_MATCH;
} else if (ROUTE_TO_MATCHED_WHEN_ALL_PROPERTIES_MATCH.getValue().equals(routeStrategy) && propertiesThatMatchedLine == propValueMap.size()) {
relationship = REL_MATCH;
} else {
relationship = REL_NO_MATCH;
}
if (relationship != null) {
final Group group = getGroup(matchLine, groupPattern);
appendLine(session, flowFileMap, relationship, originalFlowFile, line, charset, group);
}
}
}
}
});
// Finalize every accumulated FlowFile: tag it with its route and group attributes,
// record provenance, and transfer it to its relationship.
for (final Map.Entry<Relationship, Map<Group, FlowFile>> entry : flowFileMap.entrySet()) {
final Relationship relationship = entry.getKey();
final Map<Group, FlowFile> groupToFlowFileMap = entry.getValue();
for (final Map.Entry<Group, FlowFile> flowFileEntry : groupToFlowFileMap.entrySet()) {
final Group group = flowFileEntry.getKey();
final FlowFile flowFile = flowFileEntry.getValue();
final Map<String, String> attributes = new HashMap<>(2);
attributes.put(ROUTE_ATTRIBUTE_KEY, relationship.getName());
attributes.put(GROUP_ATTRIBUTE_KEY, StringUtils.join(group.getCapturedValues(), ", "));
logger.info("Created {} from {}; routing to relationship {}", new Object[] { flowFile, originalFlowFile, relationship.getName() });
FlowFile updatedFlowFile = session.putAllAttributes(flowFile, attributes);
session.getProvenanceReporter().route(updatedFlowFile, entry.getKey());
session.transfer(updatedFlowFile, entry.getKey());
}
}
// now transfer the original flow file
FlowFile flowFile = originalFlowFile;
logger.info("Routing {} to {}", new Object[] { flowFile, REL_ORIGINAL });
session.getProvenanceReporter().route(originalFlowFile, REL_ORIGINAL);
flowFile = session.putAttribute(flowFile, ROUTE_ATTRIBUTE_KEY, REL_ORIGINAL.getName());
session.transfer(flowFile, REL_ORIGINAL);
}
Example usage of org.apache.nifi.processor.io.InputStreamCallback in the Apache NiFi project.
Source: class ScanContent, method onTrigger.
// Scans the content of the next FlowFile for any term in the search dictionary
// and routes the file to 'matched' (tagging the matching term as an attribute)
// or 'unmatched'. The dictionary is reloaded when its backing file changes.
//
// Fix: removed the redundant termRef.set(null) — the AtomicReference is already
// constructed holding null, so the extra call was dead code.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLogger();

    // Reload the dictionary if the watcher reports the file changed on disk.
    final SynchronousFileWatcher fileWatcher = fileWatcherRef.get();
    try {
        if (fileWatcher.checkAndReset()) {
            reloadDictionary(context, true, logger);
        }
    } catch (final IOException e) {
        throw new ProcessException(e);
    }

    // No dictionary loaded yet (first trigger or earlier failure): try a
    // non-forced load before giving up.
    Search<byte[]> search = searchRef.get();
    try {
        if (search == null) {
            if (reloadDictionary(context, false, logger)) {
                search = searchRef.get();
            }
        }
    } catch (final IOException e) {
        throw new ProcessException(e);
    }

    // Without a usable dictionary there is nothing to match against.
    if (search == null) {
        return;
    }

    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final Search<byte[]> finalSearch = search;
    // Holds the first matching term found while streaming the content; starts null.
    final AtomicReference<SearchTerm<byte[]>> termRef = new AtomicReference<>(null);
    session.read(flowFile, new InputStreamCallback() {
        @Override
        public void process(final InputStream rawIn) throws IOException {
            try (final InputStream in = new BufferedInputStream(rawIn)) {
                // NOTE(review): the 'false' flag presumably stops at the first
                // match rather than finding all — confirm against the Search API.
                final SearchState<byte[]> searchResult = finalSearch.search(in, false);
                if (searchResult.foundMatch()) {
                    termRef.set(searchResult.getResults().keySet().iterator().next());
                }
            }
        }
    });

    final SearchTerm<byte[]> matchingTerm = termRef.get();
    if (matchingTerm == null) {
        logger.info("Routing {} to 'unmatched'", new Object[] { flowFile });
        session.getProvenanceReporter().route(flowFile, REL_NO_MATCH);
        session.transfer(flowFile, REL_NO_MATCH);
    } else {
        final String matchingTermString = matchingTerm.toString(UTF8);
        logger.info("Routing {} to 'matched' because it matched term {}", new Object[] { flowFile, matchingTermString });
        flowFile = session.putAttribute(flowFile, MATCH_ATTRIBUTE_KEY, matchingTermString);
        session.getProvenanceReporter().route(flowFile, REL_MATCH);
        session.transfer(flowFile, REL_MATCH);
    }
}
Aggregations