Use of org.apache.nifi.processor.exception.FlowFileHandlingException in the Apache NiFi project.
Class StandardProcessSession, method checkpoint().
public void checkpoint() {
    verifyTaskActive();
    resetWriteClaims(false);

    closeStreams(openInputStreams, "committed", "input");
    closeStreams(openOutputStreams, "committed", "output");

    if (!readRecursionSet.isEmpty()) {
        throw new IllegalStateException();
    }
    if (!writeRecursionSet.isEmpty()) {
        throw new IllegalStateException();
    }

    if (this.checkpoint == null) {
        this.checkpoint = new Checkpoint();
    }

    if (records.isEmpty()) {
        LOG.trace("{} checkpointed, but no events were performed by this ProcessSession", this);
        return;
    }

    // any drop event that is the result of an auto-terminate should happen at the very end, so we keep the
    // records in a separate List so that they can be persisted to the Provenance Repo after all of the
    // Processor-reported events.
    List<ProvenanceEventRecord> autoTerminatedEvents = null;

    // validate that all records have a transfer relationship for them and if so determine the destination node and clone as necessary
    final Map<FlowFileRecord, StandardRepositoryRecord> toAdd = new HashMap<>();
    for (final StandardRepositoryRecord record : records.values()) {
        if (record.isMarkedForDelete()) {
            continue;
        }

        final Relationship relationship = record.getTransferRelationship();
        if (relationship == null) {
            rollback();
            throw new FlowFileHandlingException(record.getCurrent() + " transfer relationship not specified");
        }

        final List<Connection> destinations = new ArrayList<>(context.getConnections(relationship));
        if (destinations.isEmpty() && !context.getConnectable().isAutoTerminated(relationship)) {
            if (relationship != Relationship.SELF) {
                rollback();
                throw new FlowFileHandlingException(relationship + " does not have any destinations for " + context.getConnectable());
            }
        }

        if (destinations.isEmpty() && relationship == Relationship.SELF) {
            record.setDestination(record.getOriginalQueue());
        } else if (destinations.isEmpty()) {
            record.markForDelete();

            if (autoTerminatedEvents == null) {
                autoTerminatedEvents = new ArrayList<>();
            }

            final ProvenanceEventRecord dropEvent;
            try {
                dropEvent = provenanceReporter.generateDropEvent(record.getCurrent(), "Auto-Terminated by " + relationship.getName() + " Relationship");
                autoTerminatedEvents.add(dropEvent);
            } catch (final Exception e) {
                LOG.warn("Unable to generate Provenance Event for {} on behalf of {} due to {}", record.getCurrent(), connectableDescription, e);
                if (LOG.isDebugEnabled()) {
                    LOG.warn("", e);
                }
            }
        } else {
            // remove last element
            final Connection finalDestination = destinations.remove(destinations.size() - 1);
            record.setDestination(finalDestination.getFlowFileQueue());
            incrementConnectionInputCounts(finalDestination, record);

            for (final Connection destination : destinations) {
                // iterate over remaining destinations and "clone" as needed
                incrementConnectionInputCounts(destination, record);
                final FlowFileRecord currRec = record.getCurrent();
                final StandardFlowFileRecord.Builder builder = new StandardFlowFileRecord.Builder().fromFlowFile(currRec);
                builder.id(context.getNextFlowFileSequence());

                final String newUuid = UUID.randomUUID().toString();
                builder.addAttribute(CoreAttributes.UUID.key(), newUuid);

                final FlowFileRecord clone = builder.build();
                final StandardRepositoryRecord newRecord = new StandardRepositoryRecord(destination.getFlowFileQueue());
                provenanceReporter.clone(currRec, clone, false);

                final ContentClaim claim = clone.getContentClaim();
                if (claim != null) {
                    context.getContentRepository().incrementClaimaintCount(claim);
                }
                newRecord.setWorking(clone, Collections.<String, String>emptyMap());

                newRecord.setDestination(destination.getFlowFileQueue());
                newRecord.setTransferRelationship(record.getTransferRelationship());

                // put the mapping into toAdd because adding to records now will cause a ConcurrentModificationException
                toAdd.put(clone, newRecord);
            }
        }
    }

    records.putAll(toAdd);
    toAdd.clear();

    checkpoint.checkpoint(this, autoTerminatedEvents);
    resetState();
}
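The "transfer relationship not specified" branch above is what a processor runs into when it pulls a FlowFile from the session but never transfers or removes it before the session is committed. The following is a minimal sketch of a hypothetical processor (the class name, attribute, and relationship are illustrative, not part of the NiFi sources shown here); if the session.transfer(...) call were omitted, checkpoint() would find a record with a null transfer relationship, roll back, and throw FlowFileHandlingException.

// Minimal, hypothetical processor sketch; only standard nifi-api classes are used.
import java.util.Collections;
import java.util.Set;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;

public class StampAttributeProcessor extends AbstractProcessor {

    public static final Relationship REL_SUCCESS = new Relationship.Builder()
            .name("success")
            .build();

    @Override
    public Set<Relationship> getRelationships() {
        return Collections.singleton(REL_SUCCESS);
    }

    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
        FlowFile flowFile = session.get();
        if (flowFile == null) {
            return;
        }

        flowFile = session.putAttribute(flowFile, "stamped", "true");

        // Without this transfer (or a session.remove), checkpoint() finds a record whose
        // transfer relationship is null, rolls back, and throws FlowFileHandlingException.
        session.transfer(flowFile, REL_SUCCESS);
    }
}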
Use of org.apache.nifi.processor.exception.FlowFileHandlingException in the Apache NiFi project.
Class StandardProcessSession, method clone().
@Override
public FlowFile clone(FlowFile example, final long offset, final long size) {
    verifyTaskActive();
    example = validateRecordState(example);

    final StandardRepositoryRecord exampleRepoRecord = records.get(example);
    final FlowFileRecord currRec = exampleRepoRecord.getCurrent();
    final ContentClaim claim = exampleRepoRecord.getCurrentClaim();
    if (offset + size > example.getSize()) {
        throw new FlowFileHandlingException("Specified offset of " + offset + " and size " + size + " exceeds size of " + example.toString());
    }

    final StandardFlowFileRecord.Builder builder = new StandardFlowFileRecord.Builder().fromFlowFile(currRec);
    builder.id(context.getNextFlowFileSequence());
    builder.contentClaimOffset(currRec.getContentClaimOffset() + offset);
    builder.size(size);

    final String newUuid = UUID.randomUUID().toString();
    builder.addAttribute(CoreAttributes.UUID.key(), newUuid);

    final FlowFileRecord clone = builder.build();
    if (claim != null) {
        context.getContentRepository().incrementClaimaintCount(claim);
    }

    final StandardRepositoryRecord record = new StandardRepositoryRecord(null);
    record.setWorking(clone, clone.getAttributes());
    records.put(clone, record);

    if (offset == 0L && size == example.getSize()) {
        provenanceReporter.clone(example, clone);
    } else {
        registerForkEvent(example, clone);
    }

    return clone;
}
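The offset/size guard at the top of clone(...) is the other FlowFileHandlingException in this class: it rejects a partial clone that would extend past the end of the source FlowFile. Below is a hedged sketch of how a processor might call the three-argument clone; it is assumed to live inside an AbstractProcessor subclass, and HEADER_LENGTH, REL_HEADER, and REL_ORIGINAL are hypothetical names, not part of the code above.

// Assumed to be part of an AbstractProcessor subclass; HEADER_LENGTH and the REL_* relationships
// are hypothetical and would be declared elsewhere in that processor.
private static final long HEADER_LENGTH = 128L;

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    // Clamp the requested size so that offset + size never exceeds the FlowFile's size;
    // otherwise clone(example, offset, size) throws FlowFileHandlingException.
    final long size = Math.min(HEADER_LENGTH, flowFile.getSize());
    final FlowFile header = session.clone(flowFile, 0L, size);

    session.transfer(header, REL_HEADER);
    session.transfer(flowFile, REL_ORIGINAL);
}

Note that, per the if/else at the end of the method above, a full-length clone (offset 0 and size equal to the FlowFile's size) is reported as a clone provenance event, while a partial clone is registered as a fork event.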
Use of org.apache.nifi.processor.exception.FlowFileHandlingException in the Apache NiFi project.
Class ExtractTNEFAttachments, method onTrigger().
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final ComponentLog logger = getLogger();
    final FlowFile originalFlowFile = session.get();
    if (originalFlowFile == null) {
        return;
    }

    final List<FlowFile> attachmentsList = new ArrayList<>();
    final List<FlowFile> invalidFlowFilesList = new ArrayList<>();
    final List<FlowFile> originalFlowFilesList = new ArrayList<>();

    session.read(originalFlowFile, new InputStreamCallback() {
        @Override
        public void process(final InputStream rawIn) throws IOException {
            try (final InputStream in = new BufferedInputStream(rawIn)) {
                Properties props = new Properties();
                HMEFMessage hmefMessage = null;

                // This will trigger an exception in case the content is not a TNEF message.
                hmefMessage = new HMEFMessage(in);

                // Add the original FlowFile (may revert later on in case of errors).
                originalFlowFilesList.add(originalFlowFile);

                if (hmefMessage != null) {
                    // The attachment list is not empty, so proceed.
                    if (!hmefMessage.getAttachments().isEmpty()) {
                        final String originalFlowFileName = originalFlowFile.getAttribute(CoreAttributes.FILENAME.key());
                        try {
                            for (final Attachment attachment : hmefMessage.getAttachments()) {
                                FlowFile split = session.create(originalFlowFile);
                                final Map<String, String> attributes = new HashMap<>();
                                if (StringUtils.isNotBlank(attachment.getLongFilename())) {
                                    attributes.put(CoreAttributes.FILENAME.key(), attachment.getFilename());
                                }

                                String parentUuid = originalFlowFile.getAttribute(CoreAttributes.UUID.key());
                                attributes.put(ATTACHMENT_ORIGINAL_UUID, parentUuid);
                                attributes.put(ATTACHMENT_ORIGINAL_FILENAME, originalFlowFileName);

                                // TODO: Extract MIME type (HMEF doesn't seem to be able to provide this info).
                                split = session.append(split, new OutputStreamCallback() {
                                    @Override
                                    public void process(OutputStream out) throws IOException {
                                        out.write(attachment.getContents());
                                    }
                                });
                                split = session.putAllAttributes(split, attributes);
                                attachmentsList.add(split);
                            }
                        } catch (FlowFileHandlingException e) {
                            // Something went wrong: remove any splits that may have been created
                            // and remove the original FlowFile from its list.
                            session.remove(attachmentsList);
                            originalFlowFilesList.remove(originalFlowFile);
                            logger.error("FlowFile {} triggered error {} while processing message; removing generated FlowFiles from session", new Object[] { originalFlowFile, e });
                            invalidFlowFilesList.add(originalFlowFile);
                        }
                    }
                }
            } catch (Exception e) {
                // The message is invalid or triggered an error during parsing;
                // remove the original FlowFile from its list and route it to failure.
                originalFlowFilesList.remove(originalFlowFile);
                logger.error("Could not parse the flowfile {} as an email, treating as failure", new Object[] { originalFlowFile, e });
                invalidFlowFilesList.add(originalFlowFile);
            }
        }
    });

    session.transfer(attachmentsList, REL_ATTACHMENTS);
    // As per the code above, the original FlowFile may be routed to failure or
    // original depending on whether it parsed as a valid TNEF message.
    session.transfer(invalidFlowFilesList, REL_FAILURE);
    session.transfer(originalFlowFilesList, REL_ORIGINAL);

    // Check whether attachments have been extracted.
    if (attachmentsList.size() != 0) {
        if (attachmentsList.size() > 10) {
            // If more than 10, summarise the log entry.
            logger.info("Split {} into {} files", new Object[] { originalFlowFile, attachmentsList.size() });
        } else {
            // Otherwise be more verbose and list each individual split.
            logger.info("Split {} into {} files: {}", new Object[] { originalFlowFile, attachmentsList.size(), attachmentsList });
        }
    }
}
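The FlowFileHandlingException catch block above is typically exercised through the mock framework's TestRunner. The test below is only a sketch: the winmail.dat path and the expected attachment count are assumptions about the test fixture, and the REL_* constants are assumed to be public on the processor (as they are on comparable NiFi processors).

// Hedged test sketch; the fixture path and expected counts are assumptions, not taken from the code above.
import java.nio.file.Paths;

import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;
import org.junit.Test;

public class ExtractTNEFAttachmentsSketchTest {

    @Test
    public void testAttachmentsAreSplitOut() throws Exception {
        final TestRunner runner = TestRunners.newTestRunner(new ExtractTNEFAttachments());

        // Enqueue a TNEF-encoded message; the path is a placeholder for real test data.
        runner.enqueue(Paths.get("src/test/resources/winmail.dat"));
        runner.run();

        // One original FlowFile routed to "original", nothing to "failure",
        // and each attachment becomes its own FlowFile on "attachments".
        runner.assertTransferCount(ExtractTNEFAttachments.REL_ORIGINAL, 1);
        runner.assertTransferCount(ExtractTNEFAttachments.REL_FAILURE, 0);
        runner.assertTransferCount(ExtractTNEFAttachments.REL_ATTACHMENTS, 1);
    }
}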
Use of org.apache.nifi.processor.exception.FlowFileHandlingException in the Apache NiFi project.
Class MockProcessSession, method closeStreams().
private void closeStreams(final Map<FlowFile, ? extends Closeable> streamMap, final boolean enforceClosed) {
    // avoid ConcurrentModificationException by iterating over a copy of the map
    final Map<FlowFile, ? extends Closeable> openStreamCopy = new HashMap<>(streamMap);
    for (final Map.Entry<FlowFile, ? extends Closeable> entry : openStreamCopy.entrySet()) {
        final FlowFile flowFile = entry.getKey();
        final Closeable openStream = entry.getValue();

        try {
            openStream.close();
        } catch (IOException e) {
            throw new FlowFileAccessException("Failed to close stream for " + flowFile, e);
        }

        if (enforceClosed) {
            throw new FlowFileHandlingException("Cannot commit session because the following streams were created via "
                + "calls to ProcessSession.read(FlowFile) or ProcessSession.write(FlowFile) and never closed: " + streamMap);
        }
    }
}
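This check is what fails a unit test when processor code obtains a stream via ProcessSession.read(FlowFile) or ProcessSession.write(FlowFile) and never closes it before the session is committed, since only still-open streams remain in the map passed in with enforceClosed set. Below is a hedged sketch of the pattern that avoids it, assumed to sit inside onTrigger of an AbstractProcessor subclass with a REL_SUCCESS relationship; the byte.count attribute name is illustrative.

// Sketch: reading FlowFile content via the stream-returning read(FlowFile) and closing it promptly,
// so the session's open-stream map is empty by commit time and the enforceClosed check never fires.
FlowFile flowFile = session.get();
if (flowFile == null) {
    return;
}

long byteCount = 0;
try (final InputStream in = session.read(flowFile)) {
    final byte[] buffer = new byte[8192];
    int len;
    while ((len = in.read(buffer)) != -1) {
        byteCount += len;
    }
} catch (final IOException e) {
    throw new ProcessException("Failed to read content of " + flowFile, e);
}

flowFile = session.putAttribute(flowFile, "byte.count", String.valueOf(byteCount)); // illustrative attribute name
session.transfer(flowFile, REL_SUCCESS); // REL_SUCCESS is a hypothetical relationship declared on the processor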