use of org.apache.nifi.processor.Relationship in project nifi by apache.
the class RouteHL7 method onPropertyModified.
@Override
public void onPropertyModified(final PropertyDescriptor descriptor, final String oldValue, final String newValue) {
    if (!descriptor.isDynamic()) {
        return;
    }
    final Map<Relationship, HL7Query> updatedQueryMap = new HashMap<>(queries);
    final Relationship relationship = new Relationship.Builder().name(descriptor.getName()).build();
    if (newValue == null) {
        updatedQueryMap.remove(relationship);
    } else {
        final HL7Query query = HL7Query.compile(newValue);
        updatedQueryMap.put(relationship, query);
    }
    this.queries = updatedQueryMap;
}
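Note the copy-on-write pattern: the method rebuilds the map on a private copy and swaps the field reference last, so concurrent readers see either the old map or the new one, never a partially updated map. A minimal, self-contained sketch of the same pattern, assuming safe publication through a volatile field (the DynamicRouteRegistry class and its route names are illustrative, not part of NiFi):

import java.util.HashMap;
import java.util.Map;

public class DynamicRouteRegistry {

    // Only ever assigned a fully built map, so readers never see a partial update.
    private volatile Map<String, String> routes = new HashMap<>();

    public void onPropertyModified(final String name, final String newValue) {
        final Map<String, String> updated = new HashMap<>(routes); // copy, never mutate in place
        if (newValue == null) {
            updated.remove(name);   // property removed -> drop the route
        } else {
            updated.put(name, newValue);
        }
        routes = updated;           // single atomic reference swap
    }

    public String lookup(final String name) {
        return routes.get(name);
    }

    public static void main(String[] args) {
        final DynamicRouteRegistry registry = new DynamicRouteRegistry();
        registry.onPropertyModified("lab-results", "MSH-9.1 = 'ORU'");
        System.out.println(registry.lookup("lab-results")); // MSH-9.1 = 'ORU'
        registry.onPropertyModified("lab-results", null);
        System.out.println(registry.lookup("lab-results")); // null
    }
}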
use of org.apache.nifi.processor.Relationship in project nifi by apache.
the class PutFileTransfer method identifyAndResolveConflictFile.
// Attempts to identify naming or content issues with files before they are transferred.
private ConflictResult identifyAndResolveConflictFile(final String conflictResolutionType, final T transfer, final String path, final FlowFile flowFile, final boolean rejectZeroByteFiles, final ComponentLog logger) throws IOException {
    Relationship destinationRelationship = REL_SUCCESS;
    String fileName = flowFile.getAttribute(CoreAttributes.FILENAME.key());
    boolean transferFile = true;
    boolean penalizeFile = false;
    // First, reject zero-byte files if the processor is configured to do so
    if (rejectZeroByteFiles) {
        final long sizeInBytes = flowFile.getSize();
        if (sizeInBytes == 0) {
            logger.warn("Rejecting {} because it is zero bytes", new Object[] { flowFile });
            return new ConflictResult(REL_REJECT, false, fileName, true);
        }
    }
    // Second, if the user chose not to detect naming conflicts ahead of time, transfer the file as-is
    if (conflictResolutionType.equalsIgnoreCase(FileTransfer.CONFLICT_RESOLUTION_NONE)) {
        return new ConflictResult(destinationRelationship, transferFile, fileName, penalizeFile);
    }
    final FileInfo remoteFileInfo = transfer.getRemoteFileInfo(flowFile, path, fileName);
    if (remoteFileInfo == null) {
        return new ConflictResult(destinationRelationship, transferFile, fileName, penalizeFile);
    }
    if (remoteFileInfo.isDirectory()) {
        logger.warn("Resolving conflict by rejecting {} due to conflicting filename with a directory or file already on remote server", new Object[] { flowFile });
        return new ConflictResult(REL_REJECT, false, fileName, false);
    }
    logger.info("Discovered a filename conflict on the remote server for {} so handling using configured Conflict Resolution of {}", new Object[] { flowFile, conflictResolutionType });
    switch (conflictResolutionType.toUpperCase()) {
        case FileTransfer.CONFLICT_RESOLUTION_REJECT:
            destinationRelationship = REL_REJECT;
            transferFile = false;
            penalizeFile = false;
            logger.warn("Resolving conflict by rejecting {} due to conflicting filename with a directory or file already on remote server", new Object[] { flowFile });
            break;
        case FileTransfer.CONFLICT_RESOLUTION_REPLACE:
            transfer.deleteFile(flowFile, path, fileName);
            destinationRelationship = REL_SUCCESS;
            transferFile = true;
            penalizeFile = false;
            logger.info("Resolving filename conflict for {} with remote server by deleting remote file and replacing with flow file", new Object[] { flowFile });
            break;
        case FileTransfer.CONFLICT_RESOLUTION_RENAME:
            boolean uniqueNameGenerated = false;
            for (int i = 1; i < 100 && !uniqueNameGenerated; i++) {
                String possibleFileName = i + "." + fileName;
                final FileInfo renamedFileInfo = transfer.getRemoteFileInfo(flowFile, path, possibleFileName);
                uniqueNameGenerated = (renamedFileInfo == null);
                if (uniqueNameGenerated) {
                    fileName = possibleFileName;
                    logger.info("Attempting to resolve filename conflict for {} on the remote server by using a newly generated filename of: {}", new Object[] { flowFile, fileName });
                    destinationRelationship = REL_SUCCESS;
                    transferFile = true;
                    penalizeFile = false;
                    break;
                }
            }
            if (!uniqueNameGenerated) {
                destinationRelationship = REL_REJECT;
                transferFile = false;
                penalizeFile = false;
                logger.warn("Could not determine a unique name after 99 attempts for {}. Switching resolution mode to REJECT.", new Object[] { flowFile });
            }
            break;
        case FileTransfer.CONFLICT_RESOLUTION_IGNORE:
            destinationRelationship = REL_SUCCESS;
            transferFile = false;
            penalizeFile = false;
            logger.info("Resolving conflict for {} by not transferring file and still considering the process a success.", new Object[] { flowFile });
            break;
        case FileTransfer.CONFLICT_RESOLUTION_FAIL:
            destinationRelationship = REL_FAILURE;
            transferFile = false;
            penalizeFile = true;
            logger.warn("Resolved filename conflict for {} as configured by routing to FAILURE relationship.", new Object[] { flowFile });
            break;
        default:
            break;
    }
    return new ConflictResult(destinationRelationship, transferFile, fileName, penalizeFile);
}
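The RENAME branch probes candidate names of the form 1.name, 2.name, ... up to 99.name and falls back to REJECT if every candidate collides. A standalone sketch of that probing strategy, assuming a plain Set of existing remote names in place of the transfer client (findUniqueName is a hypothetical helper, not NiFi API):

import java.util.Optional;
import java.util.Set;

public class RenameProbe {

    // Try "1.name", "2.name", ... "99.name" and return the first name not
    // already taken; empty if all 99 candidates collide.
    static Optional<String> findUniqueName(final String fileName, final Set<String> existing) {
        for (int i = 1; i < 100; i++) {
            final String candidate = i + "." + fileName;
            if (!existing.contains(candidate)) {
                return Optional.of(candidate);
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        final Set<String> remote = Set.of("report.csv", "1.report.csv");
        System.out.println(findUniqueName("report.csv", remote)); // Optional[2.report.csv]
    }
}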
use of org.apache.nifi.processor.Relationship in project nifi by apache.
the class PutSQL method isFragmentedTransactionReady.
/**
 * Determines whether or not the given FlowFiles, which share a fragment.identifier, make up a
 * complete fragmented transaction that is ready to be processed.
 *
 * @param flowFiles the FlowFiles whose readiness is to be determined
 * @param transactionTimeoutMillis the maximum amount of time (in milliseconds) that we should wait
 *            for all FlowFiles in a transaction to be present before routing to failure
 * @return <code>true</code> if all fragments are present and the FlowFiles should be processed,
 *         <code>false</code> if the FlowFiles should be returned to the queue to await more fragments
 * @throws IllegalArgumentException if the fragment attributes are missing, invalid, or inconsistent,
 *             or if the transaction has timed out
 */
boolean isFragmentedTransactionReady(final List<FlowFile> flowFiles, final Long transactionTimeoutMillis) throws IllegalArgumentException {
    int selectedNumFragments = 0;
    final BitSet bitSet = new BitSet();
    final BiFunction<String, Object[], IllegalArgumentException> illegal = (s, objects) -> new IllegalArgumentException(String.format(s, objects));
    for (final FlowFile flowFile : flowFiles) {
        final String fragmentCount = flowFile.getAttribute(FRAGMENT_COUNT_ATTR);
        if (fragmentCount == null && flowFiles.size() == 1) {
            return true;
        } else if (fragmentCount == null) {
            throw illegal.apply("Cannot process %s because there are %d FlowFiles with the same fragment.identifier attribute but not all FlowFiles have a fragment.count attribute", new Object[] { flowFile, flowFiles.size() });
        }
        final int numFragments;
        try {
            numFragments = Integer.parseInt(fragmentCount);
        } catch (final NumberFormatException nfe) {
            throw illegal.apply("Cannot process %s because the fragment.count attribute has a value of '%s', which is not an integer", new Object[] { flowFile, fragmentCount });
        }
        if (numFragments < 1) {
            throw illegal.apply("Cannot process %s because the fragment.count attribute has a value of '%s', which is not a positive integer", new Object[] { flowFile, fragmentCount });
        }
        if (selectedNumFragments == 0) {
            selectedNumFragments = numFragments;
        } else if (numFragments != selectedNumFragments) {
            throw illegal.apply("Cannot process %s because the fragment.count attribute has different values for different FlowFiles with the same fragment.identifier", new Object[] { flowFile });
        }
        final String fragmentIndex = flowFile.getAttribute(FRAGMENT_INDEX_ATTR);
        if (fragmentIndex == null) {
            throw illegal.apply("Cannot process %s because the fragment.index attribute is missing", new Object[] { flowFile });
        }
        final int idx;
        try {
            idx = Integer.parseInt(fragmentIndex);
        } catch (final NumberFormatException nfe) {
            throw illegal.apply("Cannot process %s because the fragment.index attribute has a value of '%s', which is not an integer", new Object[] { flowFile, fragmentIndex });
        }
        if (idx < 0) {
            throw illegal.apply("Cannot process %s because the fragment.index attribute has a value of '%s', which is not a non-negative integer", new Object[] { flowFile, fragmentIndex });
        }
        if (bitSet.get(idx)) {
            throw illegal.apply("Cannot process %s because it has the same value for the fragment.index attribute as another FlowFile with the same fragment.identifier", new Object[] { flowFile });
        }
        bitSet.set(idx);
    }
    if (selectedNumFragments == flowFiles.size()) {
        // all fragments are present - process the FlowFiles
        return true;
    }
    long latestQueueTime = 0L;
    for (final FlowFile flowFile : flowFiles) {
        if (flowFile.getLastQueueDate() != null && flowFile.getLastQueueDate() > latestQueueTime) {
            latestQueueTime = flowFile.getLastQueueDate();
        }
    }
    if (transactionTimeoutMillis != null) {
        if (latestQueueTime > 0L && System.currentTimeMillis() - latestQueueTime > transactionTimeoutMillis) {
            throw illegal.apply("The transaction timeout has expired for the following FlowFiles; they will be routed to failure: %s", new Object[] { flowFiles });
        }
    }
    getLogger().debug("Not enough FlowFiles for transaction. Returning all FlowFiles to queue");
    // not enough FlowFiles for this transaction - return them all to the queue
    return false;
}
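The BitSet serves double duty: it rejects duplicate fragment.index values, and together with the agreed-upon fragment.count it lets the size comparison decide completeness. A minimal sketch of the same check over plain attribute maps (a hypothetical helper, not the NiFi method itself):

import java.util.BitSet;
import java.util.List;
import java.util.Map;

public class FragmentCheck {

    // Returns true only if every map agrees on fragment.count, no
    // fragment.index repeats, and the number of fragments equals the count.
    static boolean isComplete(final List<Map<String, String>> fragments) {
        final BitSet seen = new BitSet();
        int expected = -1;
        for (final Map<String, String> attrs : fragments) {
            final int count = Integer.parseInt(attrs.get("fragment.count"));
            if (expected == -1) {
                expected = count;
            } else if (count != expected) {
                throw new IllegalArgumentException("fragment.count values disagree");
            }
            final int idx = Integer.parseInt(attrs.get("fragment.index"));
            if (seen.get(idx)) {
                throw new IllegalArgumentException("duplicate fragment.index " + idx);
            }
            seen.set(idx);
        }
        return expected == fragments.size();
    }

    public static void main(String[] args) {
        System.out.println(isComplete(List.of(
                Map.of("fragment.count", "2", "fragment.index", "0"),
                Map.of("fragment.count", "2", "fragment.index", "1")))); // true
    }
}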
use of org.apache.nifi.processor.Relationship in project nifi by apache.
the class QueryRecord method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }
    final StopWatch stopWatch = new StopWatch(true);
    final RecordSetWriterFactory recordSetWriterFactory = context.getProperty(RECORD_WRITER_FACTORY).asControllerService(RecordSetWriterFactory.class);
    final RecordReaderFactory recordReaderFactory = context.getProperty(RECORD_READER_FACTORY).asControllerService(RecordReaderFactory.class);
    final Map<FlowFile, Relationship> transformedFlowFiles = new HashMap<>();
    final Set<FlowFile> createdFlowFiles = new HashSet<>();
    // Determine the Record Reader's schema
    final RecordSchema readerSchema;
    try (final InputStream rawIn = session.read(original)) {
        final Map<String, String> originalAttributes = original.getAttributes();
        final RecordReader reader = recordReaderFactory.createRecordReader(originalAttributes, rawIn, getLogger());
        final RecordSchema inputSchema = reader.getSchema();
        readerSchema = recordSetWriterFactory.getSchema(originalAttributes, inputSchema);
    } catch (final Exception e) {
        getLogger().error("Failed to determine Record Schema from {}; routing to failure", new Object[] { original, e });
        session.transfer(original, REL_FAILURE);
        return;
    }
    // Determine the schema for writing the data
    final Map<String, String> originalAttributes = original.getAttributes();
    int recordsRead = 0;
    try {
        for (final PropertyDescriptor descriptor : context.getProperties().keySet()) {
            if (!descriptor.isDynamic()) {
                continue;
            }
            final Relationship relationship = new Relationship.Builder().name(descriptor.getName()).build();
            // We have to fork a child because we may need to read the input FlowFile more than once,
            // and we cannot call session.read() on the original FlowFile while we are within a write
            // callback for the original FlowFile.
            FlowFile transformed = session.create(original);
            boolean flowFileRemoved = false;
            try {
                final String sql = context.getProperty(descriptor).evaluateAttributeExpressions(original).getValue();
                final AtomicReference<WriteResult> writeResultRef = new AtomicReference<>();
                final QueryResult queryResult;
                if (context.getProperty(CACHE_SCHEMA).asBoolean()) {
                    queryResult = queryWithCache(session, original, sql, context, recordReaderFactory);
                } else {
                    queryResult = query(session, original, sql, context, recordReaderFactory);
                }
                final AtomicReference<String> mimeTypeRef = new AtomicReference<>();
                try {
                    final ResultSet rs = queryResult.getResultSet();
                    transformed = session.write(transformed, new OutputStreamCallback() {
                        @Override
                        public void process(final OutputStream out) throws IOException {
                            final ResultSetRecordSet recordSet;
                            final RecordSchema writeSchema;
                            try {
                                recordSet = new ResultSetRecordSet(rs, readerSchema);
                                final RecordSchema resultSetSchema = recordSet.getSchema();
                                writeSchema = recordSetWriterFactory.getSchema(originalAttributes, resultSetSchema);
                            } catch (final SQLException | SchemaNotFoundException e) {
                                throw new ProcessException(e);
                            }
                            try (final RecordSetWriter resultSetWriter = recordSetWriterFactory.createWriter(getLogger(), writeSchema, out)) {
                                writeResultRef.set(resultSetWriter.write(recordSet));
                                mimeTypeRef.set(resultSetWriter.getMimeType());
                            } catch (final Exception e) {
                                throw new IOException(e);
                            }
                        }
                    });
                } finally {
                    closeQuietly(queryResult);
                }
                recordsRead = Math.max(recordsRead, queryResult.getRecordsRead());
                final WriteResult result = writeResultRef.get();
                if (result.getRecordCount() == 0 && !context.getProperty(INCLUDE_ZERO_RECORD_FLOWFILES).asBoolean()) {
                    session.remove(transformed);
                    flowFileRemoved = true;
                    transformedFlowFiles.remove(transformed);
                    getLogger().info("Transformed {} but the result contained no data so will not pass on a FlowFile", new Object[] { original });
                } else {
                    final Map<String, String> attributesToAdd = new HashMap<>();
                    if (result.getAttributes() != null) {
                        attributesToAdd.putAll(result.getAttributes());
                    }
                    attributesToAdd.put(CoreAttributes.MIME_TYPE.key(), mimeTypeRef.get());
                    attributesToAdd.put("record.count", String.valueOf(result.getRecordCount()));
                    transformed = session.putAllAttributes(transformed, attributesToAdd);
                    transformedFlowFiles.put(transformed, relationship);
                    session.adjustCounter("Records Written", result.getRecordCount(), false);
                }
            } finally {
                // Ensure that we have the FlowFile in the set in case we throw any Exception
                if (!flowFileRemoved) {
                    createdFlowFiles.add(transformed);
                }
            }
        }
        final long elapsedMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS);
        if (transformedFlowFiles.size() > 0) {
            session.getProvenanceReporter().fork(original, transformedFlowFiles.keySet(), elapsedMillis);
            for (final Map.Entry<FlowFile, Relationship> entry : transformedFlowFiles.entrySet()) {
                final FlowFile transformed = entry.getKey();
                final Relationship relationship = entry.getValue();
                session.getProvenanceReporter().route(transformed, relationship);
                session.transfer(transformed, relationship);
            }
        }
        getLogger().info("Successfully queried {} in {} millis", new Object[] { original, elapsedMillis });
        session.transfer(original, REL_ORIGINAL);
    } catch (final SQLException e) {
        getLogger().error("Unable to query {} due to {}", new Object[] { original, e.getCause() == null ? e : e.getCause() });
        session.remove(createdFlowFiles);
        session.transfer(original, REL_FAILURE);
    } catch (final Exception e) {
        getLogger().error("Unable to query {} due to {}", new Object[] { original, e });
        session.remove(createdFlowFiles);
        session.transfer(original, REL_FAILURE);
    }
    session.adjustCounter("Records Read", recordsRead, false);
}
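Each dynamic property contributes one output: the property name becomes the relationship and the property value is the SQL run against the FlowFile, with empty results dropped unless configured otherwise. A rough sketch of that fan-out pattern over plain data, assuming a hypothetical runQuery function standing in for the Calcite-backed query engine:

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public class QueryFanOut {

    // For each (relationshipName -> sql) entry, run the query and keep only
    // non-empty results, mirroring INCLUDE_ZERO_RECORD_FLOWFILES = false.
    static Map<String, List<String>> fanOut(final Map<String, String> queries,
                                            final Function<String, List<String>> runQuery) {
        final Map<String, List<String>> routed = new HashMap<>();
        for (final Map.Entry<String, String> entry : queries.entrySet()) {
            final List<String> rows = runQuery.apply(entry.getValue()); // hypothetical query engine
            if (!rows.isEmpty()) {
                routed.put(entry.getKey(), rows); // route to the relationship named after the property
            }
        }
        return routed;
    }

    public static void main(String[] args) {
        final Map<String, String> queries = Map.of(
                "adults", "SELECT * FROM FLOWFILE WHERE age >= 18",
                "minors", "SELECT * FROM FLOWFILE WHERE age < 18");
        // Stub engine: pretend only the "adults" query matches anything
        final Function<String, List<String>> engine =
                sql -> sql.contains(">=") ? List.of("alice,34") : List.of();
        System.out.println(fanOut(queries, engine)); // {adults=[alice,34]}
    }
}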
use of org.apache.nifi.processor.Relationship in project nifi by apache.
the class RouteOnAttribute method onScheduled.
/**
 * When this processor is scheduled, loads the dynamic properties into a map
 * for quick access during each onTrigger call.
 *
 * @param context ProcessContext used to retrieve the dynamic properties
 */
@OnScheduled
public void onScheduled(final ProcessContext context) {
    final Map<Relationship, PropertyValue> newPropertyMap = new HashMap<>();
    for (final PropertyDescriptor descriptor : context.getProperties().keySet()) {
        if (!descriptor.isDynamic()) {
            continue;
        }
        getLogger().debug("Adding new dynamic property: {}", new Object[] { descriptor });
        newPropertyMap.put(new Relationship.Builder().name(descriptor.getName()).build(), context.getProperty(descriptor));
    }
    this.propertyMap = newPropertyMap;
}
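At trigger time, RouteOnAttribute evaluates each stored PropertyValue as a boolean Expression Language result against the incoming FlowFile and routes to every matching relationship. A self-contained sketch of that evaluation loop, with plain predicates standing in for compiled Expression Language (the AttributeRouter class and its names are illustrative, not NiFi API):

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;

public class AttributeRouter {

    // Each entry maps a relationship name to a predicate over the FlowFile's
    // attributes, standing in for a compiled EL expression.
    private final Map<String, Predicate<Map<String, String>>> routes;

    AttributeRouter(final Map<String, Predicate<Map<String, String>>> routes) {
        this.routes = routes;
    }

    // Returns the names of all relationships whose predicate matches.
    Set<String> route(final Map<String, String> attributes) {
        final Set<String> matched = new HashSet<>();
        for (final Map.Entry<String, Predicate<Map<String, String>>> entry : routes.entrySet()) {
            if (entry.getValue().test(attributes)) {
                matched.add(entry.getKey());
            }
        }
        return matched;
    }

    public static void main(String[] args) {
        final AttributeRouter router = new AttributeRouter(Map.of(
                "large", attrs -> Long.parseLong(attrs.getOrDefault("fileSize", "0")) > 1_000_000,
                "csv", attrs -> "text/csv".equals(attrs.get("mime.type"))));
        System.out.println(router.route(Map.of("fileSize", "5000000", "mime.type", "text/csv")));
        // [large, csv] (iteration order may vary)
    }
}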