
Example 86 with Relationship

use of org.apache.nifi.processor.Relationship in project nifi by apache.

In class RouteHL7, the method onPropertyModified:

@Override
public void onPropertyModified(final PropertyDescriptor descriptor, final String oldValue, final String newValue) {
    if (!descriptor.isDynamic()) {
        return;
    }
    final Map<Relationship, HL7Query> updatedQueryMap = new HashMap<>(queries);
    final Relationship relationship = new Relationship.Builder().name(descriptor.getName()).build();
    if (newValue == null) {
        updatedQueryMap.remove(relationship);
    } else {
        final HL7Query query = HL7Query.compile(newValue);
        updatedQueryMap.put(relationship, query);
    }
    this.queries = updatedQueryMap;
}
Also used : HL7Query(org.apache.nifi.hl7.query.HL7Query) HashMap(java.util.HashMap) Relationship(org.apache.nifi.processor.Relationship)
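
The copy-on-write idiom above (build a new HashMap from the current one, mutate the copy, then swap the field) lets onTrigger read the routing map without locking. Below is a minimal standalone sketch of the same idiom, assuming a volatile field and using a generic Predicate in place of HL7Query:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Predicate;

public class CopyOnWriteRoutes {

    // volatile so readers always observe a fully constructed map after the swap
    private volatile Map<String, Predicate<String>> routes = new HashMap<>();

    /** Mirrors onPropertyModified above: a null query removes the route. */
    public void updateRoute(final String name, final Predicate<String> query) {
        final Map<String, Predicate<String>> updated = new HashMap<>(routes);
        if (query == null) {
            updated.remove(name);
        } else {
            updated.put(name, query);
        }
        // Atomic reference swap; concurrent readers never see a half-updated map.
        this.routes = updated;
    }

    /** Returns the name of the first matching route, or null if none match. */
    public String route(final String message) {
        for (final Map.Entry<String, Predicate<String>> entry : routes.entrySet()) {
            if (entry.getValue().test(message)) {
                return entry.getKey();
            }
        }
        return null;
    }
}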

Example 87 with Relationship

use of org.apache.nifi.processor.Relationship in project nifi by apache.

In class PutFileTransfer, the method identifyAndResolveConflictFile:

// Attempts to identify naming or content issues with files before they are transferred.
private ConflictResult identifyAndResolveConflictFile(final String conflictResolutionType, final T transfer, final String path, final FlowFile flowFile, final boolean rejectZeroByteFiles, final ComponentLog logger) throws IOException {
    Relationship destinationRelationship = REL_SUCCESS;
    String fileName = flowFile.getAttribute(CoreAttributes.FILENAME.key());
    boolean transferFile = true;
    boolean penalizeFile = false;
    // First, reject zero-byte files if configured to do so
    if (rejectZeroByteFiles) {
        final long sizeInBytes = flowFile.getSize();
        if (sizeInBytes == 0) {
            logger.warn("Rejecting {} because it is zero bytes", new Object[] { flowFile });
            return new ConflictResult(REL_REJECT, false, fileName, true);
        }
    }
    // Second, check if the user doesn't care about detecting naming conflicts ahead of time
    if (conflictResolutionType.equalsIgnoreCase(FileTransfer.CONFLICT_RESOLUTION_NONE)) {
        return new ConflictResult(destinationRelationship, transferFile, fileName, penalizeFile);
    }
    final FileInfo remoteFileInfo = transfer.getRemoteFileInfo(flowFile, path, fileName);
    if (remoteFileInfo == null) {
        return new ConflictResult(destinationRelationship, transferFile, fileName, penalizeFile);
    }
    if (remoteFileInfo.isDirectory()) {
        logger.warn("Resolving conflict by rejecting {} due to conflicting filename with a directory or file already on remote server", new Object[] { flowFile });
        return new ConflictResult(REL_REJECT, false, fileName, false);
    }
    logger.info("Discovered a filename conflict on the remote server for {} so handling using configured Conflict Resolution of {}", new Object[] { flowFile, conflictResolutionType });
    switch(conflictResolutionType.toUpperCase()) {
        case FileTransfer.CONFLICT_RESOLUTION_REJECT:
            destinationRelationship = REL_REJECT;
            transferFile = false;
            penalizeFile = false;
            logger.warn("Resolving conflict by rejecting {} due to conflicting filename with a directory or file already on remote server", new Object[] { flowFile });
            break;
        case FileTransfer.CONFLICT_RESOLUTION_REPLACE:
            transfer.deleteFile(flowFile, path, fileName);
            destinationRelationship = REL_SUCCESS;
            transferFile = true;
            penalizeFile = false;
            logger.info("Resolving filename conflict for {} with remote server by deleting remote file and replacing with flow file", new Object[] { flowFile });
            break;
        case FileTransfer.CONFLICT_RESOLUTION_RENAME:
            boolean uniqueNameGenerated = false;
            for (int i = 1; i < 100 && !uniqueNameGenerated; i++) {
                String possibleFileName = i + "." + fileName;
                final FileInfo renamedFileInfo = transfer.getRemoteFileInfo(flowFile, path, possibleFileName);
                uniqueNameGenerated = (renamedFileInfo == null);
                if (uniqueNameGenerated) {
                    fileName = possibleFileName;
                    logger.info("Attempting to resolve filename conflict for {} on the remote server by using a newly generated filename of: {}", new Object[] { flowFile, fileName });
                    destinationRelationship = REL_SUCCESS;
                    transferFile = true;
                    penalizeFile = false;
                    break;
                }
            }
            if (!uniqueNameGenerated) {
                destinationRelationship = REL_REJECT;
                transferFile = false;
                penalizeFile = false;
                logger.warn("Could not determine a unique name after 99 attempts for.  Switching resolution mode to REJECT for " + flowFile);
            }
            break;
        case FileTransfer.CONFLICT_RESOLUTION_IGNORE:
            destinationRelationship = REL_SUCCESS;
            transferFile = false;
            penalizeFile = false;
            logger.info("Resolving conflict for {}  by not transferring file and and still considering the process a success.", new Object[] { flowFile });
            break;
        case FileTransfer.CONFLICT_RESOLUTION_FAIL:
            destinationRelationship = REL_FAILURE;
            transferFile = false;
            penalizeFile = true;
            logger.warn("Resolved filename conflict for {} as configured by routing to FAILURE relationship.", new Object[] { flowFile });
        default:
            break;
    }
    return new ConflictResult(destinationRelationship, transferFile, fileName, penalizeFile);
}
Also used : FileInfo(org.apache.nifi.processors.standard.util.FileInfo) Relationship(org.apache.nifi.processor.Relationship)
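
The CONFLICT_RESOLUTION_RENAME branch above probes candidate names 1.name through 99.name until one is free on the remote server. A self-contained sketch of that probe, with a Set<String> standing in for the remote listing (an assumption for illustration; the real code calls transfer.getRemoteFileInfo for each candidate):

import java.util.Set;

public final class RenameProbe {

    private RenameProbe() {
    }

    /** Returns "1.name", "2.name", ..., or null if candidates 1..99 are all taken. */
    public static String firstFreeName(final String fileName, final Set<String> remoteNames) {
        for (int i = 1; i < 100; i++) {
            final String candidate = i + "." + fileName;
            if (!remoteNames.contains(candidate)) {
                return candidate;
            }
        }
        // Mirrors the REJECT fallback above once 99 attempts are exhausted.
        return null;
    }

    public static void main(final String[] args) {
        // Prints "2.data.csv": "1.data.csv" is already taken, so the probe moves on.
        System.out.println(firstFreeName("data.csv", Set.of("data.csv", "1.data.csv")));
    }
}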

Example 88 with Relationship

use of org.apache.nifi.processor.Relationship in project nifi by apache.

In class PutSQL, the method isFragmentedTransactionReady:

/**
 * Determines whether the given FlowFiles constitute a complete fragmented transaction that is
 * ready to be processed, based on their fragment.count and fragment.index attributes.
 *
 * @param flowFiles the FlowFiles that share the same fragment.identifier attribute
 * @param transactionTimeoutMillis the maximum amount of time (in milliseconds) that we should wait
 *            for all FlowFiles in a transaction to be present before routing to failure
 * @return <code>true</code> if all fragments of the transaction are present and the FlowFiles
 *         should be processed, <code>false</code> if the FlowFiles should be returned to the
 *         queue to wait for the remaining fragments
 * @throws IllegalArgumentException if the fragment attributes are missing or inconsistent, or if
 *         the transaction timeout has expired
 */
boolean isFragmentedTransactionReady(final List<FlowFile> flowFiles, final Long transactionTimeoutMillis) throws IllegalArgumentException {
    int selectedNumFragments = 0;
    final BitSet bitSet = new BitSet();
    BiFunction<String, Object[], IllegalArgumentException> illegal = (s, objects) -> new IllegalArgumentException(String.format(s, objects));
    for (final FlowFile flowFile : flowFiles) {
        final String fragmentCount = flowFile.getAttribute(FRAGMENT_COUNT_ATTR);
        if (fragmentCount == null && flowFiles.size() == 1) {
            return true;
        } else if (fragmentCount == null) {
            throw illegal.apply("Cannot process %s because there are %d FlowFiles with the same fragment.identifier " + "attribute but not all FlowFiles have a fragment.count attribute", new Object[] { flowFile, flowFiles.size() });
        }
        final int numFragments;
        try {
            numFragments = Integer.parseInt(fragmentCount);
        } catch (final NumberFormatException nfe) {
            throw illegal.apply("Cannot process %s because the fragment.count attribute has a value of '%s', which is not an integer", new Object[] { flowFile, fragmentCount });
        }
        if (numFragments < 1) {
            throw illegal.apply("Cannot process %s because the fragment.count attribute has a value of '%s', which is not a positive integer", new Object[] { flowFile, fragmentCount });
        }
        if (selectedNumFragments == 0) {
            selectedNumFragments = numFragments;
        } else if (numFragments != selectedNumFragments) {
            throw illegal.apply("Cannot process %s because the fragment.count attribute has different values for different FlowFiles with the same fragment.identifier", new Object[] { flowFile });
        }
        final String fragmentIndex = flowFile.getAttribute(FRAGMENT_INDEX_ATTR);
        if (fragmentIndex == null) {
            throw illegal.apply("Cannot process %s because the fragment.index attribute is missing", new Object[] { flowFile });
        }
        final int idx;
        try {
            idx = Integer.parseInt(fragmentIndex);
        } catch (final NumberFormatException nfe) {
            throw illegal.apply("Cannot process %s because the fragment.index attribute has a value of '%s', which is not an integer", new Object[] { flowFile, fragmentIndex });
        }
        if (idx < 0) {
            throw illegal.apply("Cannot process %s because the fragment.index attribute has a value of '%s', which is not a positive integer", new Object[] { flowFile, fragmentIndex });
        }
        if (bitSet.get(idx)) {
            throw illegal.apply("Cannot process %s because it has the same value for the fragment.index attribute as another FlowFile with the same fragment.identifier", new Object[] { flowFile });
        }
        bitSet.set(idx);
    }
    if (selectedNumFragments == flowFiles.size()) {
        // no relationship to route FlowFiles to yet - process the FlowFiles.
        return true;
    }
    long latestQueueTime = 0L;
    for (final FlowFile flowFile : flowFiles) {
        if (flowFile.getLastQueueDate() != null && flowFile.getLastQueueDate() > latestQueueTime) {
            latestQueueTime = flowFile.getLastQueueDate();
        }
    }
    if (transactionTimeoutMillis != null) {
        if (latestQueueTime > 0L && System.currentTimeMillis() - latestQueueTime > transactionTimeoutMillis) {
            throw illegal.apply("The transaction timeout has expired for the following FlowFiles; they will be routed to failure: %s", new Object[] { flowFiles });
        }
    }
    getLogger().debug("Not enough FlowFiles for transaction. Returning all FlowFiles to queue");
    // not enough FlowFiles for this transaction. Return them all to queue.
    return false;
}
Also used : StandardValidators(org.apache.nifi.processor.util.StandardValidators) FetchFlowFiles(org.apache.nifi.processor.util.pattern.PartialFunctions.FetchFlowFiles) FlowFileFilter(org.apache.nifi.processor.FlowFileFilter) SQLNonTransientException(java.sql.SQLNonTransientException) Connection(java.sql.Connection) BiFunction(java.util.function.BiFunction) ExceptionHandler.createOnError(org.apache.nifi.processor.util.pattern.ExceptionHandler.createOnError) PropertyDescriptor(org.apache.nifi.components.PropertyDescriptor) ErrorTypes(org.apache.nifi.processor.util.pattern.ErrorTypes) RoutingResult(org.apache.nifi.processor.util.pattern.RoutingResult) WritesAttributes(org.apache.nifi.annotation.behavior.WritesAttributes) ResultSet(java.sql.ResultSet) Map(java.util.Map) ReadsAttributes(org.apache.nifi.annotation.behavior.ReadsAttributes) ExceptionHandler(org.apache.nifi.processor.util.pattern.ExceptionHandler) InputStreamCallback(org.apache.nifi.processor.io.InputStreamCallback) PutGroup(org.apache.nifi.processor.util.pattern.PutGroup) FlowFile(org.apache.nifi.flowfile.FlowFile) FragmentAttributes(org.apache.nifi.flowfile.attributes.FragmentAttributes) Set(java.util.Set) WritesAttribute(org.apache.nifi.annotation.behavior.WritesAttribute) PreparedStatement(java.sql.PreparedStatement) StandardCharsets(java.nio.charset.StandardCharsets) InputRequirement(org.apache.nifi.annotation.behavior.InputRequirement) List(java.util.List) JdbcCommon(org.apache.nifi.processors.standard.util.JdbcCommon) Tags(org.apache.nifi.annotation.documentation.Tags) DBCPService(org.apache.nifi.dbcp.DBCPService) ReadsAttribute(org.apache.nifi.annotation.behavior.ReadsAttribute) RollbackOnFailure(org.apache.nifi.processor.util.pattern.RollbackOnFailure) CapabilityDescription(org.apache.nifi.annotation.documentation.CapabilityDescription) BatchUpdateException(java.sql.BatchUpdateException) HashMap(java.util.HashMap) ProcessException(org.apache.nifi.processor.exception.ProcessException) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) SQLException(java.sql.SQLException) Relationship(org.apache.nifi.processor.Relationship) Requirement(org.apache.nifi.annotation.behavior.InputRequirement.Requirement) AbstractSessionFactoryProcessor(org.apache.nifi.processor.AbstractSessionFactoryProcessor) PartialFunctions(org.apache.nifi.processor.util.pattern.PartialFunctions) FlowFileGroup(org.apache.nifi.processor.util.pattern.PartialFunctions.FlowFileGroup) ProcessContext(org.apache.nifi.processor.ProcessContext) ProcessSession(org.apache.nifi.processor.ProcessSession) IOException(java.io.IOException) SeeAlso(org.apache.nifi.annotation.documentation.SeeAlso) ProcessSessionFactory(org.apache.nifi.processor.ProcessSessionFactory) TimeUnit(java.util.concurrent.TimeUnit) OnScheduled(org.apache.nifi.annotation.lifecycle.OnScheduled) SupportsBatching(org.apache.nifi.annotation.behavior.SupportsBatching) StreamUtils(org.apache.nifi.stream.io.StreamUtils) Statement(java.sql.Statement) BitSet(java.util.BitSet) Comparator(java.util.Comparator) InputStream(java.io.InputStream) FlowFile(org.apache.nifi.flowfile.FlowFile) BitSet(java.util.BitSet)
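
The fragment bookkeeping above boils down to three invariants: every FlowFile carries the same fragment.count, every fragment.index is unique and non-negative, and the transaction is ready once the number of FlowFiles equals the count. A simplified, self-contained sketch of that check, using plain attribute maps in place of FlowFiles (an assumption for illustration; it returns false where the real method throws):

import java.util.BitSet;
import java.util.List;
import java.util.Map;

public final class FragmentCheck {

    private FragmentCheck() {
    }

    /** True when every fragment.index in 0..count-1 is present exactly once. */
    public static boolean isComplete(final List<Map<String, String>> fragments) {
        if (fragments.isEmpty()) {
            return false;
        }
        final int count = Integer.parseInt(fragments.get(0).get("fragment.count"));
        final BitSet seen = new BitSet(count);
        for (final Map<String, String> attrs : fragments) {
            if (Integer.parseInt(attrs.get("fragment.count")) != count) {
                return false; // inconsistent fragment.count across the group
            }
            final int idx = Integer.parseInt(attrs.get("fragment.index"));
            if (idx < 0 || idx >= count || seen.get(idx)) {
                return false; // out-of-range or duplicate fragment.index
            }
            seen.set(idx);
        }
        return seen.cardinality() == count;
    }

    public static void main(final String[] args) {
        System.out.println(isComplete(List.of(
                Map.of("fragment.count", "2", "fragment.index", "0"),
                Map.of("fragment.count", "2", "fragment.index", "1")))); // prints: true
    }
}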

Example 89 with Relationship

use of org.apache.nifi.processor.Relationship in project nifi by apache.

In class QueryRecord, the method onTrigger:

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }
    final StopWatch stopWatch = new StopWatch(true);
    final RecordSetWriterFactory recordSetWriterFactory = context.getProperty(RECORD_WRITER_FACTORY).asControllerService(RecordSetWriterFactory.class);
    final RecordReaderFactory recordReaderFactory = context.getProperty(RECORD_READER_FACTORY).asControllerService(RecordReaderFactory.class);
    final Map<FlowFile, Relationship> transformedFlowFiles = new HashMap<>();
    final Set<FlowFile> createdFlowFiles = new HashSet<>();
    // Determine the Record Reader's schema
    final RecordSchema readerSchema;
    try (final InputStream rawIn = session.read(original)) {
        final Map<String, String> originalAttributes = original.getAttributes();
        final RecordReader reader = recordReaderFactory.createRecordReader(originalAttributes, rawIn, getLogger());
        final RecordSchema inputSchema = reader.getSchema();
        readerSchema = recordSetWriterFactory.getSchema(originalAttributes, inputSchema);
    } catch (final Exception e) {
        getLogger().error("Failed to determine Record Schema from {}; routing to failure", new Object[] { original, e });
        session.transfer(original, REL_FAILURE);
        return;
    }
    // Determine the schema for writing the data
    final Map<String, String> originalAttributes = original.getAttributes();
    int recordsRead = 0;
    try {
        for (final PropertyDescriptor descriptor : context.getProperties().keySet()) {
            if (!descriptor.isDynamic()) {
                continue;
            }
            final Relationship relationship = new Relationship.Builder().name(descriptor.getName()).build();
            // We have to fork a child because we may need to read the input FlowFile more than once,
            // and we cannot call session.read() on the original FlowFile while we are within a write
            // callback for the original FlowFile.
            FlowFile transformed = session.create(original);
            boolean flowFileRemoved = false;
            try {
                final String sql = context.getProperty(descriptor).evaluateAttributeExpressions(original).getValue();
                final AtomicReference<WriteResult> writeResultRef = new AtomicReference<>();
                final QueryResult queryResult;
                if (context.getProperty(CACHE_SCHEMA).asBoolean()) {
                    queryResult = queryWithCache(session, original, sql, context, recordReaderFactory);
                } else {
                    queryResult = query(session, original, sql, context, recordReaderFactory);
                }
                final AtomicReference<String> mimeTypeRef = new AtomicReference<>();
                try {
                    final ResultSet rs = queryResult.getResultSet();
                    transformed = session.write(transformed, new OutputStreamCallback() {

                        @Override
                        public void process(final OutputStream out) throws IOException {
                            final ResultSetRecordSet recordSet;
                            final RecordSchema writeSchema;
                            try {
                                recordSet = new ResultSetRecordSet(rs, readerSchema);
                                final RecordSchema resultSetSchema = recordSet.getSchema();
                                writeSchema = recordSetWriterFactory.getSchema(originalAttributes, resultSetSchema);
                            } catch (final SQLException | SchemaNotFoundException e) {
                                throw new ProcessException(e);
                            }
                            try (final RecordSetWriter resultSetWriter = recordSetWriterFactory.createWriter(getLogger(), writeSchema, out)) {
                                writeResultRef.set(resultSetWriter.write(recordSet));
                                mimeTypeRef.set(resultSetWriter.getMimeType());
                            } catch (final Exception e) {
                                throw new IOException(e);
                            }
                        }
                    });
                } finally {
                    closeQuietly(queryResult);
                }
                recordsRead = Math.max(recordsRead, queryResult.getRecordsRead());
                final WriteResult result = writeResultRef.get();
                if (result.getRecordCount() == 0 && !context.getProperty(INCLUDE_ZERO_RECORD_FLOWFILES).asBoolean()) {
                    session.remove(transformed);
                    flowFileRemoved = true;
                    transformedFlowFiles.remove(transformed);
                    getLogger().info("Transformed {} but the result contained no data so will not pass on a FlowFile", new Object[] { original });
                } else {
                    final Map<String, String> attributesToAdd = new HashMap<>();
                    if (result.getAttributes() != null) {
                        attributesToAdd.putAll(result.getAttributes());
                    }
                    attributesToAdd.put(CoreAttributes.MIME_TYPE.key(), mimeTypeRef.get());
                    attributesToAdd.put("record.count", String.valueOf(result.getRecordCount()));
                    transformed = session.putAllAttributes(transformed, attributesToAdd);
                    transformedFlowFiles.put(transformed, relationship);
                    session.adjustCounter("Records Written", result.getRecordCount(), false);
                }
            } finally {
                // Ensure that we have the FlowFile in the set in case we throw any Exception
                if (!flowFileRemoved) {
                    createdFlowFiles.add(transformed);
                }
            }
        }
        final long elapsedMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS);
        if (transformedFlowFiles.size() > 0) {
            session.getProvenanceReporter().fork(original, transformedFlowFiles.keySet(), elapsedMillis);
            for (final Map.Entry<FlowFile, Relationship> entry : transformedFlowFiles.entrySet()) {
                final FlowFile transformed = entry.getKey();
                final Relationship relationship = entry.getValue();
                session.getProvenanceReporter().route(transformed, relationship);
                session.transfer(transformed, relationship);
            }
        }
        getLogger().info("Successfully queried {} in {} millis", new Object[] { original, elapsedMillis });
        session.transfer(original, REL_ORIGINAL);
    } catch (final SQLException e) {
        getLogger().error("Unable to query {} due to {}", new Object[] { original, e.getCause() == null ? e : e.getCause() });
        session.remove(createdFlowFiles);
        session.transfer(original, REL_FAILURE);
    } catch (final Exception e) {
        getLogger().error("Unable to query {} due to {}", new Object[] { original, e });
        session.remove(createdFlowFiles);
        session.transfer(original, REL_FAILURE);
    }
    session.adjustCounter("Records Read", recordsRead, false);
}
Also used : HashMap(java.util.HashMap) SQLException(java.sql.SQLException) RecordReader(org.apache.nifi.serialization.RecordReader) OutputStream(java.io.OutputStream) RecordSetWriter(org.apache.nifi.serialization.RecordSetWriter) RecordSetWriterFactory(org.apache.nifi.serialization.RecordSetWriterFactory) ResultSet(java.sql.ResultSet) OutputStreamCallback(org.apache.nifi.processor.io.OutputStreamCallback) RecordSchema(org.apache.nifi.serialization.record.RecordSchema) HashSet(java.util.HashSet) FlowFile(org.apache.nifi.flowfile.FlowFile) PropertyDescriptor(org.apache.nifi.components.PropertyDescriptor) InputStream(java.io.InputStream) AtomicReference(java.util.concurrent.atomic.AtomicReference) IOException(java.io.IOException) ResultSetRecordSet(org.apache.nifi.serialization.record.ResultSetRecordSet) SchemaNotFoundException(org.apache.nifi.schema.access.SchemaNotFoundException) ProcessException(org.apache.nifi.processor.exception.ProcessException) SQLException(java.sql.SQLException) IOException(java.io.IOException) StopWatch(org.apache.nifi.util.StopWatch) RecordReaderFactory(org.apache.nifi.serialization.RecordReaderFactory) ProcessException(org.apache.nifi.processor.exception.ProcessException) WriteResult(org.apache.nifi.serialization.WriteResult) Relationship(org.apache.nifi.processor.Relationship) DynamicRelationship(org.apache.nifi.annotation.behavior.DynamicRelationship) SchemaNotFoundException(org.apache.nifi.schema.access.SchemaNotFoundException) Map(java.util.Map) HashMap(java.util.HashMap)
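
Because each dynamic property becomes a relationship named after the property (the Relationship.Builder call in the loop above), QueryRecord can be exercised in a unit test by setting a dynamic SQL property and asserting on that relationship name. A hedged sketch using NiFi's mock record reader/writer test utilities; the mock class names and the sample schema are assumptions based on the NiFi test jars:

import org.apache.nifi.processors.standard.QueryRecord;
import org.apache.nifi.serialization.record.MockRecordParser;
import org.apache.nifi.serialization.record.MockRecordWriter;
import org.apache.nifi.serialization.record.RecordFieldType;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;
import org.junit.Test;

public class QueryRecordSketchTest {

    @Test
    public void testDynamicPropertyBecomesRelationship() throws Exception {
        final TestRunner runner = TestRunners.newTestRunner(QueryRecord.class);

        // Mock reader that produces a single record with amount = 150.
        final MockRecordParser parser = new MockRecordParser();
        parser.addSchemaField("amount", RecordFieldType.INT);
        parser.addRecord(150);
        runner.addControllerService("reader", parser);
        runner.enableControllerService(parser);

        final MockRecordWriter writer = new MockRecordWriter();
        runner.addControllerService("writer", writer);
        runner.enableControllerService(writer);

        runner.setProperty(QueryRecord.RECORD_READER_FACTORY, "reader");
        runner.setProperty(QueryRecord.RECORD_WRITER_FACTORY, "writer");
        // The dynamic property name is the relationship; the value is the SQL.
        runner.setProperty("over.threshold", "SELECT * FROM FLOWFILE WHERE amount > 100");

        runner.enqueue(new byte[0]); // content is ignored; records come from the mock parser
        runner.run();

        runner.assertTransferCount("over.threshold", 1);
        runner.assertTransferCount("original", 1);
    }
}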

Example 90 with Relationship

use of org.apache.nifi.processor.Relationship in project nifi by apache.

In class RouteOnAttribute, the method onScheduled:

/**
 * When this processor is scheduled, load the dynamic properties into a map
 * for quick access during each onTrigger call.
 *
 * @param context the ProcessContext used to retrieve the dynamic properties
 */
@OnScheduled
public void onScheduled(final ProcessContext context) {
    final Map<Relationship, PropertyValue> newPropertyMap = new HashMap<>();
    for (final PropertyDescriptor descriptor : context.getProperties().keySet()) {
        if (!descriptor.isDynamic()) {
            continue;
        }
        getLogger().debug("Adding new dynamic property: {}", new Object[] { descriptor });
        newPropertyMap.put(new Relationship.Builder().name(descriptor.getName()).build(), context.getProperty(descriptor));
    }
    this.propertyMap = newPropertyMap;
}
Also used : PropertyDescriptor(org.apache.nifi.components.PropertyDescriptor) HashMap(java.util.HashMap) Relationship(org.apache.nifi.processor.Relationship) DynamicRelationship(org.apache.nifi.annotation.behavior.DynamicRelationship) PropertyValue(org.apache.nifi.components.PropertyValue) OnScheduled(org.apache.nifi.annotation.lifecycle.OnScheduled)
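
For context, the map built in onScheduled is consumed in onTrigger by evaluating each dynamic property's Expression Language result against the FlowFile. The sketch below is a simplified assumption of that consuming side, not the actual RouteOnAttribute code (the real processor also supports routing strategies; REL_NO_MATCH is assumed to be its "unmatched" relationship):

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    // Snapshot the field once so routing is consistent for this FlowFile.
    final Map<Relationship, PropertyValue> map = this.propertyMap;
    for (final Map.Entry<Relationship, PropertyValue> entry : map.entrySet()) {
        final Boolean matches = entry.getValue().evaluateAttributeExpressions(flowFile).asBoolean();
        if (Boolean.TRUE.equals(matches)) {
            session.transfer(flowFile, entry.getKey());
            return;
        }
    }
    session.transfer(flowFile, REL_NO_MATCH);
}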

Aggregations

Relationship (org.apache.nifi.processor.Relationship) 106
ArrayList (java.util.ArrayList) 41
HashSet (java.util.HashSet) 40
HashMap (java.util.HashMap) 32
FlowFile (org.apache.nifi.flowfile.FlowFile) 32
Map (java.util.Map) 31
IOException (java.io.IOException) 26
PropertyDescriptor (org.apache.nifi.components.PropertyDescriptor) 26
Test (org.junit.Test) 23
List (java.util.List) 20
Set (java.util.Set) 19
Connection (org.apache.nifi.connectable.Connection) 18
TestRunner (org.apache.nifi.util.TestRunner) 18
ProcessException (org.apache.nifi.processor.exception.ProcessException) 17
ProcessSession (org.apache.nifi.processor.ProcessSession) 15
InputStream (java.io.InputStream) 14
DynamicRelationship (org.apache.nifi.annotation.behavior.DynamicRelationship) 12
Processor (org.apache.nifi.processor.Processor) 12
Collections (java.util.Collections) 11
AtomicLong (java.util.concurrent.atomic.AtomicLong) 10