Example 91 with ProcessContext

use of org.apache.nifi.processor.ProcessContext in project nifi by apache.

the class TestPublishKafkaRecord_0_11 method setup.

@Before
public void setup() throws InitializationException, IOException {
    mockPool = mock(PublisherPool.class);
    mockLease = mock(PublisherLease.class);
    Mockito.doCallRealMethod().when(mockLease).publish(any(FlowFile.class), any(RecordSet.class), any(RecordSetWriterFactory.class), any(RecordSchema.class), any(String.class), any(String.class));
    when(mockPool.obtainPublisher()).thenReturn(mockLease);
    runner = TestRunners.newTestRunner(new PublishKafkaRecord_0_11() {

        @Override
        protected PublisherPool createPublisherPool(final ProcessContext context) {
            return mockPool;
        }
    });
    runner.setProperty(PublishKafkaRecord_0_11.TOPIC, TOPIC_NAME);
    final String readerId = "record-reader";
    final MockRecordParser readerService = new MockRecordParser();
    readerService.addSchemaField("name", RecordFieldType.STRING);
    readerService.addSchemaField("age", RecordFieldType.INT);
    runner.addControllerService(readerId, readerService);
    runner.enableControllerService(readerService);
    final String writerId = "record-writer";
    final RecordSetWriterFactory writerService = new MockRecordWriter("name, age");
    runner.addControllerService(writerId, writerService);
    runner.enableControllerService(writerService);
    runner.setProperty(PublishKafkaRecord_0_11.RECORD_READER, readerId);
    runner.setProperty(PublishKafkaRecord_0_11.RECORD_WRITER, writerId);
    runner.setProperty(PublishKafka_0_11.DELIVERY_GUARANTEE, PublishKafka_0_11.DELIVERY_REPLICATED);
}
Also used : FlowFile(org.apache.nifi.flowfile.FlowFile) MockFlowFile(org.apache.nifi.util.MockFlowFile) RecordSetWriterFactory(org.apache.nifi.serialization.RecordSetWriterFactory) RecordSet(org.apache.nifi.serialization.record.RecordSet) MockRecordWriter(org.apache.nifi.processors.kafka.pubsub.util.MockRecordWriter) RecordSchema(org.apache.nifi.serialization.record.RecordSchema) ProcessContext(org.apache.nifi.processor.ProcessContext) MockRecordParser(org.apache.nifi.processors.kafka.pubsub.util.MockRecordParser) Before(org.junit.Before)
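A test built on this setup only needs to enqueue record data and run the processor; the mocks take care of the Kafka side. A minimal sketch, assuming static imports of the usual Mockito/JUnit helpers and that mockLease.complete() is stubbed to return an all-success PublishResult (createAllSuccessPublishResult is a hypothetical helper, not part of the setup above):

@Test
public void testSingleRecordRoutedToSuccess() throws IOException {
    // One record matching the mock schema ("name, age") declared in setup().
    final MockFlowFile flowFile = runner.enqueue("John Doe, 48".getBytes(StandardCharsets.UTF_8));
    // Hypothetical stub: describe a publish in which every record succeeded.
    when(mockLease.complete()).thenReturn(createAllSuccessPublishResult(flowFile, 1));
    runner.run();
    runner.assertAllFlowFilesTransferred(PublishKafkaRecord_0_11.REL_SUCCESS, 1);
    // The lease obtained from the mocked pool must have been asked to publish exactly once.
    verify(mockLease, times(1)).publish(any(FlowFile.class), any(RecordSet.class), any(RecordSetWriterFactory.class), any(RecordSchema.class), any(String.class), any(String.class));
    verify(mockLease, times(1)).complete();
}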

Example 92 with ProcessContext

use of org.apache.nifi.processor.ProcessContext in project nifi by apache.

the class PutSQL method onBatchUpdateError.

private ExceptionHandler.OnError<FunctionContext, StatementFlowFileEnclosure> onBatchUpdateError(final ProcessContext context, final ProcessSession session, final RoutingResult result) {
    return RollbackOnFailure.createOnError((c, enclosure, r, e) -> {
        // If rollbackOnFailure is enabled, the error will be thrown as ProcessException instead.
        if (e instanceof BatchUpdateException && !c.isRollbackOnFailure()) {
            // If we get a BatchUpdateException, then we want to determine which FlowFile caused the failure,
            // and route that FlowFile to failure while routing those that finished processing to success and those
            // that have not yet been executed to retry.
            // Currently fragmented transaction does not use batch update.
            final int[] updateCounts = ((BatchUpdateException) e).getUpdateCounts();
            final List<FlowFile> batchFlowFiles = enclosure.getFlowFiles();
            // In the presence of a BatchUpdateException, the driver has the option of either stopping when an error
            // occurs, or continuing. If it continues, then it must account for all statements in the batch and for
            // those that fail return a Statement.EXECUTE_FAILED for the number of rows updated.
            // So we will iterate over all of the update counts returned. If any is equal to Statement.EXECUTE_FAILED,
            // we will route the corresponding FlowFile to failure. Otherwise, the FlowFile will go to success
            // unless it has not yet been processed (its index in the List > updateCounts.length).
            int failureCount = 0;
            int successCount = 0;
            int retryCount = 0;
            for (int i = 0; i < updateCounts.length; i++) {
                final int updateCount = updateCounts[i];
                final FlowFile flowFile = batchFlowFiles.get(i);
                if (updateCount == Statement.EXECUTE_FAILED) {
                    result.routeTo(flowFile, REL_FAILURE);
                    failureCount++;
                } else {
                    result.routeTo(flowFile, REL_SUCCESS);
                    successCount++;
                }
            }
            if (failureCount == 0) {
                // if no failures found, the driver decided not to execute the statements after the
                // failure, so route the last one to failure.
                final FlowFile failedFlowFile = batchFlowFiles.get(updateCounts.length);
                result.routeTo(failedFlowFile, REL_FAILURE);
                failureCount++;
            }
            if (updateCounts.length < batchFlowFiles.size()) {
                final List<FlowFile> unexecuted = batchFlowFiles.subList(updateCounts.length + 1, batchFlowFiles.size());
                for (final FlowFile flowFile : unexecuted) {
                    result.routeTo(flowFile, REL_RETRY);
                    retryCount++;
                }
            }
            getLogger().error("Failed to update database due to a failed batch update, {}. There were a total of {} FlowFiles that failed, {} that succeeded, " + "and {} that were not execute and will be routed to retry; ", new Object[] { e, failureCount, successCount, retryCount }, e);
            return;
        }
        // Apply default error handling and logging for other Exceptions.
        ExceptionHandler.OnError<RollbackOnFailure, FlowFileGroup> onGroupError = ExceptionHandler.createOnGroupError(context, session, result, REL_FAILURE, REL_RETRY);
        onGroupError = onGroupError.andThen((cl, il, rl, el) -> {
            switch(r.destination()) {
                case Failure:
                    getLogger().error("Failed to update database for {} due to {}; routing to failure", new Object[] { il.getFlowFiles(), e }, e);
                    break;
                case Retry:
                    getLogger().error("Failed to update database for {} due to {}; it is possible that retrying the operation will succeed, so routing to retry", new Object[] { il.getFlowFiles(), e }, e);
                    break;
            }
        });
        onGroupError.apply(c, enclosure, r, e);
    });
}
Also used : ExceptionHandler(org.apache.nifi.processor.util.pattern.ExceptionHandler) StandardValidators(org.apache.nifi.processor.util.StandardValidators) FetchFlowFiles(org.apache.nifi.processor.util.pattern.PartialFunctions.FetchFlowFiles) FlowFileFilter(org.apache.nifi.processor.FlowFileFilter) SQLNonTransientException(java.sql.SQLNonTransientException) Connection(java.sql.Connection) BiFunction(java.util.function.BiFunction) ExceptionHandler.createOnError(org.apache.nifi.processor.util.pattern.ExceptionHandler.createOnError) PropertyDescriptor(org.apache.nifi.components.PropertyDescriptor) ErrorTypes(org.apache.nifi.processor.util.pattern.ErrorTypes) RoutingResult(org.apache.nifi.processor.util.pattern.RoutingResult) WritesAttributes(org.apache.nifi.annotation.behavior.WritesAttributes) ResultSet(java.sql.ResultSet) Map(java.util.Map) ReadsAttributes(org.apache.nifi.annotation.behavior.ReadsAttributes) InputStreamCallback(org.apache.nifi.processor.io.InputStreamCallback) PutGroup(org.apache.nifi.processor.util.pattern.PutGroup) FlowFile(org.apache.nifi.flowfile.FlowFile) FragmentAttributes(org.apache.nifi.flowfile.attributes.FragmentAttributes) Set(java.util.Set) WritesAttribute(org.apache.nifi.annotation.behavior.WritesAttribute) PreparedStatement(java.sql.PreparedStatement) StandardCharsets(java.nio.charset.StandardCharsets) InputRequirement(org.apache.nifi.annotation.behavior.InputRequirement) List(java.util.List) JdbcCommon(org.apache.nifi.processors.standard.util.JdbcCommon) Tags(org.apache.nifi.annotation.documentation.Tags) DBCPService(org.apache.nifi.dbcp.DBCPService) ReadsAttribute(org.apache.nifi.annotation.behavior.ReadsAttribute) RollbackOnFailure(org.apache.nifi.processor.util.pattern.RollbackOnFailure) CapabilityDescription(org.apache.nifi.annotation.documentation.CapabilityDescription) BatchUpdateException(java.sql.BatchUpdateException) HashMap(java.util.HashMap) ProcessException(org.apache.nifi.processor.exception.ProcessException) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) SQLException(java.sql.SQLException) Relationship(org.apache.nifi.processor.Relationship) Requirement(org.apache.nifi.annotation.behavior.InputRequirement.Requirement) AbstractSessionFactoryProcessor(org.apache.nifi.processor.AbstractSessionFactoryProcessor) PartialFunctions(org.apache.nifi.processor.util.pattern.PartialFunctions) FlowFileGroup(org.apache.nifi.processor.util.pattern.PartialFunctions.FlowFileGroup) ProcessContext(org.apache.nifi.processor.ProcessContext) ProcessSession(org.apache.nifi.processor.ProcessSession) IOException(java.io.IOException) SeeAlso(org.apache.nifi.annotation.documentation.SeeAlso) ProcessSessionFactory(org.apache.nifi.processor.ProcessSessionFactory) TimeUnit(java.util.concurrent.TimeUnit) OnScheduled(org.apache.nifi.annotation.lifecycle.OnScheduled) SupportsBatching(org.apache.nifi.annotation.behavior.SupportsBatching) StreamUtils(org.apache.nifi.stream.io.StreamUtils) Statement(java.sql.Statement) BitSet(java.util.BitSet) Comparator(java.util.Comparator) InputStream(java.io.InputStream)
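The routing rule above can be separated from the NiFi plumbing: the driver's update counts plus the batch size fully determine which statement index goes to success, failure, or retry. A minimal, self-contained sketch of that classification (plain JDK; Route and BatchRoutingSketch are illustrative names, not part of PutSQL):

import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;

public class BatchRoutingSketch {

    enum Route { SUCCESS, FAILURE, RETRY }

    static List<Route> route(final int[] updateCounts, final int batchSize) {
        final List<Route> routes = new ArrayList<>();
        boolean sawFailure = false;
        // Indices the driver reported on are routed by their update count.
        for (final int count : updateCounts) {
            if (count == Statement.EXECUTE_FAILED) {
                routes.add(Route.FAILURE);
                sawFailure = true;
            } else {
                routes.add(Route.SUCCESS);
            }
        }
        // If the driver stopped at the failure instead of continuing, the first
        // unreported statement is the one that failed...
        if (!sawFailure && routes.size() < batchSize) {
            routes.add(Route.FAILURE);
        }
        // ...and everything after it was never executed, so it is retried.
        while (routes.size() < batchSize) {
            routes.add(Route.RETRY);
        }
        return routes;
    }

    public static void main(final String[] args) {
        // A driver that stopped after two successful statements in a batch of five:
        System.out.println(route(new int[] { 1, 1 }, 5));
        // prints [SUCCESS, SUCCESS, FAILURE, RETRY, RETRY]
    }
}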

Example 93 with ProcessContext

use of org.apache.nifi.processor.ProcessContext in project nifi by apache.

the class SplitXml method onTrigger.

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }
    final int depth = context.getProperty(SPLIT_DEPTH).asInteger();
    final ComponentLog logger = getLogger();
    final List<FlowFile> splits = new ArrayList<>();
    final String fragmentIdentifier = UUID.randomUUID().toString();
    final AtomicInteger numberOfRecords = new AtomicInteger(0);
    final XmlSplitterSaxParser parser = new XmlSplitterSaxParser(xmlTree -> {
        FlowFile split = session.create(original);
        split = session.write(split, out -> out.write(xmlTree.getBytes("UTF-8")));
        split = session.putAttribute(split, FRAGMENT_ID.key(), fragmentIdentifier);
        split = session.putAttribute(split, FRAGMENT_INDEX.key(), Integer.toString(numberOfRecords.getAndIncrement()));
        split = session.putAttribute(split, SEGMENT_ORIGINAL_FILENAME.key(), split.getAttribute(CoreAttributes.FILENAME.key()));
        splits.add(split);
    }, depth);
    final AtomicBoolean failed = new AtomicBoolean(false);
    session.read(original, rawIn -> {
        try (final InputStream in = new java.io.BufferedInputStream(rawIn)) {
            try {
                final XMLReader reader = XmlUtils.createSafeSaxReader(saxParserFactory, parser);
                reader.parse(new InputSource(in));
            } catch (final ParserConfigurationException | SAXException e) {
                logger.error("Unable to parse {} due to {}", new Object[] { original, e });
                failed.set(true);
            }
        }
    });
    if (failed.get()) {
        session.transfer(original, REL_FAILURE);
        session.remove(splits);
    } else {
        splits.forEach((split) -> {
            split = session.putAttribute(split, FRAGMENT_COUNT.key(), Integer.toString(numberOfRecords.get()));
            session.transfer(split, REL_SPLIT);
        });
        final FlowFile originalToTransfer = copyAttributesToOriginal(session, original, fragmentIdentifier, numberOfRecords.get());
        session.transfer(originalToTransfer, REL_ORIGINAL);
        logger.info("Split {} into {} FlowFiles", new Object[] { originalToTransfer, splits.size() });
    }
}
Also used : StandardValidators(org.apache.nifi.processor.util.StandardValidators) XmlUtils(org.apache.nifi.security.xml.XmlUtils) LoggerFactory(org.slf4j.LoggerFactory) SystemResource(org.apache.nifi.annotation.behavior.SystemResource) SideEffectFree(org.apache.nifi.annotation.behavior.SideEffectFree) PropertyDescriptor(org.apache.nifi.components.PropertyDescriptor) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) WritesAttributes(org.apache.nifi.annotation.behavior.WritesAttributes) Map(java.util.Map) FragmentAttributes.copyAttributesToOriginal(org.apache.nifi.flowfile.attributes.FragmentAttributes.copyAttributesToOriginal) StringEscapeUtils(org.apache.commons.lang3.StringEscapeUtils) FlowFile(org.apache.nifi.flowfile.FlowFile) FRAGMENT_ID(org.apache.nifi.flowfile.attributes.FragmentAttributes.FRAGMENT_ID) Set(java.util.Set) UUID(java.util.UUID) WritesAttribute(org.apache.nifi.annotation.behavior.WritesAttribute) List(java.util.List) InputRequirement(org.apache.nifi.annotation.behavior.InputRequirement) SystemResourceConsideration(org.apache.nifi.annotation.behavior.SystemResourceConsideration) SAXException(org.xml.sax.SAXException) Entry(java.util.Map.Entry) Tags(org.apache.nifi.annotation.documentation.Tags) ProcessorInitializationContext(org.apache.nifi.processor.ProcessorInitializationContext) FRAGMENT_INDEX(org.apache.nifi.flowfile.attributes.FragmentAttributes.FRAGMENT_INDEX) CapabilityDescription(org.apache.nifi.annotation.documentation.CapabilityDescription) SAXParserFactory(javax.xml.parsers.SAXParserFactory) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) EventDriven(org.apache.nifi.annotation.behavior.EventDriven) ComponentLog(org.apache.nifi.logging.ComponentLog) Locator(org.xml.sax.Locator) ArrayList(java.util.ArrayList) FRAGMENT_COUNT(org.apache.nifi.flowfile.attributes.FragmentAttributes.FRAGMENT_COUNT) HashSet(java.util.HashSet) XMLReader(org.xml.sax.XMLReader) Relationship(org.apache.nifi.processor.Relationship) Requirement(org.apache.nifi.annotation.behavior.InputRequirement.Requirement) Attributes(org.xml.sax.Attributes) ContentHandler(org.xml.sax.ContentHandler) InputSource(org.xml.sax.InputSource) Logger(org.slf4j.Logger) ProcessContext(org.apache.nifi.processor.ProcessContext) ProcessSession(org.apache.nifi.processor.ProcessSession) SEGMENT_ORIGINAL_FILENAME(org.apache.nifi.flowfile.attributes.FragmentAttributes.SEGMENT_ORIGINAL_FILENAME) XmlElementNotifier(org.apache.nifi.processors.standard.util.XmlElementNotifier) TreeMap(java.util.TreeMap) SupportsBatching(org.apache.nifi.annotation.behavior.SupportsBatching) ParserConfigurationException(javax.xml.parsers.ParserConfigurationException) AbstractProcessor(org.apache.nifi.processor.AbstractProcessor) CoreAttributes(org.apache.nifi.flowfile.attributes.CoreAttributes) Collections(java.util.Collections) InputStream(java.io.InputStream)
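Exercising this onTrigger through the test framework is straightforward; a short sketch, where the XML body and the expected counts are illustrative and the fragment.* keys are the FragmentAttributes constants used above:

final TestRunner runner = TestRunners.newTestRunner(new SplitXml());
runner.setProperty(SplitXml.SPLIT_DEPTH, "1");
runner.enqueue("<root><a>1</a><b>2</b></root>".getBytes(StandardCharsets.UTF_8));
runner.run();

runner.assertTransferCount(SplitXml.REL_SPLIT, 2);
runner.assertTransferCount(SplitXml.REL_ORIGINAL, 1);
runner.assertTransferCount(SplitXml.REL_FAILURE, 0);
for (final MockFlowFile split : runner.getFlowFilesForRelationship(SplitXml.REL_SPLIT)) {
    split.assertAttributeExists(FRAGMENT_ID.key());
    split.assertAttributeEquals(FRAGMENT_COUNT.key(), "2");
}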

Example 94 with ProcessContext

use of org.apache.nifi.processor.ProcessContext in project nifi by apache.

the class Wait method onTrigger.

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLogger();
    // Signal id is computed from attribute 'RELEASE_SIGNAL_IDENTIFIER' with expression language support
    final PropertyValue signalIdProperty = context.getProperty(RELEASE_SIGNAL_IDENTIFIER);
    final Integer bufferCount = context.getProperty(WAIT_BUFFER_COUNT).asInteger();
    final Map<Relationship, List<FlowFile>> processedFlowFiles = new HashMap<>();
    final Function<Relationship, List<FlowFile>> getFlowFilesFor = r -> processedFlowFiles.computeIfAbsent(r, k -> new ArrayList<>());
    final AtomicReference<String> targetSignalId = new AtomicReference<>();
    final AtomicInteger bufferedCount = new AtomicInteger(0);
    final List<FlowFile> failedFilteringFlowFiles = new ArrayList<>();
    final Supplier<FlowFileFilter.FlowFileFilterResult> acceptResultSupplier = () -> bufferedCount.incrementAndGet() == bufferCount ? ACCEPT_AND_TERMINATE : ACCEPT_AND_CONTINUE;
    final List<FlowFile> flowFiles = session.get(f -> {
        final String fSignalId = signalIdProperty.evaluateAttributeExpressions(f).getValue();
        // if the computed value is null, or empty, we transfer the FlowFile to failure relationship
        if (StringUtils.isBlank(fSignalId)) {
            // We can't penalize f before getting it from session, so keep it in a temporary list.
            logger.error("FlowFile {} has no attribute for given Release Signal Identifier", new Object[] { f });
            failedFilteringFlowFiles.add(f);
            return ACCEPT_AND_CONTINUE;
        }
        final String targetSignalIdStr = targetSignalId.get();
        if (targetSignalIdStr == null) {
            // This is the first one.
            targetSignalId.set(fSignalId);
            return acceptResultSupplier.get();
        }
        if (targetSignalIdStr.equals(fSignalId)) {
            return acceptResultSupplier.get();
        }
        return REJECT_AND_CONTINUE;
    });
    final String attributeCopyMode = context.getProperty(ATTRIBUTE_COPY_MODE).getValue();
    final boolean replaceOriginalAttributes = ATTRIBUTE_COPY_REPLACE.getValue().equals(attributeCopyMode);
    final AtomicReference<Signal> signalRef = new AtomicReference<>();
    // This map contains original counts before those are consumed to release incoming FlowFiles.
    final HashMap<String, Long> originalSignalCounts = new HashMap<>();
    final Consumer<FlowFile> transferToFailure = flowFile -> {
        flowFile = session.penalize(flowFile);
        getFlowFilesFor.apply(REL_FAILURE).add(flowFile);
    };
    final Consumer<Entry<Relationship, List<FlowFile>>> transferFlowFiles = routedFlowFiles -> {
        Relationship relationship = routedFlowFiles.getKey();
        if (REL_WAIT.equals(relationship)) {
            final String waitMode = context.getProperty(WAIT_MODE).getValue();
            if (WAIT_MODE_KEEP_IN_UPSTREAM.getValue().equals(waitMode)) {
                // Transfer to self.
                relationship = Relationship.SELF;
            }
        }
        final List<FlowFile> flowFilesWithSignalAttributes = routedFlowFiles.getValue().stream().map(f -> copySignalAttributes(session, f, signalRef.get(), originalSignalCounts, replaceOriginalAttributes)).collect(Collectors.toList());
        session.transfer(flowFilesWithSignalAttributes, relationship);
    };
    failedFilteringFlowFiles.forEach(f -> {
        flowFiles.remove(f);
        transferToFailure.accept(f);
    });
    if (flowFiles.isEmpty()) {
        // If there was nothing but failed FlowFiles while filtering, transfer those and end immediately.
        processedFlowFiles.entrySet().forEach(transferFlowFiles);
        return;
    }
    // the cache client used to interact with the distributed cache
    final AtomicDistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(AtomicDistributedMapCacheClient.class);
    final WaitNotifyProtocol protocol = new WaitNotifyProtocol(cache);
    final String signalId = targetSignalId.get();
    final Signal signal;
    // get notifying signal
    try {
        signal = protocol.getSignal(signalId);
        if (signal != null) {
            originalSignalCounts.putAll(signal.getCounts());
        }
        signalRef.set(signal);
    } catch (final IOException e) {
        throw new ProcessException(String.format("Failed to get signal for %s due to %s", signalId, e), e);
    }
    String targetCounterName = null;
    long targetCount = 1;
    int releasableFlowFileCount = 1;
    final List<FlowFile> candidates = new ArrayList<>();
    for (FlowFile flowFile : flowFiles) {
        // Set wait start timestamp if it's not set yet
        String waitStartTimestamp = flowFile.getAttribute(WAIT_START_TIMESTAMP);
        if (waitStartTimestamp == null) {
            waitStartTimestamp = String.valueOf(System.currentTimeMillis());
            flowFile = session.putAttribute(flowFile, WAIT_START_TIMESTAMP, waitStartTimestamp);
        }
        long lWaitStartTimestamp;
        try {
            lWaitStartTimestamp = Long.parseLong(waitStartTimestamp);
        } catch (NumberFormatException nfe) {
            logger.error("{} has an invalid value '{}' on FlowFile {}", new Object[] { WAIT_START_TIMESTAMP, waitStartTimestamp, flowFile });
            transferToFailure.accept(flowFile);
            continue;
        }
        // check for expiration
        long expirationDuration = context.getProperty(EXPIRATION_DURATION).asTimePeriod(TimeUnit.MILLISECONDS);
        long now = System.currentTimeMillis();
        if (now > (lWaitStartTimestamp + expirationDuration)) {
            logger.info("FlowFile {} expired after {}ms", new Object[] { flowFile, (now - lWaitStartTimestamp) });
            getFlowFilesFor.apply(REL_EXPIRED).add(flowFile);
            continue;
        }
        // If there's no signal yet, then we don't have to evaluate target counts. Return immediately.
        if (signal == null) {
            if (logger.isDebugEnabled()) {
                logger.debug("No release signal found for {} on FlowFile {} yet", new Object[] { signalId, flowFile });
            }
            getFlowFilesFor.apply(REL_WAIT).add(flowFile);
            continue;
        }
        // Fix target counter name and count from current FlowFile, if those are not set yet.
        if (candidates.isEmpty()) {
            targetCounterName = context.getProperty(SIGNAL_COUNTER_NAME).evaluateAttributeExpressions(flowFile).getValue();
            try {
                targetCount = Long.valueOf(context.getProperty(TARGET_SIGNAL_COUNT).evaluateAttributeExpressions(flowFile).getValue());
            } catch (final NumberFormatException e) {
                transferToFailure.accept(flowFile);
                logger.error("Failed to parse targetCount when processing {} due to {}", new Object[] { flowFile, e }, e);
                continue;
            }
            try {
                releasableFlowFileCount = Integer.valueOf(context.getProperty(RELEASABLE_FLOWFILE_COUNT).evaluateAttributeExpressions(flowFile).getValue());
            } catch (final NumberFormatException e) {
                transferToFailure.accept(flowFile);
                logger.error("Failed to parse releasableFlowFileCount when processing {} due to {}", new Object[] { flowFile, e }, e);
                continue;
            }
        }
        // FlowFile is now validated and added to candidates.
        candidates.add(flowFile);
    }
    boolean waitCompleted = false;
    boolean waitProgressed = false;
    if (signal != null && !candidates.isEmpty()) {
        if (releasableFlowFileCount > 0) {
            signal.releaseCandidates(targetCounterName, targetCount, releasableFlowFileCount, candidates, released -> getFlowFilesFor.apply(REL_SUCCESS).addAll(released), waiting -> getFlowFilesFor.apply(REL_WAIT).addAll(waiting));
            waitCompleted = signal.getTotalCount() == 0 && signal.getReleasableCount() == 0;
            waitProgressed = !getFlowFilesFor.apply(REL_SUCCESS).isEmpty();
        } else {
            boolean reachedTargetCount = StringUtils.isBlank(targetCounterName) ? signal.isTotalCountReached(targetCount) : signal.isCountReached(targetCounterName, targetCount);
            if (reachedTargetCount) {
                getFlowFilesFor.apply(REL_SUCCESS).addAll(candidates);
            } else {
                getFlowFilesFor.apply(REL_WAIT).addAll(candidates);
            }
        }
    }
    // Transfer FlowFiles.
    processedFlowFiles.entrySet().forEach(transferFlowFiles);
    // Update signal if needed.
    try {
        if (waitCompleted) {
            protocol.complete(signalId);
        } else if (waitProgressed) {
            protocol.replace(signal);
        }
    } catch (final IOException e) {
        session.rollback();
        throw new ProcessException(String.format("Unable to communicate with cache while updating %s due to %s", signalId, e), e);
    }
}
Also used : StandardValidators(org.apache.nifi.processor.util.StandardValidators) FlowFileFilter(org.apache.nifi.processor.FlowFileFilter) CapabilityDescription(org.apache.nifi.annotation.documentation.CapabilityDescription) ResultType(org.apache.nifi.expression.AttributeExpression.ResultType) HashMap(java.util.HashMap) EventDriven(org.apache.nifi.annotation.behavior.EventDriven) ACCEPT_AND_CONTINUE(org.apache.nifi.processor.FlowFileFilter.FlowFileFilterResult.ACCEPT_AND_CONTINUE) ComponentLog(org.apache.nifi.logging.ComponentLog) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) Supplier(java.util.function.Supplier) StringUtils(org.apache.commons.lang3.StringUtils) PropertyDescriptor(org.apache.nifi.components.PropertyDescriptor) ProcessException(org.apache.nifi.processor.exception.ProcessException) ArrayList(java.util.ArrayList) PropertyValue(org.apache.nifi.components.PropertyValue) HashSet(java.util.HashSet) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) WritesAttributes(org.apache.nifi.annotation.behavior.WritesAttributes) Relationship(org.apache.nifi.processor.Relationship) Map(java.util.Map) Requirement(org.apache.nifi.annotation.behavior.InputRequirement.Requirement) ACCEPT_AND_TERMINATE(org.apache.nifi.processor.FlowFileFilter.FlowFileFilterResult.ACCEPT_AND_TERMINATE) AtomicDistributedMapCacheClient(org.apache.nifi.distributed.cache.client.AtomicDistributedMapCacheClient) Signal(org.apache.nifi.processors.standard.WaitNotifyProtocol.Signal) FlowFile(org.apache.nifi.flowfile.FlowFile) ProcessContext(org.apache.nifi.processor.ProcessContext) Set(java.util.Set) IOException(java.io.IOException) ProcessSession(org.apache.nifi.processor.ProcessSession) WritesAttribute(org.apache.nifi.annotation.behavior.WritesAttribute) SeeAlso(org.apache.nifi.annotation.documentation.SeeAlso) AllowableValue(org.apache.nifi.components.AllowableValue) Collectors(java.util.stream.Collectors) REJECT_AND_CONTINUE(org.apache.nifi.processor.FlowFileFilter.FlowFileFilterResult.REJECT_AND_CONTINUE) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) List(java.util.List) InputRequirement(org.apache.nifi.annotation.behavior.InputRequirement) SupportsBatching(org.apache.nifi.annotation.behavior.SupportsBatching) Entry(java.util.Map.Entry) AbstractProcessor(org.apache.nifi.processor.AbstractProcessor) Tags(org.apache.nifi.annotation.documentation.Tags) Collections(java.util.Collections)
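The cache interaction above is one half of a simple handshake: a Notify side increments named counters under a signal id, and this Wait side fetches the Signal and compares it against the target count. A minimal sketch of that handshake in isolation, assuming an AtomicDistributedMapCacheClient named cache is available (the signal id and counter name are illustrative):

void waitNotifySketch(final AtomicDistributedMapCacheClient cache) throws IOException {
    final WaitNotifyProtocol protocol = new WaitNotifyProtocol(cache);

    // The Notify side (e.g. NiFi's Notify processor) increments a counter under the signal id.
    protocol.notify("job-123", "completed", 1, null);

    // The Wait side fetches the signal and checks whether the target count has been
    // reached, exactly as onTrigger does above before routing to success or wait.
    final Signal signal = protocol.getSignal("job-123");
    if (signal != null && signal.isCountReached("completed", 3)) {
        protocol.complete("job-123"); // done: remove the signal from the cache
    }
}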

Example 95 with ProcessContext

use of org.apache.nifi.processor.ProcessContext in project nifi by apache.

the class TestJmsConsumer method testMap2FlowFileTextMessage.

@Test
public void testMap2FlowFileTextMessage() throws Exception {
    TestRunner runner = TestRunners.newTestRunner(GetJMSQueue.class);
    TextMessage textMessage = new ActiveMQTextMessage();
    String payload = "Hello world!";
    textMessage.setText(payload);
    ProcessContext context = runner.getProcessContext();
    ProcessSession session = runner.getProcessSessionFactory().createSession();
    ProcessorInitializationContext pic = new MockProcessorInitializationContext(runner.getProcessor(), (MockProcessContext) runner.getProcessContext());
    JmsProcessingSummary summary = JmsConsumer.map2FlowFile(context, session, textMessage, true, pic.getLogger());
    assertEquals("TextMessage content length should equal to FlowFile content size", payload.length(), summary.getLastFlowFile().getSize());
    final byte[] buffer = new byte[payload.length()];
    runner.clearTransferState();
    session.read(summary.getLastFlowFile(), new InputStreamCallback() {

        @Override
        public void process(InputStream in) throws IOException {
            StreamUtils.fillBuffer(in, buffer, false);
        }
    });
    String contentString = new String(buffer, "UTF-8");
    assertEquals("", payload, contentString);
}
Also used : ProcessSession(org.apache.nifi.processor.ProcessSession) TestRunner(org.apache.nifi.util.TestRunner) InputStream(java.io.InputStream) MockProcessorInitializationContext(org.apache.nifi.util.MockProcessorInitializationContext) JmsProcessingSummary(org.apache.nifi.processors.standard.util.JmsProcessingSummary) IOException(java.io.IOException) ProcessContext(org.apache.nifi.processor.ProcessContext) MockProcessContext(org.apache.nifi.util.MockProcessContext) ProcessorInitializationContext(org.apache.nifi.processor.ProcessorInitializationContext) ActiveMQTextMessage(org.apache.activemq.command.ActiveMQTextMessage) InputStreamCallback(org.apache.nifi.processor.io.InputStreamCallback) TextMessage(javax.jms.TextMessage) Test(org.junit.Test)
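Since InputStreamCallback declares a single method, the anonymous class above can be collapsed to a lambda; a sketch of the same content check in that style:

session.read(summary.getLastFlowFile(), in -> StreamUtils.fillBuffer(in, buffer, false));
assertEquals("FlowFile content should match the TextMessage payload", payload, new String(buffer, StandardCharsets.UTF_8));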

Aggregations

ProcessContext (org.apache.nifi.processor.ProcessContext): 115
Test (org.junit.Test): 67
TestRunner (org.apache.nifi.util.TestRunner): 56
ProcessSession (org.apache.nifi.processor.ProcessSession): 49
FlowFile (org.apache.nifi.flowfile.FlowFile): 40
MockFlowFile (org.apache.nifi.util.MockFlowFile): 39
HashSet (java.util.HashSet): 35
Relationship (org.apache.nifi.processor.Relationship): 35
List (java.util.List): 34
PropertyDescriptor (org.apache.nifi.components.PropertyDescriptor): 34
ArrayList (java.util.ArrayList): 33
Set (java.util.Set): 33
Tags (org.apache.nifi.annotation.documentation.Tags): 31
IOException (java.io.IOException): 30
HashMap (java.util.HashMap): 30
CapabilityDescription (org.apache.nifi.annotation.documentation.CapabilityDescription): 30
ProcessException (org.apache.nifi.processor.exception.ProcessException): 30
Collections (java.util.Collections): 29
InputRequirement (org.apache.nifi.annotation.behavior.InputRequirement): 29
ProcessSessionFactory (org.apache.nifi.processor.ProcessSessionFactory): 29