Example 21 with ProcessSessionFactory

use of org.apache.nifi.processor.ProcessSessionFactory in project nifi by apache.

the class ConnectableTask method invoke.

public InvocationResult invoke() {
    if (scheduleState.isTerminated()) {
        return InvocationResult.DO_NOT_YIELD;
    }
    // make sure processor is not yielded
    if (isYielded()) {
        return InvocationResult.DO_NOT_YIELD;
    }
    // make sure that either we're not clustered or this processor runs on all nodes or that this is the primary node
    if (!isRunOnCluster(flowController)) {
        return InvocationResult.DO_NOT_YIELD;
    }
    // Make sure the processor has work to do, i.e. that at least one of these holds:
    // * It is triggered even when its input queues are empty (@TriggerWhenEmpty)
    // * It has data in an incoming connection
    // * It has no incoming connections
    // * All incoming connections are self-loops
    if (!isWorkToDo()) {
        return InvocationResult.yield("No work to do");
    }
    if (numRelationships > 0) {
        final int requiredNumberOfAvailableRelationships = connectable.isTriggerWhenAnyDestinationAvailable() ? 1 : numRelationships;
        if (!repositoryContext.isRelationshipAvailabilitySatisfied(requiredNumberOfAvailableRelationships)) {
            return InvocationResult.yield("Backpressure Applied");
        }
    }
    final long batchNanos = connectable.getRunDuration(TimeUnit.NANOSECONDS);
    final ProcessSessionFactory sessionFactory;
    final StandardProcessSession rawSession;
    final boolean batch;
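    // Batch multiple invocations into one session when the component supports session batching and a run duration is configured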
    if (connectable.isSessionBatchingSupported() && batchNanos > 0L) {
        rawSession = new StandardProcessSession(repositoryContext, scheduleState::isTerminated);
        sessionFactory = new BatchingSessionFactory(rawSession);
        batch = true;
    } else {
        rawSession = null;
        sessionFactory = new StandardProcessSessionFactory(repositoryContext, scheduleState::isTerminated);
        batch = false;
    }
    final ActiveProcessSessionFactory activeSessionFactory = new WeakHashMapProcessSessionFactory(sessionFactory);
    scheduleState.incrementActiveThreadCount(activeSessionFactory);
    final long startNanos = System.nanoTime();
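    // If backpressure engages mid-batch, cut the batch short after 1/25 of the configured run duration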
    final long finishIfBackpressureEngaged = startNanos + (batchNanos / 25L);
    final long finishNanos = startNanos + batchNanos;
    int invocationCount = 0;
    final String originalThreadName = Thread.currentThread().getName();
    try {
        try (final AutoCloseable ncl = NarCloseable.withComponentNarLoader(connectable.getRunnableComponent().getClass(), connectable.getIdentifier())) {
            boolean shouldRun = connectable.getScheduledState() == ScheduledState.RUNNING;
            while (shouldRun) {
                connectable.onTrigger(processContext, activeSessionFactory);
                invocationCount++;
                if (!batch) {
                    return InvocationResult.DO_NOT_YIELD;
                }
                final long nanoTime = System.nanoTime();
                if (nanoTime > finishNanos) {
                    return InvocationResult.DO_NOT_YIELD;
                }
                if (nanoTime > finishIfBackpressureEngaged && isBackPressureEngaged()) {
                    return InvocationResult.DO_NOT_YIELD;
                }
                if (connectable.getScheduledState() != ScheduledState.RUNNING) {
                    break;
                }
                if (!isWorkToDo()) {
                    break;
                }
                if (isYielded()) {
                    break;
                }
                if (numRelationships > 0) {
                    final int requiredNumberOfAvailableRelationships = connectable.isTriggerWhenAnyDestinationAvailable() ? 1 : numRelationships;
                    shouldRun = repositoryContext.isRelationshipAvailabilitySatisfied(requiredNumberOfAvailableRelationships);
                }
            }
        } catch (final TerminatedTaskException tte) {
            final ComponentLog procLog = new SimpleProcessLogger(connectable.getIdentifier(), connectable.getRunnableComponent());
            procLog.info("Failed to process session due to task being terminated", new Object[] { tte });
        } catch (final ProcessException pe) {
            final ComponentLog procLog = new SimpleProcessLogger(connectable.getIdentifier(), connectable.getRunnableComponent());
            procLog.error("Failed to process session due to {}", new Object[] { pe });
        } catch (final Throwable t) {
            // Use ComponentLog to log the event so that a bulletin will be created for this processor
            final ComponentLog procLog = new SimpleProcessLogger(connectable.getIdentifier(), connectable.getRunnableComponent());
            procLog.error("{} failed to process session due to {}; Processor Administratively Yielded for {}", new Object[] { connectable.getRunnableComponent(), t, schedulingAgent.getAdministrativeYieldDuration() }, t);
            logger.warn("Administratively Yielding {} due to uncaught Exception: {}", connectable.getRunnableComponent(), t.toString(), t);
            connectable.yield(schedulingAgent.getAdministrativeYieldDuration(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
        }
    } finally {
        try {
            if (batch) {
                try {
                    rawSession.commit();
                } catch (final Exception e) {
                    final ComponentLog procLog = new SimpleProcessLogger(connectable.getIdentifier(), connectable.getRunnableComponent());
                    procLog.error("Failed to commit session {} due to {}; rolling back", new Object[] { rawSession, e.toString() }, e);
                    try {
                        rawSession.rollback(true);
                    } catch (final Exception e1) {
                        procLog.error("Failed to roll back session {} due to {}", new Object[] { rawSession, e.toString() }, e);
                    }
                }
            }
            final long processingNanos = System.nanoTime() - startNanos;
            try {
                final StandardFlowFileEvent procEvent = new StandardFlowFileEvent(connectable.getIdentifier());
                procEvent.setProcessingNanos(processingNanos);
                procEvent.setInvocations(invocationCount);
                repositoryContext.getFlowFileEventRepository().updateRepository(procEvent);
            } catch (final IOException e) {
                logger.error("Unable to update FlowFileEvent Repository for {}; statistics may be inaccurate. Reason for failure: {}", connectable.getRunnableComponent(), e.toString());
                logger.error("", e);
            }
        } finally {
            scheduleState.decrementActiveThreadCount(activeSessionFactory);
            Thread.currentThread().setName(originalThreadName);
        }
    }
    return InvocationResult.DO_NOT_YIELD;
}
Also used : WeakHashMapProcessSessionFactory(org.apache.nifi.controller.repository.WeakHashMapProcessSessionFactory) TerminatedTaskException(org.apache.nifi.processor.exception.TerminatedTaskException) ActiveProcessSessionFactory(org.apache.nifi.controller.repository.ActiveProcessSessionFactory) IOException(java.io.IOException) ComponentLog(org.apache.nifi.logging.ComponentLog) ProcessException(org.apache.nifi.processor.exception.ProcessException) StandardFlowFileEvent(org.apache.nifi.controller.repository.metrics.StandardFlowFileEvent) ProcessSessionFactory(org.apache.nifi.processor.ProcessSessionFactory) StandardProcessSessionFactory(org.apache.nifi.controller.repository.StandardProcessSessionFactory) StandardProcessSession(org.apache.nifi.controller.repository.StandardProcessSession) SimpleProcessLogger(org.apache.nifi.processor.SimpleProcessLogger) BatchingSessionFactory(org.apache.nifi.controller.repository.BatchingSessionFactory)
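
For orientation, here is the component side of the contract that invoke() drives. This is a minimal, hypothetical processor (the class name is illustrative, not from the NiFi codebase), using only the standard NiFi processor API: obtain a session from the factory passed to onTrigger, do the work, then commit or roll back.

import java.util.Collections;
import java.util.Set;
import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.AbstractSessionFactoryProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.ProcessSessionFactory;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;

// Illustrative sketch of the session lifecycle that ConnectableTask.invoke() drives.
public class SessionLifecycleSketch extends AbstractSessionFactoryProcessor {

    static final Relationship REL_SUCCESS = new Relationship.Builder().name("success").build();

    @Override
    public Set<Relationship> getRelationships() {
        return Collections.singleton(REL_SUCCESS);
    }

    @Override
    public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException {
        // With BatchingSessionFactory the commit below is deferred until the batch ends;
        // with StandardProcessSessionFactory it takes effect immediately.
        final ProcessSession session = sessionFactory.createSession();
        try {
            final FlowFile flowFile = session.get();
            if (flowFile == null) {
                // No work to do; invoke()'s isWorkToDo() check normally prevents reaching this.
                return;
            }
            // ... transform or route the flow file here ...
            session.transfer(flowFile, REL_SUCCESS);
            session.commit();
        } catch (final Throwable t) {
            session.rollback(true); // penalize and requeue, as the finally block in invoke() does
            throw t;
        }
    }
}

Whether commit() applies immediately or is deferred to the rawSession.commit() in invoke()'s finally block depends on which factory the batching decision above selected.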

Example 22 with ProcessSessionFactory

use of org.apache.nifi.processor.ProcessSessionFactory in project nifi by apache.

the class GenerateTableFetch method onTrigger.

@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException {
    // Fetch the column/table info once (if the table name and max value columns are not dynamic). Otherwise do the setup later
    if (!isDynamicTableName && !isDynamicMaxValues && !setupComplete.get()) {
        super.setup(context);
    }
    ProcessSession session = sessionFactory.createSession();
    FlowFile fileToProcess = null;
    if (context.hasIncomingConnection()) {
        fileToProcess = session.get();
        if (fileToProcess == null) {
            // Incoming connection with no flow file available, do no work (see capability description)
            return;
        }
    }
    final ComponentLog logger = getLogger();
    final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
    final DatabaseAdapter dbAdapter = dbAdapters.get(context.getProperty(DB_TYPE).getValue());
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions(fileToProcess).getValue();
    final String columnNames = context.getProperty(COLUMN_NAMES).evaluateAttributeExpressions(fileToProcess).getValue();
    final String maxValueColumnNames = context.getProperty(MAX_VALUE_COLUMN_NAMES).evaluateAttributeExpressions(fileToProcess).getValue();
    final int partitionSize = context.getProperty(PARTITION_SIZE).evaluateAttributeExpressions(fileToProcess).asInteger();
    final String customWhereClause = context.getProperty(WHERE_CLAUSE).evaluateAttributeExpressions(fileToProcess).getValue();
    final StateManager stateManager = context.getStateManager();
    final StateMap stateMap;
    FlowFile finalFileToProcess = fileToProcess;
    try {
        stateMap = stateManager.getState(Scope.CLUSTER);
    } catch (final IOException ioe) {
        logger.error("Failed to retrieve observed maximum values from the State Manager. Will not perform " + "query until this is accomplished.", ioe);
        context.yield();
        return;
    }
    try {
        // Make a mutable copy of the current state property map. This will be updated by the result row callback, and eventually
        // set as the current state map (after the session has been committed)
        final Map<String, String> statePropertyMap = new HashMap<>(stateMap.toMap());
        // If an initial max value for column(s) has been specified using properties, and this column is not in the state manager, sync them to the state property map
        for (final Map.Entry<String, String> maxProp : maxValueProperties.entrySet()) {
            String maxPropKey = maxProp.getKey().toLowerCase();
            String fullyQualifiedMaxPropKey = getStateKey(tableName, maxPropKey);
            if (!statePropertyMap.containsKey(fullyQualifiedMaxPropKey)) {
                String newMaxPropValue;
                // If the value is not present under the fully-qualified key, it may (under a previous scheme)
                // have been stored under a key that is only the column name. Fall back to check the column name,
                // but store the new initial max value under the fully-qualified key.
                if (statePropertyMap.containsKey(maxPropKey)) {
                    newMaxPropValue = statePropertyMap.get(maxPropKey);
                } else {
                    newMaxPropValue = maxProp.getValue();
                }
                statePropertyMap.put(fullyQualifiedMaxPropKey, newMaxPropValue);
            }
        }
        // Build a WHERE clause with maximum-value columns (if they exist), and a list of column names that will contain MAX(<column>) aliases. The
        // executed SQL query will retrieve the count of all records after the filter(s) have been applied, as well as the new maximum values for the
        // specified columns. This allows the processor to generate the correctly partitioned SQL statements as well as to update the state with the
        // latest observed maximum values.
        String whereClause = null;
        List<String> maxValueColumnNameList = StringUtils.isEmpty(maxValueColumnNames) ? new ArrayList<>(0) : Arrays.asList(maxValueColumnNames.split("\\s*,\\s*"));
        List<String> maxValueClauses = new ArrayList<>(maxValueColumnNameList.size());
        String columnsClause = null;
        List<String> maxValueSelectColumns = new ArrayList<>(maxValueColumnNameList.size() + 1);
        maxValueSelectColumns.add("COUNT(*)");
        // For each maximum-value column, get a WHERE filter and a MAX(column) alias
        IntStream.range(0, maxValueColumnNameList.size()).forEach((index) -> {
            String colName = maxValueColumnNameList.get(index);
            maxValueSelectColumns.add("MAX(" + colName + ") " + colName);
            String maxValue = getColumnStateMaxValue(tableName, statePropertyMap, colName);
            if (!StringUtils.isEmpty(maxValue)) {
                if (columnTypeMap.isEmpty() || getColumnType(tableName, colName) == null) {
                    // The column type cache is empty (e.g. after an instance restart), so re-cache this column's type
                    super.setup(context, false, finalFileToProcess);
                }
                Integer type = getColumnType(tableName, colName);
                // Add a condition for the WHERE clause
                maxValueClauses.add(colName + (index == 0 ? " > " : " >= ") + getLiteralByType(type, maxValue, dbAdapter.getName()));
            }
        });
        if (customWhereClause != null) {
            // Add the custom WHERE clause (if defined) to the list of existing clauses.
            maxValueClauses.add("(" + customWhereClause + ")");
        }
        whereClause = StringUtils.join(maxValueClauses, " AND ");
        columnsClause = StringUtils.join(maxValueSelectColumns, ", ");
        // Build a SELECT query with maximum-value columns (if present)
        final String selectQuery = dbAdapter.getSelectStatement(tableName, columnsClause, whereClause, null, null, null);
        long rowCount = 0;
        try (final Connection con = dbcpService.getConnection();
            final Statement st = con.createStatement()) {
            final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions(fileToProcess).asTimePeriod(TimeUnit.SECONDS).intValue();
            // timeout in seconds
            st.setQueryTimeout(queryTimeout);
            logger.debug("Executing {}", new Object[] { selectQuery });
            final ResultSet resultSet = st.executeQuery(selectQuery);
            if (resultSet.next()) {
                // Total row count is in the first column
                rowCount = resultSet.getLong(1);
                // Update the state map with the newly-observed maximum values
                ResultSetMetaData rsmd = resultSet.getMetaData();
                for (int i = 2; i <= rsmd.getColumnCount(); i++) {
                    // Some JDBC drivers consider a column's name and its label to be different things.
                    // Since this column has been aliased, check the label first;
                    // if there is no label, fall back to the column name.
                    String resultColumnName = (StringUtils.isNotEmpty(rsmd.getColumnLabel(i)) ? rsmd.getColumnLabel(i) : rsmd.getColumnName(i)).toLowerCase();
                    String fullyQualifiedStateKey = getStateKey(tableName, resultColumnName);
                    String resultColumnCurrentMax = statePropertyMap.get(fullyQualifiedStateKey);
                    if (StringUtils.isEmpty(resultColumnCurrentMax) && !isDynamicTableName) {
                        // If we can't find the value at the fully-qualified key name and the table name is static, it is possible (under a previous scheme)
                        // the value has been stored under a key that is only the column name. Fall back to check the column name; either way, when a new
                        // maximum value is observed, it will be stored under the fully-qualified key from then on.
                        resultColumnCurrentMax = statePropertyMap.get(resultColumnName);
                    }
                    int type = rsmd.getColumnType(i);
                    if (isDynamicTableName) {
                        // We haven't pre-populated the column type map if the table name is dynamic, so do it here
                        columnTypeMap.put(fullyQualifiedStateKey, type);
                    }
                    try {
                        String newMaxValue = getMaxValueFromRow(resultSet, i, type, resultColumnCurrentMax, dbAdapter.getName());
                        if (newMaxValue != null) {
                            statePropertyMap.put(fullyQualifiedStateKey, newMaxValue);
                        }
                    } catch (ParseException | IOException pie) {
                        // Fail the whole thing here before we start creating flow files and such
                        throw new ProcessException(pie);
                    }
                }
            } else {
                // Something is very wrong here, one row (even if count is zero) should be returned
                throw new SQLException("No rows returned from metadata query: " + selectQuery);
            }
            // for each maximum-value column get a right bounding WHERE condition
            IntStream.range(0, maxValueColumnNameList.size()).forEach((index) -> {
                String colName = maxValueColumnNameList.get(index);
                maxValueSelectColumns.add("MAX(" + colName + ") " + colName);
                String maxValue = getColumnStateMaxValue(tableName, statePropertyMap, colName);
                if (!StringUtils.isEmpty(maxValue)) {
                    if (columnTypeMap.isEmpty() || getColumnType(tableName, colName) == null) {
                        // The column type cache is empty (e.g. after an instance restart), so re-cache this column's type
                        super.setup(context, false, finalFileToProcess);
                    }
                    Integer type = getColumnType(tableName, colName);
                    // Add a condition for the WHERE clause
                    maxValueClauses.add(colName + " <= " + getLiteralByType(type, maxValue, dbAdapter.getName()));
                }
            });
            // Update the WHERE list to include the new right-hand boundaries
            whereClause = StringUtils.join(maxValueClauses, " AND ");
            final long numberOfFetches = (partitionSize == 0) ? 1 : (rowCount / partitionSize) + (rowCount % partitionSize == 0 ? 0 : 1);
            // Generate SQL statements to read "pages" of data
            for (long i = 0; i < numberOfFetches; i++) {
                Long limit = partitionSize == 0 ? null : (long) partitionSize;
                Long offset = partitionSize == 0 ? null : i * partitionSize;
                final String maxColumnNames = StringUtils.join(maxValueColumnNameList, ", ");
                final String query = dbAdapter.getSelectStatement(tableName, columnNames, whereClause, maxColumnNames, limit, offset);
                FlowFile sqlFlowFile = (fileToProcess == null) ? session.create() : session.create(fileToProcess);
                sqlFlowFile = session.write(sqlFlowFile, out -> out.write(query.getBytes()));
                sqlFlowFile = session.putAttribute(sqlFlowFile, "generatetablefetch.tableName", tableName);
                if (columnNames != null) {
                    sqlFlowFile = session.putAttribute(sqlFlowFile, "generatetablefetch.columnNames", columnNames);
                }
                if (StringUtils.isNotBlank(whereClause)) {
                    sqlFlowFile = session.putAttribute(sqlFlowFile, "generatetablefetch.whereClause", whereClause);
                }
                if (StringUtils.isNotBlank(maxColumnNames)) {
                    sqlFlowFile = session.putAttribute(sqlFlowFile, "generatetablefetch.maxColumnNames", maxColumnNames);
                }
                sqlFlowFile = session.putAttribute(sqlFlowFile, "generatetablefetch.limit", String.valueOf(limit));
                if (partitionSize != 0) {
                    sqlFlowFile = session.putAttribute(sqlFlowFile, "generatetablefetch.offset", String.valueOf(offset));
                }
                session.transfer(sqlFlowFile, REL_SUCCESS);
            }
            if (fileToProcess != null) {
                session.remove(fileToProcess);
            }
        } catch (SQLException e) {
            if (fileToProcess != null) {
                logger.error("Unable to execute SQL select query {} due to {}, routing {} to failure", new Object[] { selectQuery, e, fileToProcess });
                fileToProcess = session.putAttribute(fileToProcess, "generatetablefetch.sql.error", e.getMessage());
                session.transfer(fileToProcess, REL_FAILURE);
            } else {
                logger.error("Unable to execute SQL select query {} due to {}", new Object[] { selectQuery, e });
                throw new ProcessException(e);
            }
        }
        session.commit();
        try {
            // Update the state
            stateManager.setState(statePropertyMap, Scope.CLUSTER);
        } catch (IOException ioe) {
            logger.error("{} failed to update State Manager, observed maximum values will not be recorded. " + "Also, any generated SQL statements may be duplicated.", new Object[] { this, ioe });
        }
    } catch (final ProcessException pe) {
        // Log the cause of the ProcessException if it is available
        Throwable t = (pe.getCause() == null ? pe : pe.getCause());
        logger.error("Error during processing: {}", new Object[] { t.getMessage() }, t);
        session.rollback();
        context.yield();
    }
}
Also used : ProcessSession(org.apache.nifi.processor.ProcessSession) StandardValidators(org.apache.nifi.processor.util.StandardValidators) IntStream(java.util.stream.IntStream) Arrays(java.util.Arrays) Connection(java.sql.Connection) CapabilityDescription(org.apache.nifi.annotation.documentation.CapabilityDescription) ValidationContext(org.apache.nifi.components.ValidationContext) HashMap(java.util.HashMap) ComponentLog(org.apache.nifi.logging.ComponentLog) StringUtils(org.apache.commons.lang3.StringUtils) PropertyDescriptor(org.apache.nifi.components.PropertyDescriptor) ProcessException(org.apache.nifi.processor.exception.ProcessException) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) SQLException(java.sql.SQLException) WritesAttributes(org.apache.nifi.annotation.behavior.WritesAttributes) Scope(org.apache.nifi.components.state.Scope) Relationship(org.apache.nifi.processor.Relationship) ResultSet(java.sql.ResultSet) Map(java.util.Map) Requirement(org.apache.nifi.annotation.behavior.InputRequirement.Requirement) ParseException(java.text.ParseException) TriggerSerially(org.apache.nifi.annotation.behavior.TriggerSerially) ValidationResult(org.apache.nifi.components.ValidationResult) DatabaseAdapter(org.apache.nifi.processors.standard.db.DatabaseAdapter) FlowFile(org.apache.nifi.flowfile.FlowFile) StateManager(org.apache.nifi.components.state.StateManager) Collection(java.util.Collection) ProcessContext(org.apache.nifi.processor.ProcessContext) Set(java.util.Set) IOException(java.io.IOException) WritesAttribute(org.apache.nifi.annotation.behavior.WritesAttribute) SeeAlso(org.apache.nifi.annotation.documentation.SeeAlso) ProcessSessionFactory(org.apache.nifi.processor.ProcessSessionFactory) StateMap(org.apache.nifi.components.state.StateMap) TimeUnit(java.util.concurrent.TimeUnit) InputRequirement(org.apache.nifi.annotation.behavior.InputRequirement) Stateful(org.apache.nifi.annotation.behavior.Stateful) OnScheduled(org.apache.nifi.annotation.lifecycle.OnScheduled) List(java.util.List) DynamicProperty(org.apache.nifi.annotation.behavior.DynamicProperty) Statement(java.sql.Statement) Tags(org.apache.nifi.annotation.documentation.Tags) DBCPService(org.apache.nifi.dbcp.DBCPService) Collections(java.util.Collections) ResultSetMetaData(java.sql.ResultSetMetaData)
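
The paging loop near the end of onTrigger is the heart of this processor: the COUNT(*) result is split into ceil(rowCount / partitionSize) pages, each rendered as a SELECT with a LIMIT/OFFSET pair. A standalone sketch of just that arithmetic (class name and sample values are illustrative):

// Illustrative paging arithmetic, mirroring the loop in onTrigger above.
// A partition size of 0 means a single unbounded fetch (limit/offset stay null).
public class PagingSketch {
    public static void main(final String[] args) {
        final long rowCount = 10;    // as returned by the COUNT(*) metadata query
        final int partitionSize = 4; // as configured via the PARTITION_SIZE property
        final long numberOfFetches = (partitionSize == 0)
                ? 1
                : (rowCount / partitionSize) + (rowCount % partitionSize == 0 ? 0 : 1);
        for (long i = 0; i < numberOfFetches; i++) {
            final Long limit = partitionSize == 0 ? null : (long) partitionSize;
            final Long offset = partitionSize == 0 ? null : i * partitionSize;
            // rowCount=10, partitionSize=4 yields three pages: (4,0), (4,4), (4,8)
            System.out.printf("page %d: limit=%s offset=%s%n", i, limit, offset);
        }
    }
}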

Example 23 with ProcessSessionFactory

use of org.apache.nifi.processor.ProcessSessionFactory in project nifi by apache.

the class TestListenHTTP method startWebServerAndSendMessages.

private void startWebServerAndSendMessages(final List<String> messages, int returnCode) throws Exception {
    final ProcessSessionFactory processSessionFactory = runner.getProcessSessionFactory();
    final ProcessContext context = runner.getProcessContext();
    proc.createHttpServer(context);
    Runnable sendMessagesToWebServer = () -> {
        try {
            for (final String message : messages) {
                if (executePOST(message) != returnCode) {
                    fail("HTTP POST failed.");
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
            fail("Not expecting error here.");
        }
    };
    new Thread(sendMessagesToWebServer).start();
    long responseTimeout = 10000;
    int numTransferred = 0;
    long startTime = System.currentTimeMillis();
    while (numTransferred < messages.size() && (System.currentTimeMillis() - startTime < responseTimeout)) {
        proc.onTrigger(context, processSessionFactory);
        numTransferred = runner.getFlowFilesForRelationship(RELATIONSHIP_SUCCESS).size();
        Thread.sleep(100);
    }
    runner.assertTransferCount(ListenHTTP.RELATIONSHIP_SUCCESS, messages.size());
}
Also used : ProcessSessionFactory(org.apache.nifi.processor.ProcessSessionFactory) ProcessContext(org.apache.nifi.processor.ProcessContext) InitializationException(org.apache.nifi.reporting.InitializationException) IOException(java.io.IOException)
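
The trigger-and-poll loop in this test recurs throughout these examples. A hypothetical helper that extracts it, assuming only the standard nifi-mock TestRunner API (the class and method names are illustrative):

import org.apache.nifi.processor.AbstractSessionFactoryProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSessionFactory;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.util.TestRunner;

final class TriggerPolling {

    // Hypothetical helper: call onTrigger repeatedly until `expected` flow files
    // have reached `relationship`, or `timeoutMillis` elapses. Returns the count seen.
    static int triggerUntilTransferred(final TestRunner runner, final AbstractSessionFactoryProcessor proc,
            final Relationship relationship, final int expected, final long timeoutMillis) throws InterruptedException {
        final ProcessSessionFactory sessionFactory = runner.getProcessSessionFactory();
        final ProcessContext context = runner.getProcessContext();
        final long start = System.currentTimeMillis();
        int transferred = runner.getFlowFilesForRelationship(relationship).size();
        while (transferred < expected && System.currentTimeMillis() - start < timeoutMillis) {
            proc.onTrigger(context, sessionFactory);
            transferred = runner.getFlowFilesForRelationship(relationship).size();
            Thread.sleep(100);
        }
        return transferred;
    }
}

With such a helper, the body of startWebServerAndSendMessages would reduce to one call followed by the assertTransferCount check.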

Example 24 with ProcessSessionFactory

use of org.apache.nifi.processor.ProcessSessionFactory in project nifi by apache.

the class TestListenRELP method run.

protected void run(final List<RELPFrame> frames, final int expectedTransferred, final int expectedResponses, final SSLContextService sslContextService) throws IOException, InterruptedException {
    Socket socket = null;
    try {
        // schedule to start listening on a random port
        final ProcessSessionFactory processSessionFactory = runner.getProcessSessionFactory();
        final ProcessContext context = runner.getProcessContext();
        proc.onScheduled(context);
        // create a client connection to the port the dispatcher is listening on
        final int realPort = proc.getDispatcherPort();
        // create either a regular socket or ssl socket based on context being passed in
        if (sslContextService != null) {
            final SSLContext sslContext = sslContextService.createSSLContext(SSLContextService.ClientAuth.REQUIRED);
            socket = sslContext.getSocketFactory().createSocket("localhost", realPort);
        } else {
            socket = new Socket("localhost", realPort);
        }
        Thread.sleep(100);
        // send the frames to the port the processor is listening on
        sendFrames(frames, socket);
        long responseTimeout = 30000;
        // This first loop waits until the processor's internal queue has the expected
        // number of messages ready before proceeding; we want to guarantee they are all
        // there before onTrigger gets a chance to run
        long startTimeQueueSizeCheck = System.currentTimeMillis();
        while (proc.getQueueSize() < expectedResponses && (System.currentTimeMillis() - startTimeQueueSizeCheck < responseTimeout)) {
            Thread.sleep(100);
        }
        // want to fail here if the queue size isn't what we expect
        Assert.assertEquals(expectedResponses, proc.getQueueSize());
        // call onTrigger until we get a response for all the frames, or a certain amount of time passes
        long startTimeProcessing = System.currentTimeMillis();
        while (proc.responses.size() < expectedResponses && (System.currentTimeMillis() - startTimeProcessing < responseTimeout)) {
            proc.onTrigger(context, processSessionFactory);
            Thread.sleep(100);
        }
        // should have gotten a response for each frame
        Assert.assertEquals(expectedResponses, proc.responses.size());
        // should have transferred the expected events
        runner.assertTransferCount(ListenRELP.REL_SUCCESS, expectedTransferred);
    } finally {
        // unschedule to close connections
        proc.onUnscheduled();
        IOUtils.closeQuietly(socket);
    }
}
Also used : ProcessSessionFactory(org.apache.nifi.processor.ProcessSessionFactory) SSLContext(javax.net.ssl.SSLContext) Socket(java.net.Socket) ProcessContext(org.apache.nifi.processor.ProcessContext)
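
The plain-versus-TLS branch when opening the client socket generalizes well. A minimal sketch, assuming a ready javax.net.ssl.SSLContext whenever TLS is desired (the class name is illustrative):

import java.io.IOException;
import java.net.Socket;
import javax.net.ssl.SSLContext;

final class ClientSocketSketch {

    // Open either a plain or a TLS client socket, mirroring the branch in run() above:
    // a null SSLContext means plain TCP.
    static Socket connect(final String host, final int port, final SSLContext sslContext) throws IOException {
        return sslContext == null
                ? new Socket(host, port)
                : sslContext.getSocketFactory().createSocket(host, port);
    }
}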

Example 25 with ProcessSessionFactory

use of org.apache.nifi.processor.ProcessSessionFactory in project nifi by apache.

the class TestListenTCPRecord method runTCP.

protected void runTCP(final List<String> messages, final int expectedTransferred, final SSLContext sslContext) throws IOException, InterruptedException {
    SocketSender sender = null;
    try {
        // schedule to start listening on a random port
        final ProcessSessionFactory processSessionFactory = runner.getProcessSessionFactory();
        final ProcessContext context = runner.getProcessContext();
        proc.onScheduled(context);
        Thread.sleep(100);
        sender = new SocketSender(proc.getDispatcherPort(), "localhost", sslContext, messages, 0);
        final Thread senderThread = new Thread(sender);
        senderThread.setDaemon(true);
        senderThread.start();
        long timeout = 10000;
        // call onTrigger until we have processed all the records, or a certain amount of time passes
        int numTransferred = 0;
        long startTime = System.currentTimeMillis();
        while (numTransferred < expectedTransferred && (System.currentTimeMillis() - startTime < timeout)) {
            proc.onTrigger(context, processSessionFactory);
            numTransferred = runner.getFlowFilesForRelationship(ListenTCPRecord.REL_SUCCESS).size();
            Thread.sleep(100);
        }
        // should have transferred the expected events
        runner.assertTransferCount(ListenTCPRecord.REL_SUCCESS, expectedTransferred);
    } finally {
        // unschedule to close connections
        proc.onStopped();
        IOUtils.closeQuietly(sender);
    }
}
Also used : ProcessSessionFactory(org.apache.nifi.processor.ProcessSessionFactory) ProcessContext(org.apache.nifi.processor.ProcessContext)
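
Example 25 applies the same deadline-polling pattern, with the sender on a daemon thread so a wedged socket cannot keep the test JVM alive. A generic, hypothetical polling utility over any counter (names are illustrative):

import java.util.function.IntSupplier;

final class PollUntil {

    // Hypothetical utility: sample `counter` every 100 ms until it reaches
    // `expected` or `timeoutMillis` elapses; returns the last value observed.
    static int poll(final IntSupplier counter, final int expected, final long timeoutMillis)
            throws InterruptedException {
        final long deadline = System.currentTimeMillis() + timeoutMillis;
        int seen = counter.getAsInt();
        while (seen < expected && System.currentTimeMillis() < deadline) {
            Thread.sleep(100);
            seen = counter.getAsInt();
        }
        return seen;
    }
}

In runTCP the counter would be a lambda that triggers the processor and then reads runner.getFlowFilesForRelationship(ListenTCPRecord.REL_SUCCESS).size().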

Aggregations

ProcessSessionFactory (org.apache.nifi.processor.ProcessSessionFactory)35 ProcessContext (org.apache.nifi.processor.ProcessContext)26 TestRunner (org.apache.nifi.util.TestRunner)20 Test (org.junit.Test)20 MockFlowFile (org.apache.nifi.util.MockFlowFile)15 HashMap (java.util.HashMap)9 ManagedChannel (io.grpc.ManagedChannel)7 ProvenanceEventRecord (org.apache.nifi.provenance.ProvenanceEventRecord)7 IOException (java.io.IOException)6 HashSet (java.util.HashSet)6 List (java.util.List)6 Map (java.util.Map)6 Set (java.util.Set)6 Relationship (org.apache.nifi.processor.Relationship)6 ByteString (com.google.protobuf.ByteString)5 ArrayList (java.util.ArrayList)5 TimeUnit (java.util.concurrent.TimeUnit)5 AtomicLong (java.util.concurrent.atomic.AtomicLong)5 FlowFile (org.apache.nifi.flowfile.FlowFile)5 ComponentLog (org.apache.nifi.logging.ComponentLog)5