
Example 16 with Connection

Use of org.apache.nifi.connectable.Connection in project nifi by apache.

The class StandardProcessorDAO, method validateProposedConfiguration: validates a proposed processor configuration and returns a list of human-readable validation errors, rejecting auto-terminated relationships for which a Connection already exists.

private List<String> validateProposedConfiguration(final ProcessorNode processorNode, final ProcessorConfigDTO config) {
    List<String> validationErrors = new ArrayList<>();
    // validate settings
    if (isNotNull(config.getPenaltyDuration())) {
        Matcher penaltyMatcher = FormatUtils.TIME_DURATION_PATTERN.matcher(config.getPenaltyDuration());
        if (!penaltyMatcher.matches()) {
            validationErrors.add("Penalty duration is not a valid time duration (ie 30 sec, 5 min)");
        }
    }
    if (isNotNull(config.getYieldDuration())) {
        Matcher yieldMatcher = FormatUtils.TIME_DURATION_PATTERN.matcher(config.getYieldDuration());
        if (!yieldMatcher.matches()) {
            validationErrors.add("Yield duration is not a valid time duration (ie 30 sec, 5 min)");
        }
    }
    if (isNotNull(config.getBulletinLevel())) {
        try {
            LogLevel.valueOf(config.getBulletinLevel());
        } catch (IllegalArgumentException iae) {
            validationErrors.add(String.format("Bulletin level: Value must be one of [%s]", StringUtils.join(LogLevel.values(), ", ")));
        }
    }
    if (isNotNull(config.getExecutionNode())) {
        try {
            ExecutionNode.valueOf(config.getExecutionNode());
        } catch (IllegalArgumentException iae) {
            validationErrors.add(String.format("Execution node: Value must be one of [%s]", StringUtils.join(ExecutionNode.values(), ", ")));
        }
    }
    // get the current scheduling strategy
    SchedulingStrategy schedulingStrategy = processorNode.getSchedulingStrategy();
    // validate the new scheduling strategy if appropriate
    if (isNotNull(config.getSchedulingStrategy())) {
        try {
            // this will be the new scheduling strategy so use it
            schedulingStrategy = SchedulingStrategy.valueOf(config.getSchedulingStrategy());
        } catch (IllegalArgumentException iae) {
            validationErrors.add(String.format("Scheduling strategy: Value must be one of [%s]", StringUtils.join(SchedulingStrategy.values(), ", ")));
        }
    }
    // validate the concurrent tasks based on the scheduling strategy
    if (isNotNull(config.getConcurrentlySchedulableTaskCount())) {
        switch(schedulingStrategy) {
            case TIMER_DRIVEN:
            case PRIMARY_NODE_ONLY:
                if (config.getConcurrentlySchedulableTaskCount() <= 0) {
                    validationErrors.add("Concurrent tasks must be greater than 0.");
                }
                break;
            case EVENT_DRIVEN:
                if (config.getConcurrentlySchedulableTaskCount() < 0) {
                    validationErrors.add("Concurrent tasks must be greater or equal to 0.");
                }
                break;
        }
    }
    // validate the scheduling period based on the scheduling strategy
    if (isNotNull(config.getSchedulingPeriod())) {
        switch(schedulingStrategy) {
            case TIMER_DRIVEN:
            case PRIMARY_NODE_ONLY:
                final Matcher schedulingMatcher = FormatUtils.TIME_DURATION_PATTERN.matcher(config.getSchedulingPeriod());
                if (!schedulingMatcher.matches()) {
                    validationErrors.add("Scheduling period is not a valid time duration (ie 30 sec, 5 min)");
                }
                break;
            case CRON_DRIVEN:
                try {
                    new CronExpression(config.getSchedulingPeriod());
                } catch (final ParseException pe) {
                    throw new IllegalArgumentException(String.format("Scheduling Period '%s' is not a valid cron expression: %s", config.getSchedulingPeriod(), pe.getMessage()));
                } catch (final Exception e) {
                    throw new IllegalArgumentException("Scheduling Period is not a valid cron expression: " + config.getSchedulingPeriod());
                }
                break;
        }
    }
    final Set<String> autoTerminatedRelationships = config.getAutoTerminatedRelationships();
    if (isNotNull(autoTerminatedRelationships)) {
        for (final String relationshipName : autoTerminatedRelationships) {
            final Relationship relationship = new Relationship.Builder().name(relationshipName).build();
            final Set<Connection> connections = processorNode.getConnections(relationship);
            if (isNotNull(connections) && !connections.isEmpty()) {
                validationErrors.add("Cannot automatically terminate '" + relationshipName + "' relationship because a Connection already exists with this relationship");
            }
        }
    }
    return validationErrors;
}
Also used : Matcher(java.util.regex.Matcher) ArrayList(java.util.ArrayList) Connection(org.apache.nifi.connectable.Connection) ProcessorInstantiationException(org.apache.nifi.controller.exception.ProcessorInstantiationException) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) ResourceNotFoundException(org.apache.nifi.web.ResourceNotFoundException) ParseException(java.text.ParseException) ComponentLifeCycleException(org.apache.nifi.controller.exception.ComponentLifeCycleException) NiFiCoreException(org.apache.nifi.web.NiFiCoreException) ValidationException(org.apache.nifi.controller.exception.ValidationException) SchedulingStrategy(org.apache.nifi.scheduling.SchedulingStrategy) Relationship(org.apache.nifi.processor.Relationship) CronExpression(org.quartz.CronExpression)
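
The method above is private to StandardProcessorDAO, but the individual checks can be exercised on their own. A minimal sketch of the duration and enum checks, assuming FormatUtils.TIME_DURATION_PATTERN and LogLevel are available as in the snippet; the class and method names here are hypothetical.

import java.util.ArrayList;
import java.util.List;
import org.apache.nifi.logging.LogLevel;
import org.apache.nifi.util.FormatUtils;

public class ProposedConfigChecks {

    // mirrors the penalty-duration and bulletin-level checks from validateProposedConfiguration
    static List<String> check(final String penaltyDuration, final String bulletinLevel) {
        final List<String> errors = new ArrayList<>();
        if (penaltyDuration != null && !FormatUtils.TIME_DURATION_PATTERN.matcher(penaltyDuration).matches()) {
            errors.add("Penalty duration is not a valid time duration (ie 30 sec, 5 min)");
        }
        if (bulletinLevel != null) {
            try {
                LogLevel.valueOf(bulletinLevel);
            } catch (final IllegalArgumentException iae) {
                errors.add("Bulletin level is not a valid LogLevel name");
            }
        }
        return errors;
    }

    public static void main(final String[] args) {
        System.out.println(check("30 sec", "WARN")); // no errors
        System.out.println(check("soon", "LOUD"));   // two validation errors
    }
}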

Example 17 with Connection

Use of org.apache.nifi.connectable.Connection in project nifi by apache.

The class TestWriteAheadFlowFileRepository, method testResourceClaimsIncremented: verifies that resource-claim claimant counts are recovered correctly for both swapped-out and non-swapped-out FlowFiles after the repository is reloaded.

@Test
public void testResourceClaimsIncremented() throws IOException {
    final ResourceClaimManager claimManager = new StandardResourceClaimManager();
    final TestQueueProvider queueProvider = new TestQueueProvider();
    final Connection connection = Mockito.mock(Connection.class);
    when(connection.getIdentifier()).thenReturn("1234");
    when(connection.getDestination()).thenReturn(Mockito.mock(Connectable.class));
    final FlowFileSwapManager swapMgr = new MockFlowFileSwapManager();
    final FlowFileQueue queue = new StandardFlowFileQueue("1234", connection, null, null, claimManager, null, swapMgr, null, 10000);
    when(connection.getFlowFileQueue()).thenReturn(queue);
    queueProvider.addConnection(connection);
    final ResourceClaim resourceClaim1 = claimManager.newResourceClaim("container", "section", "1", false, false);
    final ContentClaim claim1 = new StandardContentClaim(resourceClaim1, 0L);
    final ResourceClaim resourceClaim2 = claimManager.newResourceClaim("container", "section", "2", false, false);
    final ContentClaim claim2 = new StandardContentClaim(resourceClaim2, 0L);
    // resource claims' counts should be updated for both the swapped out FlowFile and the non-swapped out FlowFile
    try (final WriteAheadFlowFileRepository repo = new WriteAheadFlowFileRepository(NiFiProperties.createBasicNiFiProperties(null, null))) {
        repo.initialize(claimManager);
        repo.loadFlowFiles(queueProvider, -1L);
        // Create a Repository Record that indicates that a FlowFile was created
        final FlowFileRecord flowFile1 = new StandardFlowFileRecord.Builder().id(1L).addAttribute("uuid", "11111111-1111-1111-1111-111111111111").contentClaim(claim1).build();
        final StandardRepositoryRecord rec1 = new StandardRepositoryRecord(queue);
        rec1.setWorking(flowFile1);
        rec1.setDestination(queue);
        // Create a Record that we can swap out
        final FlowFileRecord flowFile2 = new StandardFlowFileRecord.Builder().id(2L).addAttribute("uuid", "11111111-1111-1111-1111-111111111112").contentClaim(claim2).build();
        final StandardRepositoryRecord rec2 = new StandardRepositoryRecord(queue);
        rec2.setWorking(flowFile2);
        rec2.setDestination(queue);
        final List<RepositoryRecord> records = new ArrayList<>();
        records.add(rec1);
        records.add(rec2);
        repo.updateRepository(records);
        final String swapLocation = swapMgr.swapOut(Collections.singletonList(flowFile2), queue);
        repo.swapFlowFilesOut(Collections.singletonList(flowFile2), queue, swapLocation);
    }
    final ResourceClaimManager recoveryClaimManager = new StandardResourceClaimManager();
    try (final WriteAheadFlowFileRepository repo = new WriteAheadFlowFileRepository(NiFiProperties.createBasicNiFiProperties(null, null))) {
        repo.initialize(recoveryClaimManager);
        final long largestId = repo.loadFlowFiles(queueProvider, 0L);
        // largest ID known is 1 because this doesn't take into account the FlowFiles that have been swapped out
        assertEquals(1, largestId);
    }
    // resource claim 1 will have a single claimant count while resource claim 2 will have no claimant counts
    // because resource claim 2 is referenced only by flowfiles that are swapped out.
    assertEquals(1, recoveryClaimManager.getClaimantCount(resourceClaim1));
    assertEquals(0, recoveryClaimManager.getClaimantCount(resourceClaim2));
    final SwapSummary summary = queue.recoverSwappedFlowFiles();
    assertNotNull(summary);
    assertEquals(2, summary.getMaxFlowFileId().intValue());
    assertEquals(new QueueSize(1, 0L), summary.getQueueSize());
    final List<ResourceClaim> swappedOutClaims = summary.getResourceClaims();
    assertNotNull(swappedOutClaims);
    assertEquals(1, swappedOutClaims.size());
    assertEquals(claim2.getResourceClaim(), swappedOutClaims.get(0));
}
Also used : StandardFlowFileQueue(org.apache.nifi.controller.StandardFlowFileQueue) Connection(org.apache.nifi.connectable.Connection) ArrayList(java.util.ArrayList) StandardResourceClaimManager(org.apache.nifi.controller.repository.claim.StandardResourceClaimManager) ResourceClaimManager(org.apache.nifi.controller.repository.claim.ResourceClaimManager) StandardSwapSummary(org.apache.nifi.controller.swap.StandardSwapSummary) FlowFileQueue(org.apache.nifi.controller.queue.FlowFileQueue) QueueSize(org.apache.nifi.controller.queue.QueueSize) StandardContentClaim(org.apache.nifi.controller.repository.claim.StandardContentClaim) ContentClaim(org.apache.nifi.controller.repository.claim.ContentClaim) Connectable(org.apache.nifi.connectable.Connectable) ResourceClaim(org.apache.nifi.controller.repository.claim.ResourceClaim) Test(org.junit.Test)
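
The recovery assertions above hinge on claimant counts tracked by the ResourceClaimManager. A minimal sketch of that bookkeeping in isolation; getClaimantCount appears in the test, while incrementClaimantCount and decrementClaimantCount are assumed to be available on the manager interface.

import org.apache.nifi.controller.repository.claim.ResourceClaim;
import org.apache.nifi.controller.repository.claim.ResourceClaimManager;
import org.apache.nifi.controller.repository.claim.StandardResourceClaimManager;

public class ClaimantCountSketch {
    public static void main(final String[] args) {
        final ResourceClaimManager manager = new StandardResourceClaimManager();
        final ResourceClaim claim = manager.newResourceClaim("container", "section", "1", false, false);

        manager.incrementClaimantCount(claim); // a FlowFile in an active queue references the claim
        manager.incrementClaimantCount(claim); // a second FlowFile references it as well
        manager.decrementClaimantCount(claim); // one of those FlowFiles is dropped or swapped out

        // one active claimant remains, analogous to resourceClaim1 after recovery in the test
        System.out.println(manager.getClaimantCount(claim));
    }
}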

Example 18 with Connection

Use of org.apache.nifi.connectable.Connection in project nifi by apache.

The class TestProcessorLifecycle, method validateProcessorDeletion: verifies that a processor cannot be removed from its ProcessGroup while it, or the processor it is connected to, is still running.

/**
 * Tests deletion of a processor while it is connected to another processor
 *
 * @throws Exception exception
 */
@Test
public void validateProcessorDeletion() throws Exception {
    final FlowControllerAndSystemBundle fcsb = this.buildFlowControllerForTest();
    fc = fcsb.getFlowController();
    ProcessGroup testGroup = fc.createProcessGroup(UUID.randomUUID().toString());
    this.setControllerRootGroup(fc, testGroup);
    ProcessorNode testProcNodeA = fc.createProcessor(TestProcessor.class.getName(), UUID.randomUUID().toString(), fcsb.getSystemBundle().getBundleDetails().getCoordinate());
    testProcNodeA.setProperties(properties);
    testGroup.addProcessor(testProcNodeA);
    ProcessorNode testProcNodeB = fc.createProcessor(TestProcessor.class.getName(), UUID.randomUUID().toString(), fcsb.getSystemBundle().getBundleDetails().getCoordinate());
    testProcNodeB.setProperties(properties);
    testGroup.addProcessor(testProcNodeB);
    Collection<String> relationNames = new ArrayList<>();
    relationNames.add("relation");
    Connection connection = fc.createConnection(UUID.randomUUID().toString(), Connection.class.getName(), testProcNodeA, testProcNodeB, relationNames);
    testGroup.addConnection(connection);
    ProcessScheduler ps = fc.getProcessScheduler();
    ps.startProcessor(testProcNodeA, true);
    ps.startProcessor(testProcNodeB, true);
    try {
        testGroup.removeProcessor(testProcNodeA);
        fail();
    } catch (Exception e) {
        // should throw exception because processor running
    }
    try {
        testGroup.removeProcessor(testProcNodeB);
        fail();
    } catch (Exception e) {
        // should throw exception because processor running
    }
    ps.stopProcessor(testProcNodeB);
    Thread.sleep(100);
    try {
        testGroup.removeProcessor(testProcNodeA);
        fail();
    } catch (Exception e) {
        // should throw exception because destination processor running
    }
    try {
        testGroup.removeProcessor(testProcNodeB);
        fail();
    } catch (Exception e) {
        // should throw exception because source processor running
    }
    ps.stopProcessor(testProcNodeA);
    Thread.sleep(100);
    testGroup.removeProcessor(testProcNodeA);
    testGroup.removeProcessor(testProcNodeB);
    testGroup.shutdown();
}
Also used : ProcessScheduler(org.apache.nifi.controller.ProcessScheduler) ProcessorNode(org.apache.nifi.controller.ProcessorNode) ProcessGroup(org.apache.nifi.groups.ProcessGroup) ArrayList(java.util.ArrayList) Connection(org.apache.nifi.connectable.Connection) ProcessException(org.apache.nifi.processor.exception.ProcessException) Test(org.junit.Test)
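
The test encodes an ordering rule: both endpoints of a connection must be stopped before either processor can be removed. A minimal sketch of that teardown order, using only the ProcessScheduler and ProcessGroup calls that appear in the test; the helper name safeRemove is hypothetical.

import org.apache.nifi.controller.ProcessScheduler;
import org.apache.nifi.controller.ProcessorNode;
import org.apache.nifi.groups.ProcessGroup;

public class ProcessorTeardown {

    // stop both ends of the connection before removing either processor
    static void safeRemove(final ProcessScheduler scheduler, final ProcessGroup group,
                           final ProcessorNode source, final ProcessorNode destination) throws InterruptedException {
        scheduler.stopProcessor(source);
        scheduler.stopProcessor(destination);
        Thread.sleep(100); // give the scheduler time to complete the state transition, as the test does
        group.removeProcessor(source);
        group.removeProcessor(destination);
    }
}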

Example 19 with Connection

Use of org.apache.nifi.connectable.Connection in project nifi by apache.

The class AbstractFlowFileServerProtocol, method checkPortStatus: validates the port identifier received during a site-to-site handshake, rejecting the request if the port is unknown, not a RootGroupPort, unauthorized, invalid, not running, or has a full destination queue.

protected void checkPortStatus(final Peer peer, String portId) throws HandshakeException {
    Port receivedPort = rootGroup.getInputPort(portId);
    if (receivedPort == null) {
        receivedPort = rootGroup.getOutputPort(portId);
    }
    if (receivedPort == null) {
        logger.debug("Responding with ResponseCode UNKNOWN_PORT for identifier {}", portId);
        throw new HandshakeException(ResponseCode.UNKNOWN_PORT, "Received unknown port identifier: " + portId);
    }
    if (!(receivedPort instanceof RootGroupPort)) {
        logger.debug("Responding with ResponseCode UNKNOWN_PORT for identifier {}", portId);
        throw new HandshakeException(ResponseCode.UNKNOWN_PORT, "Received port identifier " + portId + ", but this Port is not a RootGroupPort");
    }
    this.port = (RootGroupPort) receivedPort;
    final PortAuthorizationResult portAuthResult = this.port.checkUserAuthorization(peer.getCommunicationsSession().getUserDn());
    if (!portAuthResult.isAuthorized()) {
        logger.debug("Responding with ResponseCode UNAUTHORIZED: ", portAuthResult.getExplanation());
        throw new HandshakeException(ResponseCode.UNAUTHORIZED, portAuthResult.getExplanation());
    }
    if (!receivedPort.isValid()) {
        logger.debug("Responding with ResponseCode PORT_NOT_IN_VALID_STATE for {}", receivedPort);
        throw new HandshakeException(ResponseCode.PORT_NOT_IN_VALID_STATE, "Port is not valid");
    }
    if (!receivedPort.isRunning()) {
        logger.debug("Responding with ResponseCode PORT_NOT_IN_VALID_STATE for {}", receivedPort);
        throw new HandshakeException(ResponseCode.PORT_NOT_IN_VALID_STATE, "Port not running");
    }
    // the destination-full check applies only to protocol versions above 1; with version 1 we simply will not service the request and the sender will time out
    if (getVersionNegotiator().getVersion() > 1) {
        for (final Connection connection : port.getConnections()) {
            if (connection.getFlowFileQueue().isFull()) {
                logger.debug("Responding with ResponseCode PORTS_DESTINATION_FULL for {}", port);
                throw new HandshakeException(ResponseCode.PORTS_DESTINATION_FULL, "Received port identifier " + portId + ", but its destination is full");
            }
        }
    }
}
Also used : RootGroupPort(org.apache.nifi.remote.RootGroupPort) Port(org.apache.nifi.connectable.Port) Connection(org.apache.nifi.connectable.Connection) HandshakeException(org.apache.nifi.remote.exception.HandshakeException) PortAuthorizationResult(org.apache.nifi.remote.PortAuthorizationResult)
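
The final block implements the back-pressure check behind PORTS_DESTINATION_FULL. A minimal sketch of that check on its own, using only the Port, Connection, and FlowFileQueue calls shown above; the class and method names are hypothetical.

import org.apache.nifi.connectable.Connection;
import org.apache.nifi.remote.RootGroupPort;

public class DestinationFullCheck {

    // true if any downstream queue of the port has reached its back-pressure threshold
    static boolean anyDestinationFull(final RootGroupPort port) {
        for (final Connection connection : port.getConnections()) {
            if (connection.getFlowFileQueue().isFull()) {
                return true;
            }
        }
        return false;
    }
}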

Example 20 with Connection

Use of org.apache.nifi.connectable.Connection in project nifi by apache.

The class RelationshipAuditor, method createConnectionAdvice: around advice that audits connection creation performed through a ConnectionDAO.

/**
 * Audits the creation of relationships via createConnection().
 *
 * This method only needs to run 'after returning'. However, in Java 7 the order in which methods are returned from Class.getDeclaredMethods (which was never guaranteed)
 * appears to differ from Java 6, and Spring AOP depends on that ordering to determine advice precedence. Normalizing all advice into Around advice alleviates this issue.
 *
 * @param proceedingJoinPoint join point
 * @return connection
 * @throws java.lang.Throwable ex
 */
@Around("within(org.apache.nifi.web.dao.ConnectionDAO+) && " + "execution(org.apache.nifi.connectable.Connection createConnection(java.lang.String, org.apache.nifi.web.api.dto.ConnectionDTO))")
public Connection createConnectionAdvice(ProceedingJoinPoint proceedingJoinPoint) throws Throwable {
    // perform the underlying operation
    Connection connection = (Connection) proceedingJoinPoint.proceed();
    // audit the connection creation
    final ConnectDetails connectDetails = createConnectDetails(connection, connection.getRelationships());
    final Action action = generateAuditRecordForConnection(connection, Operation.Connect, connectDetails);
    // save the actions
    if (action != null) {
        saveAction(action, logger);
    }
    return connection;
}
Also used : FlowChangeAction(org.apache.nifi.action.FlowChangeAction) Action(org.apache.nifi.action.Action) ConnectDetails(org.apache.nifi.action.details.ConnectDetails) FlowChangeConnectDetails(org.apache.nifi.action.details.FlowChangeConnectDetails) Connection(org.apache.nifi.connectable.Connection) Around(org.aspectj.lang.annotation.Around)
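
The Javadoc describes normalizing after-returning advice into around advice. A minimal sketch of that pattern with plain AspectJ annotations; the aspect, pointcut, and audit step below are illustrative and not the NiFi auditor itself.

import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;

@Aspect
public class CreateAuditAspect {

    @Around("execution(* com.example.dao.SomeDao.create*(..))") // hypothetical pointcut
    public Object auditCreate(final ProceedingJoinPoint joinPoint) throws Throwable {
        // run the underlying operation first, which is exactly what after-returning advice would observe
        final Object result = joinPoint.proceed();
        // then perform the audit step on the returned value
        System.out.println("audited creation of: " + result);
        return result;
    }
}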

Aggregations

Connection (org.apache.nifi.connectable.Connection) 95
ArrayList (java.util.ArrayList) 35
HashSet (java.util.HashSet) 35
VersionedConnection (org.apache.nifi.registry.flow.VersionedConnection) 30
FlowFileQueue (org.apache.nifi.controller.queue.FlowFileQueue) 28
Connectable (org.apache.nifi.connectable.Connectable) 27
ProcessGroup (org.apache.nifi.groups.ProcessGroup) 26
Relationship (org.apache.nifi.processor.Relationship) 23
Port (org.apache.nifi.connectable.Port) 21
RemoteProcessGroup (org.apache.nifi.groups.RemoteProcessGroup) 21
ProcessorNode (org.apache.nifi.controller.ProcessorNode) 19
RootGroupPort (org.apache.nifi.remote.RootGroupPort) 19
LinkedHashSet (java.util.LinkedHashSet) 18
Set (java.util.Set) 17
RemoteGroupPort (org.apache.nifi.remote.RemoteGroupPort) 17
Funnel (org.apache.nifi.connectable.Funnel) 16
HashMap (java.util.HashMap) 15
VersionedProcessGroup (org.apache.nifi.registry.flow.VersionedProcessGroup) 15
IOException (java.io.IOException) 14
Map (java.util.Map) 14