Use of org.apache.nifi.connectable.Connection in project nifi by apache.
From the class StandardProcessorDAO, method validateProposedConfiguration:
private List<String> validateProposedConfiguration(final ProcessorNode processorNode, final ProcessorConfigDTO config) {
    List<String> validationErrors = new ArrayList<>();

    // validate settings
    if (isNotNull(config.getPenaltyDuration())) {
        Matcher penaltyMatcher = FormatUtils.TIME_DURATION_PATTERN.matcher(config.getPenaltyDuration());
        if (!penaltyMatcher.matches()) {
            validationErrors.add("Penalty duration is not a valid time duration (ie 30 sec, 5 min)");
        }
    }
    if (isNotNull(config.getYieldDuration())) {
        Matcher yieldMatcher = FormatUtils.TIME_DURATION_PATTERN.matcher(config.getYieldDuration());
        if (!yieldMatcher.matches()) {
            validationErrors.add("Yield duration is not a valid time duration (ie 30 sec, 5 min)");
        }
    }
    if (isNotNull(config.getBulletinLevel())) {
        try {
            LogLevel.valueOf(config.getBulletinLevel());
        } catch (IllegalArgumentException iae) {
            validationErrors.add(String.format("Bulletin level: Value must be one of [%s]", StringUtils.join(LogLevel.values(), ", ")));
        }
    }
    if (isNotNull(config.getExecutionNode())) {
        try {
            ExecutionNode.valueOf(config.getExecutionNode());
        } catch (IllegalArgumentException iae) {
            validationErrors.add(String.format("Execution node: Value must be one of [%s]", StringUtils.join(ExecutionNode.values(), ", ")));
        }
    }

    // get the current scheduling strategy
    SchedulingStrategy schedulingStrategy = processorNode.getSchedulingStrategy();

    // validate the new scheduling strategy if appropriate
    if (isNotNull(config.getSchedulingStrategy())) {
        try {
            // this will be the new scheduling strategy so use it
            schedulingStrategy = SchedulingStrategy.valueOf(config.getSchedulingStrategy());
        } catch (IllegalArgumentException iae) {
            validationErrors.add(String.format("Scheduling strategy: Value must be one of [%s]", StringUtils.join(SchedulingStrategy.values(), ", ")));
        }
    }

    // validate the concurrent tasks based on the scheduling strategy
    if (isNotNull(config.getConcurrentlySchedulableTaskCount())) {
        switch (schedulingStrategy) {
            case TIMER_DRIVEN:
            case PRIMARY_NODE_ONLY:
                if (config.getConcurrentlySchedulableTaskCount() <= 0) {
                    validationErrors.add("Concurrent tasks must be greater than 0.");
                }
                break;
            case EVENT_DRIVEN:
                if (config.getConcurrentlySchedulableTaskCount() < 0) {
                    validationErrors.add("Concurrent tasks must be greater or equal to 0.");
                }
                break;
        }
    }

    // validate the scheduling period based on the scheduling strategy
    if (isNotNull(config.getSchedulingPeriod())) {
        switch (schedulingStrategy) {
            case TIMER_DRIVEN:
            case PRIMARY_NODE_ONLY:
                final Matcher schedulingMatcher = FormatUtils.TIME_DURATION_PATTERN.matcher(config.getSchedulingPeriod());
                if (!schedulingMatcher.matches()) {
                    validationErrors.add("Scheduling period is not a valid time duration (ie 30 sec, 5 min)");
                }
                break;
            case CRON_DRIVEN:
                try {
                    new CronExpression(config.getSchedulingPeriod());
                } catch (final ParseException pe) {
                    throw new IllegalArgumentException(String.format("Scheduling Period '%s' is not a valid cron expression: %s", config.getSchedulingPeriod(), pe.getMessage()));
                } catch (final Exception e) {
                    throw new IllegalArgumentException("Scheduling Period is not a valid cron expression: " + config.getSchedulingPeriod());
                }
                break;
        }
    }

    final Set<String> autoTerminatedRelationships = config.getAutoTerminatedRelationships();
    if (isNotNull(autoTerminatedRelationships)) {
        for (final String relationshipName : autoTerminatedRelationships) {
            final Relationship relationship = new Relationship.Builder().name(relationshipName).build();
            final Set<Connection> connections = processorNode.getConnections(relationship);
            if (isNotNull(connections) && !connections.isEmpty()) {
                validationErrors.add("Cannot automatically terminate '" + relationshipName + "' relationship because a Connection already exists with this relationship");
            }
        }
    }
    return validationErrors;
}
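The duration checks above all go through FormatUtils.TIME_DURATION_PATTERN. Below is a minimal, self-contained sketch of the behavior being validated; the pattern here is a simplified stand-in (the real NiFi pattern accepts more units and decimal values), not the actual FormatUtils source.

import java.util.regex.Pattern;

// Illustration only (not NiFi source): a simplified stand-in for
// FormatUtils.TIME_DURATION_PATTERN, assuming it requires a number
// followed by a recognized time unit.
public class DurationCheckSketch {
    private static final Pattern TIME_DURATION =
            Pattern.compile("\\d+\\s*(ns|nanos|millis|ms|secs?|s|mins?|m|hrs?|h|days?|d)", Pattern.CASE_INSENSITIVE);

    public static void main(String[] args) {
        System.out.println(TIME_DURATION.matcher("30 sec").matches()); // true  -> no validation error
        System.out.println(TIME_DURATION.matcher("5 min").matches());  // true
        System.out.println(TIME_DURATION.matcher("30").matches());     // false -> "not a valid time duration"
    }
}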
Use of org.apache.nifi.connectable.Connection in project nifi by apache.
From the class TestWriteAheadFlowFileRepository, method testResourceClaimsIncremented:
@Test
public void testResourceClaimsIncremented() throws IOException {
    final ResourceClaimManager claimManager = new StandardResourceClaimManager();
    final TestQueueProvider queueProvider = new TestQueueProvider();
    final Connection connection = Mockito.mock(Connection.class);
    when(connection.getIdentifier()).thenReturn("1234");
    when(connection.getDestination()).thenReturn(Mockito.mock(Connectable.class));
    final FlowFileSwapManager swapMgr = new MockFlowFileSwapManager();
    final FlowFileQueue queue = new StandardFlowFileQueue("1234", connection, null, null, claimManager, null, swapMgr, null, 10000);
    when(connection.getFlowFileQueue()).thenReturn(queue);
    queueProvider.addConnection(connection);

    final ResourceClaim resourceClaim1 = claimManager.newResourceClaim("container", "section", "1", false, false);
    final ContentClaim claim1 = new StandardContentClaim(resourceClaim1, 0L);
    final ResourceClaim resourceClaim2 = claimManager.newResourceClaim("container", "section", "2", false, false);
    final ContentClaim claim2 = new StandardContentClaim(resourceClaim2, 0L);

    // resource claims' counts should be updated for both the swapped-out FlowFile and the non-swapped-out FlowFile
    try (final WriteAheadFlowFileRepository repo = new WriteAheadFlowFileRepository(NiFiProperties.createBasicNiFiProperties(null, null))) {
        repo.initialize(claimManager);
        repo.loadFlowFiles(queueProvider, -1L);

        // Create a Repository Record that indicates that a FlowFile was created
        final FlowFileRecord flowFile1 = new StandardFlowFileRecord.Builder().id(1L).addAttribute("uuid", "11111111-1111-1111-1111-111111111111").contentClaim(claim1).build();
        final StandardRepositoryRecord rec1 = new StandardRepositoryRecord(queue);
        rec1.setWorking(flowFile1);
        rec1.setDestination(queue);

        // Create a Record that we can swap out
        final FlowFileRecord flowFile2 = new StandardFlowFileRecord.Builder().id(2L).addAttribute("uuid", "11111111-1111-1111-1111-111111111112").contentClaim(claim2).build();
        final StandardRepositoryRecord rec2 = new StandardRepositoryRecord(queue);
        rec2.setWorking(flowFile2);
        rec2.setDestination(queue);

        final List<RepositoryRecord> records = new ArrayList<>();
        records.add(rec1);
        records.add(rec2);
        repo.updateRepository(records);

        final String swapLocation = swapMgr.swapOut(Collections.singletonList(flowFile2), queue);
        repo.swapFlowFilesOut(Collections.singletonList(flowFile2), queue, swapLocation);
    }

    final ResourceClaimManager recoveryClaimManager = new StandardResourceClaimManager();
    try (final WriteAheadFlowFileRepository repo = new WriteAheadFlowFileRepository(NiFiProperties.createBasicNiFiProperties(null, null))) {
        repo.initialize(recoveryClaimManager);
        final long largestId = repo.loadFlowFiles(queueProvider, 0L);
        // the largest ID known is 1 because this doesn't take into account the FlowFiles that have been swapped out
        assertEquals(1, largestId);
    }

    // resource claim 1 will have a single claimant count while resource claim 2 will have no claimant counts,
    // because resource claim 2 is referenced only by FlowFiles that are swapped out.
    assertEquals(1, recoveryClaimManager.getClaimantCount(resourceClaim1));
    assertEquals(0, recoveryClaimManager.getClaimantCount(resourceClaim2));

    final SwapSummary summary = queue.recoverSwappedFlowFiles();
    assertNotNull(summary);
    assertEquals(2, summary.getMaxFlowFileId().intValue());
    assertEquals(new QueueSize(1, 0L), summary.getQueueSize());

    final List<ResourceClaim> swappedOutClaims = summary.getResourceClaims();
    assertNotNull(swappedOutClaims);
    assertEquals(1, swappedOutClaims.size());
    assertEquals(claim2.getResourceClaim(), swappedOutClaims.get(0));
}
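The two claimant-count assertions are the heart of the test: a claim is counted only while a live (non-swapped-out) FlowFile references it. A hedged sketch of that bookkeeping follows, assuming ResourceClaimManager exposes incrementClaimantCount and decrementClaimantCount alongside the methods already used above:

// Sketch only: mirrors the bookkeeping the test verifies.
final ResourceClaimManager mgr = new StandardResourceClaimManager();
final ResourceClaim claim = mgr.newResourceClaim("container", "section", "1", false, false);

mgr.incrementClaimantCount(claim);             // a live FlowFile now references the claim
assertEquals(1, mgr.getClaimantCount(claim));  // counted, like resourceClaim1 above

mgr.decrementClaimantCount(claim);             // the only referencing FlowFile goes away (e.g., swapped out)
assertEquals(0, mgr.getClaimantCount(claim));  // uncounted, like resourceClaim2 above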
Use of org.apache.nifi.connectable.Connection in project nifi by apache.
From the class TestProcessorLifecycle, method validateProcessorDeletion:
/**
 * Tests deletion of a processor while it is connected to another processor.
 *
 * @throws Exception exception
 */
@Test
public void validateProcessorDeletion() throws Exception {
    final FlowControllerAndSystemBundle fcsb = this.buildFlowControllerForTest();
    fc = fcsb.getFlowController();
    ProcessGroup testGroup = fc.createProcessGroup(UUID.randomUUID().toString());
    this.setControllerRootGroup(fc, testGroup);

    ProcessorNode testProcNodeA = fc.createProcessor(TestProcessor.class.getName(), UUID.randomUUID().toString(), fcsb.getSystemBundle().getBundleDetails().getCoordinate());
    testProcNodeA.setProperties(properties);
    testGroup.addProcessor(testProcNodeA);

    ProcessorNode testProcNodeB = fc.createProcessor(TestProcessor.class.getName(), UUID.randomUUID().toString(), fcsb.getSystemBundle().getBundleDetails().getCoordinate());
    testProcNodeB.setProperties(properties);
    testGroup.addProcessor(testProcNodeB);

    Collection<String> relationNames = new ArrayList<>();
    relationNames.add("relation");
    Connection connection = fc.createConnection(UUID.randomUUID().toString(), Connection.class.getName(), testProcNodeA, testProcNodeB, relationNames);
    testGroup.addConnection(connection);

    ProcessScheduler ps = fc.getProcessScheduler();
    ps.startProcessor(testProcNodeA, true);
    ps.startProcessor(testProcNodeB, true);

    try {
        testGroup.removeProcessor(testProcNodeA);
        fail();
    } catch (Exception e) {
        // should throw an exception because the processor is running
    }
    try {
        testGroup.removeProcessor(testProcNodeB);
        fail();
    } catch (Exception e) {
        // should throw an exception because the processor is running
    }

    ps.stopProcessor(testProcNodeB);
    Thread.sleep(100);

    try {
        testGroup.removeProcessor(testProcNodeA);
        fail();
    } catch (Exception e) {
        // should throw an exception because the destination processor is running
    }
    try {
        testGroup.removeProcessor(testProcNodeB);
        fail();
    } catch (Exception e) {
        // should throw an exception because the source processor is running
    }

    ps.stopProcessor(testProcNodeA);
    Thread.sleep(100);
    testGroup.removeProcessor(testProcNodeA);
    testGroup.removeProcessor(testProcNodeB);
    testGroup.shutdown();
}
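As an aside, on JUnit 4.13 or later the try/fail/catch blocks above could be written more compactly with assertThrows. This is a sketch only; it additionally assumes that removing a running processor surfaces as an IllegalStateException, which the broad catch blocks above do not pin down:

// Hedged alternative to try { ...; fail(); } catch (Exception e) {}
// (requires org.junit.Assert.assertThrows, JUnit 4.13+):
assertThrows(IllegalStateException.class, () -> testGroup.removeProcessor(testProcNodeA));
assertThrows(IllegalStateException.class, () -> testGroup.removeProcessor(testProcNodeB));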
Use of org.apache.nifi.connectable.Connection in project nifi by apache.
From the class AbstractFlowFileServerProtocol, method checkPortStatus:
protected void checkPortStatus(final Peer peer, String portId) throws HandshakeException {
    Port receivedPort = rootGroup.getInputPort(portId);
    if (receivedPort == null) {
        receivedPort = rootGroup.getOutputPort(portId);
    }
    if (receivedPort == null) {
        logger.debug("Responding with ResponseCode UNKNOWN_PORT for identifier {}", portId);
        throw new HandshakeException(ResponseCode.UNKNOWN_PORT, "Received unknown port identifier: " + portId);
    }
    if (!(receivedPort instanceof RootGroupPort)) {
        logger.debug("Responding with ResponseCode UNKNOWN_PORT for identifier {}", portId);
        throw new HandshakeException(ResponseCode.UNKNOWN_PORT, "Received port identifier " + portId + ", but this Port is not a RootGroupPort");
    }

    this.port = (RootGroupPort) receivedPort;
    final PortAuthorizationResult portAuthResult = this.port.checkUserAuthorization(peer.getCommunicationsSession().getUserDn());
    if (!portAuthResult.isAuthorized()) {
        logger.debug("Responding with ResponseCode UNAUTHORIZED: {}", portAuthResult.getExplanation());
        throw new HandshakeException(ResponseCode.UNAUTHORIZED, portAuthResult.getExplanation());
    }
    if (!receivedPort.isValid()) {
        logger.debug("Responding with ResponseCode PORT_NOT_IN_VALID_STATE for {}", receivedPort);
        throw new HandshakeException(ResponseCode.PORT_NOT_IN_VALID_STATE, "Port is not valid");
    }
    if (!receivedPort.isRunning()) {
        logger.debug("Responding with ResponseCode PORT_NOT_IN_VALID_STATE for {}", receivedPort);
        throw new HandshakeException(ResponseCode.PORT_NOT_IN_VALID_STATE, "Port not running");
    }

    // for protocol version 1 peers, skip the destination-full check; we will simply not service the request, and the sender will time out
    if (getVersionNegotiator().getVersion() > 1) {
        for (final Connection connection : port.getConnections()) {
            if (connection.getFlowFileQueue().isFull()) {
                logger.debug("Responding with ResponseCode PORTS_DESTINATION_FULL for {}", port);
                throw new HandshakeException(ResponseCode.PORTS_DESTINATION_FULL, "Received port identifier " + portId + ", but its destination is full");
            }
        }
    }
}
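Each HandshakeException thrown here carries a specific ResponseCode. A sketch of how a caller might branch on them (illustration only; the getResponseCode() accessor is an assumption that mirrors the constructor arguments used above):

// Hedged sketch of caller-side handling.
try {
    checkPortStatus(peer, portId);
} catch (final HandshakeException e) {
    switch (e.getResponseCode()) {
        case UNKNOWN_PORT:
            // the identifier matched no root-group input or output port
            break;
        case UNAUTHORIZED:
            // the peer's DN failed the port's authorization check
            break;
        case PORT_NOT_IN_VALID_STATE:
            // the port exists but is invalid or not running
            break;
        case PORTS_DESTINATION_FULL:
            // back-pressure: a downstream queue is full; retry later
            break;
        default:
            throw e;
    }
}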
Use of org.apache.nifi.connectable.Connection in project nifi by apache.
From the class RelationshipAuditor, method createConnectionAdvice:
/**
 * Audits the creation of relationships via createConnection().
 *
 * This method only needs to run 'after returning'. However, in Java 7 the order in which methods are returned from
 * Class.getDeclaredMethods (even though no order is guaranteed) seems to differ from Java 6, and Spring AOP depends
 * on this ordering to determine advice precedence. Normalizing all advice into Around advice alleviates this issue.
 *
 * @param proceedingJoinPoint join point
 * @return connection
 * @throws java.lang.Throwable ex
 */
@Around("within(org.apache.nifi.web.dao.ConnectionDAO+) && "
        + "execution(org.apache.nifi.connectable.Connection createConnection(java.lang.String, org.apache.nifi.web.api.dto.ConnectionDTO))")
public Connection createConnectionAdvice(ProceedingJoinPoint proceedingJoinPoint) throws Throwable {
    // perform the underlying operation
    Connection connection = (Connection) proceedingJoinPoint.proceed();

    // audit the connection creation
    final ConnectDetails connectDetails = createConnectDetails(connection, connection.getRelationships());
    final Action action = generateAuditRecordForConnection(connection, Operation.Connect, connectDetails);

    // save the action
    if (action != null) {
        saveAction(action, logger);
    }
    return connection;
}
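For comparison, the 'after returning' form that the Javadoc says was deliberately avoided would look roughly like this sketch (same pointcut; the method name is hypothetical):

// Hedged sketch of the equivalent @AfterReturning advice (org.aspectj.lang.annotation.AfterReturning),
// which the class avoids because of the Spring AOP advice-precedence quirk described above.
@AfterReturning(pointcut = "within(org.apache.nifi.web.dao.ConnectionDAO+) && "
        + "execution(org.apache.nifi.connectable.Connection createConnection(java.lang.String, org.apache.nifi.web.api.dto.ConnectionDTO))",
        returning = "connection")
public void createConnectionAuditSketch(final Connection connection) {
    final ConnectDetails connectDetails = createConnectDetails(connection, connection.getRelationships());
    final Action action = generateAuditRecordForConnection(connection, Operation.Connect, connectDetails);
    if (action != null) {
        saveAction(action, logger);
    }
}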