Use of org.apache.nifi.controller.ProcessorNode in project nifi by apache.
In the class StandardProcessGroup, the method addProcessor:
@Override
public void addProcessor(final ProcessorNode processor) {
    writeLock.lock();
    try {
        final String processorId = requireNonNull(processor).getIdentifier();
        final ProcessorNode existingProcessor = processors.get(processorId);
        if (existingProcessor != null) {
            throw new IllegalStateException("A processor is already registered to this ProcessGroup with ID " + processorId);
        }

        processor.setProcessGroup(this);
        processor.getVariableRegistry().setParent(getVariableRegistry());
        processors.put(processorId, processor);
        flowController.onProcessorAdded(processor);
        onComponentModified();
    } finally {
        writeLock.unlock();
    }
}
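addProcessor is a check-then-register operation guarded by the group's write lock: duplicate IDs are rejected, the node is re-parented, and the FlowController is notified only after the map insert. The sketch below shows how a caller might register an already-constructed node; it assumes the same imports as the class above, and registerProcessor is an illustrative helper, not a NiFi API.

// Illustrative helper, not part of NiFi: registers an existing node with a group
// and reports duplicate-ID registrations instead of propagating the exception.
static void registerProcessor(final ProcessGroup group, final ProcessorNode node) {
    try {
        group.addProcessor(node);   // throws IllegalStateException if the ID is already registered
    } catch (final IllegalStateException duplicate) {
        System.err.println("Processor already registered: " + node.getIdentifier());
    }
}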
Use of org.apache.nifi.controller.ProcessorNode in project nifi by apache.
In the class StandardProcessGroup, the method remove:
@Override
public void remove(final Snippet snippet) {
    writeLock.lock();
    try {
        // ensure that all components are valid
        verifyContents(snippet);

        final Set<Connectable> connectables = getAllConnectables(snippet);
        final Set<String> connectionIdsToRemove = new HashSet<>(getKeys(snippet.getConnections()));

        // Remove all connections that are the output of any Connectable.
        for (final Connectable connectable : connectables) {
            for (final Connection conn : connectable.getConnections()) {
                if (!connections.containsKey(conn.getIdentifier())) {
                    throw new IllegalStateException("Connectable component " + connectable.getIdentifier()
                        + " cannot be removed because it has incoming connections from the parent Process Group");
                }
                connectionIdsToRemove.add(conn.getIdentifier());
            }
        }

        // verify that all connections can be removed
        for (final String id : connectionIdsToRemove) {
            connections.get(id).verifyCanDelete();
        }

        // verify that all processors are stopped and have no active threads
        for (final String procId : snippet.getProcessors().keySet()) {
            final ProcessorNode procNode = getProcessor(procId);
            if (procNode.isRunning()) {
                throw new IllegalStateException("Processor " + procNode.getIdentifier() + " cannot be removed because it is running");
            }

            final int activeThreadCount = scheduler.getActiveThreadCount(procNode);
            if (activeThreadCount != 0) {
                throw new IllegalStateException("Processor " + procNode.getIdentifier()
                    + " cannot be removed because it still has " + activeThreadCount + " active threads");
            }
        }

        // verify that none of the connectables have incoming connections that are not in the Snippet.
        final Set<String> connectionIds = snippet.getConnections().keySet();
        for (final Connectable connectable : connectables) {
            for (final Connection conn : connectable.getIncomingConnections()) {
                if (!connectionIds.contains(conn.getIdentifier()) && !connectables.contains(conn.getSource())) {
                    throw new IllegalStateException("Connectable component " + connectable.getIdentifier()
                        + " cannot be removed because it has incoming connections that are not selected to be deleted");
                }
            }
        }

        // verify that all of the ProcessGroups in the snippet are empty
        for (final String groupId : snippet.getProcessGroups().keySet()) {
            final ProcessGroup toRemove = getProcessGroup(groupId);
            toRemove.verifyCanDelete(true);
        }

        onComponentModified();

        for (final String id : connectionIdsToRemove) {
            removeConnection(connections.get(id));
        }
        for (final String id : getKeys(snippet.getInputPorts())) {
            removeInputPort(inputPorts.get(id));
        }
        for (final String id : getKeys(snippet.getOutputPorts())) {
            removeOutputPort(outputPorts.get(id));
        }
        for (final String id : getKeys(snippet.getFunnels())) {
            removeFunnel(funnels.get(id));
        }
        for (final String id : getKeys(snippet.getLabels())) {
            removeLabel(labels.get(id));
        }
        for (final String id : getKeys(snippet.getProcessors())) {
            removeProcessor(processors.get(id));
        }
        for (final String id : getKeys(snippet.getRemoteProcessGroups())) {
            removeRemoteProcessGroup(remoteGroups.get(id));
        }
        for (final String id : getKeys(snippet.getProcessGroups())) {
            removeProcessGroup(processGroups.get(id));
        }
    } finally {
        writeLock.unlock();
    }
}
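The method verifies every precondition before mutating anything, all under the same write lock, so a failed check leaves the group untouched. The snippet's component maps are keyed by component ID, and the private getKeys helper used throughout is not shown above; a minimal sketch of what such a helper could look like, under the assumption that it simply snapshots the key set of a (possibly null) map, is:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Assumption: getKeys snapshots the IDs of a snippet component map so they can
// be iterated safely while the underlying collections are being modified.
// The real helper is private to StandardProcessGroup and may differ.
private static Set<String> getKeys(final Map<String, ?> componentMap) {
    if (componentMap == null) {
        return new HashSet<>();
    }
    return new HashSet<>(componentMap.keySet());
}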
Use of org.apache.nifi.controller.ProcessorNode in project nifi by apache.
In the class StandardProcessGroup, the method getCounts:
@Override
public ProcessGroupCounts getCounts() {
    int inputPortCount = 0;
    int outputPortCount = 0;
    int running = 0;
    int stopped = 0;
    int invalid = 0;
    int disabled = 0;
    int activeRemotePorts = 0;
    int inactiveRemotePorts = 0;
    int upToDate = 0;
    int locallyModified = 0;
    int stale = 0;
    int locallyModifiedAndStale = 0;
    int syncFailure = 0;

    readLock.lock();
    try {
        for (final ProcessorNode procNode : processors.values()) {
            if (ScheduledState.DISABLED.equals(procNode.getScheduledState())) {
                disabled++;
            } else if (procNode.isRunning()) {
                running++;
            } else if (!procNode.isValid()) {
                invalid++;
            } else {
                stopped++;
            }
        }

        inputPortCount = inputPorts.size();
        for (final Port port : inputPorts.values()) {
            if (ScheduledState.DISABLED.equals(port.getScheduledState())) {
                disabled++;
            } else if (port.isRunning()) {
                running++;
            } else if (!port.isValid()) {
                invalid++;
            } else {
                stopped++;
            }
        }

        outputPortCount = outputPorts.size();
        for (final Port port : outputPorts.values()) {
            if (ScheduledState.DISABLED.equals(port.getScheduledState())) {
                disabled++;
            } else if (port.isRunning()) {
                running++;
            } else if (!port.isValid()) {
                invalid++;
            } else {
                stopped++;
            }
        }

        for (final ProcessGroup childGroup : processGroups.values()) {
            final ProcessGroupCounts childCounts = childGroup.getCounts();
            running += childCounts.getRunningCount();
            stopped += childCounts.getStoppedCount();
            invalid += childCounts.getInvalidCount();
            disabled += childCounts.getDisabledCount();

            // update the vci counts for this child group
            final VersionControlInformation vci = childGroup.getVersionControlInformation();
            if (vci != null) {
                switch (vci.getStatus().getState()) {
                    case LOCALLY_MODIFIED:
                        locallyModified++;
                        break;
                    case LOCALLY_MODIFIED_AND_STALE:
                        locallyModifiedAndStale++;
                        break;
                    case STALE:
                        stale++;
                        break;
                    case SYNC_FAILURE:
                        syncFailure++;
                        break;
                    case UP_TO_DATE:
                        upToDate++;
                        break;
                }
            }

            // update the vci counts for all nested groups within the child
            upToDate += childCounts.getUpToDateCount();
            locallyModified += childCounts.getLocallyModifiedCount();
            stale += childCounts.getStaleCount();
            locallyModifiedAndStale += childCounts.getLocallyModifiedAndStaleCount();
            syncFailure += childCounts.getSyncFailureCount();
        }

        for (final RemoteProcessGroup remoteGroup : findAllRemoteProcessGroups()) {
            // Count only input ports that have incoming connections
            for (final Port port : remoteGroup.getInputPorts()) {
                if (port.hasIncomingConnection()) {
                    if (port.isRunning()) {
                        activeRemotePorts++;
                    } else {
                        inactiveRemotePorts++;
                    }
                }
            }

            // Count only output ports that have outgoing connections
            for (final Port port : remoteGroup.getOutputPorts()) {
                if (!port.getConnections().isEmpty()) {
                    if (port.isRunning()) {
                        activeRemotePorts++;
                    } else {
                        inactiveRemotePorts++;
                    }
                }
            }

            final String authIssue = remoteGroup.getAuthorizationIssue();
            if (authIssue != null) {
                invalid++;
            }
        }
    } finally {
        readLock.unlock();
    }

    return new ProcessGroupCounts(inputPortCount, outputPortCount, running, stopped, invalid, disabled,
        activeRemotePorts, inactiveRemotePorts, upToDate, locallyModified, stale, locallyModifiedAndStale, syncFailure);
}
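getCounts recurses into every child group and folds the child totals into the parent's, so the returned ProcessGroupCounts describes the entire subtree. A caller could render those totals directly; the sketch below is illustrative only and uses just the getters that getCounts itself calls on its child groups above.

// Illustrative only: formats a subset of the aggregated counts for display or logging.
static String summarizeCounts(final ProcessGroupCounts counts) {
    return String.format("running=%d stopped=%d invalid=%d disabled=%d stale=%d",
        counts.getRunningCount(), counts.getStoppedCount(), counts.getInvalidCount(),
        counts.getDisabledCount(), counts.getStaleCount());
}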
Use of org.apache.nifi.controller.ProcessorNode in project nifi by apache.
In the class StandardProcessGroup, the method findLocalConnectable:
private static Connectable findLocalConnectable(final String identifier, final ProcessGroup group) {
    final ProcessorNode procNode = group.getProcessor(identifier);
    if (procNode != null) {
        return procNode;
    }

    final Port inPort = group.getInputPort(identifier);
    if (inPort != null) {
        return inPort;
    }

    final Port outPort = group.getOutputPort(identifier);
    if (outPort != null) {
        return outPort;
    }

    final Funnel funnel = group.getFunnel(identifier);
    if (funnel != null) {
        return funnel;
    }

    for (final RemoteProcessGroup remoteProcessGroup : group.getRemoteProcessGroups()) {
        final RemoteGroupPort remoteInputPort = remoteProcessGroup.getInputPort(identifier);
        if (remoteInputPort != null) {
            return remoteInputPort;
        }

        final RemoteGroupPort remoteOutputPort = remoteProcessGroup.getOutputPort(identifier);
        if (remoteOutputPort != null) {
            return remoteOutputPort;
        }
    }

    for (final ProcessGroup childGroup : group.getProcessGroups()) {
        final Connectable childGroupConnectable = findLocalConnectable(identifier, childGroup);
        if (childGroupConnectable != null) {
            return childGroupConnectable;
        }
    }

    return null;
}
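The lookup is a depth-first search: the group's own processors, ports, and funnels are checked first, then the ports of its remote process groups, and finally each child group recursively; a miss returns null. Because the method is private and static, any wrapper has to live inside StandardProcessGroup. The helper below is a hypothetical sketch of such a wrapper that turns a miss into an exception; it is not part of NiFi.

// Sketch only: resolves a component ID anywhere under the given group, or fails loudly.
// requireConnectable is hypothetical; the real callers of findLocalConnectable are
// elsewhere in StandardProcessGroup and not shown above.
static Connectable requireConnectable(final ProcessGroup group, final String id) {
    final Connectable connectable = findLocalConnectable(id, group);
    if (connectable == null) {
        throw new IllegalStateException("Could not find a component with identifier " + id);
    }
    return connectable;
}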
Use of org.apache.nifi.controller.ProcessorNode in project nifi by apache.
In the class StandardProcessSession, the method removeExpired:
private void removeExpired(final Set<FlowFileRecord> flowFiles, final Connection connection) {
    if (flowFiles.isEmpty()) {
        return;
    }

    LOG.info("{} {} FlowFiles have expired and will be removed", new Object[] { this, flowFiles.size() });
    final List<RepositoryRecord> expiredRecords = new ArrayList<>(flowFiles.size());
    final Connectable connectable = context.getConnectable();
    final String processorType = connectable.getComponentType();
    final StandardProvenanceReporter expiredReporter = new StandardProvenanceReporter(this, connectable.getIdentifier(),
        processorType, context.getProvenanceRepository(), this);

    final Map<String, FlowFileRecord> recordIdMap = new HashMap<>();
    for (final FlowFileRecord flowFile : flowFiles) {
        recordIdMap.put(flowFile.getAttribute(CoreAttributes.UUID.key()), flowFile);

        final StandardRepositoryRecord record = new StandardRepositoryRecord(connection.getFlowFileQueue(), flowFile);
        record.markForDelete();
        expiredRecords.add(record);
        expiredReporter.expire(flowFile, "Expiration Threshold = " + connection.getFlowFileQueue().getFlowFileExpiration());
        decrementClaimCount(flowFile.getContentClaim());

        final long flowFileLife = System.currentTimeMillis() - flowFile.getEntryDate();
        final Object terminator = connectable instanceof ProcessorNode ? ((ProcessorNode) connectable).getProcessor() : connectable;
        LOG.info("{} terminated by {} due to FlowFile expiration; life of FlowFile = {} ms", new Object[] { flowFile, terminator, flowFileLife });
    }

    try {
        final Iterable<ProvenanceEventRecord> iterable = new Iterable<ProvenanceEventRecord>() {
            @Override
            public Iterator<ProvenanceEventRecord> iterator() {
                final Iterator<ProvenanceEventRecord> expiredEventIterator = expiredReporter.getEvents().iterator();
                final Iterator<ProvenanceEventRecord> enrichingIterator = new Iterator<ProvenanceEventRecord>() {
                    @Override
                    public boolean hasNext() {
                        return expiredEventIterator.hasNext();
                    }

                    @Override
                    public ProvenanceEventRecord next() {
                        final ProvenanceEventRecord event = expiredEventIterator.next();
                        final StandardProvenanceEventRecord.Builder enriched = new StandardProvenanceEventRecord.Builder().fromEvent(event);
                        final FlowFileRecord record = recordIdMap.get(event.getFlowFileUuid());
                        if (record == null) {
                            return null;
                        }

                        final ContentClaim claim = record.getContentClaim();
                        if (claim != null) {
                            final ResourceClaim resourceClaim = claim.getResourceClaim();
                            enriched.setCurrentContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(),
                                record.getContentClaimOffset() + claim.getOffset(), record.getSize());
                            enriched.setPreviousContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(),
                                record.getContentClaimOffset() + claim.getOffset(), record.getSize());
                        }

                        enriched.setAttributes(record.getAttributes(), Collections.<String, String>emptyMap());
                        return enriched.build();
                    }

                    @Override
                    public void remove() {
                        throw new UnsupportedOperationException();
                    }
                };

                return enrichingIterator;
            }
        };

        context.getProvenanceRepository().registerEvents(iterable);
        context.getFlowFileRepository().updateRepository(expiredRecords);
    } catch (final IOException e) {
        LOG.error("Failed to update FlowFile Repository to record expired records due to {}", e);
    }
}
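The interesting detail here is that the provenance events are enriched lazily: the anonymous Iterable wraps the reporter's event iterator and adds each record's content-claim and attribute details only when next() is called, so registerEvents can stream the events without building an intermediate enriched list. The same pattern in generic form, as a standalone sketch rather than NiFi code:

import java.util.Iterator;
import java.util.function.Function;

// Generic illustration of the enriching-iterator pattern used above: wrap a
// source iterator and transform each element on demand, so no intermediate
// collection of transformed elements is ever materialized.
static <T, R> Iterator<R> enriching(final Iterator<T> source, final Function<T, R> enrich) {
    return new Iterator<R>() {
        @Override
        public boolean hasNext() {
            return source.hasNext();
        }

        @Override
        public R next() {
            return enrich.apply(source.next());
        }
    };
}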