Usage example of org.apache.nifi.processor.Relationship in the Apache NiFi project:
class StandardProcessorNode, method addConnection.
@Override
public void addConnection(final Connection connection) {
    // Registers the given connection with this node. Depending on whether this node is
    // the connection's destination, its source, or both (a self-loop), this updates the
    // incoming-connection snapshot and/or the relationship -> connections map.
    Objects.requireNonNull(connection, "connection cannot be null");

    if (!connection.getSource().equals(this) && !connection.getDestination().equals(this)) {
        // BUGFIX: message previously read "Cannot a connection" — missing the verb "add".
        throw new IllegalStateException("Cannot add a connection to a ProcessorNode for which the ProcessorNode is neither the Source nor the Destination");
    }

    List<Connection> updatedIncoming = null;
    if (connection.getDestination().equals(this)) {
        // Don't add the connection twice. This may occur for a self-loop because we are
        // told to add the connection once because we are the source and again because we
        // are the destination.
        final List<Connection> incomingConnections = incomingConnectionsRef.get();
        updatedIncoming = new ArrayList<>(incomingConnections);
        if (!updatedIncoming.contains(connection)) {
            updatedIncoming.add(connection);
        }
    }

    if (connection.getSource().equals(this)) {
        if (!destinations.containsKey(connection)) {
            for (final Relationship relationship : connection.getRelationships()) {
                final Relationship rel = getRelationship(relationship.getName());
                // computeIfAbsent replaces the manual get / null-check / put sequence.
                connections.computeIfAbsent(rel, r -> new HashSet<>()).add(connection);
            }
            // Record the destination once, hoisted out of the relationship loop (the
            // original re-put the identical entry on every iteration).
            destinations.put(connection, connection.getDestination());

            // A relationship that now carries a connection can no longer be auto-terminated.
            final Set<Relationship> autoTerminated = this.undefinedRelationshipsToTerminate.get();
            if (autoTerminated != null) {
                autoTerminated.removeAll(connection.getRelationships());
                this.undefinedRelationshipsToTerminate.set(autoTerminated);
            }
        }
    }

    if (updatedIncoming != null) {
        // Publish an immutable snapshot so concurrent readers never observe a
        // partially-built list.
        incomingConnectionsRef.set(Collections.unmodifiableList(updatedIncoming));
    }
}
Usage example of org.apache.nifi.processor.Relationship in the Apache NiFi project:
class StandardProcessorNode, method updateConnection.
@Override
public void updateConnection(final Connection connection) throws IllegalStateException {
    // Re-registers an existing connection after its relationships and/or destination
    // changed, rejecting the update if it would strip a running processor's only
    // connection for a defined, non-auto-terminated relationship.
    if (requireNonNull(connection).getSource().equals(this)) {
        // Determine which relationships this connection is currently registered under.
        final List<Relationship> existingRelationships = new ArrayList<>();
        for (final Map.Entry<Relationship, Set<Connection>> entry : connections.entrySet()) {
            if (entry.getValue().contains(connection)) {
                existingRelationships.add(entry.getKey());
            }
        }

        // BUGFIX: to detect REMOVED relationships we must iterate the relationships the
        // connection previously had and flag those absent from the updated set. The
        // original loop iterated the updated set against the previous one, which detected
        // *additions* instead and could never throw for an illegal removal (matching the
        // exception text below requires the removal direction).
        for (final Relationship rel : existingRelationships) {
            if (!connection.getRelationships().contains(rel)) {
                // Removal is illegal if this is the relationship's only connection and
                // the processor is running with the relationship neither auto-terminated
                // nor undefined.
                final Set<Connection> connectionsForRelationship = getConnections(rel);
                if (connectionsForRelationship != null && connectionsForRelationship.size() == 1 && this.isRunning() && !isAutoTerminated(rel) && getRelationships().contains(rel)) {
                    throw new IllegalStateException("Cannot remove relationship " + rel.getName() + " from Connection because doing so would invalidate Processor " + this + ", which is currently running");
                }
            }
        }

        // Remove the connection from every relationship bucket it currently occupies...
        for (final Set<Connection> list : connections.values()) {
            list.remove(connection);
        }
        // ...then re-add it under each relationship it now lists.
        for (final Relationship rel : connection.getRelationships()) {
            connections.computeIfAbsent(rel, r -> new HashSet<>()).add(connection);
        }

        // Track the (possibly changed) destination.
        destinations.put(connection, connection.getDestination());

        // Relationships now carrying this connection can no longer be auto-terminated.
        final Set<Relationship> autoTerminated = this.undefinedRelationshipsToTerminate.get();
        if (autoTerminated != null) {
            autoTerminated.removeAll(connection.getRelationships());
            this.undefinedRelationshipsToTerminate.set(autoTerminated);
        }
    }

    if (connection.getDestination().equals(this)) {
        // Refresh the incoming-connections snapshot: remove & re-add the connection,
        // then publish an immutable copy.
        final List<Connection> incomingConnections = incomingConnectionsRef.get();
        final List<Connection> updatedIncoming = new ArrayList<>(incomingConnections);
        updatedIncoming.remove(connection);
        updatedIncoming.add(connection);
        incomingConnectionsRef.set(Collections.unmodifiableList(updatedIncoming));
    }
}
Usage example of org.apache.nifi.processor.Relationship in the Apache NiFi project:
class RepositoryContext, method isRelationshipAvailabilitySatisfied.
/**
 * A Relationship is said to be Available if and only if all Connections for that Relationship are either self-loops or have non-full queues.
 *
 * @param requiredNumber minimum number of relationships that must have availability
 * @return Checks if at least <code>requiredNumber</code> of Relationships are "available." If so, returns <code>true</code>, otherwise returns <code>false</code>
 */
public boolean isRelationshipAvailabilitySatisfied(final int requiredNumber) {
    final Collection<Relationship> allRelationships = connectable.getRelationships();
    // At most this many relationships may be unavailable while still satisfying the
    // requirement.
    final int maxUnavailable = allRelationships.size() - requiredNumber;
    int unavailableCount = 0;

    for (final Relationship relationship : allRelationships) {
        final Collection<Connection> connectionsForRelationship = connectable.getConnections(relationship);
        if (connectionsForRelationship == null || connectionsForRelationship.isEmpty()) {
            // No connections at all: never counts against availability.
            continue;
        }

        boolean relationshipAvailable = true;
        for (final Connection connection : connectionsForRelationship) {
            // Self-loops are always considered available.
            final boolean selfLoop = connection.getSource() == connection.getDestination();
            if (!selfLoop && connection.getFlowFileQueue().isFull()) {
                relationshipAvailable = false;
                break;
            }
        }

        // Bail out as soon as too many relationships are known to be unavailable.
        if (!relationshipAvailable && ++unavailableCount > maxUnavailable) {
            return false;
        }
    }

    return true;
}
Usage example of org.apache.nifi.processor.Relationship in the Apache NiFi project:
class StandardProcessSession, method summarizeEvents.
// Builds a human-readable one-line summary of the session checkpoint: how many FlowFiles
// were created/modified/removed and, per Relationship, which were transferred. Uses a
// terse count-only form when the numbers are large and DEBUG logging is off; otherwise
// lists the individual FlowFile IDs. Returns "" when nothing happened.
private String summarizeEvents(final Checkpoint checkpoint) {
// relationship to flowfile ID's
final Map<Relationship, Set<String>> transferMap = new HashMap<>();
final Set<String> modifiedFlowFileIds = new HashSet<>();
int largestTransferSetSize = 0;
for (final Map.Entry<FlowFileRecord, StandardRepositoryRecord> entry : checkpoint.records.entrySet()) {
final FlowFile flowFile = entry.getKey();
final StandardRepositoryRecord record = entry.getValue();
final Relationship relationship = record.getTransferRelationship();
// SELF means the FlowFile stayed with this processor; it is not a transfer.
if (Relationship.SELF.equals(relationship)) {
continue;
}
Set<String> transferIds = transferMap.get(relationship);
if (transferIds == null) {
transferIds = new HashSet<>();
transferMap.put(relationship, transferIds);
}
transferIds.add(flowFile.getAttribute(CoreAttributes.UUID.key()));
// Track the largest per-relationship set so we can decide between terse and
// verbose output below.
largestTransferSetSize = Math.max(largestTransferSetSize, transferIds.size());
final ContentClaim workingClaim = record.getWorkingClaim();
// A working claim differing from the original claim indicates the content was
// modified; only count it if the record was actually transferred somewhere.
if (workingClaim != null && workingClaim != record.getOriginalClaim() && record.getTransferRelationship() != null) {
modifiedFlowFileIds.add(flowFile.getAttribute(CoreAttributes.UUID.key()));
}
}
final int numRemoved = checkpoint.removedFlowFiles.size();
final int numModified = modifiedFlowFileIds.size();
final int numCreated = checkpoint.createdFlowFiles.size();
final StringBuilder sb = new StringBuilder(512);
// Terse branch: counts only, when any category exceeds the threshold and DEBUG is off.
if (!LOG.isDebugEnabled() && (largestTransferSetSize > VERBOSE_LOG_THRESHOLD || numModified > VERBOSE_LOG_THRESHOLD || numCreated > VERBOSE_LOG_THRESHOLD || numRemoved > VERBOSE_LOG_THRESHOLD)) {
if (numCreated > 0) {
sb.append("created ").append(numCreated).append(" FlowFiles, ");
}
if (numModified > 0) {
sb.append("modified ").append(modifiedFlowFileIds.size()).append(" FlowFiles, ");
}
if (numRemoved > 0) {
sb.append("removed ").append(numRemoved).append(" FlowFiles, ");
}
for (final Map.Entry<Relationship, Set<String>> entry : transferMap.entrySet()) {
if (entry.getKey() != null) {
sb.append("Transferred ").append(entry.getValue().size()).append(" FlowFiles");
final Relationship relationship = entry.getKey();
// ANONYMOUS relationships (e.g. funnels/ports) have no meaningful name to print.
if (relationship != Relationship.ANONYMOUS) {
sb.append(" to '").append(relationship.getName()).append("', ");
}
}
}
} else {
// Verbose branch: list the individual FlowFiles/IDs per category.
if (numCreated > 0) {
sb.append("created FlowFiles ").append(checkpoint.createdFlowFiles).append(", ");
}
if (numModified > 0) {
sb.append("modified FlowFiles ").append(modifiedFlowFileIds).append(", ");
}
if (numRemoved > 0) {
sb.append("removed FlowFiles ").append(checkpoint.removedFlowFiles).append(", ");
}
for (final Map.Entry<Relationship, Set<String>> entry : transferMap.entrySet()) {
if (entry.getKey() != null) {
sb.append("Transferred FlowFiles ").append(entry.getValue());
final Relationship relationship = entry.getKey();
if (relationship != Relationship.ANONYMOUS) {
sb.append(" to '").append(relationship.getName()).append("', ");
}
}
}
}
// Trim the trailing ", " left by the last appended category.
if (sb.length() > 2 && sb.subSequence(sb.length() - 2, sb.length()).equals(", ")) {
sb.delete(sb.length() - 2, sb.length());
}
// don't add processing time if we did nothing, because we don't log the summary anyway
if (sb.length() > 0) {
final long processingNanos = checkpoint.processingTime;
sb.append(", Processing Time = ");
formatNanos(processingNanos, sb);
}
return sb.toString();
}
Usage example of org.apache.nifi.processor.Relationship in the Apache NiFi project:
class StandardFlowSerializer, method addConnection.
// Serializes the given Connection as a <connection> child element of parentElement,
// writing its identity, layout, endpoints, selected relationships, and queue settings.
private void addConnection(final Element parentElement, final Connection connection) {
    final Document doc = parentElement.getOwnerDocument();
    final Element element = doc.createElement("connection");
    // BUGFIX: the element was appended both here and again at the end of the method.
    // Appending a node that is already a child triggers a redundant DOM remove/re-add
    // with no change in final position, so append exactly once.
    parentElement.appendChild(element);

    addTextElement(element, "id", connection.getIdentifier());
    addTextElement(element, "versionedComponentId", connection.getVersionedComponentId());
    addTextElement(element, "name", connection.getName());

    // Layout: bend points and label placement.
    final Element bendPointsElement = doc.createElement("bendPoints");
    element.appendChild(bendPointsElement);
    for (final Position bendPoint : connection.getBendPoints()) {
        addPosition(bendPointsElement, bendPoint, "bendPoint");
    }
    addTextElement(element, "labelIndex", connection.getLabelIndex());
    addTextElement(element, "zIndex", connection.getZIndex());

    // Source endpoint. A remote output port belongs to its RemoteProcessGroup rather
    // than an ordinary ProcessGroup, so the group id is resolved differently.
    final String sourceId = connection.getSource().getIdentifier();
    final ConnectableType sourceType = connection.getSource().getConnectableType();
    final String sourceGroupId;
    if (sourceType == ConnectableType.REMOTE_OUTPUT_PORT) {
        sourceGroupId = ((RemoteGroupPort) connection.getSource()).getRemoteProcessGroup().getIdentifier();
    } else {
        sourceGroupId = connection.getSource().getProcessGroup().getIdentifier();
    }

    // Destination endpoint, with the symmetric remote-input-port special case.
    final ConnectableType destinationType = connection.getDestination().getConnectableType();
    final String destinationId = connection.getDestination().getIdentifier();
    final String destinationGroupId;
    if (destinationType == ConnectableType.REMOTE_INPUT_PORT) {
        destinationGroupId = ((RemoteGroupPort) connection.getDestination()).getRemoteProcessGroup().getIdentifier();
    } else {
        destinationGroupId = connection.getDestination().getProcessGroup().getIdentifier();
    }

    addTextElement(element, "sourceId", sourceId);
    addTextElement(element, "sourceGroupId", sourceGroupId);
    addTextElement(element, "sourceType", sourceType.toString());
    addTextElement(element, "destinationId", destinationId);
    addTextElement(element, "destinationGroupId", destinationGroupId);
    addTextElement(element, "destinationType", destinationType.toString());

    // One <relationship> element per selected relationship.
    for (final Relationship relationship : connection.getRelationships()) {
        addTextElement(element, "relationship", relationship.getName());
    }

    // Queue configuration: back-pressure thresholds, expiration, and prioritizers.
    addTextElement(element, "maxWorkQueueSize", connection.getFlowFileQueue().getBackPressureObjectThreshold());
    addTextElement(element, "maxWorkQueueDataSize", connection.getFlowFileQueue().getBackPressureDataSizeThreshold());
    addTextElement(element, "flowFileExpiration", connection.getFlowFileQueue().getFlowFileExpiration());
    for (final FlowFilePrioritizer comparator : connection.getFlowFileQueue().getPriorities()) {
        final String className = comparator.getClass().getCanonicalName();
        addTextElement(element, "queuePrioritizerClass", className);
    }
}
Aggregations