Use of org.apache.nifi.connectable.Connection in project nifi by apache.
In the class RepositoryContext, the method isRelationshipAvailabilitySatisfied:
/**
 * A Relationship is said to be Available if and only if all Connections for that Relationship are either self-loops or have non-full queues.
 *
 * @param requiredNumber minimum number of Relationships that must be available
 * @return <code>true</code> if at least <code>requiredNumber</code> of Relationships are "available"; otherwise <code>false</code>
 */
public boolean isRelationshipAvailabilitySatisfied(final int requiredNumber) {
    int unavailable = 0;
    final Collection<Relationship> allRelationships = connectable.getRelationships();
    final int numRelationships = allRelationships.size();
    // the maximum number of Relationships that can be unavailable and still return true.
    final int maxUnavailable = numRelationships - requiredNumber;
    for (final Relationship relationship : allRelationships) {
        final Collection<Connection> connections = connectable.getConnections(relationship);
        if (connections != null && !connections.isEmpty()) {
            boolean available = true;
            for (final Connection connection : connections) {
                // consider self-loops available
                if (connection.getSource() == connection.getDestination()) {
                    continue;
                }
                if (connection.getFlowFileQueue().isFull()) {
                    available = false;
                    break;
                }
            }
            if (!available) {
                unavailable++;
                if (unavailable > maxUnavailable) {
                    return false;
                }
            }
        }
    }
    return true;
}
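Taken on its own, the availability rule from the Javadoc can be restated for a single Relationship. The helper below is a hypothetical sketch (the class and method names are not part of NiFi) that relies only on the accessors already used above: Connectable.getConnections(Relationship), Connection.getSource()/getDestination(), and FlowFileQueue.isFull(). As in the method above, a Relationship with no Connections is treated as available.

import java.util.Collection;

import org.apache.nifi.connectable.Connectable;
import org.apache.nifi.connectable.Connection;
import org.apache.nifi.processor.Relationship;

// Hypothetical helper: mirrors the availability rule above for a single Relationship.
final class RelationshipAvailability {

    private RelationshipAvailability() {
    }

    static boolean isAvailable(final Connectable connectable, final Relationship relationship) {
        final Collection<Connection> connections = connectable.getConnections(relationship);
        if (connections == null || connections.isEmpty()) {
            // no outgoing connections: nothing can be full, so the relationship is available
            return true;
        }

        return connections.stream()
            .filter(connection -> connection.getSource() != connection.getDestination()) // ignore self-loops
            .noneMatch(connection -> connection.getFlowFileQueue().isFull());
    }
}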
Use of org.apache.nifi.connectable.Connection in project nifi by apache.
In the class StandardProcessSession, the method get:
@Override
public FlowFile get() {
    verifyTaskActive();
    final List<Connection> connections = context.getPollableConnections();
    final int numConnections = connections.size();
    for (int numAttempts = 0; numAttempts < numConnections; numAttempts++) {
        final Connection conn = connections.get(context.getNextIncomingConnectionIndex() % numConnections);
        final Set<FlowFileRecord> expired = new HashSet<>();
        final FlowFileRecord flowFile = conn.poll(expired);
        removeExpired(expired, conn);
        if (flowFile != null) {
            registerDequeuedRecord(flowFile, conn);
            return flowFile;
        }
    }
    return null;
}
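For context, the round-robin polling above is what runs when a processor calls ProcessSession.get() from onTrigger(). Below is a minimal, hypothetical processor showing that caller side; the class name and the "success" relationship are illustrative, not part of NiFi.

import java.util.Collections;
import java.util.Set;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;

// Hypothetical processor illustrating the caller side of ProcessSession.get().
public class PassThroughProcessor extends AbstractProcessor {

    static final Relationship REL_SUCCESS = new Relationship.Builder()
        .name("success")
        .description("FlowFiles are passed through unchanged")
        .build();

    @Override
    public Set<Relationship> getRelationships() {
        return Collections.singleton(REL_SUCCESS);
    }

    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
        // get() polls the pollable incoming connections in round-robin order, as shown above,
        // and returns null when every queue is empty.
        final FlowFile flowFile = session.get();
        if (flowFile == null) {
            return;
        }

        session.transfer(flowFile, REL_SUCCESS);
    }
}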
Use of org.apache.nifi.connectable.Connection in project nifi by apache.
In the class StandardProcessSession, the method migrate:
private void migrate(final StandardProcessSession newOwner, Collection<FlowFile> flowFiles) {
    // We don't call validateRecordState() here because we want to allow migration of FlowFiles that have already been marked as removed or transferred, etc.
    flowFiles = flowFiles.stream().map(this::getMostRecent).collect(Collectors.toList());
    for (final FlowFile flowFile : flowFiles) {
        if (openInputStreams.containsKey(flowFile)) {
            throw new IllegalStateException(flowFile + " cannot be migrated to a new Process Session because this session currently " + "has an open InputStream for the FlowFile, created by calling ProcessSession.read(FlowFile)");
        }
        if (openOutputStreams.containsKey(flowFile)) {
            throw new IllegalStateException(flowFile + " cannot be migrated to a new Process Session because this session currently " + "has an open OutputStream for the FlowFile, created by calling ProcessSession.write(FlowFile)");
        }
        if (readRecursionSet.containsKey(flowFile)) {
            throw new IllegalStateException(flowFile + " already in use for an active callback or InputStream created by ProcessSession.read(FlowFile) has not been closed");
        }
        if (writeRecursionSet.contains(flowFile)) {
            throw new IllegalStateException(flowFile + " already in use for an active callback or OutputStream created by ProcessSession.write(FlowFile) has not been closed");
        }
        final StandardRepositoryRecord record = records.get(flowFile);
        if (record == null) {
            throw new FlowFileHandlingException(flowFile + " is not known in this session (" + toString() + ")");
        }
    }

    // If we have a FORK event for one of the given FlowFiles, then all children must also be migrated. Otherwise, we
    // could have a case where we have FlowFile A transferred and eventually exiting the flow and later the 'newOwner'
    // ProcessSession is committed, claiming to have created FlowFiles from the parent, which is no longer even in
    // the flow. This would be very confusing when looking at the provenance for the FlowFile, so it is best to avoid this.
    final Set<String> flowFileIds = flowFiles.stream().map(ff -> ff.getAttribute(CoreAttributes.UUID.key())).collect(Collectors.toSet());
    for (final Map.Entry<FlowFile, ProvenanceEventBuilder> entry : forkEventBuilders.entrySet()) {
        final FlowFile eventFlowFile = entry.getKey();
        if (flowFiles.contains(eventFlowFile)) {
            final ProvenanceEventBuilder eventBuilder = entry.getValue();
            for (final String childId : eventBuilder.getChildFlowFileIds()) {
                if (!flowFileIds.contains(childId)) {
                    throw new IllegalStateException("Cannot migrate " + eventFlowFile + " to a new session because it was forked to create " + eventBuilder.getChildFlowFileIds().size() + " children and not all children are being migrated. If any FlowFile is forked, all of its children must also be migrated at the same time as the forked FlowFile");
                }
            }
        }
    }
    // If we have a FORK event where a FlowFile is a child of the FORK event, we want to create a FORK
    // event builder for the new owner of the FlowFile and remove the child from our fork event builder.
    for (final Map.Entry<FlowFile, ProvenanceEventBuilder> entry : forkEventBuilders.entrySet()) {
        final FlowFile eventFlowFile = entry.getKey();
        final ProvenanceEventBuilder eventBuilder = entry.getValue();
        final Set<String> childrenIds = new HashSet<>(eventBuilder.getChildFlowFileIds());
        ProvenanceEventBuilder copy = null;
        for (final FlowFile flowFile : flowFiles) {
            final String flowFileId = flowFile.getAttribute(CoreAttributes.UUID.key());
            if (childrenIds.contains(flowFileId)) {
                eventBuilder.removeChildFlowFile(flowFile);
                if (copy == null) {
                    copy = eventBuilder.copy();
                    copy.getChildFlowFileIds().clear();
                }
                copy.addChildFlowFile(flowFileId);
            }
        }
        if (copy != null) {
            newOwner.forkEventBuilders.put(eventFlowFile, copy);
        }
    }

    newOwner.processingStartTime = Math.min(newOwner.processingStartTime, processingStartTime);

    for (final FlowFile flowFile : flowFiles) {
        final FlowFileRecord flowFileRecord = (FlowFileRecord) flowFile;
        final StandardRepositoryRecord repoRecord = this.records.remove(flowFile);
        newOwner.records.put(flowFileRecord, repoRecord);

        // Adjust the counts for Connections for each FlowFile that was pulled from a Connection.
        // We do not have to worry about accounting for 'input counts' on connections because those
        // are incremented only during a checkpoint, and anything that's been checkpointed has
        // also been committed above.
        final FlowFileQueue inputQueue = repoRecord.getOriginalQueue();
        if (inputQueue != null) {
            final String connectionId = inputQueue.getIdentifier();
            incrementConnectionOutputCounts(connectionId, -1, -repoRecord.getOriginal().getSize());
            newOwner.incrementConnectionOutputCounts(connectionId, 1, repoRecord.getOriginal().getSize());
            unacknowledgedFlowFiles.get(inputQueue).remove(flowFile);
            newOwner.unacknowledgedFlowFiles.computeIfAbsent(inputQueue, queue -> new HashSet<>()).add(flowFileRecord);
            flowFilesIn--;
            contentSizeIn -= flowFile.getSize();
            newOwner.flowFilesIn++;
            newOwner.contentSizeIn += flowFile.getSize();
        }

        final String flowFileId = flowFile.getAttribute(CoreAttributes.UUID.key());
        if (removedFlowFiles.remove(flowFileId)) {
            newOwner.removedFlowFiles.add(flowFileId);
            newOwner.removedCount++;
            newOwner.removedBytes += flowFile.getSize();
            removedCount--;
            removedBytes -= flowFile.getSize();
        }
        if (createdFlowFiles.remove(flowFileId)) {
            newOwner.createdFlowFiles.add(flowFileId);
        }
        if (repoRecord.getTransferRelationship() != null) {
            flowFilesOut--;
            contentSizeOut -= flowFile.getSize();
            newOwner.flowFilesOut++;
            newOwner.contentSizeOut += flowFile.getSize();
        }

        final List<ProvenanceEventRecord> events = generatedProvenanceEvents.remove(flowFile);
        if (events != null) {
            newOwner.generatedProvenanceEvents.put(flowFile, events);
        }

        final ContentClaim currentClaim = repoRecord.getCurrentClaim();
        if (currentClaim != null) {
            final ByteCountingOutputStream appendableStream = appendableStreams.remove(currentClaim);
            if (appendableStream != null) {
                newOwner.appendableStreams.put(currentClaim, appendableStream);
            }
        }

        final Path toDelete = deleteOnCommit.remove(flowFile);
        if (toDelete != null) {
            newOwner.deleteOnCommit.put(flowFile, toDelete);
        }
    }

    provenanceReporter.migrate(newOwner.provenanceReporter, flowFileIds);
}
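This private method backs the public ProcessSession.migrate(ProcessSession, Collection<FlowFile>) API, which is typically used when FlowFiles need to outlive the session that pulled them, for example in bin-style processors. Below is a minimal sketch of that caller side; the Bin class and its fields are hypothetical and only illustrate the hand-off.

import java.util.List;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.ProcessSession;

// Hypothetical "bin" that owns a long-lived session; FlowFiles pulled by a short-lived
// trigger session are migrated into it so the bin can commit them later as one unit.
class Bin {
    private final ProcessSession binSession;

    Bin(final ProcessSession binSession) {
        this.binSession = binSession;
    }

    void offer(final ProcessSession triggerSession, final List<FlowFile> flowFiles) {
        // Ownership (repository records, connection counts, fork events, etc.) moves to
        // binSession, as implemented by StandardProcessSession.migrate() above.
        triggerSession.migrate(binSession, flowFiles);
    }

    void commit() {
        binSession.commit();
    }
}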
Use of org.apache.nifi.connectable.Connection in project nifi by apache.
In the class ExpireFlowFiles, the method expireFlowFiles:
private void expireFlowFiles(final Connectable connectable) {
    // determine if the incoming connections for this Connectable have Expiration configured.
    boolean expirationConfigured = false;
    for (final Connection incomingConn : connectable.getIncomingConnections()) {
        if (FormatUtils.getTimeDuration(incomingConn.getFlowFileQueue().getFlowFileExpiration(), TimeUnit.MILLISECONDS) > 0) {
            expirationConfigured = true;
            break;
        }
    }

    // If expiration is not configured... don't bother running through the FlowFileQueue
    if (!expirationConfigured) {
        return;
    }

    final StandardProcessSession session = createSession(connectable);
    session.expireFlowFiles();
    session.commit();
}
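The early-return guard works because a queue's FlowFile expiration is stored as a duration string, and a value such as "0 sec" (meaning "never expire") parses to zero milliseconds. Below is a standalone sketch of that check, assuming only the accessors used above; the helper class name is illustrative, not NiFi code.

import java.util.concurrent.TimeUnit;

import org.apache.nifi.connectable.Connectable;
import org.apache.nifi.connectable.Connection;
import org.apache.nifi.util.FormatUtils;

// Hypothetical helper: true if any incoming connection has a non-zero FlowFile expiration,
// i.e. the same condition used above to decide whether expireFlowFiles() needs a session at all.
final class ExpirationCheck {

    private ExpirationCheck() {
    }

    static boolean hasExpirationConfigured(final Connectable connectable) {
        for (final Connection incoming : connectable.getIncomingConnections()) {
            final String expiration = incoming.getFlowFileQueue().getFlowFileExpiration();
            if (FormatUtils.getTimeDuration(expiration, TimeUnit.MILLISECONDS) > 0) {
                return true;
            }
        }
        return false;
    }
}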
Use of org.apache.nifi.connectable.Connection in project nifi by apache.
In the class FlowController, the method initializeFlow:
public void initializeFlow() throws IOException {
    writeLock.lock();
    try {
        // get all connections/queues and recover from swap files.
        final List<Connection> connections = getGroup(getRootGroupId()).findAllConnections();
        long maxIdFromSwapFiles = -1L;
        if (flowFileRepository.isVolatile()) {
            for (final Connection connection : connections) {
                final FlowFileQueue queue = connection.getFlowFileQueue();
                queue.purgeSwapFiles();
            }
        } else {
            for (final Connection connection : connections) {
                final FlowFileQueue queue = connection.getFlowFileQueue();
                final SwapSummary swapSummary = queue.recoverSwappedFlowFiles();
                if (swapSummary != null) {
                    final Long maxFlowFileId = swapSummary.getMaxFlowFileId();
                    if (maxFlowFileId != null && maxFlowFileId > maxIdFromSwapFiles) {
                        maxIdFromSwapFiles = maxFlowFileId;
                    }
                    for (final ResourceClaim resourceClaim : swapSummary.getResourceClaims()) {
                        resourceClaimManager.incrementClaimantCount(resourceClaim);
                    }
                }
            }
        }

        flowFileRepository.loadFlowFiles(this, maxIdFromSwapFiles + 1);

        // Begin expiring FlowFiles that are old
        final RepositoryContextFactory contextFactory = new RepositoryContextFactory(contentRepository, flowFileRepository, flowFileEventRepository, counterRepositoryRef.get(), provenanceRepository);
        processScheduler.scheduleFrameworkTask(new ExpireFlowFiles(this, contextFactory), "Expire FlowFiles", 30L, 30L, TimeUnit.SECONDS);

        // now that we've loaded the FlowFiles, this has restored our ContentClaims' states, so we can tell the
        // ContentRepository to purge superfluous files
        contentRepository.cleanup();

        for (final RemoteSiteListener listener : externalSiteListeners) {
            listener.start();
        }

        notifyComponentsConfigurationRestored();

        timerDrivenEngineRef.get().scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                try {
                    updateRemoteProcessGroups();
                } catch (final Throwable t) {
                    LOG.warn("Unable to update Remote Process Groups due to " + t);
                    if (LOG.isDebugEnabled()) {
                        LOG.warn("", t);
                    }
                }
            }
        }, 0L, 30L, TimeUnit.SECONDS);

        timerDrivenEngineRef.get().scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                final ProcessGroup rootGroup = getRootGroup();
                final List<ProcessGroup> allGroups = rootGroup.findAllProcessGroups();
                allGroups.add(rootGroup);
                for (final ProcessGroup group : allGroups) {
                    try {
                        group.synchronizeWithFlowRegistry(flowRegistryClient);
                    } catch (final Exception e) {
                        LOG.error("Failed to synchronize {} with Flow Registry", group, e);
                    }
                }
            }
        }, 5, 60, TimeUnit.SECONDS);

        initialized.set(true);
    } finally {
        writeLock.unlock();
    }
}
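Both background tasks at the end of the method follow the same pattern: schedule with a fixed delay and catch everything inside run(), so a single failed iteration cannot cancel the schedule (an uncaught exception in a scheduled task suppresses all future executions). A self-contained sketch of that pattern using a plain ScheduledExecutorService; the task body is a placeholder.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class BackgroundTaskExample {

    private static final Logger LOG = LoggerFactory.getLogger(BackgroundTaskExample.class);

    public static void main(final String[] args) {
        final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();

        // Fixed delay: the next run starts 30 seconds after the previous run finishes.
        executor.scheduleWithFixedDelay(() -> {
            try {
                // placeholder for the periodic work, e.g. refreshing remote group state
                LOG.debug("Periodic task ran");
            } catch (final Throwable t) {
                // Swallow and log: letting the Throwable escape would stop the schedule.
                LOG.warn("Periodic task failed", t);
            }
        }, 0L, 30L, TimeUnit.SECONDS);
    }
}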