Use of org.apache.nifi.connectable.Connectable in project nifi by apache.
Class StandardProcessGroup, method isDisconnected:
private boolean isDisconnected(final Snippet snippet) {
    final Set<Connectable> connectables = getAllConnectables(snippet);

    for (final String id : getKeys(snippet.getRemoteProcessGroups())) {
        final RemoteProcessGroup remoteGroup = getRemoteProcessGroup(id);
        connectables.addAll(remoteGroup.getInputPorts());
        connectables.addAll(remoteGroup.getOutputPorts());
    }

    final Set<String> connectionIds = snippet.getConnections().keySet();
    for (final Connectable connectable : connectables) {
        for (final Connection conn : connectable.getIncomingConnections()) {
            if (!connectionIds.contains(conn.getIdentifier())) {
                return false;
            }
        }

        for (final Connection conn : connectable.getConnections()) {
            if (!connectionIds.contains(conn.getIdentifier())) {
                return false;
            }
        }
    }

    final Set<Connectable> recursiveConnectables = new HashSet<>(connectables);
    for (final String id : snippet.getProcessGroups().keySet()) {
        final ProcessGroup childGroup = getProcessGroup(id);
        recursiveConnectables.addAll(findAllConnectables(childGroup, true));
    }

    for (final String id : connectionIds) {
        final Connection connection = getConnection(id);
        if (!recursiveConnectables.contains(connection.getSource()) || !recursiveConnectables.contains(connection.getDestination())) {
            return false;
        }
    }

    return true;
}
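In effect, isDisconnected verifies that the snippet forms a closed subgraph: every connection touching a connectable in the snippet must itself belong to the snippet, and every connection in the snippet must have both its source and its destination among the snippet's (recursively collected) connectables. The same closure check can be sketched with hypothetical Node and Edge stand-ins rather than the real NiFi interfaces; everything below is illustrative only.

import java.util.*;

final class ClosureCheck {

    // Hypothetical stand-ins for Connectable and Connection; not the real NiFi types.
    static final class Edge {
        final String id;
        final String sourceId;
        final String destinationId;

        Edge(final String id, final String sourceId, final String destinationId) {
            this.id = id;
            this.sourceId = sourceId;
            this.destinationId = destinationId;
        }
    }

    static final class Node {
        final String id;
        final List<Edge> incoming = new ArrayList<>();
        final List<Edge> outgoing = new ArrayList<>();

        Node(final String id) {
            this.id = id;
        }
    }

    // True only if the node set and edge set reference nothing outside themselves.
    static boolean isSelfContained(final Collection<Node> nodes, final Collection<Edge> edges) {
        final Set<String> edgeIds = new HashSet<>();
        for (final Edge edge : edges) {
            edgeIds.add(edge.id);
        }
        final Set<String> nodeIds = new HashSet<>();
        for (final Node node : nodes) {
            nodeIds.add(node.id);
        }

        // Every edge touching an included node must itself be included.
        for (final Node node : nodes) {
            for (final Edge edge : node.incoming) {
                if (!edgeIds.contains(edge.id)) {
                    return false;
                }
            }
            for (final Edge edge : node.outgoing) {
                if (!edgeIds.contains(edge.id)) {
                    return false;
                }
            }
        }

        // Every included edge must start and end at included nodes.
        for (final Edge edge : edges) {
            if (!nodeIds.contains(edge.sourceId) || !nodeIds.contains(edge.destinationId)) {
                return false;
            }
        }
        return true;
    }
}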
Use of org.apache.nifi.connectable.Connectable in project nifi by apache.
Class StandardProcessGroup, method findLocalConnectable:
private static Connectable findLocalConnectable(final String identifier, final ProcessGroup group) {
    final ProcessorNode procNode = group.getProcessor(identifier);
    if (procNode != null) {
        return procNode;
    }

    final Port inPort = group.getInputPort(identifier);
    if (inPort != null) {
        return inPort;
    }

    final Port outPort = group.getOutputPort(identifier);
    if (outPort != null) {
        return outPort;
    }

    final Funnel funnel = group.getFunnel(identifier);
    if (funnel != null) {
        return funnel;
    }

    for (final RemoteProcessGroup remoteProcessGroup : group.getRemoteProcessGroups()) {
        final RemoteGroupPort remoteInputPort = remoteProcessGroup.getInputPort(identifier);
        if (remoteInputPort != null) {
            return remoteInputPort;
        }

        final RemoteGroupPort remoteOutputPort = remoteProcessGroup.getOutputPort(identifier);
        if (remoteOutputPort != null) {
            return remoteOutputPort;
        }
    }

    for (final ProcessGroup childGroup : group.getProcessGroups()) {
        final Connectable childGroupConnectable = findLocalConnectable(identifier, childGroup);
        if (childGroupConnectable != null) {
            return childGroupConnectable;
        }
    }

    return null;
}
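The lookup is an ordered, depth-first search: local processors, ports, and funnels are checked first, then the remote group ports, and only then does the method recurse into child groups, returning the first match it finds. A minimal sketch of the same recursive lookup pattern follows, using a hypothetical Group container rather than the real ProcessGroup API.

import java.util.*;

final class RecursiveLookup {

    // Hypothetical container holding local components and nested child groups.
    static final class Group {
        final Map<String, Object> localComponents = new HashMap<>();
        final List<Group> children = new ArrayList<>();
    }

    // Returns the first component with the given id, searching this group before descending.
    static Object findComponent(final String id, final Group group) {
        final Object local = group.localComponents.get(id);
        if (local != null) {
            return local;
        }

        for (final Group child : group.children) {
            final Object found = findComponent(id, child);
            if (found != null) {
                return found;
            }
        }
        return null;
    }
}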
Use of org.apache.nifi.connectable.Connectable in project nifi by apache.
Class StandardProcessSession, method removeExpired:
private void removeExpired(final Set<FlowFileRecord> flowFiles, final Connection connection) {
    if (flowFiles.isEmpty()) {
        return;
    }

    LOG.info("{} {} FlowFiles have expired and will be removed", new Object[] {this, flowFiles.size()});
    final List<RepositoryRecord> expiredRecords = new ArrayList<>(flowFiles.size());

    final Connectable connectable = context.getConnectable();
    final String processorType = connectable.getComponentType();
    final StandardProvenanceReporter expiredReporter = new StandardProvenanceReporter(this, connectable.getIdentifier(), processorType, context.getProvenanceRepository(), this);

    final Map<String, FlowFileRecord> recordIdMap = new HashMap<>();
    for (final FlowFileRecord flowFile : flowFiles) {
        recordIdMap.put(flowFile.getAttribute(CoreAttributes.UUID.key()), flowFile);

        final StandardRepositoryRecord record = new StandardRepositoryRecord(connection.getFlowFileQueue(), flowFile);
        record.markForDelete();
        expiredRecords.add(record);
        expiredReporter.expire(flowFile, "Expiration Threshold = " + connection.getFlowFileQueue().getFlowFileExpiration());
        decrementClaimCount(flowFile.getContentClaim());

        final long flowFileLife = System.currentTimeMillis() - flowFile.getEntryDate();
        final Object terminator = connectable instanceof ProcessorNode ? ((ProcessorNode) connectable).getProcessor() : connectable;
        LOG.info("{} terminated by {} due to FlowFile expiration; life of FlowFile = {} ms", new Object[] {flowFile, terminator, flowFileLife});
    }

    try {
        final Iterable<ProvenanceEventRecord> iterable = new Iterable<ProvenanceEventRecord>() {
            @Override
            public Iterator<ProvenanceEventRecord> iterator() {
                final Iterator<ProvenanceEventRecord> expiredEventIterator = expiredReporter.getEvents().iterator();
                final Iterator<ProvenanceEventRecord> enrichingIterator = new Iterator<ProvenanceEventRecord>() {
                    @Override
                    public boolean hasNext() {
                        return expiredEventIterator.hasNext();
                    }

                    @Override
                    public ProvenanceEventRecord next() {
                        final ProvenanceEventRecord event = expiredEventIterator.next();
                        final StandardProvenanceEventRecord.Builder enriched = new StandardProvenanceEventRecord.Builder().fromEvent(event);
                        final FlowFileRecord record = recordIdMap.get(event.getFlowFileUuid());
                        if (record == null) {
                            return null;
                        }

                        final ContentClaim claim = record.getContentClaim();
                        if (claim != null) {
                            final ResourceClaim resourceClaim = claim.getResourceClaim();
                            enriched.setCurrentContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(), record.getContentClaimOffset() + claim.getOffset(), record.getSize());
                            enriched.setPreviousContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(), record.getContentClaimOffset() + claim.getOffset(), record.getSize());
                        }

                        enriched.setAttributes(record.getAttributes(), Collections.<String, String>emptyMap());
                        return enriched.build();
                    }

                    @Override
                    public void remove() {
                        throw new UnsupportedOperationException();
                    }
                };

                return enrichingIterator;
            }
        };

        context.getProvenanceRepository().registerEvents(iterable);
        context.getFlowFileRepository().updateRepository(expiredRecords);
    } catch (final IOException e) {
        LOG.error("Failed to update FlowFile Repository to record expired records due to {}", e);
    }
}
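The notable detail here is that the provenance events are never copied into a second list: an anonymous Iterable wraps the reporter's events and enriches each one with content-claim and attribute information only as registerEvents iterates over it. That lazy-mapping-iterator pattern can be sketched generically as below; the types and the Function-based enrichment are illustrative, not NiFi's actual classes.

import java.util.*;
import java.util.function.Function;

final class EnrichingIterable<T, R> implements Iterable<R> {
    private final Iterable<T> source;
    private final Function<T, R> enrich;

    EnrichingIterable(final Iterable<T> source, final Function<T, R> enrich) {
        this.source = source;
        this.enrich = enrich;
    }

    @Override
    public Iterator<R> iterator() {
        final Iterator<T> delegate = source.iterator();
        return new Iterator<R>() {
            @Override
            public boolean hasNext() {
                return delegate.hasNext();
            }

            @Override
            public R next() {
                // Enrichment happens lazily, one element at a time, as the consumer iterates.
                return enrich.apply(delegate.next());
            }
        };
    }
}

A consumer such as a repository's registerEvents can then walk the iterable once and never needs to hold more than one enriched element in memory at a time.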
Use of org.apache.nifi.connectable.Connectable in project nifi by apache.
Class StandardProcessSession, method registerForkEvent:
private void registerForkEvent(final FlowFile parent, final FlowFile child) {
    ProvenanceEventBuilder eventBuilder = forkEventBuilders.get(parent);
    if (eventBuilder == null) {
        eventBuilder = context.getProvenanceRepository().eventBuilder();
        eventBuilder.setEventType(ProvenanceEventType.FORK);
        eventBuilder.setFlowFileEntryDate(parent.getEntryDate());
        eventBuilder.setLineageStartDate(parent.getLineageStartDate());
        eventBuilder.setFlowFileUUID(parent.getAttribute(CoreAttributes.UUID.key()));
        eventBuilder.setComponentId(context.getConnectable().getIdentifier());

        final Connectable connectable = context.getConnectable();
        final String processorType = connectable.getComponentType();
        eventBuilder.setComponentType(processorType);
        eventBuilder.addParentFlowFile(parent);

        updateEventContentClaims(eventBuilder, parent, records.get(parent));
        forkEventBuilders.put(parent, eventBuilder);
    }

    eventBuilder.addChildFlowFile(child);
}
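registerForkEvent builds at most one FORK event per parent FlowFile: the builder is created and cached in forkEventBuilders when the first child arrives, and every subsequent child is simply appended to the existing builder. The same create-on-first-use accumulation can be sketched with plain collections, assuming hypothetical String identifiers in place of FlowFiles and provenance event builders.

import java.util.*;

final class ForkAccumulator {

    // One entry per parent; the child list plays the role of the cached event builder.
    private final Map<String, List<String>> forkChildren = new HashMap<>();

    void registerFork(final String parentId, final String childId) {
        // Create the per-parent accumulator only when the first child is registered.
        forkChildren.computeIfAbsent(parentId, id -> new ArrayList<>()).add(childId);
    }

    Map<String, List<String>> getForks() {
        return Collections.unmodifiableMap(forkChildren);
    }
}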
Use of org.apache.nifi.connectable.Connectable in project nifi by apache.
Class StandardProcessSession, method rollback:
private void rollback(final boolean penalize, final boolean rollbackCheckpoint) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("{} session rollback called, FlowFile records are {} {}", this, loggableFlowfileInfo(), new Throwable("Stack Trace on rollback"));
    }

    deleteOnCommit.clear();

    closeStreams(openInputStreams, "rolled back", "input");
    closeStreams(openOutputStreams, "rolled back", "output");

    try {
        claimCache.reset();
    } catch (IOException e1) {
        LOG.warn("{} Attempted to close Output Stream for {} due to session rollback but close failed", this, this.connectableDescription, e1);
    }

    final Set<StandardRepositoryRecord> recordsToHandle = new HashSet<>();
    recordsToHandle.addAll(records.values());
    if (rollbackCheckpoint) {
        final Checkpoint existingCheckpoint = this.checkpoint;
        this.checkpoint = null;
        if (existingCheckpoint != null && existingCheckpoint.records != null) {
            recordsToHandle.addAll(existingCheckpoint.records.values());
        }
    }

    resetWriteClaims();
    resetReadClaim();

    if (recordsToHandle.isEmpty()) {
        LOG.trace("{} was rolled back, but no events were performed by this ProcessSession", this);
        acknowledgeRecords();
        resetState();
        return;
    }

    for (final StandardRepositoryRecord record : recordsToHandle) {
        // remove the working claims if they are different than the originals.
        removeTemporaryClaim(record);
    }

    final Set<RepositoryRecord> abortedRecords = new HashSet<>();
    final Set<StandardRepositoryRecord> transferRecords = new HashSet<>();
    for (final StandardRepositoryRecord record : recordsToHandle) {
        if (record.isMarkedForAbort()) {
            decrementClaimCount(record.getWorkingClaim());
            if (record.getCurrentClaim() != null && !record.getCurrentClaim().equals(record.getWorkingClaim())) {
                // if working & original claim are same, don't remove twice; we only want to remove the original
                // if it's different from the working. Otherwise, we remove two claimant counts. This causes
                // an issue if we only updated the flowfile attributes.
                decrementClaimCount(record.getCurrentClaim());
            }
            abortedRecords.add(record);
        } else {
            transferRecords.add(record);
        }
    }

    // Put the FlowFiles that are not marked for abort back to their original queues
    for (final StandardRepositoryRecord record : transferRecords) {
        if (record.getOriginal() != null) {
            final FlowFileQueue originalQueue = record.getOriginalQueue();
            if (originalQueue != null) {
                if (penalize) {
                    final long expirationEpochMillis = System.currentTimeMillis() + context.getConnectable().getPenalizationPeriod(TimeUnit.MILLISECONDS);
                    final FlowFileRecord newFile = new StandardFlowFileRecord.Builder().fromFlowFile(record.getOriginal()).penaltyExpirationTime(expirationEpochMillis).build();
                    originalQueue.put(newFile);
                } else {
                    originalQueue.put(record.getOriginal());
                }
            }
        }
    }

    if (!abortedRecords.isEmpty()) {
        try {
            context.getFlowFileRepository().updateRepository(abortedRecords);
        } catch (final IOException ioe) {
            LOG.error("Unable to update FlowFile repository for aborted records due to {}", ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.error("", ioe);
            }
        }
    }

    // If we have transient claims that need to be cleaned up, do so.
    final List<ContentClaim> transientClaims = recordsToHandle.stream().flatMap(record -> record.getTransientClaims().stream()).collect(Collectors.toList());
    if (!transientClaims.isEmpty()) {
        final RepositoryRecord repoRecord = new TransientClaimRepositoryRecord(transientClaims);
        try {
            context.getFlowFileRepository().updateRepository(Collections.singletonList(repoRecord));
        } catch (final IOException ioe) {
            LOG.error("Unable to update FlowFile repository to cleanup transient claims due to {}", ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.error("", ioe);
            }
        }
    }

    final Connectable connectable = context.getConnectable();
    final StandardFlowFileEvent flowFileEvent = new StandardFlowFileEvent(connectable.getIdentifier());
    flowFileEvent.setBytesRead(bytesRead);
    flowFileEvent.setBytesWritten(bytesWritten);
    flowFileEvent.setCounters(immediateCounters);

    // update event repository
    try {
        context.getFlowFileEventRepository().updateRepository(flowFileEvent);
    } catch (final Exception e) {
        LOG.error("Failed to update FlowFileEvent Repository due to " + e);
        if (LOG.isDebugEnabled()) {
            LOG.error("", e);
        }
    }

    acknowledgeRecords();
    resetState();
}
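The heart of the rollback is a partition-and-requeue step: records marked for abort have their claim counts released and are sent to the FlowFile repository for deletion, while all other records are put back on their original queues, optionally stamped with a penalty expiration time derived from the connectable's penalization period. A minimal sketch of that partition step follows, using hypothetical Record and Queue types in place of NiFi's StandardRepositoryRecord and FlowFileQueue.

import java.util.*;

final class RollbackSketch {

    // Hypothetical stand-in for a repository record with its source queue.
    static final class Record {
        boolean markedForAbort;
        Queue<Record> originalQueue;
        long penaltyExpirationMillis;
    }

    static void rollback(final Collection<Record> records, final boolean penalize, final long penaltyPeriodMillis) {
        final List<Record> aborted = new ArrayList<>();
        final List<Record> toRequeue = new ArrayList<>();

        // Partition the records exactly once.
        for (final Record record : records) {
            if (record.markedForAbort) {
                aborted.add(record);
            } else {
                toRequeue.add(record);
            }
        }

        // Requeue non-aborted records, optionally penalized until a future timestamp.
        for (final Record record : toRequeue) {
            if (record.originalQueue != null) {
                if (penalize) {
                    record.penaltyExpirationMillis = System.currentTimeMillis() + penaltyPeriodMillis;
                }
                record.originalQueue.offer(record);
            }
        }

        // In the real method, the aborted records would now be handed to the
        // FlowFile repository so their deletion is made durable.
    }
}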