use of org.apache.nifi.connectable.Connectable in project nifi by apache.
the class StandardProcessorNode, method getDestinations.
public Set<Connectable> getDestinations(final Relationship relationship) {
    final Set<Connectable> destinationSet = new HashSet<>();
    final Set<Connection> relationshipConnections = connections.get(relationship);
    if (relationshipConnections != null) {
        for (final Connection connection : relationshipConnections) {
            destinationSet.add(destinations.get(connection));
        }
    }
    return destinationSet;
}
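For context, a hedged usage sketch: given a StandardProcessorNode in scope, a caller could enumerate the components downstream of one relationship. The processorNode variable and the "success" relationship name are illustrative, not part of the snippet above. Relationship equality in NiFi is name-based, so a freshly built Relationship with a matching name resolves the same map entry.

// Hypothetical usage sketch: list the components reachable via a processor's
// "success" relationship. processorNode is an assumed in-scope StandardProcessorNode.
final Relationship success = new Relationship.Builder().name("success").build();
for (final Connectable destination : processorNode.getDestinations(success)) {
    System.out.println("success routes to " + destination.getIdentifier());
}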
use of org.apache.nifi.connectable.Connectable in project nifi by apache.
the class StandardProcessSession, method commit.
@SuppressWarnings({ "unchecked", "rawtypes" })
private void commit(final Checkpoint checkpoint) {
    try {
        final long commitStartNanos = System.nanoTime();
        resetReadClaim();
        try {
            claimCache.flush();
        } finally {
            claimCache.reset();
        }
        final long updateProvenanceStart = System.nanoTime();
        updateProvenanceRepo(checkpoint);
        final long claimRemovalStart = System.nanoTime();
        final long updateProvenanceNanos = claimRemovalStart - updateProvenanceStart;

        /**
         * Figure out which content claims can be released. At this point,
         * we will decrement the Claimant Count for the claims via the
         * Content Repository. We do not actually destroy the content
         * because otherwise, we could remove the Original Claim and
         * crash/restart before the FlowFileRepository is updated. This would
         * result in the FlowFile being restored such that the content claim
         * points to the Original Claim -- which has already been removed!
         */
        for (final Map.Entry<FlowFileRecord, StandardRepositoryRecord> entry : checkpoint.records.entrySet()) {
            final FlowFile flowFile = entry.getKey();
            final StandardRepositoryRecord record = entry.getValue();
            if (record.isMarkedForDelete()) {
                // if the working claim is not the same as the original claim, we can immediately destroy the working claim
                // because it was created in this session and is to be deleted. We don't need to wait for the FlowFile Repo to sync.
                decrementClaimCount(record.getWorkingClaim());
                if (record.getOriginalClaim() != null && !record.getOriginalClaim().equals(record.getWorkingClaim())) {
                    // if the working & original claims are the same, don't remove twice; we only want to remove the original
                    // claim if it's different from the working claim. Otherwise, we would decrement two claimant counts for
                    // a single reference, which causes an issue if we only updated the FlowFile's attributes.
                    decrementClaimCount(record.getOriginalClaim());
                }
                final long flowFileLife = System.currentTimeMillis() - flowFile.getEntryDate();
                final Connectable connectable = context.getConnectable();
                final Object terminator = connectable instanceof ProcessorNode ? ((ProcessorNode) connectable).getProcessor() : connectable;
                LOG.info("{} terminated by {}; life of FlowFile = {} ms", new Object[] { flowFile, terminator, flowFileLife });
            } else if (record.isWorking() && record.getWorkingClaim() != record.getOriginalClaim()) {
                // records which have been updated -- remove the original claim, if one exists
                decrementClaimCount(record.getOriginalClaim());
            }
        }
        final long claimRemovalFinishNanos = System.nanoTime();
        final long claimRemovalNanos = claimRemovalFinishNanos - claimRemovalStart;

        // Update the FlowFile Repository
        try {
            final Collection<StandardRepositoryRecord> repoRecords = checkpoint.records.values();
            context.getFlowFileRepository().updateRepository((Collection) repoRecords);
        } catch (final IOException ioe) {
            // if we fail to commit the session, we need to roll back
            // the checkpoints as well because none of the checkpoints
            // were ever committed.
            rollback(false, true);
            throw new ProcessException("FlowFile Repository failed to update", ioe);
        }
        final long flowFileRepoUpdateFinishNanos = System.nanoTime();
        final long flowFileRepoUpdateNanos = flowFileRepoUpdateFinishNanos - claimRemovalFinishNanos;

        updateEventRepository(checkpoint);
        final long updateEventRepositoryFinishNanos = System.nanoTime();
        final long updateEventRepositoryNanos = updateEventRepositoryFinishNanos - flowFileRepoUpdateFinishNanos;

        // Transfer the FlowFiles to the connections' queues.
        final Map<FlowFileQueue, Collection<FlowFileRecord>> recordMap = new HashMap<>();
        for (final StandardRepositoryRecord record : checkpoint.records.values()) {
            if (record.isMarkedForAbort() || record.isMarkedForDelete()) {
                // these don't need to be transferred
                continue;
            }
            // if the record has no current FlowFile, there is nothing to enqueue;
            // it will be cleaned up when the records map is cleared.
            if (record.getCurrent() != null) {
                Collection<FlowFileRecord> collection = recordMap.get(record.getDestination());
                if (collection == null) {
                    collection = new ArrayList<>();
                    recordMap.put(record.getDestination(), collection);
                }
                collection.add(record.getCurrent());
            }
        }
        for (final Map.Entry<FlowFileQueue, Collection<FlowFileRecord>> entry : recordMap.entrySet()) {
            entry.getKey().putAll(entry.getValue());
        }
        final long enqueueFlowFileFinishNanos = System.nanoTime();
        final long enqueueFlowFileNanos = enqueueFlowFileFinishNanos - updateEventRepositoryFinishNanos;

        // Delete any files from disk that need to be removed.
        for (final Path path : checkpoint.deleteOnCommit.values()) {
            try {
                Files.deleteIfExists(path);
            } catch (final IOException e) {
                throw new FlowFileAccessException("Unable to delete " + path.toFile().getAbsolutePath(), e);
            }
        }
        checkpoint.deleteOnCommit.clear();

        if (LOG.isInfoEnabled()) {
            final String sessionSummary = summarizeEvents(checkpoint);
            if (!sessionSummary.isEmpty()) {
                LOG.info("{} for {}, committed the following events: {}", new Object[] { this, connectableDescription, sessionSummary });
            }
        }
        for (final Map.Entry<String, Long> entry : checkpoint.countersOnCommit.entrySet()) {
            context.adjustCounter(entry.getKey(), entry.getValue());
        }
        acknowledgeRecords();
        resetState();

        if (LOG.isDebugEnabled()) {
            final StringBuilder timingInfo = new StringBuilder();
            timingInfo.append("Session commit for ").append(this).append(" [").append(connectableDescription).append("]").append(" took ");
            final long commitNanos = System.nanoTime() - commitStartNanos;
            formatNanos(commitNanos, timingInfo);
            timingInfo.append("; FlowFile Repository Update took ");
            formatNanos(flowFileRepoUpdateNanos, timingInfo);
            timingInfo.append("; Claim Removal took ");
            formatNanos(claimRemovalNanos, timingInfo);
            timingInfo.append("; FlowFile Event Update took ");
            formatNanos(updateEventRepositoryNanos, timingInfo);
            timingInfo.append("; Enqueuing FlowFiles took ");
            formatNanos(enqueueFlowFileNanos, timingInfo);
            timingInfo.append("; Updating Provenance Event Repository took ");
            formatNanos(updateProvenanceNanos, timingInfo);
            LOG.debug(timingInfo.toString());
        }
    } catch (final Exception e) {
        try {
            // if we fail to commit the session, we need to roll back
            // the checkpoints as well because none of the checkpoints
            // were ever committed.
            rollback(false, true);
        } catch (final Exception e1) {
            e.addSuppressed(e1);
        }
        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        } else {
            throw new ProcessException(e);
        }
    }
}
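One side note on the destination grouping above: on Java 8 and later, the null-check-then-put idiom over recordMap can be collapsed with Map.computeIfAbsent. A behavior-equivalent sketch, not a change to the NiFi source:

// Equivalent grouping using computeIfAbsent (illustrative only):
recordMap.computeIfAbsent(record.getDestination(), k -> new ArrayList<>())
        .add(record.getCurrent());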
use of org.apache.nifi.connectable.Connectable in project nifi by apache.
the class TimerDrivenSchedulingAgent, method createTrigger.
private Runnable createTrigger(final ConnectableTask connectableTask, final LifecycleState scheduleState, final AtomicReference<ScheduledFuture<?>> futureRef) {
    final Connectable connectable = connectableTask.getConnectable();
    final Runnable yieldDetectionRunnable = new Runnable() {
        @Override
        public void run() {
            // Call the task. It returns a result indicating whether or not we should yield,
            // based on a lack of work to do for the component.
            final InvocationResult invocationResult = connectableTask.invoke();
            if (invocationResult.isYield()) {
                logger.debug("Yielding {} due to {}", connectable, invocationResult.getYieldExplanation());
            }

            // If the component is yielded, cancel its future and re-submit it to run again
            // after the yield has expired.
            final long newYieldExpiration = connectable.getYieldExpiration();
            final long now = System.currentTimeMillis();
            if (newYieldExpiration > now) {
                final long yieldMillis = newYieldExpiration - now;
                final long scheduleMillis = connectable.getSchedulingPeriod(TimeUnit.MILLISECONDS);
                final ScheduledFuture<?> scheduledFuture = futureRef.get();
                if (scheduledFuture == null) {
                    return;
                }
                // If we are able to cancel the current future, submit a replacement and update the future reference
                // so that we can do this again the next time that the component is yielded.
                if (scheduledFuture.cancel(false)) {
                    final long yieldNanos = Math.max(TimeUnit.MILLISECONDS.toNanos(scheduleMillis), TimeUnit.MILLISECONDS.toNanos(yieldMillis));
                    synchronized (scheduleState) {
                        if (scheduleState.isScheduled()) {
                            final long schedulingNanos = connectable.getSchedulingPeriod(TimeUnit.NANOSECONDS);
                            final ScheduledFuture<?> newFuture = flowEngine.scheduleWithFixedDelay(this, yieldNanos, schedulingNanos, TimeUnit.NANOSECONDS);
                            scheduleState.replaceFuture(scheduledFuture, newFuture);
                            futureRef.set(newFuture);
                        }
                    }
                }
            } else if (noWorkYieldNanos > 0L && invocationResult.isYield()) {
                // The component itself didn't yield, but there was no work to do, so the framework will choose
                // to yield the component automatically for a short period of time.
                final ScheduledFuture<?> scheduledFuture = futureRef.get();
                if (scheduledFuture == null) {
                    return;
                }
                // If we are able to cancel the current future, submit a replacement and update the future reference
                // so that we can do this again the next time that the component is yielded.
                if (scheduledFuture.cancel(false)) {
                    synchronized (scheduleState) {
                        if (scheduleState.isScheduled()) {
                            final ScheduledFuture<?> newFuture = flowEngine.scheduleWithFixedDelay(this, noWorkYieldNanos, connectable.getSchedulingPeriod(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
                            scheduleState.replaceFuture(scheduledFuture, newFuture);
                            futureRef.set(newFuture);
                        }
                    }
                }
            }
        }
    };
    return yieldDetectionRunnable;
}
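The cancel-and-reschedule idiom above does not depend on NiFi's FlowEngine. Below is a minimal, self-contained sketch of the same pattern against a plain ScheduledExecutorService; the computeYieldDelayMillis helper and the 10 ms period are hypothetical stand-ins, not NiFi APIs.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class RescheduleSketch {
    public static void main(final String[] args) {
        final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
        final AtomicReference<ScheduledFuture<?>> futureRef = new AtomicReference<>();
        final Runnable task = new Runnable() {
            @Override
            public void run() {
                final long yieldMillis = computeYieldDelayMillis(); // hypothetical: how long to back off
                final ScheduledFuture<?> current = futureRef.get();
                // Only reschedule if the existing future can be cancelled without
                // interrupting a running invocation; otherwise keep the current schedule.
                if (yieldMillis > 0 && current != null && current.cancel(false)) {
                    futureRef.set(executor.scheduleWithFixedDelay(this, yieldMillis, 10L, TimeUnit.MILLISECONDS));
                }
            }

            private long computeYieldDelayMillis() {
                return 0L; // placeholder: a real component would report its yield expiration
            }
        };
        futureRef.set(executor.scheduleWithFixedDelay(task, 0L, 10L, TimeUnit.MILLISECONDS));
        // a real program would eventually call executor.shutdown()
    }
}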
use of org.apache.nifi.connectable.Connectable in project nifi by apache.
the class FlowController, method createLocalDataAuthorizable.
@Override
public Authorizable createLocalDataAuthorizable(final String componentId) {
    final String rootGroupId = getRootGroupId();

    // Provenance Events are generated only by connectable components, with the exception of DOWNLOAD events,
    // which have the root process group's identifier assigned as the component ID, and DROP events, which
    // could have the connection identifier assigned as the component ID. So, we check if the component ID
    // is set to the root group and otherwise assume that the ID is that of a connectable or connection.
    final DataAuthorizable authorizable;
    if (rootGroupId.equals(componentId)) {
        authorizable = new DataAuthorizable(getRootGroup());
    } else {
        // check if the component is a connectable, as this will be the case most often
        final Connectable connectable = getRootGroup().findLocalConnectable(componentId);
        if (connectable == null) {
            // if the component id does not reference a connectable, then consider a connection
            final Connection connection = getRootGroup().findConnection(componentId);
            if (connection == null) {
                throw new ResourceNotFoundException("The component that generated this event is no longer part of the data flow.");
            } else {
                // the authorizable for connection data is associated with the source connectable
                authorizable = new DataAuthorizable(connection.getSource());
            }
        } else {
            authorizable = new DataAuthorizable(connectable);
        }
    }
    return authorizable;
}
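A hedged usage sketch of the resolution above: the event and flowController variables are assumed to be in scope, and the actual authorization call is elided since it depends on the caller's policy checks.

// Hypothetical caller: resolve the authorizable for the component that
// generated a provenance event (event.getComponentId() is the standard
// ProvenanceEventRecord accessor).
try {
    final Authorizable dataAuthorizable = flowController.createLocalDataAuthorizable(event.getComponentId());
    // ... authorize the current user against this authorizable before returning event details
} catch (final ResourceNotFoundException rnfe) {
    // the component was removed from the flow after the event was recorded
}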
use of org.apache.nifi.connectable.Connectable in project nifi by apache.
the class FlowController, method instantiateSnippet.
private void instantiateSnippet(final ProcessGroup group, final FlowSnippetDTO dto, final boolean topLevel) throws ProcessorInstantiationException {
    writeLock.lock();
    try {
        validateSnippetContents(requireNonNull(group), dto);

        // Instantiate the controller services
        for (final ControllerServiceDTO controllerServiceDTO : dto.getControllerServices()) {
            final BundleCoordinate bundleCoordinate = BundleUtils.getBundle(controllerServiceDTO.getType(), controllerServiceDTO.getBundle());
            final ControllerServiceNode serviceNode = createControllerService(controllerServiceDTO.getType(), controllerServiceDTO.getId(), bundleCoordinate, Collections.emptySet(), true);
            serviceNode.setAnnotationData(controllerServiceDTO.getAnnotationData());
            serviceNode.setComments(controllerServiceDTO.getComments());
            serviceNode.setName(controllerServiceDTO.getName());
            if (!topLevel) {
                serviceNode.setVersionedComponentId(controllerServiceDTO.getVersionedComponentId());
            }
            group.addControllerService(serviceNode);
        }

        // Configure the controller services only after all of them have been created, in case one service
        // references another service.
        for (final ControllerServiceDTO controllerServiceDTO : dto.getControllerServices()) {
            final String serviceId = controllerServiceDTO.getId();
            final ControllerServiceNode serviceNode = getControllerServiceNode(serviceId);
            serviceNode.setProperties(controllerServiceDTO.getProperties());
        }

        // Instantiate the labels
        for (final LabelDTO labelDTO : dto.getLabels()) {
            final Label label = createLabel(labelDTO.getId(), labelDTO.getLabel());
            label.setPosition(toPosition(labelDTO.getPosition()));
            if (labelDTO.getWidth() != null && labelDTO.getHeight() != null) {
                label.setSize(new Size(labelDTO.getWidth(), labelDTO.getHeight()));
            }
            label.setStyle(labelDTO.getStyle());
            if (!topLevel) {
                label.setVersionedComponentId(labelDTO.getVersionedComponentId());
            }
            group.addLabel(label);
        }

        // Instantiate the funnels
        for (final FunnelDTO funnelDTO : dto.getFunnels()) {
            final Funnel funnel = createFunnel(funnelDTO.getId());
            funnel.setPosition(toPosition(funnelDTO.getPosition()));
            if (!topLevel) {
                funnel.setVersionedComponentId(funnelDTO.getVersionedComponentId());
            }
            group.addFunnel(funnel);
        }

        // Instantiate the input ports
        for (final PortDTO portDTO : dto.getInputPorts()) {
            final Port inputPort;
            if (group.isRootGroup()) {
                inputPort = createRemoteInputPort(portDTO.getId(), portDTO.getName());
                inputPort.setMaxConcurrentTasks(portDTO.getConcurrentlySchedulableTaskCount());
                if (portDTO.getGroupAccessControl() != null) {
                    ((RootGroupPort) inputPort).setGroupAccessControl(portDTO.getGroupAccessControl());
                }
                if (portDTO.getUserAccessControl() != null) {
                    ((RootGroupPort) inputPort).setUserAccessControl(portDTO.getUserAccessControl());
                }
            } else {
                inputPort = createLocalInputPort(portDTO.getId(), portDTO.getName());
            }
            if (!topLevel) {
                inputPort.setVersionedComponentId(portDTO.getVersionedComponentId());
            }
            inputPort.setPosition(toPosition(portDTO.getPosition()));
            inputPort.setProcessGroup(group);
            inputPort.setComments(portDTO.getComments());
            group.addInputPort(inputPort);
        }

        // Instantiate the output ports
        for (final PortDTO portDTO : dto.getOutputPorts()) {
            final Port outputPort;
            if (group.isRootGroup()) {
                outputPort = createRemoteOutputPort(portDTO.getId(), portDTO.getName());
                outputPort.setMaxConcurrentTasks(portDTO.getConcurrentlySchedulableTaskCount());
                if (portDTO.getGroupAccessControl() != null) {
                    ((RootGroupPort) outputPort).setGroupAccessControl(portDTO.getGroupAccessControl());
                }
                if (portDTO.getUserAccessControl() != null) {
                    ((RootGroupPort) outputPort).setUserAccessControl(portDTO.getUserAccessControl());
                }
            } else {
                outputPort = createLocalOutputPort(portDTO.getId(), portDTO.getName());
            }
            if (!topLevel) {
                outputPort.setVersionedComponentId(portDTO.getVersionedComponentId());
            }
            outputPort.setPosition(toPosition(portDTO.getPosition()));
            outputPort.setProcessGroup(group);
            outputPort.setComments(portDTO.getComments());
            group.addOutputPort(outputPort);
        }

        // Instantiate the processors
        for (final ProcessorDTO processorDTO : dto.getProcessors()) {
            final BundleCoordinate bundleCoordinate = BundleUtils.getBundle(processorDTO.getType(), processorDTO.getBundle());
            final ProcessorNode procNode = createProcessor(processorDTO.getType(), processorDTO.getId(), bundleCoordinate);
            procNode.setPosition(toPosition(processorDTO.getPosition()));
            procNode.setProcessGroup(group);
            if (!topLevel) {
                procNode.setVersionedComponentId(processorDTO.getVersionedComponentId());
            }
            final ProcessorConfigDTO config = processorDTO.getConfig();
            procNode.setComments(config.getComments());
            if (config.isLossTolerant() != null) {
                procNode.setLossTolerant(config.isLossTolerant());
            }
            procNode.setName(processorDTO.getName());
            procNode.setYieldPeriod(config.getYieldDuration());
            procNode.setPenalizationPeriod(config.getPenaltyDuration());
            procNode.setBulletinLevel(LogLevel.valueOf(config.getBulletinLevel()));
            procNode.setAnnotationData(config.getAnnotationData());
            procNode.setStyle(processorDTO.getStyle());
            if (config.getRunDurationMillis() != null) {
                procNode.setRunDuration(config.getRunDurationMillis(), TimeUnit.MILLISECONDS);
            }
            if (config.getSchedulingStrategy() != null) {
                procNode.setSchedulingStrategy(SchedulingStrategy.valueOf(config.getSchedulingStrategy()));
            }
            if (config.getExecutionNode() != null) {
                procNode.setExecutionNode(ExecutionNode.valueOf(config.getExecutionNode()));
            }
            if (processorDTO.getState().equals(ScheduledState.DISABLED.toString())) {
                procNode.disable();
            }
            // ensure that the scheduling strategy is set prior to these values
            procNode.setMaxConcurrentTasks(config.getConcurrentlySchedulableTaskCount());
            procNode.setScheduldingPeriod(config.getSchedulingPeriod());
            final Set<Relationship> relationships = new HashSet<>();
            if (processorDTO.getRelationships() != null) {
                for (final RelationshipDTO rel : processorDTO.getRelationships()) {
                    if (rel.isAutoTerminate()) {
                        relationships.add(procNode.getRelationship(rel.getName()));
                    }
                }
                procNode.setAutoTerminatedRelationships(relationships);
            }
            if (config.getProperties() != null) {
                procNode.setProperties(config.getProperties());
            }
            group.addProcessor(procNode);
        }

        // Instantiate the remote process groups
        for (final RemoteProcessGroupDTO remoteGroupDTO : dto.getRemoteProcessGroups()) {
            final RemoteProcessGroup remoteGroup = createRemoteProcessGroup(remoteGroupDTO.getId(), remoteGroupDTO.getTargetUris());
            remoteGroup.setComments(remoteGroupDTO.getComments());
            remoteGroup.setPosition(toPosition(remoteGroupDTO.getPosition()));
            remoteGroup.setCommunicationsTimeout(remoteGroupDTO.getCommunicationsTimeout());
            remoteGroup.setYieldDuration(remoteGroupDTO.getYieldDuration());
            if (!topLevel) {
                remoteGroup.setVersionedComponentId(remoteGroupDTO.getVersionedComponentId());
            }
            if (remoteGroupDTO.getTransportProtocol() == null) {
                remoteGroup.setTransportProtocol(SiteToSiteTransportProtocol.RAW);
            } else {
                remoteGroup.setTransportProtocol(SiteToSiteTransportProtocol.valueOf(remoteGroupDTO.getTransportProtocol()));
            }
            remoteGroup.setProxyHost(remoteGroupDTO.getProxyHost());
            remoteGroup.setProxyPort(remoteGroupDTO.getProxyPort());
            remoteGroup.setProxyUser(remoteGroupDTO.getProxyUser());
            remoteGroup.setProxyPassword(remoteGroupDTO.getProxyPassword());
            remoteGroup.setProcessGroup(group);

            // set the input/output ports
            if (remoteGroupDTO.getContents() != null) {
                final RemoteProcessGroupContentsDTO contents = remoteGroupDTO.getContents();
                // ensure there are input ports
                if (contents.getInputPorts() != null) {
                    remoteGroup.setInputPorts(convertRemotePort(contents.getInputPorts()), false);
                }
                // ensure there are output ports
                if (contents.getOutputPorts() != null) {
                    remoteGroup.setOutputPorts(convertRemotePort(contents.getOutputPorts()), false);
                }
            }
            group.addRemoteProcessGroup(remoteGroup);
        }

        // Instantiate the child process groups
        for (final ProcessGroupDTO groupDTO : dto.getProcessGroups()) {
            final ProcessGroup childGroup = createProcessGroup(groupDTO.getId());
            childGroup.setParent(group);
            childGroup.setPosition(toPosition(groupDTO.getPosition()));
            childGroup.setComments(groupDTO.getComments());
            childGroup.setName(groupDTO.getName());
            if (groupDTO.getVariables() != null) {
                childGroup.setVariables(groupDTO.getVariables());
            }
            // We do this only if this component is the child of a Versioned Component.
            if (!topLevel) {
                childGroup.setVersionedComponentId(groupDTO.getVersionedComponentId());
            }
            group.addProcessGroup(childGroup);

            final FlowSnippetDTO contents = groupDTO.getContents();
            // we want this to be recursive, so we will create a new template that contains only
            // the contents of this child group and recursively call ourselves.
            final FlowSnippetDTO childTemplateDTO = new FlowSnippetDTO();
            childTemplateDTO.setConnections(contents.getConnections());
            childTemplateDTO.setInputPorts(contents.getInputPorts());
            childTemplateDTO.setLabels(contents.getLabels());
            childTemplateDTO.setOutputPorts(contents.getOutputPorts());
            childTemplateDTO.setProcessGroups(contents.getProcessGroups());
            childTemplateDTO.setProcessors(contents.getProcessors());
            childTemplateDTO.setFunnels(contents.getFunnels());
            childTemplateDTO.setRemoteProcessGroups(contents.getRemoteProcessGroups());
            childTemplateDTO.setControllerServices(contents.getControllerServices());
            instantiateSnippet(childGroup, childTemplateDTO, false);
            if (groupDTO.getVersionControlInformation() != null) {
                final VersionControlInformation vci = StandardVersionControlInformation.Builder.fromDto(groupDTO.getVersionControlInformation()).build();
                childGroup.setVersionControlInformation(vci, Collections.emptyMap());
            }
        }

        // Instantiate the connections
        for (final ConnectionDTO connectionDTO : dto.getConnections()) {
            final ConnectableDTO sourceDTO = connectionDTO.getSource();
            final ConnectableDTO destinationDTO = connectionDTO.getDestination();
            final Connectable source;
            final Connectable destination;

            // see if the source connectable is a remote port
            if (ConnectableType.REMOTE_OUTPUT_PORT.name().equals(sourceDTO.getType())) {
                final RemoteProcessGroup remoteGroup = group.getRemoteProcessGroup(sourceDTO.getGroupId());
                source = remoteGroup.getOutputPort(sourceDTO.getId());
            } else {
                final ProcessGroup sourceGroup = getConnectableParent(group, sourceDTO.getGroupId());
                source = sourceGroup.getConnectable(sourceDTO.getId());
            }

            // see if the destination connectable is a remote port
            if (ConnectableType.REMOTE_INPUT_PORT.name().equals(destinationDTO.getType())) {
                final RemoteProcessGroup remoteGroup = group.getRemoteProcessGroup(destinationDTO.getGroupId());
                destination = remoteGroup.getInputPort(destinationDTO.getId());
            } else {
                final ProcessGroup destinationGroup = getConnectableParent(group, destinationDTO.getGroupId());
                destination = destinationGroup.getConnectable(destinationDTO.getId());
            }

            // determine the selected relationships for this connection
            final Set<String> relationships = new HashSet<>();
            if (connectionDTO.getSelectedRelationships() != null) {
                relationships.addAll(connectionDTO.getSelectedRelationships());
            }

            final Connection connection = createConnection(connectionDTO.getId(), connectionDTO.getName(), source, destination, relationships);
            if (!topLevel) {
                connection.setVersionedComponentId(connectionDTO.getVersionedComponentId());
            }
            if (connectionDTO.getBends() != null) {
                final List<Position> bendPoints = new ArrayList<>();
                for (final PositionDTO bend : connectionDTO.getBends()) {
                    bendPoints.add(new Position(bend.getX(), bend.getY()));
                }
                connection.setBendPoints(bendPoints);
            }

            final FlowFileQueue queue = connection.getFlowFileQueue();
            queue.setBackPressureDataSizeThreshold(connectionDTO.getBackPressureDataSizeThreshold());
            queue.setBackPressureObjectThreshold(connectionDTO.getBackPressureObjectThreshold());
            queue.setFlowFileExpiration(connectionDTO.getFlowFileExpiration());

            final List<String> prioritizers = connectionDTO.getPrioritizers();
            if (prioritizers != null) {
                final List<String> newPrioritizersClasses = new ArrayList<>(prioritizers);
                final List<FlowFilePrioritizer> newPrioritizers = new ArrayList<>();
                for (final String className : newPrioritizersClasses) {
                    try {
                        newPrioritizers.add(createPrioritizer(className));
                    } catch (final ClassNotFoundException | InstantiationException | IllegalAccessException e) {
                        throw new IllegalArgumentException("Unable to set prioritizer " + className + ": " + e);
                    }
                }
                queue.setPriorities(newPrioritizers);
            }
            connection.setProcessGroup(group);
            group.addConnection(connection);
        }
    } finally {
        writeLock.unlock();
    }
}
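For reference, a minimal sketch of what the createPrioritizer(className) helper used above could look like. This version uses plain reflection and matches the exceptions caught in the prioritizer loop; it is an assumption for illustration, since the real FlowController additionally resolves classes through NiFi's extension class loading.

// Illustrative sketch only -- not the actual FlowController implementation.
private FlowFilePrioritizer createPrioritizer(final String className)
        throws ClassNotFoundException, InstantiationException, IllegalAccessException {
    final Class<?> clazz = Class.forName(className);
    // newInstance() requires an accessible no-arg constructor on the prioritizer class
    return (FlowFilePrioritizer) clazz.newInstance();
}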