Use of org.apache.nifi.controller.ProcessorNode in project nifi by apache.
From the class StandardProcessGroup, method updateProcessGroup:
private void updateProcessGroup(final ProcessGroup group, final VersionedProcessGroup proposed, final String componentIdSeed,
        final Set<String> updatedVersionedComponentIds, final boolean updatePosition, final boolean updateName,
        final boolean updateDescendantVersionedGroups, final Set<String> variablesToSkip) throws ProcessorInstantiationException {

    group.setComments(proposed.getComments());

    if (updateName) {
        group.setName(proposed.getName());
    }

    if (updatePosition && proposed.getPosition() != null) {
        group.setPosition(new Position(proposed.getPosition().getX(), proposed.getPosition().getY()));
    }

    // Determine which variables have been added/removed and add/remove them from this group's variable registry.
    // We don't worry about whether a variable value has changed, because variables are designed to be 'environment specific.'
    // As a result, once imported, we won't update variables to match the remote flow, but we will add any missing variables
    // and remove any variables that are no longer part of the remote flow.
    final Set<String> existingVariableNames = group.getVariableRegistry().getVariableMap().keySet().stream()
        .map(VariableDescriptor::getName)
        .collect(Collectors.toSet());

    final Map<String, String> updatedVariableMap = new HashMap<>();

    // If any new variables exist in the proposed flow, add those to the variable registry.
    for (final Map.Entry<String, String> entry : proposed.getVariables().entrySet()) {
        if (!existingVariableNames.contains(entry.getKey()) && !variablesToSkip.contains(entry.getKey())) {
            updatedVariableMap.put(entry.getKey(), entry.getValue());
        }
    }

    group.setVariables(updatedVariableMap);

    final VersionedFlowCoordinates remoteCoordinates = proposed.getVersionedFlowCoordinates();
    if (remoteCoordinates == null) {
        group.disconnectVersionControl(false);
    } else {
        final String registryId = flowController.getFlowRegistryClient().getFlowRegistryId(remoteCoordinates.getRegistryUrl());
        final String bucketId = remoteCoordinates.getBucketId();
        final String flowId = remoteCoordinates.getFlowId();
        final int version = remoteCoordinates.getVersion();

        final FlowRegistry flowRegistry = flowController.getFlowRegistryClient().getFlowRegistry(registryId);
        final String registryName = flowRegistry == null ? registryId : flowRegistry.getName();

        final VersionedFlowState flowState = remoteCoordinates.getLatest() ? VersionedFlowState.UP_TO_DATE : VersionedFlowState.STALE;
        final VersionControlInformation vci = new StandardVersionControlInformation.Builder()
            .registryId(registryId)
            .registryName(registryName)
            .bucketId(bucketId)
            .bucketName(bucketId)
            .flowId(flowId)
            .flowName(flowId)
            .version(version)
            .flowSnapshot(proposed)
            .status(new StandardVersionedFlowStatus(flowState, flowState.getDescription()))
            .build();

        group.setVersionControlInformation(vci, Collections.emptyMap());
    }

    // Controller Services
    // Controller Services have to be handled a bit differently than other components. This is because Processors and Controller
    // Services may reference other Controller Services. Since we may be adding Service A, which depends on Service B, before adding
    // Service B, we need to ensure that we create all Controller Services first and then call updateControllerService for each
    // Controller Service. This way, we ensure that all services have been created before setting the properties. This allows us to
    // properly obtain the correct mapping of Controller Service VersionedComponentID to Controller Service instance id.
    final Map<String, ControllerServiceNode> servicesByVersionedId = group.getControllerServices(false).stream()
        .collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
    final Set<String> controllerServicesRemoved = new HashSet<>(servicesByVersionedId.keySet());

    final Map<ControllerServiceNode, VersionedControllerService> services = new HashMap<>();

    // Add any Controller Service that does not yet exist.
    for (final VersionedControllerService proposedService : proposed.getControllerServices()) {
        ControllerServiceNode service = servicesByVersionedId.get(proposedService.getIdentifier());
        if (service == null) {
            service = addControllerService(group, proposedService, componentIdSeed);
            LOG.info("Added {} to {}", service, this);
        }
        services.put(service, proposedService);
    }

    // Update all of the Controller Services to match the VersionedControllerService.
    for (final Map.Entry<ControllerServiceNode, VersionedControllerService> entry : services.entrySet()) {
        final ControllerServiceNode service = entry.getKey();
        final VersionedControllerService proposedService = entry.getValue();

        if (updatedVersionedComponentIds.contains(proposedService.getIdentifier())) {
            updateControllerService(service, proposedService);
            LOG.info("Updated {}", service);
        }

        controllerServicesRemoved.remove(proposedService.getIdentifier());
    }

    // Child groups
    final Map<String, ProcessGroup> childGroupsByVersionedId = group.getProcessGroups().stream()
        .collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
    final Set<String> childGroupsRemoved = new HashSet<>(childGroupsByVersionedId.keySet());

    for (final VersionedProcessGroup proposedChildGroup : proposed.getProcessGroups()) {
        final ProcessGroup childGroup = childGroupsByVersionedId.get(proposedChildGroup.getIdentifier());
        final VersionedFlowCoordinates childCoordinates = proposedChildGroup.getVersionedFlowCoordinates();

        if (childGroup == null) {
            final ProcessGroup added = addProcessGroup(group, proposedChildGroup, componentIdSeed, variablesToSkip);
            flowController.onProcessGroupAdded(added);
            added.findAllRemoteProcessGroups().stream().forEach(RemoteProcessGroup::initialize);
            LOG.info("Added {} to {}", added, this);
        } else if (childCoordinates == null || updateDescendantVersionedGroups) {
            updateProcessGroup(childGroup, proposedChildGroup, componentIdSeed, updatedVersionedComponentIds, true, true, updateDescendantVersionedGroups, variablesToSkip);
            LOG.info("Updated {}", childGroup);
        }

        childGroupsRemoved.remove(proposedChildGroup.getIdentifier());
    }

    // Funnels
    final Map<String, Funnel> funnelsByVersionedId = group.getFunnels().stream()
        .collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
    final Set<String> funnelsRemoved = new HashSet<>(funnelsByVersionedId.keySet());

    for (final VersionedFunnel proposedFunnel : proposed.getFunnels()) {
        final Funnel funnel = funnelsByVersionedId.get(proposedFunnel.getIdentifier());
        if (funnel == null) {
            final Funnel added = addFunnel(group, proposedFunnel, componentIdSeed);
            flowController.onFunnelAdded(added);
            LOG.info("Added {} to {}", added, this);
        } else if (updatedVersionedComponentIds.contains(proposedFunnel.getIdentifier())) {
            updateFunnel(funnel, proposedFunnel);
            LOG.info("Updated {}", funnel);
        } else {
            funnel.setPosition(new Position(proposedFunnel.getPosition().getX(), proposedFunnel.getPosition().getY()));
        }
        funnelsRemoved.remove(proposedFunnel.getIdentifier());
    }

    // Input Ports
    final Map<String, Port> inputPortsByVersionedId = group.getInputPorts().stream()
        .collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
    final Set<String> inputPortsRemoved = new HashSet<>(inputPortsByVersionedId.keySet());

    for (final VersionedPort proposedPort : proposed.getInputPorts()) {
        final Port port = inputPortsByVersionedId.get(proposedPort.getIdentifier());
        if (port == null) {
            final Port added = addInputPort(group, proposedPort, componentIdSeed);
            flowController.onInputPortAdded(added);
            LOG.info("Added {} to {}", added, this);
        } else if (updatedVersionedComponentIds.contains(proposedPort.getIdentifier())) {
            updatePort(port, proposedPort);
            LOG.info("Updated {}", port);
        } else {
            port.setPosition(new Position(proposedPort.getPosition().getX(), proposedPort.getPosition().getY()));
        }
        inputPortsRemoved.remove(proposedPort.getIdentifier());
    }

    // Output Ports
    final Map<String, Port> outputPortsByVersionedId = group.getOutputPorts().stream()
        .collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
    final Set<String> outputPortsRemoved = new HashSet<>(outputPortsByVersionedId.keySet());

    for (final VersionedPort proposedPort : proposed.getOutputPorts()) {
        final Port port = outputPortsByVersionedId.get(proposedPort.getIdentifier());
        if (port == null) {
            final Port added = addOutputPort(group, proposedPort, componentIdSeed);
            flowController.onOutputPortAdded(added);
            LOG.info("Added {} to {}", added, this);
        } else if (updatedVersionedComponentIds.contains(proposedPort.getIdentifier())) {
            updatePort(port, proposedPort);
            LOG.info("Updated {}", port);
        } else {
            port.setPosition(new Position(proposedPort.getPosition().getX(), proposedPort.getPosition().getY()));
        }
        outputPortsRemoved.remove(proposedPort.getIdentifier());
    }

    // Labels
    final Map<String, Label> labelsByVersionedId = group.getLabels().stream()
        .collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
    final Set<String> labelsRemoved = new HashSet<>(labelsByVersionedId.keySet());

    for (final VersionedLabel proposedLabel : proposed.getLabels()) {
        final Label label = labelsByVersionedId.get(proposedLabel.getIdentifier());
        if (label == null) {
            final Label added = addLabel(group, proposedLabel, componentIdSeed);
            LOG.info("Added {} to {}", added, this);
        } else if (updatedVersionedComponentIds.contains(proposedLabel.getIdentifier())) {
            updateLabel(label, proposedLabel);
            LOG.info("Updated {}", label);
        } else {
            label.setPosition(new Position(proposedLabel.getPosition().getX(), proposedLabel.getPosition().getY()));
        }
        labelsRemoved.remove(proposedLabel.getIdentifier());
    }

    // Processors
    final Map<String, ProcessorNode> processorsByVersionedId = group.getProcessors().stream()
        .collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
    final Set<String> processorsRemoved = new HashSet<>(processorsByVersionedId.keySet());
    final Map<ProcessorNode, Set<Relationship>> autoTerminatedRelationships = new HashMap<>();

    for (final VersionedProcessor proposedProcessor : proposed.getProcessors()) {
        final ProcessorNode processor = processorsByVersionedId.get(proposedProcessor.getIdentifier());
        if (processor == null) {
            final ProcessorNode added = addProcessor(group, proposedProcessor, componentIdSeed);
            flowController.onProcessorAdded(added);

            final Set<Relationship> proposedAutoTerminated = proposedProcessor.getAutoTerminatedRelationships() == null ? Collections.emptySet()
                : proposedProcessor.getAutoTerminatedRelationships().stream()
                    .map(relName -> added.getRelationship(relName))
                    .collect(Collectors.toSet());
            autoTerminatedRelationships.put(added, proposedAutoTerminated);
            LOG.info("Added {} to {}", added, this);
        } else if (updatedVersionedComponentIds.contains(proposedProcessor.getIdentifier())) {
            updateProcessor(processor, proposedProcessor);

            final Set<Relationship> proposedAutoTerminated = proposedProcessor.getAutoTerminatedRelationships() == null ? Collections.emptySet()
                : proposedProcessor.getAutoTerminatedRelationships().stream()
                    .map(relName -> processor.getRelationship(relName))
                    .collect(Collectors.toSet());

            if (!processor.getAutoTerminatedRelationships().equals(proposedAutoTerminated)) {
                autoTerminatedRelationships.put(processor, proposedAutoTerminated);
            }
            LOG.info("Updated {}", processor);
        } else {
            processor.setPosition(new Position(proposedProcessor.getPosition().getX(), proposedProcessor.getPosition().getY()));
        }
        processorsRemoved.remove(proposedProcessor.getIdentifier());
    }

    // Remote Groups
    final Map<String, RemoteProcessGroup> rpgsByVersionedId = group.getRemoteProcessGroups().stream()
        .collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
    final Set<String> rpgsRemoved = new HashSet<>(rpgsByVersionedId.keySet());

    for (final VersionedRemoteProcessGroup proposedRpg : proposed.getRemoteProcessGroups()) {
        final RemoteProcessGroup rpg = rpgsByVersionedId.get(proposedRpg.getIdentifier());
        if (rpg == null) {
            final RemoteProcessGroup added = addRemoteProcessGroup(group, proposedRpg, componentIdSeed);
            LOG.info("Added {} to {}", added, this);
        } else if (updatedVersionedComponentIds.contains(proposedRpg.getIdentifier())) {
            updateRemoteProcessGroup(rpg, proposedRpg, componentIdSeed);
            LOG.info("Updated {}", rpg);
        } else {
            rpg.setPosition(new Position(proposedRpg.getPosition().getX(), proposedRpg.getPosition().getY()));
        }
        rpgsRemoved.remove(proposedRpg.getIdentifier());
    }

    // Connections
    final Map<String, Connection> connectionsByVersionedId = group.getConnections().stream()
        .collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
    final Set<String> connectionsRemoved = new HashSet<>(connectionsByVersionedId.keySet());

    for (final VersionedConnection proposedConnection : proposed.getConnections()) {
        final Connection connection = connectionsByVersionedId.get(proposedConnection.getIdentifier());
        if (connection == null) {
            final Connection added = addConnection(group, proposedConnection, componentIdSeed);
            flowController.onConnectionAdded(added);
            LOG.info("Added {} to {}", added, this);
        } else if (isUpdateable(connection)) {
            // If the connection needs to be updated, then the source and destination will already have
            // been stopped (else, the validation above would fail). So if the source or the destination is running,
            // then we know that we don't need to update the connection.
            updateConnection(connection, proposedConnection);
            LOG.info("Updated {}", connection);
        }
        connectionsRemoved.remove(proposedConnection.getIdentifier());
    }

    // Remove components that no longer exist in the proposed flow. Connections must be removed
    // first; otherwise, we would fail to remove a component if it has a connection going to it!
    for (final String removedVersionedId : connectionsRemoved) {
        final Connection connection = connectionsByVersionedId.get(removedVersionedId);
        LOG.info("Removing {} from {}", connection, group);
        group.removeConnection(connection);
        flowController.onConnectionRemoved(connection);
    }

    // Once the appropriate connections have been removed, we may now update Processors' auto-terminated relationships.
    // We cannot do this above, in the 'updateProcessor' call, because if a connection is removed and changed to auto-terminated,
    // then updating this in the updateProcessor call above would attempt to set the Relationship to being auto-terminated while a
    // Connection for that relationship still exists. This would throw an Exception.
    autoTerminatedRelationships.forEach((proc, rels) -> proc.setAutoTerminatedRelationships(rels));

    // Remove all controller services no longer in use.
    for (final String removedVersionedId : controllerServicesRemoved) {
        final ControllerServiceNode service = servicesByVersionedId.get(removedVersionedId);
        LOG.info("Removing {} from {}", service, group);
        // Must remove the Controller Service through the Flow Controller in order to remove it from the cache.
        flowController.removeControllerService(service);
    }

    for (final String removedVersionedId : funnelsRemoved) {
        final Funnel funnel = funnelsByVersionedId.get(removedVersionedId);
        LOG.info("Removing {} from {}", funnel, group);
        group.removeFunnel(funnel);
    }

    for (final String removedVersionedId : inputPortsRemoved) {
        final Port port = inputPortsByVersionedId.get(removedVersionedId);
        LOG.info("Removing {} from {}", port, group);
        group.removeInputPort(port);
    }

    for (final String removedVersionedId : outputPortsRemoved) {
        final Port port = outputPortsByVersionedId.get(removedVersionedId);
        LOG.info("Removing {} from {}", port, group);
        group.removeOutputPort(port);
    }

    for (final String removedVersionedId : labelsRemoved) {
        final Label label = labelsByVersionedId.get(removedVersionedId);
        LOG.info("Removing {} from {}", label, group);
        group.removeLabel(label);
    }

    for (final String removedVersionedId : processorsRemoved) {
        final ProcessorNode processor = processorsByVersionedId.get(removedVersionedId);
        LOG.info("Removing {} from {}", processor, group);
        group.removeProcessor(processor);
    }

    for (final String removedVersionedId : rpgsRemoved) {
        final RemoteProcessGroup rpg = rpgsByVersionedId.get(removedVersionedId);
        LOG.info("Removing {} from {}", rpg, group);
        group.removeRemoteProcessGroup(rpg);
    }

    for (final String removedVersionedId : childGroupsRemoved) {
        final ProcessGroup childGroup = childGroupsByVersionedId.get(removedVersionedId);
        LOG.info("Removing {} from {}", childGroup, group);
        group.removeProcessGroup(childGroup);
    }
}
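The method applies the same add/update/remove bookkeeping to every component type: index the existing components by versioned id (falling back to the instance id), walk the proposed components, and whatever is never matched gets removed. Below is a minimal, self-contained sketch of that reconcile pattern; the Component and VersionedComponent types and the add/update/remove callbacks are hypothetical stand-ins, not NiFi API.

import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;

interface Component {
    String getIdentifier();
    Optional<String> getVersionedComponentId();
}

interface VersionedComponent {
    String getIdentifier();
}

final class ComponentReconciler {
    static <C extends Component, V extends VersionedComponent> void reconcile(
            final Collection<C> existing, final Collection<V> proposed,
            final Function<V, C> add, final BiConsumer<C, V> update, final Consumer<C> remove) {

        // index current components by versioned id, falling back to the instance id
        final Map<String, C> byVersionedId = existing.stream()
            .collect(Collectors.toMap(c -> c.getVersionedComponentId().orElse(c.getIdentifier()), Function.identity()));

        // start by assuming everything is removed, then cross items off as they match
        final Set<String> removedIds = new HashSet<>(byVersionedId.keySet());

        for (final V proposedComponent : proposed) {
            final C component = byVersionedId.get(proposedComponent.getIdentifier());
            if (component == null) {
                add.apply(proposedComponent);                // not in the current flow: create it
            } else {
                update.accept(component, proposedComponent); // present in both: sync it
            }
            removedIds.remove(proposedComponent.getIdentifier());
        }

        // anything never matched by a proposed component has been removed from the versioned flow
        removedIds.stream().map(byVersionedId::get).forEach(remove);
    }
}

Note that in the NiFi method the removal phases are carefully ordered (connections first, so no component is removed while a connection still points at it); a generic helper like this would need the same ordering discipline across component types.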
Use of org.apache.nifi.controller.ProcessorNode in project nifi by apache.
From the class StandardProcessSession, method commit:
@SuppressWarnings({ "unchecked", "rawtypes" })
private void commit(final Checkpoint checkpoint) {
    try {
        final long commitStartNanos = System.nanoTime();

        resetReadClaim();
        try {
            claimCache.flush();
        } finally {
            claimCache.reset();
        }

        final long updateProvenanceStart = System.nanoTime();
        updateProvenanceRepo(checkpoint);

        final long claimRemovalStart = System.nanoTime();
        final long updateProvenanceNanos = claimRemovalStart - updateProvenanceStart;

        /**
         * Figure out which content claims can be released. At this point,
         * we will decrement the Claimant Count for the claims via the
         * Content Repository. We do not actually destroy the content
         * because otherwise, we could remove the Original Claim and
         * crash/restart before the FlowFileRepository is updated. This would
         * result in the FlowFile being restored such that the content claim
         * points to the Original Claim -- which has already been removed!
         */
        for (final Map.Entry<FlowFileRecord, StandardRepositoryRecord> entry : checkpoint.records.entrySet()) {
            final FlowFile flowFile = entry.getKey();
            final StandardRepositoryRecord record = entry.getValue();

            if (record.isMarkedForDelete()) {
                // if the working claim is not the same as the original claim, we can immediately destroy the working claim
                // because it was created in this session and is to be deleted. We don't need to wait for the FlowFile Repo to sync.
                decrementClaimCount(record.getWorkingClaim());

                if (record.getOriginalClaim() != null && !record.getOriginalClaim().equals(record.getWorkingClaim())) {
                    // if the working & original claims are the same, don't remove twice; we only want to remove the original
                    // if it's different from the working claim. Otherwise, we would decrement two claimant counts. This causes
                    // an issue if we only updated the FlowFile attributes.
                    decrementClaimCount(record.getOriginalClaim());
                }

                final long flowFileLife = System.currentTimeMillis() - flowFile.getEntryDate();
                final Connectable connectable = context.getConnectable();
                final Object terminator = connectable instanceof ProcessorNode ? ((ProcessorNode) connectable).getProcessor() : connectable;
                LOG.info("{} terminated by {}; life of FlowFile = {} ms", new Object[] { flowFile, terminator, flowFileLife });
            } else if (record.isWorking() && record.getWorkingClaim() != record.getOriginalClaim()) {
                // records which have been updated -- remove the original claim if it exists
                decrementClaimCount(record.getOriginalClaim());
            }
        }

        final long claimRemovalFinishNanos = System.nanoTime();
        final long claimRemovalNanos = claimRemovalFinishNanos - claimRemovalStart;

        // Update the FlowFile Repository
        try {
            final Collection<StandardRepositoryRecord> repoRecords = checkpoint.records.values();
            context.getFlowFileRepository().updateRepository((Collection) repoRecords);
        } catch (final IOException ioe) {
            // if we fail to commit the session, we need to roll back
            // the checkpoints as well because none of the checkpoints
            // were ever committed.
            rollback(false, true);
            throw new ProcessException("FlowFile Repository failed to update", ioe);
        }

        final long flowFileRepoUpdateFinishNanos = System.nanoTime();
        final long flowFileRepoUpdateNanos = flowFileRepoUpdateFinishNanos - claimRemovalFinishNanos;

        updateEventRepository(checkpoint);

        final long updateEventRepositoryFinishNanos = System.nanoTime();
        final long updateEventRepositoryNanos = updateEventRepositoryFinishNanos - flowFileRepoUpdateFinishNanos;

        // transfer the FlowFiles to the connections' queues.
        final Map<FlowFileQueue, Collection<FlowFileRecord>> recordMap = new HashMap<>();
        for (final StandardRepositoryRecord record : checkpoint.records.values()) {
            if (record.isMarkedForAbort() || record.isMarkedForDelete()) {
                // these don't need to be transferred
                continue;
            }

            // If the record has no current FlowFile (for example, one that was created and then removed within this
            // session), there is nothing to transfer; in this case, we just ignore it, and it will be cleaned up by
            // clearing the records map.
            if (record.getCurrent() != null) {
                Collection<FlowFileRecord> collection = recordMap.get(record.getDestination());
                if (collection == null) {
                    collection = new ArrayList<>();
                    recordMap.put(record.getDestination(), collection);
                }
                collection.add(record.getCurrent());
            }
        }

        for (final Map.Entry<FlowFileQueue, Collection<FlowFileRecord>> entry : recordMap.entrySet()) {
            entry.getKey().putAll(entry.getValue());
        }

        final long enqueueFlowFileFinishNanos = System.nanoTime();
        final long enqueueFlowFileNanos = enqueueFlowFileFinishNanos - updateEventRepositoryFinishNanos;

        // Delete any files from disk that need to be removed.
        for (final Path path : checkpoint.deleteOnCommit.values()) {
            try {
                Files.deleteIfExists(path);
            } catch (final IOException e) {
                throw new FlowFileAccessException("Unable to delete " + path.toFile().getAbsolutePath(), e);
            }
        }
        checkpoint.deleteOnCommit.clear();

        if (LOG.isInfoEnabled()) {
            final String sessionSummary = summarizeEvents(checkpoint);
            if (!sessionSummary.isEmpty()) {
                LOG.info("{} for {}, committed the following events: {}", new Object[] { this, connectableDescription, sessionSummary });
            }
        }

        for (final Map.Entry<String, Long> entry : checkpoint.countersOnCommit.entrySet()) {
            context.adjustCounter(entry.getKey(), entry.getValue());
        }

        acknowledgeRecords();
        resetState();

        if (LOG.isDebugEnabled()) {
            final StringBuilder timingInfo = new StringBuilder();
            timingInfo.append("Session commit for ").append(this).append(" [").append(connectableDescription).append("]").append(" took ");

            final long commitNanos = System.nanoTime() - commitStartNanos;
            formatNanos(commitNanos, timingInfo);
            timingInfo.append("; FlowFile Repository Update took ");
            formatNanos(flowFileRepoUpdateNanos, timingInfo);
            timingInfo.append("; Claim Removal took ");
            formatNanos(claimRemovalNanos, timingInfo);
            timingInfo.append("; FlowFile Event Update took ");
            formatNanos(updateEventRepositoryNanos, timingInfo);
            timingInfo.append("; Enqueuing FlowFiles took ");
            formatNanos(enqueueFlowFileNanos, timingInfo);
            timingInfo.append("; Updating Provenance Event Repository took ");
            formatNanos(updateProvenanceNanos, timingInfo);

            LOG.debug(timingInfo.toString());
        }
    } catch (final Exception e) {
        try {
            // if we fail to commit the session, we need to roll back
            // the checkpoints as well because none of the checkpoints
            // were ever committed.
            rollback(false, true);
        } catch (final Exception e1) {
            e.addSuppressed(e1);
        }

        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        } else {
            throw new ProcessException(e);
        }
    }
}
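The outer catch block implements a commit-or-rollback pattern worth isolating: since nothing was durably committed when the exception was thrown, the checkpointed state is rolled back, and a failure during rollback is attached to the root cause rather than replacing it. A reduced sketch of that shape, assuming hypothetical doCommit()/doRollback() methods:

import org.apache.nifi.processor.exception.ProcessException;

final class CommitExample {
    void commitOrRollback() {
        try {
            doCommit();
        } catch (final Exception e) {
            try {
                doRollback(); // nothing was durably committed, so undo the checkpointed state
            } catch (final Exception rollbackFailure) {
                e.addSuppressed(rollbackFailure); // keep the rollback failure attached to the root cause
            }
            if (e instanceof RuntimeException) {
                throw (RuntimeException) e; // propagate unchecked exceptions unchanged
            }
            throw new ProcessException(e); // wrap checked exceptions in the framework's runtime type
        }
    }

    private void doCommit() throws Exception { /* repository updates would happen here */ }
    private void doRollback() { /* restore the pre-commit state */ }
}

Using addSuppressed here means a failed rollback never masks the original commit failure in logs or stack traces, which is the same reason try-with-resources uses suppressed exceptions.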
Use of org.apache.nifi.controller.ProcessorNode in project nifi by apache.
From the class StandardControllerServiceProvider, method verifyCanScheduleReferencingComponents:
@Override
public void verifyCanScheduleReferencingComponents(final ControllerServiceNode serviceNode) {
    final List<ControllerServiceNode> referencingServices = serviceNode.getReferences().findRecursiveReferences(ControllerServiceNode.class);
    final List<ReportingTaskNode> referencingReportingTasks = serviceNode.getReferences().findRecursiveReferences(ReportingTaskNode.class);
    final List<ProcessorNode> referencingProcessors = serviceNode.getReferences().findRecursiveReferences(ProcessorNode.class);

    final Set<ControllerServiceNode> referencingServiceSet = new HashSet<>(referencingServices);

    for (final ReportingTaskNode taskNode : referencingReportingTasks) {
        if (taskNode.getScheduledState() != ScheduledState.DISABLED) {
            taskNode.verifyCanStart(referencingServiceSet);
        }
    }

    for (final ProcessorNode procNode : referencingProcessors) {
        if (procNode.getScheduledState() != ScheduledState.DISABLED) {
            procNode.verifyCanStart(referencingServiceSet);
        }
    }
}
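This verification is the fail-fast half of a verify-then-act pair: it walks every processor, reporting task, and service that transitively references the service and asks each one whether it could start. A hedged usage sketch (assuming serviceProvider and serviceNode are already in scope; verifyCanStart implementations typically signal failure with an IllegalStateException):

// verification throws if any enabled referencing component cannot start,
// so nothing at all is scheduled unless the whole referencing set is startable
serviceProvider.verifyCanScheduleReferencingComponents(serviceNode);
serviceProvider.scheduleReferencingComponents(serviceNode);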
Use of org.apache.nifi.controller.ProcessorNode in project nifi by apache.
From the class StandardControllerServiceProvider, method scheduleReferencingComponents:
@Override
public Set<ConfiguredComponent> scheduleReferencingComponents(final ControllerServiceNode serviceNode) {
    // find all of the schedulable components (processors, reporting tasks) that refer to this Controller Service,
    // or a service that references this controller service, etc.
    final List<ProcessorNode> processors = serviceNode.getReferences().findRecursiveReferences(ProcessorNode.class);
    final List<ReportingTaskNode> reportingTasks = serviceNode.getReferences().findRecursiveReferences(ReportingTaskNode.class);

    final Set<ConfiguredComponent> updated = new HashSet<>();

    // verify that we can start all components (that are not disabled) before doing anything
    for (final ProcessorNode node : processors) {
        if (node.getScheduledState() != ScheduledState.DISABLED) {
            node.verifyCanStart();
            updated.add(node);
        }
    }
    for (final ReportingTaskNode node : reportingTasks) {
        if (node.getScheduledState() != ScheduledState.DISABLED) {
            node.verifyCanStart();
            updated.add(node);
        }
    }

    // start all of the components that are not disabled
    for (final ProcessorNode node : processors) {
        if (node.getScheduledState() != ScheduledState.DISABLED) {
            node.getProcessGroup().startProcessor(node, true);
            updated.add(node);
        }
    }
    for (final ReportingTaskNode node : reportingTasks) {
        if (node.getScheduledState() != ScheduledState.DISABLED) {
            processScheduler.schedule(node);
            updated.add(node);
        }
    }

    return updated;
}
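The two-pass structure is deliberate: all verification runs first so that if any component would refuse to start, the exception propagates before anything has been scheduled. A minimal sketch of that shape in isolation, where Startable is a hypothetical type and isEnabled() stands in for the ScheduledState.DISABLED check above:

interface Startable {
    boolean isEnabled();
    void verifyCanStart(); // expected to throw if the component cannot start
    void start();
}

final class TwoPassScheduler {
    // pass 1 verifies every enabled candidate so nothing is started if any would fail;
    // pass 2 performs the actual starts
    static void verifyThenStart(final Iterable<? extends Startable> candidates) {
        for (final Startable candidate : candidates) {
            if (candidate.isEnabled()) {
                candidate.verifyCanStart();
            }
        }
        for (final Startable candidate : candidates) {
            if (candidate.isEnabled()) {
                candidate.start();
            }
        }
    }
}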
Use of org.apache.nifi.controller.ProcessorNode in project nifi by apache.
From the class StandardControllerServiceProvider, method getControllerServiceForComponent:
@Override
public ControllerService getControllerServiceForComponent(final String serviceIdentifier, final String componentId) {
    // Find the Process Group that owns the component.
    ProcessGroup groupOfInterest = null;

    final ProcessorNode procNode = flowController.getProcessorNode(componentId);
    if (procNode == null) {
        final ControllerServiceNode serviceNode = getControllerServiceNode(componentId);
        if (serviceNode == null) {
            final ReportingTaskNode taskNode = flowController.getReportingTaskNode(componentId);
            if (taskNode == null) {
                throw new IllegalStateException("Could not find any Processor, Reporting Task, or Controller Service with identifier " + componentId);
            }

            // we have confirmed that the component is a reporting task. We can only reference Controller Services
            // that are scoped at the FlowController level in this case.
            final ControllerServiceNode rootServiceNode = flowController.getRootControllerService(serviceIdentifier);
            return (rootServiceNode == null) ? null : rootServiceNode.getProxiedControllerService();
        } else {
            groupOfInterest = serviceNode.getProcessGroup();
        }
    } else {
        groupOfInterest = procNode.getProcessGroup();
    }

    if (groupOfInterest == null) {
        final ControllerServiceNode rootServiceNode = flowController.getRootControllerService(serviceIdentifier);
        return (rootServiceNode == null) ? null : rootServiceNode.getProxiedControllerService();
    }

    final Set<ControllerServiceNode> servicesForGroup = groupOfInterest.getControllerServices(true);
    for (final ControllerServiceNode serviceNode : servicesForGroup) {
        if (serviceIdentifier.equals(serviceNode.getIdentifier())) {
            return serviceNode.getProxiedControllerService();
        }
    }

    return null;
}
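The nested null checks encode a resolution order: the component id is tried as a processor, then a controller service, then a reporting task, and the owning process group (or the controller-level scope) determines which services are visible. The same order reads more plainly with early returns; this is a sketch for clarity, not the actual NiFi method, and it assumes the same flowController field and getControllerServiceNode lookup as the class above. A null result stands for "resolve against controller-scoped services", matching the groupOfInterest == null branch:

private ProcessGroup findOwningGroup(final String componentId) {
    final ProcessorNode procNode = flowController.getProcessorNode(componentId);
    if (procNode != null) {
        return procNode.getProcessGroup();
    }

    final ControllerServiceNode serviceNode = getControllerServiceNode(componentId);
    if (serviceNode != null) {
        return serviceNode.getProcessGroup(); // may be null for a controller-scoped service
    }

    final ReportingTaskNode taskNode = flowController.getReportingTaskNode(componentId);
    if (taskNode != null) {
        return null; // reporting tasks resolve only controller-scoped services
    }

    throw new IllegalStateException("Could not find any Processor, Reporting Task, or Controller Service with identifier " + componentId);
}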