use of org.apache.nifi.controller.service.ControllerServiceState in project nifi by apache.
the class ClusterReplicationComponentLifecycle method activateControllerServices.
@Override
public Set<AffectedComponentEntity> activateControllerServices(final URI originalUri, final NiFiUser user, final String groupId, final Set<AffectedComponentEntity> affectedServices, final ControllerServiceState desiredState, final Pause pause) throws LifecycleManagementException {
final Set<String> affectedServiceIds = affectedServices.stream().map(component -> component.getId()).collect(Collectors.toSet());
final Map<String, Revision> serviceRevisionMap = getRevisions(groupId, affectedServiceIds);
final Map<String, RevisionDTO> serviceRevisionDtoMap = serviceRevisionMap.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> dtoFactory.createRevisionDTO(entry.getValue())));
final ActivateControllerServicesEntity activateServicesEntity = new ActivateControllerServicesEntity();
activateServicesEntity.setComponents(serviceRevisionDtoMap);
activateServicesEntity.setId(groupId);
activateServicesEntity.setState(desiredState.name());
URI controllerServicesUri;
try {
controllerServicesUri = new URI(originalUri.getScheme(), originalUri.getUserInfo(), originalUri.getHost(), originalUri.getPort(), "/nifi-api/flow/process-groups/" + groupId + "/controller-services", null, originalUri.getFragment());
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
final Map<String, String> headers = new HashMap<>();
headers.put("content-type", MediaType.APPLICATION_JSON);
// Determine whether we should replicate only to the cluster coordinator, or if we should replicate directly to the cluster nodes themselves.
try {
final NodeResponse clusterResponse;
if (getReplicationTarget() == ReplicationTarget.CLUSTER_NODES) {
clusterResponse = getRequestReplicator().replicate(user, HttpMethod.PUT, controllerServicesUri, activateServicesEntity, headers).awaitMergedResponse();
} else {
clusterResponse = getRequestReplicator().forwardToCoordinator(getClusterCoordinatorNode(), user, HttpMethod.PUT, controllerServicesUri, activateServicesEntity, headers).awaitMergedResponse();
}
final int disableServicesStatus = clusterResponse.getStatus();
if (disableServicesStatus != Status.OK.getStatusCode()) {
final String explanation = getResponseEntity(clusterResponse, String.class);
throw new LifecycleManagementException("Failed to update Controller Services to a state of " + desiredState + " due to " + explanation);
}
final boolean serviceTransitioned = waitForControllerServiceStatus(user, originalUri, groupId, affectedServiceIds, desiredState, pause);
if (!serviceTransitioned) {
throw new LifecycleManagementException("Failed while waiting for Controller Services to finish transitioning to a state of " + desiredState);
}
} catch (final InterruptedException ie) {
Thread.currentThread().interrupt();
throw new LifecycleManagementException("Interrupted while transitioning Controller Services to a state of " + desiredState);
}
return affectedServices.stream().map(componentEntity -> serviceFacade.getControllerService(componentEntity.getId(), user)).map(dtoFactory::createAffectedComponentEntity).collect(Collectors.toSet());
}
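For context, a hypothetical caller of the lifecycle method above might look like the sketch below. The names lifecycle, requestUri, user, groupId, affectedServices and pause do not come from the NiFi codebase; they stand in for whatever the calling code has in scope.
// Hypothetical usage sketch: enable all affected services in a group and wait for the transition.
try {
    final Set<AffectedComponentEntity> enabled = lifecycle.activateControllerServices(
            requestUri, user, groupId, affectedServices, ControllerServiceState.ENABLED, pause);
    // 'enabled' now holds refreshed AffectedComponentEntity instances for the affected services
} catch (final LifecycleManagementException e) {
    // the services could not be transitioned (or the wait was interrupted); surface the failure
    throw new IllegalStateException("Unable to enable Controller Services", e);
}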
use of org.apache.nifi.controller.service.ControllerServiceState in project nifi by apache.
the class StandardFlowSynchronizer method updateProcessGroup.
private ProcessGroup updateProcessGroup(final FlowController controller, final ProcessGroup parentGroup, final Element processGroupElement, final StringEncryptor encryptor, final FlowEncodingVersion encodingVersion) throws ProcessorInstantiationException {
// get the parent group ID
final String parentId = (parentGroup == null) ? null : parentGroup.getIdentifier();
// get the process group
final ProcessGroupDTO processGroupDto = FlowFromDOMFactory.getProcessGroup(parentId, processGroupElement, encryptor, encodingVersion);
// update the process group
if (parentId == null) {
/*
* Labels are not included in the "inherit flow" algorithm, so we cannot
* blindly update them because they may not exist in the current flow.
* Therefore, we first remove all labels, and then let the updating
* process add labels defined in the new flow.
*/
final ProcessGroup root = controller.getGroup(controller.getRootGroupId());
for (final Label label : root.findAllLabels()) {
label.getProcessGroup().removeLabel(label);
}
}
// update the process group
controller.updateProcessGroup(processGroupDto);
// get the real process group and ID
final ProcessGroup processGroup = controller.getGroup(processGroupDto.getId());
// determine the scheduled state of all of the Controller Services
final List<Element> controllerServiceNodeList = getChildrenByTagName(processGroupElement, "controllerService");
final Set<ControllerServiceNode> toDisable = new HashSet<>();
final Set<ControllerServiceNode> toEnable = new HashSet<>();
for (final Element serviceElement : controllerServiceNodeList) {
final ControllerServiceDTO dto = FlowFromDOMFactory.getControllerService(serviceElement, encryptor);
final ControllerServiceNode serviceNode = processGroup.getControllerService(dto.getId());
// Check if the controller service is in the correct state. We consider it the correct state if
// we are in a transitional state and heading in the right direction or already in the correct state.
// E.g., it is the correct state if it should be 'DISABLED' and it is either DISABLED or DISABLING.
final ControllerServiceState serviceState = getFinalTransitionState(serviceNode.getState());
final ControllerServiceState clusterState = getFinalTransitionState(ControllerServiceState.valueOf(dto.getState()));
if (serviceState != clusterState) {
switch(clusterState) {
case DISABLED:
toDisable.add(serviceNode);
break;
case ENABLED:
toEnable.add(serviceNode);
break;
}
}
}
controller.disableControllerServicesAsync(toDisable);
controller.enableControllerServices(toEnable);
// processors & ports cannot be updated - they must be the same. Except for the scheduled state.
final List<Element> processorNodeList = getChildrenByTagName(processGroupElement, "processor");
for (final Element processorElement : processorNodeList) {
final ProcessorDTO dto = FlowFromDOMFactory.getProcessor(processorElement, encryptor);
final ProcessorNode procNode = processGroup.getProcessor(dto.getId());
updateNonFingerprintedProcessorSettings(procNode, dto);
if (!procNode.getScheduledState().name().equals(dto.getState())) {
try {
switch(ScheduledState.valueOf(dto.getState())) {
case DISABLED:
// switch processor to disabled. This means we have to stop it (if it's already stopped, this method does nothing),
// and then we have to disable it.
controller.stopProcessor(procNode.getProcessGroupIdentifier(), procNode.getIdentifier());
procNode.getProcessGroup().disableProcessor(procNode);
break;
case RUNNING:
// we want to run now. Make sure processor is not disabled and then start it.
procNode.getProcessGroup().enableProcessor(procNode);
controller.startProcessor(procNode.getProcessGroupIdentifier(), procNode.getIdentifier(), false);
break;
case STOPPED:
if (procNode.getScheduledState() == ScheduledState.DISABLED) {
procNode.getProcessGroup().enableProcessor(procNode);
} else if (procNode.getScheduledState() == ScheduledState.RUNNING) {
controller.stopProcessor(procNode.getProcessGroupIdentifier(), procNode.getIdentifier());
}
break;
}
} catch (final IllegalStateException ise) {
logger.error("Failed to change Scheduled State of {} from {} to {} due to {}", procNode, procNode.getScheduledState().name(), dto.getState(), ise.toString());
logger.error("", ise);
// create bulletin for the Processor Node
controller.getBulletinRepository().addBulletin(BulletinFactory.createBulletin(procNode, "Node Reconnection", Severity.ERROR.name(), "Failed to change Scheduled State of " + procNode + " from " + procNode.getScheduledState().name() + " to " + dto.getState() + " due to " + ise.toString()));
// create bulletin at Controller level.
controller.getBulletinRepository().addBulletin(BulletinFactory.createBulletin("Node Reconnection", Severity.ERROR.name(), "Failed to change Scheduled State of " + procNode + " from " + procNode.getScheduledState().name() + " to " + dto.getState() + " due to " + ise.toString()));
}
}
}
final List<Element> inputPortList = getChildrenByTagName(processGroupElement, "inputPort");
for (final Element portElement : inputPortList) {
final PortDTO dto = FlowFromDOMFactory.getPort(portElement);
final Port port = processGroup.getInputPort(dto.getId());
if (!port.getScheduledState().name().equals(dto.getState())) {
switch(ScheduledState.valueOf(dto.getState())) {
case DISABLED:
// switch port to disabled. This means we have to stop it (if it's already stopped, this method does nothing),
// and then we have to disable it.
controller.stopConnectable(port);
port.getProcessGroup().disableInputPort(port);
break;
case RUNNING:
// we want to run now. Make sure the port is not disabled and then start it.
port.getProcessGroup().enableInputPort(port);
controller.startConnectable(port);
break;
case STOPPED:
if (port.getScheduledState() == ScheduledState.DISABLED) {
port.getProcessGroup().enableInputPort(port);
} else if (port.getScheduledState() == ScheduledState.RUNNING) {
controller.stopConnectable(port);
}
break;
}
}
}
final List<Element> outputPortList = getChildrenByTagName(processGroupElement, "outputPort");
for (final Element portElement : outputPortList) {
final PortDTO dto = FlowFromDOMFactory.getPort(portElement);
final Port port = processGroup.getOutputPort(dto.getId());
if (!port.getScheduledState().name().equals(dto.getState())) {
switch(ScheduledState.valueOf(dto.getState())) {
case DISABLED:
// switch port to disabled. This means we have to stop it (if it's already stopped, this method does nothing),
// and then we have to disable it.
controller.stopConnectable(port);
port.getProcessGroup().disableOutputPort(port);
break;
case RUNNING:
// we want to run now. Make sure the port is not disabled and then start it.
port.getProcessGroup().enableOutputPort(port);
controller.startConnectable(port);
break;
case STOPPED:
if (port.getScheduledState() == ScheduledState.DISABLED) {
port.getProcessGroup().enableOutputPort(port);
} else if (port.getScheduledState() == ScheduledState.RUNNING) {
controller.stopConnectable(port);
}
break;
}
}
}
// Update scheduled state of Remote Group Ports
final List<Element> remoteProcessGroupList = getChildrenByTagName(processGroupElement, "remoteProcessGroup");
for (final Element remoteGroupElement : remoteProcessGroupList) {
final RemoteProcessGroupDTO remoteGroupDto = FlowFromDOMFactory.getRemoteProcessGroup(remoteGroupElement, encryptor);
final RemoteProcessGroup rpg = processGroup.getRemoteProcessGroup(remoteGroupDto.getId());
// input ports
final List<Element> inputPortElements = getChildrenByTagName(remoteGroupElement, "inputPort");
for (final Element inputPortElement : inputPortElements) {
final RemoteProcessGroupPortDescriptor portDescriptor = FlowFromDOMFactory.getRemoteProcessGroupPort(inputPortElement);
final String inputPortId = portDescriptor.getId();
final RemoteGroupPort inputPort = rpg.getInputPort(inputPortId);
if (inputPort == null) {
continue;
}
if (portDescriptor.isTransmitting()) {
if (inputPort.getScheduledState() != ScheduledState.RUNNING && inputPort.getScheduledState() != ScheduledState.STARTING) {
rpg.startTransmitting(inputPort);
}
} else if (inputPort.getScheduledState() != ScheduledState.STOPPED && inputPort.getScheduledState() != ScheduledState.STOPPING) {
rpg.stopTransmitting(inputPort);
}
}
// output ports
final List<Element> outputPortElements = getChildrenByTagName(remoteGroupElement, "outputPort");
for (final Element outputPortElement : outputPortElements) {
final RemoteProcessGroupPortDescriptor portDescriptor = FlowFromDOMFactory.getRemoteProcessGroupPort(outputPortElement);
final String outputPortId = portDescriptor.getId();
final RemoteGroupPort outputPort = rpg.getOutputPort(outputPortId);
if (outputPort == null) {
continue;
}
if (portDescriptor.isTransmitting()) {
if (outputPort.getScheduledState() != ScheduledState.RUNNING && outputPort.getScheduledState() != ScheduledState.STARTING) {
rpg.startTransmitting(outputPort);
}
} else if (outputPort.getScheduledState() != ScheduledState.STOPPED && outputPort.getScheduledState() != ScheduledState.STOPPING) {
rpg.stopTransmitting(outputPort);
}
}
}
// add labels
final List<Element> labelNodeList = getChildrenByTagName(processGroupElement, "label");
for (final Element labelElement : labelNodeList) {
final LabelDTO labelDTO = FlowFromDOMFactory.getLabel(labelElement);
final Label label = controller.createLabel(labelDTO.getId(), labelDTO.getLabel());
label.setStyle(labelDTO.getStyle());
label.setPosition(new Position(labelDTO.getPosition().getX(), labelDTO.getPosition().getY()));
label.setVersionedComponentId(labelDTO.getVersionedComponentId());
if (labelDTO.getWidth() != null && labelDTO.getHeight() != null) {
label.setSize(new Size(labelDTO.getWidth(), labelDTO.getHeight()));
}
processGroup.addLabel(label);
}
// update nested process groups (recursively)
final List<Element> nestedProcessGroupNodeList = getChildrenByTagName(processGroupElement, "processGroup");
for (final Element nestedProcessGroupElement : nestedProcessGroupNodeList) {
updateProcessGroup(controller, processGroup, nestedProcessGroupElement, encryptor, encodingVersion);
}
// update connections
final List<Element> connectionNodeList = getChildrenByTagName(processGroupElement, "connection");
for (final Element connectionElement : connectionNodeList) {
final ConnectionDTO dto = FlowFromDOMFactory.getConnection(connectionElement);
final Connection connection = processGroup.getConnection(dto.getId());
connection.setName(dto.getName());
connection.setProcessGroup(processGroup);
if (dto.getLabelIndex() != null) {
connection.setLabelIndex(dto.getLabelIndex());
}
if (dto.getzIndex() != null) {
connection.setZIndex(dto.getzIndex());
}
final List<Position> bendPoints = new ArrayList<>();
for (final PositionDTO bend : dto.getBends()) {
bendPoints.add(new Position(bend.getX(), bend.getY()));
}
connection.setBendPoints(bendPoints);
List<FlowFilePrioritizer> newPrioritizers = null;
final List<String> prioritizers = dto.getPrioritizers();
if (prioritizers != null) {
final List<String> newPrioritizersClasses = new ArrayList<>(prioritizers);
newPrioritizers = new ArrayList<>();
for (final String className : newPrioritizersClasses) {
try {
newPrioritizers.add(controller.createPrioritizer(className));
} catch (final ClassNotFoundException | InstantiationException | IllegalAccessException e) {
throw new IllegalArgumentException("Unable to set prioritizer " + className + ": " + e);
}
}
}
if (newPrioritizers != null) {
connection.getFlowFileQueue().setPriorities(newPrioritizers);
}
if (dto.getBackPressureObjectThreshold() != null) {
connection.getFlowFileQueue().setBackPressureObjectThreshold(dto.getBackPressureObjectThreshold());
}
if (dto.getBackPressureDataSizeThreshold() != null && !dto.getBackPressureDataSizeThreshold().trim().isEmpty()) {
connection.getFlowFileQueue().setBackPressureDataSizeThreshold(dto.getBackPressureDataSizeThreshold());
}
if (dto.getFlowFileExpiration() != null) {
connection.getFlowFileQueue().setFlowFileExpiration(dto.getFlowFileExpiration());
}
}
// Replace the templates with those from the proposed flow
final List<Element> templateNodeList = getChildrenByTagName(processGroupElement, "template");
for (final Element templateElement : templateNodeList) {
final TemplateDTO templateDto = TemplateUtils.parseDto(templateElement);
final Template template = new Template(templateDto);
// Templates are immutable, so any two nodes that have a template with the same ID should have identical content.
// Removing and re-adding the template here just makes sure that they do.
if (processGroup.getTemplate(template.getIdentifier()) != null) {
processGroup.removeTemplate(template);
}
processGroup.addTemplate(template);
}
return processGroup;
}
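The state comparison above relies on a getFinalTransitionState helper that is not shown on this page. The sketch below is an assumption about what such a helper likely does (collapsing transitional states to the state they are heading toward), not the actual StandardFlowSynchronizer implementation.
// Sketch (assumption): ENABLING compares equal to ENABLED, DISABLING compares equal to DISABLED.
private static ControllerServiceState getFinalTransitionState(final ControllerServiceState state) {
    switch (state) {
        case ENABLING:
        case ENABLED:
            return ControllerServiceState.ENABLED;
        case DISABLING:
        case DISABLED:
            return ControllerServiceState.DISABLED;
        default:
            throw new AssertionError("Unexpected Controller Service state: " + state);
    }
}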
use of org.apache.nifi.controller.service.ControllerServiceState in project nifi by apache.
the class StandardFlowSerializer method addControllerService.
public void addControllerService(final Element element, final ControllerServiceNode serviceNode) {
final Element serviceElement = element.getOwnerDocument().createElement("controllerService");
addTextElement(serviceElement, "id", serviceNode.getIdentifier());
addTextElement(serviceElement, "versionedComponentId", serviceNode.getVersionedComponentId());
addTextElement(serviceElement, "name", serviceNode.getName());
addTextElement(serviceElement, "comment", serviceNode.getComments());
addTextElement(serviceElement, "class", serviceNode.getCanonicalClassName());
addBundle(serviceElement, serviceNode.getBundleCoordinate());
final ControllerServiceState state = serviceNode.getState();
final boolean enabled = (state == ControllerServiceState.ENABLED || state == ControllerServiceState.ENABLING);
addTextElement(serviceElement, "enabled", String.valueOf(enabled));
addConfiguration(serviceElement, serviceNode.getProperties(), serviceNode.getAnnotationData(), encryptor);
element.appendChild(serviceElement);
}
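The serializer above leans on an addTextElement helper that is not reproduced here. The sketch below shows a plausible DOM helper of that shape, purely for illustration; the real StandardFlowSerializer helper may differ, for example in how it handles null or Optional values.
// Sketch (assumption): append a child element with the given tag name and text content.
private static void addTextElement(final Element parent, final String name, final String value) {
    final org.w3c.dom.Document doc = parent.getOwnerDocument();
    final Element child = doc.createElement(name);
    child.setTextContent(value == null ? "" : value);
    parent.appendChild(child);
}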
use of org.apache.nifi.controller.service.ControllerServiceState in project nifi by apache.
the class ProcessGroupResource method activateControllerServices.
private void activateControllerServices(final String groupId, final URI originalUri, final VariableRegistryUpdateRequest updateRequest, final Pause pause, final Collection<AffectedComponentDTO> affectedServices, final ControllerServiceState desiredState, final VariableRegistryUpdateStep updateStep) throws InterruptedException {
final Set<String> affectedServiceIds = affectedServices.stream().map(component -> component.getId()).collect(Collectors.toSet());
final Map<String, Revision> serviceRevisionMap = getRevisions(groupId, affectedServiceIds);
final Map<String, RevisionDTO> serviceRevisionDtoMap = serviceRevisionMap.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> dtoFactory.createRevisionDTO(entry.getValue())));
final ActivateControllerServicesEntity activateServicesEntity = new ActivateControllerServicesEntity();
activateServicesEntity.setComponents(serviceRevisionDtoMap);
activateServicesEntity.setId(groupId);
activateServicesEntity.setState(desiredState.name());
URI controllerServicesUri;
try {
controllerServicesUri = new URI(originalUri.getScheme(), originalUri.getUserInfo(), originalUri.getHost(), originalUri.getPort(), "/nifi-api/flow/process-groups/" + groupId + "/controller-services", null, originalUri.getFragment());
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
final Map<String, String> headers = new HashMap<>();
headers.put("content-type", MediaType.APPLICATION_JSON);
// Determine whether we should replicate only to the cluster coordinator, or if we should replicate directly to the cluster nodes themselves.
final NodeResponse clusterResponse;
if (getReplicationTarget() == ReplicationTarget.CLUSTER_NODES) {
clusterResponse = getRequestReplicator().replicate(HttpMethod.PUT, controllerServicesUri, activateServicesEntity, headers).awaitMergedResponse();
} else {
clusterResponse = getRequestReplicator().forwardToCoordinator(getClusterCoordinatorNode(), HttpMethod.PUT, controllerServicesUri, activateServicesEntity, headers).awaitMergedResponse();
}
final int disableServicesStatus = clusterResponse.getStatus();
if (disableServicesStatus != Status.OK.getStatusCode()) {
updateStep.setFailureReason("Failed while " + updateStep.getDescription());
updateStep.setComplete(true);
updateRequest.setFailureReason("Failed while " + updateStep.getDescription());
return;
}
updateRequest.setLastUpdated(new Date());
final boolean serviceTransitioned = waitForControllerServiceStatus(originalUri, groupId, affectedServiceIds, desiredState, updateRequest, pause);
updateStep.setComplete(true);
if (!serviceTransitioned) {
updateStep.setFailureReason("Failed while " + updateStep.getDescription());
updateRequest.setComplete(true);
updateRequest.setFailureReason("Failed while " + updateStep.getDescription());
}
}
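Both activateControllerServices variants on this page build the controller-services URI inline from the incoming request URI. That rewrite could be extracted into a small helper like the sketch below; toControllerServicesUri is an illustrative name, not an existing NiFi method.
// Sketch: derive the /controller-services URI for a group from the original request URI.
private static URI toControllerServicesUri(final URI originalUri, final String groupId) {
    try {
        return new URI(originalUri.getScheme(), originalUri.getUserInfo(), originalUri.getHost(), originalUri.getPort(),
                "/nifi-api/flow/process-groups/" + groupId + "/controller-services", null, originalUri.getFragment());
    } catch (final URISyntaxException e) {
        throw new IllegalArgumentException("Unable to derive controller-services URI from " + originalUri, e);
    }
}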
use of org.apache.nifi.controller.service.ControllerServiceState in project nifi by apache.
the class ClusterReplicationComponentLifecycle method waitForControllerServiceStatus.
/**
* Periodically polls the process group with the given ID, waiting for all controller services whose IDs are given to have the given Controller Service State.
*
* @param user the user making the request
* @param originalUri the original URI of the request, used to derive the polling URI
* @param groupId the ID of the Process Group to poll
* @param serviceIds the IDs of all Controller Services whose state should be equal to the given desired state
* @param desiredState the desired state for all services with the given IDs
* @param pause the Pause that can be used to wait between polls
* @return <code>true</code> if successful, <code>false</code> if unable to wait for services to reach the desired state
*/
private boolean waitForControllerServiceStatus(final NiFiUser user, final URI originalUri, final String groupId, final Set<String> serviceIds, final ControllerServiceState desiredState, final Pause pause) throws InterruptedException {
URI groupUri;
try {
groupUri = new URI(originalUri.getScheme(), originalUri.getUserInfo(), originalUri.getHost(), originalUri.getPort(), "/nifi-api/flow/process-groups/" + groupId + "/controller-services", "includeAncestorGroups=false,includeDescendantGroups=true", originalUri.getFragment());
} catch (URISyntaxException e) {
throw new RuntimeException(e);
}
final Map<String, String> headers = new HashMap<>();
final MultivaluedMap<String, String> requestEntity = new MultivaluedHashMap<>();
boolean continuePolling = true;
while (continuePolling) {
// Determine whether we should replicate only to the cluster coordinator, or if we should replicate directly to the cluster nodes themselves.
final NodeResponse clusterResponse;
if (getReplicationTarget() == ReplicationTarget.CLUSTER_NODES) {
clusterResponse = getRequestReplicator().replicate(user, HttpMethod.GET, groupUri, requestEntity, headers).awaitMergedResponse();
} else {
clusterResponse = getRequestReplicator().forwardToCoordinator(getClusterCoordinatorNode(), user, HttpMethod.GET, groupUri, requestEntity, headers).awaitMergedResponse();
}
if (clusterResponse.getStatus() != Status.OK.getStatusCode()) {
return false;
}
final ControllerServicesEntity controllerServicesEntity = getResponseEntity(clusterResponse, ControllerServicesEntity.class);
final Set<ControllerServiceEntity> serviceEntities = controllerServicesEntity.getControllerServices();
final Map<String, AffectedComponentEntity> affectedServices = serviceEntities.stream().collect(Collectors.toMap(ControllerServiceEntity::getId, dtoFactory::createAffectedComponentEntity));
// update the affected controller services
updateAffectedControllerServices(serviceEntities, affectedServices);
final String desiredStateName = desiredState.name();
final boolean allServicesMatch = serviceEntities.stream().map(entity -> entity.getComponent()).filter(service -> serviceIds.contains(service.getId())).map(service -> service.getState()).allMatch(state -> state.equals(desiredStateName));
if (allServicesMatch) {
logger.debug("All {} controller services of interest now have the desired state of {}", serviceIds.size(), desiredState);
return true;
}
// Not all of the services are in the desired state. Pause for a bit and poll again.
continuePolling = pause.pause();
}
return false;
}
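The polling loop above continues only while pause.pause() returns true, so the Pause passed in is what bounds the wait. Assuming Pause exposes a single boolean pause() method, a deadline-bound implementation might look like the sketch below; the poll interval and timeout are illustrative values.
// Sketch (assumption): a Pause that sleeps between polls and gives up after a fixed deadline.
final long deadline = System.currentTimeMillis() + 5L * 60L * 1000L; // 5 minutes, illustrative
final Pause pause = () -> {
    if (System.currentTimeMillis() >= deadline) {
        return false; // stop polling; waitForControllerServiceStatus will then return false
    }
    try {
        Thread.sleep(250L); // poll interval, illustrative
        return true;
    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
        return false;
    }
};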