Use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
The class ProcessGroupResource, method activateControllerServices.
private void activateControllerServices(final String groupId, final URI originalUri, final VariableRegistryUpdateRequest updateRequest, final Pause pause,
        final Collection<AffectedComponentDTO> affectedServices, final ControllerServiceState desiredState, final VariableRegistryUpdateStep updateStep) throws InterruptedException {

    final Set<String> affectedServiceIds = affectedServices.stream()
        .map(component -> component.getId())
        .collect(Collectors.toSet());

    final Map<String, Revision> serviceRevisionMap = getRevisions(groupId, affectedServiceIds);
    final Map<String, RevisionDTO> serviceRevisionDtoMap = serviceRevisionMap.entrySet().stream()
        .collect(Collectors.toMap(Map.Entry::getKey, entry -> dtoFactory.createRevisionDTO(entry.getValue())));

    final ActivateControllerServicesEntity activateServicesEntity = new ActivateControllerServicesEntity();
    activateServicesEntity.setComponents(serviceRevisionDtoMap);
    activateServicesEntity.setId(groupId);
    activateServicesEntity.setState(desiredState.name());

    URI controllerServicesUri;
    try {
        controllerServicesUri = new URI(originalUri.getScheme(), originalUri.getUserInfo(), originalUri.getHost(), originalUri.getPort(),
            "/nifi-api/flow/process-groups/" + groupId + "/controller-services", null, originalUri.getFragment());
    } catch (URISyntaxException e) {
        throw new RuntimeException(e);
    }

    final Map<String, String> headers = new HashMap<>();
    headers.put("content-type", MediaType.APPLICATION_JSON);

    // Determine whether we should replicate only to the cluster coordinator, or if we should replicate directly to the cluster nodes themselves.
    final NodeResponse clusterResponse;
    if (getReplicationTarget() == ReplicationTarget.CLUSTER_NODES) {
        clusterResponse = getRequestReplicator().replicate(HttpMethod.PUT, controllerServicesUri, activateServicesEntity, headers).awaitMergedResponse();
    } else {
        clusterResponse = getRequestReplicator().forwardToCoordinator(getClusterCoordinatorNode(), HttpMethod.PUT, controllerServicesUri, activateServicesEntity, headers).awaitMergedResponse();
    }

    final int disableServicesStatus = clusterResponse.getStatus();
    if (disableServicesStatus != Status.OK.getStatusCode()) {
        updateStep.setFailureReason("Failed while " + updateStep.getDescription());
        updateStep.setComplete(true);
        updateRequest.setFailureReason("Failed while " + updateStep.getDescription());
        return;
    }

    updateRequest.setLastUpdated(new Date());

    final boolean serviceTransitioned = waitForControllerServiceStatus(originalUri, groupId, affectedServiceIds, desiredState, updateRequest, pause);
    updateStep.setComplete(true);

    if (!serviceTransitioned) {
        updateStep.setFailureReason("Failed while " + updateStep.getDescription());
        updateRequest.setComplete(true);
        updateRequest.setFailureReason("Failed while " + updateStep.getDescription());
    }
}
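For reference, the replicated call above amounts to a plain REST PUT of the ActivateControllerServicesEntity against the flow resource. Below is a minimal standalone sketch of that request, assuming a NiFi instance at localhost:8080 with no authentication, a hypothetical group and service ID, and a client-supplied revision. The JSON field names mirror the entity setters used above (id, state, components); the exact revision payload is an assumption, not something shown in the snippet.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class EnableControllerServicesExample {
    public static void main(final String[] args) throws Exception {
        final String groupId = "root";          // hypothetical process group ID
        final String serviceId = "1234-abcd";   // hypothetical controller service ID

        // Payload mirroring ActivateControllerServicesEntity: the group ID, the desired state,
        // and a map of component ID to revision (revision fields are an assumption).
        final String json = "{"
            + "\"id\":\"" + groupId + "\","
            + "\"state\":\"ENABLED\","
            + "\"components\":{\"" + serviceId + "\":{\"version\":0,\"clientId\":\"example-client\"}}"
            + "}";

        final HttpRequest request = HttpRequest.newBuilder(
                URI.create("http://localhost:8080/nifi-api/flow/process-groups/" + groupId + "/controller-services"))
            .header("Content-Type", "application/json")
            .PUT(HttpRequest.BodyPublishers.ofString(json))
            .build();

        final HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + ": " + response.body());
    }
}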
Use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
The class ProcessGroupResource, method waitForProcessorStatus.
/**
 * Periodically polls the process group with the given ID, waiting for all processors whose IDs are given to have the given Scheduled State.
 *
 * @param originalUri the original URI of the incoming request, used to build the polling URI
 * @param groupId the ID of the Process Group to poll
 * @param processorIds the IDs of all Processors whose state should be equal to the given desired state
 * @param desiredState the desired state for all processors with the IDs given
 * @param updateRequest the variable registry update request to update as the processors transition
 * @param pause the Pause that can be used to wait between polling
 * @return <code>true</code> if successful, <code>false</code> if unable to wait for processors to reach the desired state
 */
private boolean waitForProcessorStatus(final URI originalUri, final String groupId, final Set<String> processorIds, final ScheduledState desiredState,
        final VariableRegistryUpdateRequest updateRequest, final Pause pause) throws InterruptedException {

    URI groupUri;
    try {
        groupUri = new URI(originalUri.getScheme(), originalUri.getUserInfo(), originalUri.getHost(), originalUri.getPort(),
            "/nifi-api/process-groups/" + groupId + "/processors", "includeDescendantGroups=true", originalUri.getFragment());
    } catch (URISyntaxException e) {
        throw new RuntimeException(e);
    }

    final Map<String, String> headers = new HashMap<>();
    final MultivaluedMap<String, String> requestEntity = new MultivaluedHashMap<>();

    boolean continuePolling = true;
    while (continuePolling) {
        // Determine whether we should replicate only to the cluster coordinator, or if we should replicate directly to the cluster nodes themselves.
        final NodeResponse clusterResponse;
        if (getReplicationTarget() == ReplicationTarget.CLUSTER_NODES) {
            clusterResponse = getRequestReplicator().replicate(HttpMethod.GET, groupUri, requestEntity, headers).awaitMergedResponse();
        } else {
            clusterResponse = getRequestReplicator().forwardToCoordinator(getClusterCoordinatorNode(), HttpMethod.GET, groupUri, requestEntity, headers).awaitMergedResponse();
        }

        if (clusterResponse.getStatus() != Status.OK.getStatusCode()) {
            return false;
        }

        final ProcessorsEntity processorsEntity = getResponseEntity(clusterResponse, ProcessorsEntity.class);
        final Set<ProcessorEntity> processorEntities = processorsEntity.getProcessors();

        if (isProcessorActionComplete(processorEntities, updateRequest, processorIds, desiredState)) {
            logger.debug("All {} processors of interest now have the desired state of {}", processorIds.size(), desiredState);
            return true;
        }

        // Not all of the processors are in the desired state. Pause for a bit and poll again.
        continuePolling = pause.pause();
    }

    return false;
}
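The loop above keeps polling for as long as pause.pause() returns true. Below is a minimal sketch of a time-bounded Pause; it assumes Pause is a single-method interface with boolean pause(), which is all the loop relies on, and the class name and constructor are placeholders rather than NiFi code.

import java.util.concurrent.TimeUnit;

// Sleeps between polls and reports whether the caller should keep polling.
public class TimeBoundedPause implements Pause {
    private final long expirationNanos;     // absolute deadline
    private final long pollIntervalMillis;  // sleep between polls

    public TimeBoundedPause(final long timeout, final long pollInterval, final TimeUnit unit) {
        this.expirationNanos = System.nanoTime() + unit.toNanos(timeout);
        this.pollIntervalMillis = unit.toMillis(pollInterval);
    }

    @Override
    public boolean pause() {
        if (System.nanoTime() >= expirationNanos) {
            return false;   // deadline reached; stop polling
        }
        try {
            Thread.sleep(pollIntervalMillis);
        } catch (final InterruptedException e) {
            Thread.currentThread().interrupt();
            return false;   // interrupted; stop polling
        }
        return System.nanoTime() < expirationNanos;
    }
}

With an implementation along these lines, waitForProcessorStatus gives up after the configured timeout instead of polling indefinitely.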
Use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
The class ClusterReplicationComponentLifecycle, method scheduleComponents.
@Override
public Set<AffectedComponentEntity> scheduleComponents(final URI exampleUri, final NiFiUser user, final String groupId, final Set<AffectedComponentEntity> components,
        final ScheduledState desiredState, final Pause pause) throws LifecycleManagementException {

    final Set<String> componentIds = components.stream()
        .map(component -> component.getId())
        .collect(Collectors.toSet());
    final Map<String, AffectedComponentEntity> componentMap = components.stream()
        .collect(Collectors.toMap(AffectedComponentEntity::getId, Function.identity()));
    final Map<String, Revision> componentRevisionMap = getRevisions(groupId, componentIds);
    final Map<String, RevisionDTO> componentRevisionDtoMap = componentRevisionMap.entrySet().stream()
        .collect(Collectors.toMap(Map.Entry::getKey, entry -> dtoFactory.createRevisionDTO(entry.getValue())));

    final ScheduleComponentsEntity scheduleProcessorsEntity = new ScheduleComponentsEntity();
    scheduleProcessorsEntity.setComponents(componentRevisionDtoMap);
    scheduleProcessorsEntity.setId(groupId);
    scheduleProcessorsEntity.setState(desiredState.name());

    URI scheduleGroupUri;
    try {
        scheduleGroupUri = new URI(exampleUri.getScheme(), exampleUri.getUserInfo(), exampleUri.getHost(), exampleUri.getPort(),
            "/nifi-api/flow/process-groups/" + groupId, null, exampleUri.getFragment());
    } catch (URISyntaxException e) {
        throw new RuntimeException(e);
    }

    final Map<String, String> headers = new HashMap<>();
    headers.put("content-type", MediaType.APPLICATION_JSON);

    // Determine whether we should replicate only to the cluster coordinator, or if we should replicate directly to the cluster nodes themselves.
    try {
        final NodeResponse clusterResponse;
        if (getReplicationTarget() == ReplicationTarget.CLUSTER_NODES) {
            clusterResponse = getRequestReplicator().replicate(user, HttpMethod.PUT, scheduleGroupUri, scheduleProcessorsEntity, headers).awaitMergedResponse();
        } else {
            clusterResponse = getRequestReplicator().forwardToCoordinator(getClusterCoordinatorNode(), user, HttpMethod.PUT, scheduleGroupUri, scheduleProcessorsEntity, headers).awaitMergedResponse();
        }

        final int scheduleComponentStatus = clusterResponse.getStatus();
        if (scheduleComponentStatus != Status.OK.getStatusCode()) {
            final String explanation = getResponseEntity(clusterResponse, String.class);
            throw new LifecycleManagementException("Failed to transition components to a state of " + desiredState + " due to " + explanation);
        }

        final boolean processorsTransitioned = waitForProcessorStatus(user, exampleUri, groupId, componentMap, desiredState, pause);
        if (!processorsTransitioned) {
            throw new LifecycleManagementException("Failed while waiting for components to transition to state of " + desiredState);
        }
    } catch (final InterruptedException ie) {
        Thread.currentThread().interrupt();
        throw new LifecycleManagementException("Interrupted while attempting to transition components to state of " + desiredState);
    }

    final Set<AffectedComponentEntity> updatedEntities = components.stream()
        .map(component -> AffectedComponentUtils.updateEntity(component, serviceFacade, dtoFactory, user))
        .collect(Collectors.toSet());
    return updatedEntities;
}
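A hedged usage sketch of the method above: stop the affected components, apply a change, then return them to RUNNING. The helper name and the applyFlowUpdate step are illustrative placeholders, and ComponentLifecycle is assumed to be the interface the @Override above implements; only the scheduleComponents signature comes from the code shown.

private void updateWithComponentsStopped(final ComponentLifecycle lifecycle, final URI uri, final NiFiUser user, final String groupId,
        final Set<AffectedComponentEntity> affected, final Pause pause) throws LifecycleManagementException {

    // Stop the affected components and capture their refreshed entities (including up-to-date revisions).
    final Set<AffectedComponentEntity> stopped = lifecycle.scheduleComponents(uri, user, groupId, affected, ScheduledState.STOPPED, pause);
    try {
        applyFlowUpdate(groupId);   // hypothetical update step performed while the components are stopped
    } finally {
        // Restart the components even if the update step fails.
        lifecycle.scheduleComponents(uri, user, groupId, stopped, ScheduledState.RUNNING, pause);
    }
}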
Use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
The class ClusterReplicationComponentLifecycle, method waitForControllerServiceStatus.
/**
 * Periodically polls the process group with the given ID, waiting for all controller services whose IDs are given to have the given Controller Service State.
 *
 * @param user the user making the request
 * @param originalUri the original URI of the incoming request, used to build the polling URI
 * @param groupId the ID of the Process Group to poll
 * @param serviceIds the IDs of all Controller Services whose state should be equal to the given desired state
 * @param desiredState the desired state for all services with the IDs given
 * @param pause the Pause that can be used to wait between polling
 * @return <code>true</code> if successful, <code>false</code> if unable to wait for services to reach the desired state
 */
private boolean waitForControllerServiceStatus(final NiFiUser user, final URI originalUri, final String groupId, final Set<String> serviceIds,
        final ControllerServiceState desiredState, final Pause pause) throws InterruptedException {

    URI groupUri;
    try {
        groupUri = new URI(originalUri.getScheme(), originalUri.getUserInfo(), originalUri.getHost(), originalUri.getPort(),
            "/nifi-api/flow/process-groups/" + groupId + "/controller-services", "includeAncestorGroups=false&includeDescendantGroups=true", originalUri.getFragment());
    } catch (URISyntaxException e) {
        throw new RuntimeException(e);
    }

    final Map<String, String> headers = new HashMap<>();
    final MultivaluedMap<String, String> requestEntity = new MultivaluedHashMap<>();

    boolean continuePolling = true;
    while (continuePolling) {
        // Determine whether we should replicate only to the cluster coordinator, or if we should replicate directly to the cluster nodes themselves.
        final NodeResponse clusterResponse;
        if (getReplicationTarget() == ReplicationTarget.CLUSTER_NODES) {
            clusterResponse = getRequestReplicator().replicate(user, HttpMethod.GET, groupUri, requestEntity, headers).awaitMergedResponse();
        } else {
            clusterResponse = getRequestReplicator().forwardToCoordinator(getClusterCoordinatorNode(), user, HttpMethod.GET, groupUri, requestEntity, headers).awaitMergedResponse();
        }

        if (clusterResponse.getStatus() != Status.OK.getStatusCode()) {
            return false;
        }

        final ControllerServicesEntity controllerServicesEntity = getResponseEntity(clusterResponse, ControllerServicesEntity.class);
        final Set<ControllerServiceEntity> serviceEntities = controllerServicesEntity.getControllerServices();

        final Map<String, AffectedComponentEntity> affectedServices = serviceEntities.stream()
            .collect(Collectors.toMap(ControllerServiceEntity::getId, dtoFactory::createAffectedComponentEntity));

        // Update the affected controller services.
        updateAffectedControllerServices(serviceEntities, affectedServices);

        final String desiredStateName = desiredState.name();
        final boolean allServicesMatch = serviceEntities.stream()
            .map(entity -> entity.getComponent())
            .filter(service -> serviceIds.contains(service.getId()))
            .map(service -> service.getState())
            .allMatch(state -> state.equals(desiredStateName));

        if (allServicesMatch) {
            logger.debug("All {} controller services of interest now have the desired state of {}", serviceIds.size(), desiredState);
            return true;
        }

        // Not all of the controller services are in the desired state. Pause for a bit and poll again.
        continuePolling = pause.pause();
    }

    return false;
}
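The allMatch stream above assumes every service of interest exposes a readable component DTO. A small variant, pulled into a helper for reuse, is sketched below; the null guard is an addition, on the assumption that getComponent() can return null when the requesting user lacks read access to a service.

private boolean allServicesInState(final Set<ControllerServiceEntity> serviceEntities, final Set<String> serviceIds, final ControllerServiceState desiredState) {
    final String desiredStateName = desiredState.name();
    return serviceEntities.stream()
        .filter(entity -> serviceIds.contains(entity.getId()))
        .map(ControllerServiceEntity::getComponent)
        .allMatch(dto -> dto != null && desiredStateName.equals(dto.getState()));
}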