Use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
The class SystemDiagnosticsResource, method getSystemDiagnostics:
/**
 * Gets the system diagnostics for this NiFi instance.
 *
 * @return A systemDiagnosticsEntity.
 * @throws InterruptedException if interrupted
 */
@GET
@Consumes(MediaType.WILDCARD)
@Produces(MediaType.APPLICATION_JSON)
@ApiOperation(value = "Gets the diagnostics for the system NiFi is running on", response = SystemDiagnosticsEntity.class, authorizations = {
        @Authorization(value = "Read - /system") })
@ApiResponses(value = {
        @ApiResponse(code = 401, message = "Client could not be authenticated."),
        @ApiResponse(code = 403, message = "Client is not authorized to make this request.") })
public Response getSystemDiagnostics(
        @ApiParam(value = "Whether or not to include the breakdown per node. Optional, defaults to false", required = false) @QueryParam("nodewise") @DefaultValue(NODEWISE) final Boolean nodewise,
        @ApiParam(value = "The id of the node where to get the status.", required = false) @QueryParam("clusterNodeId") final String clusterNodeId) throws InterruptedException {

    authorizeSystem();

    // ensure a valid request
    if (Boolean.TRUE.equals(nodewise) && clusterNodeId != null) {
        throw new IllegalArgumentException("Nodewise requests cannot be directed at a specific node.");
    }

    if (isReplicateRequest()) {
        // determine where this request should be sent
        if (clusterNodeId == null) {
            final NodeResponse nodeResponse;

            // Determine whether we should forward the request to the cluster coordinator, or replicate it directly
            // to the cluster nodes themselves.
            if (getReplicationTarget() == ReplicationTarget.CLUSTER_NODES) {
                nodeResponse = getRequestReplicator().replicate(HttpMethod.GET, getAbsolutePath(), getRequestParameters(), getHeaders()).awaitMergedResponse();
            } else {
                nodeResponse = getRequestReplicator().forwardToCoordinator(getClusterCoordinatorNode(), HttpMethod.GET, getAbsolutePath(), getRequestParameters(), getHeaders()).awaitMergedResponse();
            }

            final SystemDiagnosticsEntity entity = (SystemDiagnosticsEntity) nodeResponse.getUpdatedEntity();

            // ensure there is an updated entity (result of merging) and prune the response as necessary
            if (entity != null && !nodewise) {
                entity.getSystemDiagnostics().setNodeSnapshots(null);
            }

            return nodeResponse.getResponse();
        } else {
            return replicate(HttpMethod.GET);
        }
    }

    final SystemDiagnosticsDTO systemDiagnosticsDto = serviceFacade.getSystemDiagnostics();

    // create the response
    final SystemDiagnosticsEntity entity = new SystemDiagnosticsEntity();
    entity.setSystemDiagnostics(systemDiagnosticsDto);

    // generate the response
    return generateOkResponse(entity).build();
}
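For reference, a minimal client-side sketch of calling this endpoint with a JAX-RS client. It assumes an unsecured, standalone NiFi instance listening on localhost:8080 and that the resource above is mounted at /system-diagnostics under the /nifi-api context; the query parameter names come from the resource method, everything else is illustrative.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

public class SystemDiagnosticsClientExample {
    public static void main(String[] args) {
        final Client client = ClientBuilder.newClient();
        try {
            // nodewise=false returns only the aggregate view; omit clusterNodeId entirely,
            // since the resource rejects nodewise requests directed at a specific node.
            final Response response = client.target("http://localhost:8080/nifi-api")
                    .path("system-diagnostics")
                    .queryParam("nodewise", "false")
                    .request(MediaType.APPLICATION_JSON)
                    .get();

            System.out.println("HTTP " + response.getStatus());
            System.out.println(response.readEntity(String.class));
        } finally {
            client.close();
        }
    }
}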
Use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
The class VersionsResource, method replicateVersionControlMapping:
private void replicateVersionControlMapping(final VersionControlComponentMappingEntity mappingEntity, final StartVersionControlRequestEntity requestEntity,
                                            final URI requestUri, final String groupId) {

    final Map<String, String> headers = new HashMap<>();
    headers.put("content-type", MediaType.APPLICATION_JSON);

    final NodeResponse clusterResponse;
    try {
        if (getReplicationTarget() == ReplicationTarget.CLUSTER_NODES) {
            clusterResponse = getRequestReplicator().replicate(HttpMethod.PUT, requestUri, mappingEntity, headers).awaitMergedResponse();
        } else {
            clusterResponse = getRequestReplicator().forwardToCoordinator(getClusterCoordinatorNode(), HttpMethod.PUT, requestUri, mappingEntity, headers).awaitMergedResponse();
        }
    } catch (final InterruptedException ie) {
        Thread.currentThread().interrupt();

        if (requestEntity.getVersionedFlow().getFlowId() == null) {
            // We had to create the flow for this snapshot. Since we failed to replicate the Version Control Info, remove the
            // flow from the Flow Registry (use best effort; if we can't remove it, just log and move on).
            final VersionControlInformationDTO vci = mappingEntity.getVersionControlInformation();
            try {
                serviceFacade.deleteVersionedFlow(vci.getRegistryId(), vci.getBucketId(), vci.getFlowId());
            } catch (final Exception e) {
                logger.error("Created Versioned Flow with ID {} in bucket with ID {} but failed to replicate the Version Control Information to cluster. "
                        + "Attempted to delete the newly created (empty) flow from the Flow Registry but failed", vci.getFlowId(), vci.getBucketId(), e);
            }
        }

        throw new RuntimeException("Interrupted while updating Version Control Information for Process Group with ID " + groupId + ".", ie);
    }

    if (clusterResponse.getStatus() != Status.OK.getStatusCode()) {
        if (requestEntity.getVersionedFlow().getFlowId() == null) {
            // We had to create the flow for this snapshot. Since we failed to replicate the Version Control Info, remove the
            // flow from the Flow Registry (use best effort; if we can't remove it, just log and move on).
            final VersionControlInformationDTO vci = mappingEntity.getVersionControlInformation();
            try {
                serviceFacade.deleteVersionedFlow(vci.getRegistryId(), vci.getBucketId(), vci.getFlowId());
            } catch (final Exception e) {
                logger.error("Created Versioned Flow with ID {} in bucket with ID {} but failed to replicate the Version Control Information to cluster. "
                        + "Attempted to delete the newly created (empty) flow from the Flow Registry but failed", vci.getFlowId(), vci.getBucketId(), e);
            }
        }

        final String message = "Failed to update Version Control Information for Process Group with ID " + groupId + ".";
        final Throwable cause = clusterResponse.getThrowable();
        if (cause == null) {
            throw new IllegalStateException(message);
        } else {
            throw new IllegalStateException(message, cause);
        }
    }
}
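The two deletion blocks above follow a best-effort compensation pattern: if replication fails after a new flow was created in the Flow Registry, try to delete the flow, but never let the cleanup failure hide the original error. Below is a self-contained sketch of that pattern; the Registry interface and method names are hypothetical stand-ins, not NiFi APIs.

import java.util.function.Consumer;

public final class BestEffortCleanup {

    // Hypothetical stand-in for the flow registry facade used above.
    interface Registry {
        void deleteFlow(String registryId, String bucketId, String flowId) throws Exception;
    }

    static void cleanUpCreatedFlow(Registry registry, String registryId, String bucketId,
                                   String flowId, Consumer<Exception> logError) {
        try {
            registry.deleteFlow(registryId, bucketId, flowId);
        } catch (final Exception e) {
            // Best effort: log and move on, so the original replication failure is still
            // the exception that reaches the caller.
            logError.accept(e);
        }
    }
}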
Use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
The class VersionsResource, method updateFlowVersion:
private VersionControlInformationEntity updateFlowVersion(final String groupId, final ComponentLifecycle componentLifecycle, final URI exampleUri,
                                                          final Set<AffectedComponentEntity> affectedComponents, final NiFiUser user, final boolean replicateRequest,
                                                          final Revision revision, final VersionControlInformationEntity requestEntity, final VersionedFlowSnapshot flowSnapshot,
                                                          final AsynchronousWebRequest<VersionControlInformationEntity> asyncRequest, final String idGenerationSeed,
                                                          final boolean verifyNotModified, final boolean updateDescendantVersionedFlows)
        throws LifecycleManagementException, ResumeFlowException {

    // Steps 6-7: Determine which components must be stopped and stop them.
    final Set<String> stoppableReferenceTypes = new HashSet<>();
    stoppableReferenceTypes.add(AffectedComponentDTO.COMPONENT_TYPE_PROCESSOR);
    stoppableReferenceTypes.add(AffectedComponentDTO.COMPONENT_TYPE_REMOTE_INPUT_PORT);
    stoppableReferenceTypes.add(AffectedComponentDTO.COMPONENT_TYPE_REMOTE_OUTPUT_PORT);
    stoppableReferenceTypes.add(AffectedComponentDTO.COMPONENT_TYPE_INPUT_PORT);
    stoppableReferenceTypes.add(AffectedComponentDTO.COMPONENT_TYPE_OUTPUT_PORT);

    final Set<AffectedComponentEntity> runningComponents = affectedComponents.stream()
            .filter(dto -> stoppableReferenceTypes.contains(dto.getComponent().getReferenceType()))
            .filter(dto -> "Running".equalsIgnoreCase(dto.getComponent().getState()))
            .collect(Collectors.toSet());

    logger.info("Stopping {} Processors", runningComponents.size());
    final CancellableTimedPause stopComponentsPause = new CancellableTimedPause(250, Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    asyncRequest.setCancelCallback(stopComponentsPause::cancel);
    componentLifecycle.scheduleComponents(exampleUri, user, groupId, runningComponents, ScheduledState.STOPPED, stopComponentsPause);

    if (asyncRequest.isCancelled()) {
        return null;
    }
    asyncRequest.update(new Date(), "Disabling Affected Controller Services", 20);

    // Steps 8-9. Disable enabled controller services that are affected
    final Set<AffectedComponentEntity> enabledServices = affectedComponents.stream()
            .filter(dto -> AffectedComponentDTO.COMPONENT_TYPE_CONTROLLER_SERVICE.equals(dto.getComponent().getReferenceType()))
            .filter(dto -> "Enabled".equalsIgnoreCase(dto.getComponent().getState()))
            .collect(Collectors.toSet());

    logger.info("Disabling {} Controller Services", enabledServices.size());
    final CancellableTimedPause disableServicesPause = new CancellableTimedPause(250, Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    asyncRequest.setCancelCallback(disableServicesPause::cancel);
    componentLifecycle.activateControllerServices(exampleUri, user, groupId, enabledServices, ControllerServiceState.DISABLED, disableServicesPause);

    if (asyncRequest.isCancelled()) {
        return null;
    }
    asyncRequest.update(new Date(), "Updating Flow", 40);

    logger.info("Updating Process Group with ID {} to version {} of the Versioned Flow", groupId, flowSnapshot.getSnapshotMetadata().getVersion());

    // When replicating the request, the flow update is applied on each node
    // by replicating a PUT to /nifi-api/versions/process-groups/{groupId}
    try {
        if (replicateRequest) {
            final URI updateUri;
            try {
                updateUri = new URI(exampleUri.getScheme(), exampleUri.getUserInfo(), exampleUri.getHost(), exampleUri.getPort(),
                        "/nifi-api/versions/process-groups/" + groupId, null, exampleUri.getFragment());
            } catch (URISyntaxException e) {
                throw new RuntimeException(e);
            }

            final Map<String, String> headers = new HashMap<>();
            headers.put("content-type", MediaType.APPLICATION_JSON);

            final VersionedFlowSnapshotEntity snapshotEntity = new VersionedFlowSnapshotEntity();
            snapshotEntity.setProcessGroupRevision(dtoFactory.createRevisionDTO(revision));
            snapshotEntity.setRegistryId(requestEntity.getVersionControlInformation().getRegistryId());
            snapshotEntity.setVersionedFlow(flowSnapshot);
            snapshotEntity.setUpdateDescendantVersionedFlows(updateDescendantVersionedFlows);

            final NodeResponse clusterResponse;
            try {
                logger.debug("Replicating PUT request to {} for user {}", updateUri, user);

                if (getReplicationTarget() == ReplicationTarget.CLUSTER_NODES) {
                    clusterResponse = getRequestReplicator().replicate(user, HttpMethod.PUT, updateUri, snapshotEntity, headers).awaitMergedResponse();
                } else {
                    clusterResponse = getRequestReplicator().forwardToCoordinator(getClusterCoordinatorNode(), user, HttpMethod.PUT, updateUri, snapshotEntity, headers).awaitMergedResponse();
                }
            } catch (final InterruptedException ie) {
                logger.warn("Interrupted while replicating PUT request to {} for user {}", updateUri, user);
                Thread.currentThread().interrupt();
                throw new LifecycleManagementException("Interrupted while updating flows across cluster", ie);
            }

            final int updateFlowStatus = clusterResponse.getStatus();
            if (updateFlowStatus != Status.OK.getStatusCode()) {
                final String explanation = getResponseEntity(clusterResponse, String.class);
                logger.error("Failed to update flow across cluster when replicating PUT request to {} for user {}. Received {} response with explanation: {}",
                        updateUri, user, updateFlowStatus, explanation);
                throw new LifecycleManagementException("Failed to update Flow on all nodes in cluster due to " + explanation);
            }
        } else {
            // Step 10: Ensure that if any connection exists in the flow and does not exist in the proposed snapshot,
            // that it has no data in it. Ensure that no Input Port was removed, unless it currently has no incoming connections.
            // Ensure that no Output Port was removed, unless it currently has no outgoing connections.
            serviceFacade.verifyCanUpdate(groupId, flowSnapshot, true, verifyNotModified);

            // Step 11-12. Update Process Group to the new flow and update variable registry with any Variables that were added or removed
            final VersionControlInformationDTO requestVci = requestEntity.getVersionControlInformation();

            final Bucket bucket = flowSnapshot.getBucket();
            final VersionedFlow flow = flowSnapshot.getFlow();
            final VersionedFlowSnapshotMetadata metadata = flowSnapshot.getSnapshotMetadata();

            final VersionControlInformationDTO vci = new VersionControlInformationDTO();
            vci.setBucketId(metadata.getBucketIdentifier());
            vci.setBucketName(bucket.getName());
            vci.setFlowDescription(flow.getDescription());
            vci.setFlowId(flow.getIdentifier());
            vci.setFlowName(flow.getName());
            vci.setGroupId(groupId);
            vci.setRegistryId(requestVci.getRegistryId());
            vci.setRegistryName(serviceFacade.getFlowRegistryName(requestVci.getRegistryId()));
            vci.setVersion(metadata.getVersion());
            vci.setState(flowSnapshot.isLatest() ? VersionedFlowState.UP_TO_DATE.name() : VersionedFlowState.STALE.name());

            serviceFacade.updateProcessGroupContents(user, revision, groupId, vci, flowSnapshot, idGenerationSeed, verifyNotModified, false, updateDescendantVersionedFlows);
        }
    } finally {
        if (!asyncRequest.isCancelled()) {
            if (logger.isDebugEnabled()) {
                logger.debug("Re-Enabling {} Controller Services: {}", enabledServices.size(), enabledServices);
            }

            asyncRequest.update(new Date(), "Re-Enabling Controller Services", 60);

            // Step 13. Re-enable all disabled controller services
            final CancellableTimedPause enableServicesPause = new CancellableTimedPause(250, Long.MAX_VALUE, TimeUnit.MILLISECONDS);
            asyncRequest.setCancelCallback(enableServicesPause::cancel);
            final Set<AffectedComponentEntity> servicesToEnable = getUpdatedEntities(enabledServices, user);
            logger.info("Successfully updated flow; re-enabling {} Controller Services", servicesToEnable.size());

            try {
                componentLifecycle.activateControllerServices(exampleUri, user, groupId, servicesToEnable, ControllerServiceState.ENABLED, enableServicesPause);
            } catch (final IllegalStateException ise) {
                // Wrap the failure in a ResumeFlowException so that the caller gets
                // a more intelligent error message as to exactly what happened, rather than indicate that the flow could not be updated.
                throw new ResumeFlowException("Failed to re-enable Controller Services because " + ise.getMessage(), ise);
            }
        }

        if (!asyncRequest.isCancelled()) {
            if (logger.isDebugEnabled()) {
                logger.debug("Restart {} Processors: {}", runningComponents.size(), runningComponents);
            }

            asyncRequest.update(new Date(), "Restarting Processors", 80);

            // Step 14. Restart all components
            final Set<AffectedComponentEntity> componentsToStart = getUpdatedEntities(runningComponents, user);

            // If there are any Remote Group Ports that are supposed to be started and have no connections, we want to remove those from our Set.
            // This will happen if the Remote Group Port is transmitting when the version change happens but the new flow version does not have
            // a connection to the port. In such a case, the Port still is included in the Updated Entities because we do not remove them
            // when updating the flow (they are removed in the background).
            final Set<AffectedComponentEntity> avoidStarting = new HashSet<>();
            for (final AffectedComponentEntity componentEntity : componentsToStart) {
                final AffectedComponentDTO componentDto = componentEntity.getComponent();
                final String referenceType = componentDto.getReferenceType();
                if (!AffectedComponentDTO.COMPONENT_TYPE_REMOTE_INPUT_PORT.equals(referenceType) && !AffectedComponentDTO.COMPONENT_TYPE_REMOTE_OUTPUT_PORT.equals(referenceType)) {
                    continue;
                }

                boolean startComponent;
                try {
                    startComponent = serviceFacade.isRemoteGroupPortConnected(componentDto.getProcessGroupId(), componentDto.getId());
                } catch (final ResourceNotFoundException rnfe) {
                    // Could occur if RPG is refreshed at just the right time.
                    startComponent = false;
                }

                // Collect the component into a separate set and remove it after the loop,
                // rather than removing the component here, because doing so would result in a ConcurrentModificationException.
                if (!startComponent) {
                    avoidStarting.add(componentEntity);
                }
            }
            componentsToStart.removeAll(avoidStarting);

            final CancellableTimedPause startComponentsPause = new CancellableTimedPause(250, Long.MAX_VALUE, TimeUnit.MILLISECONDS);
            asyncRequest.setCancelCallback(startComponentsPause::cancel);
            logger.info("Restarting {} Processors", componentsToStart.size());

            try {
                componentLifecycle.scheduleComponents(exampleUri, user, groupId, componentsToStart, ScheduledState.RUNNING, startComponentsPause);
            } catch (final IllegalStateException ise) {
                // Wrap the failure in a ResumeFlowException so that the caller gets
                // a more intelligent error message as to exactly what happened, rather than indicate that the flow could not be updated.
                throw new ResumeFlowException("Failed to restart components because " + ise.getMessage(), ise);
            }
        }
    }

    asyncRequest.setCancelCallback(null);
    if (asyncRequest.isCancelled()) {
        return null;
    }

    asyncRequest.update(new Date(), "Complete", 100);
    return serviceFacade.getVersionControlInformation(groupId);
}
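The updateUri construction in the replicate branch only swaps the path component of the incoming request URI, keeping the scheme, host, port, and fragment. A small standalone sketch of the same java.net.URI constructor call; the host, port, and group id below are illustrative, not taken from any real cluster.

import java.net.URI;
import java.net.URISyntaxException;

public class UriRewriteExample {
    public static void main(String[] args) throws URISyntaxException {
        // Pretend this is the URI of the request that arrived at this node (illustrative values).
        final URI exampleUri = new URI("https://node1.example.com:8443/nifi-api/versions/update-requests/1234");
        final String groupId = "root"; // hypothetical process group id

        // Keep scheme, user info, host, port, and fragment; replace the path and drop the query.
        final URI updateUri = new URI(exampleUri.getScheme(), exampleUri.getUserInfo(), exampleUri.getHost(), exampleUri.getPort(),
                "/nifi-api/versions/process-groups/" + groupId, null, exampleUri.getFragment());

        System.out.println(updateUri); // https://node1.example.com:8443/nifi-api/versions/process-groups/root
    }
}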
Use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
The class CountersResource, method getCounters:
/**
 * Retrieves the counters report for this NiFi.
 *
 * @return A countersEntity.
 * @throws InterruptedException if interrupted
 */
@GET
@Consumes(MediaType.WILDCARD)
@Produces(MediaType.APPLICATION_JSON)
// necessary due to a bug in swagger
@Path("")
@ApiOperation(value = "Gets the current counters for this NiFi", notes = NON_GUARANTEED_ENDPOINT, response = CountersEntity.class, authorizations = {
        @Authorization(value = "Read - /counters") })
@ApiResponses(value = {
        @ApiResponse(code = 400, message = "NiFi was unable to complete the request because it was invalid. The request should not be retried without modification."),
        @ApiResponse(code = 401, message = "Client could not be authenticated."),
        @ApiResponse(code = 403, message = "Client is not authorized to make this request."),
        @ApiResponse(code = 409, message = "The request was valid but NiFi was not in the appropriate state to process it. Retrying the same request later may be successful.") })
public Response getCounters(
        @ApiParam(value = "Whether or not to include the breakdown per node. Optional, defaults to false", required = false) @QueryParam("nodewise") @DefaultValue(NODEWISE) final Boolean nodewise,
        @ApiParam(value = "The id of the node where to get the status.", required = false) @QueryParam("clusterNodeId") final String clusterNodeId) throws InterruptedException {

    authorizeCounters(RequestAction.READ);

    // ensure a valid request
    if (Boolean.TRUE.equals(nodewise) && clusterNodeId != null) {
        throw new IllegalArgumentException("Nodewise requests cannot be directed at a specific node.");
    }

    // replicate if necessary
    if (isReplicateRequest()) {
        // determine where this request should be sent
        if (clusterNodeId == null) {
            final NodeResponse nodeResponse;

            // Determine whether we should forward the request to the cluster coordinator, or replicate it directly
            // to the cluster nodes themselves.
            if (getReplicationTarget() == ReplicationTarget.CLUSTER_NODES) {
                nodeResponse = getRequestReplicator().replicate(HttpMethod.GET, getAbsolutePath(), getRequestParameters(), getHeaders()).awaitMergedResponse();
            } else {
                nodeResponse = getRequestReplicator().forwardToCoordinator(getClusterCoordinatorNode(), HttpMethod.GET, getAbsolutePath(), getRequestParameters(), getHeaders()).awaitMergedResponse();
            }

            final CountersEntity entity = (CountersEntity) nodeResponse.getUpdatedEntity();

            // ensure there is an updated entity (result of merging) and prune the response as necessary
            if (entity != null && !nodewise) {
                entity.getCounters().setNodeSnapshots(null);
            }

            return nodeResponse.getResponse();
        } else {
            // get the target node and ensure it exists
            final NodeIdentifier targetNode = getClusterCoordinator().getNodeIdentifier(clusterNodeId);
            if (targetNode == null) {
                throw new UnknownNodeException("The specified cluster node does not exist.");
            }

            return replicate(HttpMethod.GET, targetNode);
        }
    }

    final CountersDTO countersReport = serviceFacade.getCounters();

    // create the response entity
    final CountersEntity entity = new CountersEntity();
    entity.setCounters(countersReport);

    // generate the response
    return generateOkResponse(entity).build();
}
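Every method on this page makes the same decision before awaiting a merged NodeResponse: either replicate the request directly to all cluster nodes, or forward it to the cluster coordinator and let the coordinator fan it out. Below is a simplified, self-contained sketch of that branch; the interfaces are stand-ins for illustration only, not the real org.apache.nifi.cluster types.

import java.net.URI;
import java.util.Map;

public final class ReplicationSketch {

    enum ReplicationTarget { CLUSTER_NODES, CLUSTER_COORDINATOR }

    // Stand-in for the merged NodeResponse returned by awaitMergedResponse().
    interface MergedResponse {
        int getStatus();
        Object getUpdatedEntity();
    }

    // Stand-in for the request replicator used by the resource methods above.
    interface Replicator {
        MergedResponse replicate(String method, URI uri, Map<String, String> headers) throws InterruptedException;
        MergedResponse forwardToCoordinator(String coordinatorId, String method, URI uri,
                                            Map<String, String> headers) throws InterruptedException;
    }

    static MergedResponse send(Replicator replicator, ReplicationTarget target, String coordinatorId,
                               String method, URI uri, Map<String, String> headers) throws InterruptedException {
        // Replicate to every node, or hand the request to the coordinator to distribute.
        if (target == ReplicationTarget.CLUSTER_NODES) {
            return replicator.replicate(method, uri, headers);
        }
        return replicator.forwardToCoordinator(coordinatorId, method, uri, headers);
    }
}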
Use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
The class FlowResource, method getProcessorStatus:
// ------
// status
// ------

/**
 * Retrieves the specified processor status.
 *
 * @param id The id of the processor whose status is to be retrieved.
 * @return A processorStatusEntity.
 * @throws InterruptedException if interrupted
 */
@GET
@Consumes(MediaType.WILDCARD)
@Produces(MediaType.APPLICATION_JSON)
@Path("processors/{id}/status")
@ApiOperation(value = "Gets status for a processor", response = ProcessorStatusEntity.class, authorizations = {
        @Authorization(value = "Read - /flow") })
@ApiResponses(value = {
        @ApiResponse(code = 400, message = "NiFi was unable to complete the request because it was invalid. The request should not be retried without modification."),
        @ApiResponse(code = 401, message = "Client could not be authenticated."),
        @ApiResponse(code = 403, message = "Client is not authorized to make this request."),
        @ApiResponse(code = 404, message = "The specified resource could not be found."),
        @ApiResponse(code = 409, message = "The request was valid but NiFi was not in the appropriate state to process it. Retrying the same request later may be successful.") })
public Response getProcessorStatus(
        @ApiParam(value = "Whether or not to include the breakdown per node. Optional, defaults to false", required = false) @QueryParam("nodewise") @DefaultValue(NODEWISE) Boolean nodewise,
        @ApiParam(value = "The id of the node where to get the status.", required = false) @QueryParam("clusterNodeId") String clusterNodeId,
        @ApiParam(value = "The processor id.", required = true) @PathParam("id") String id) throws InterruptedException {

    authorizeFlow();

    // ensure a valid request
    if (Boolean.TRUE.equals(nodewise) && clusterNodeId != null) {
        throw new IllegalArgumentException("Nodewise requests cannot be directed at a specific node.");
    }

    if (isReplicateRequest()) {
        // determine where this request should be sent
        if (clusterNodeId == null) {
            final NodeResponse nodeResponse = replicateNodeResponse(HttpMethod.GET);
            final ProcessorStatusEntity entity = (ProcessorStatusEntity) nodeResponse.getUpdatedEntity();

            // ensure there is an updated entity (result of merging) and prune the response as necessary
            if (entity != null && !nodewise) {
                entity.getProcessorStatus().setNodeSnapshots(null);
            }

            return nodeResponse.getResponse();
        } else {
            return replicate(HttpMethod.GET, clusterNodeId);
        }
    }

    // get the specified processor status
    final ProcessorStatusEntity entity = serviceFacade.getProcessorStatus(id);
    return generateOkResponse(entity).build();
}
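When the caller does not ask for a node-wise breakdown, the merged entity is pruned by clearing its per-node snapshots, as in getProcessorStatus, getCounters, and getSystemDiagnostics above. A minimal sketch of that step, using a simplified stand-in for the status DTO rather than the real NiFi type:

import java.util.List;

public final class PruneSnapshotsSketch {

    // Simplified stand-in for a *StatusDTO that carries per-node snapshots.
    static final class Status {
        private List<String> nodeSnapshots;
        void setNodeSnapshots(List<String> snapshots) { this.nodeSnapshots = snapshots; }
        List<String> getNodeSnapshots() { return nodeSnapshots; }
    }

    static void pruneIfNotNodewise(Status merged, boolean nodewise) {
        // Only the aggregate view is returned unless the caller asked for nodewise details.
        if (merged != null && !nodewise) {
            merged.setNodeSnapshots(null);
        }
    }
}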