Use of org.apache.nifi.web.api.entity.ScheduleComponentsEntity in project nifi by apache.

The class FlowResource, method scheduleComponents:

/**
 * Schedules or unschedules components in the specified process group.
 *
 * @param httpServletRequest request
 * @param id The id of the process group.
 * @param requestScheduleComponentsEntity A scheduleComponentsEntity.
 * @return A scheduleComponentsEntity.
 */
@PUT
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
@Path("process-groups/{id}")
@ApiOperation(value = "Schedule or unschedule components in the specified Process Group.", response = ScheduleComponentsEntity.class, authorizations = { @Authorization(value = "Read - /flow"), @Authorization(value = "Write - /{component-type}/{uuid} - For every component being scheduled/unscheduled") })
@ApiResponses(value = { @ApiResponse(code = 400, message = "NiFi was unable to complete the request because it was invalid. The request should not be retried without modification."), @ApiResponse(code = 401, message = "Client could not be authenticated."), @ApiResponse(code = 403, message = "Client is not authorized to make this request."), @ApiResponse(code = 404, message = "The specified resource could not be found."), @ApiResponse(code = 409, message = "The request was valid but NiFi was not in the appropriate state to process it. Retrying the same request later may be successful.") })
public Response scheduleComponents(@Context HttpServletRequest httpServletRequest, @ApiParam(value = "The process group id.", required = true) @PathParam("id") String id, @ApiParam(value = "The request to schedule or unschedule. If the comopnents in the request are not specified, all authorized components will be considered.", required = true) final ScheduleComponentsEntity requestScheduleComponentsEntity) {
    // ensure the same id is being used
    if (!id.equals(requestScheduleComponentsEntity.getId())) {
        throw new IllegalArgumentException(String.format("The process group id (%s) in the request body does "
                + "not equal the process group id of the requested resource (%s).", requestScheduleComponentsEntity.getId(), id));
    }

    final ScheduledState state;
    if (requestScheduleComponentsEntity.getState() == null) {
        throw new IllegalArgumentException("The scheduled state must be specified.");
    } else {
        try {
            state = ScheduledState.valueOf(requestScheduleComponentsEntity.getState());
        } catch (final IllegalArgumentException iae) {
            throw new IllegalArgumentException(String.format("The scheduled state must be one of [%s].",
                    StringUtils.join(EnumSet.of(ScheduledState.RUNNING, ScheduledState.STOPPED), ", ")));
        }
    }

    // ensure it's a supported scheduled state
    if (ScheduledState.DISABLED.equals(state) || ScheduledState.STARTING.equals(state) || ScheduledState.STOPPING.equals(state)) {
        throw new IllegalArgumentException(String.format("The scheduled state must be one of [%s].",
                StringUtils.join(EnumSet.of(ScheduledState.RUNNING, ScheduledState.STOPPED), ", ")));
    }
    // if the components are not specified, gather all components and their current revision
    if (requestScheduleComponentsEntity.getComponents() == null) {
        // get the current revisions for the components being updated
        final Set<Revision> revisions = serviceFacade.getRevisionsFromGroup(id, group -> {
            final Set<String> componentIds = new HashSet<>();

            // ensure authorized for each processor we will attempt to schedule
            group.findAllProcessors().stream()
                    .filter(ScheduledState.RUNNING.equals(state) ? ProcessGroup.SCHEDULABLE_PROCESSORS : ProcessGroup.UNSCHEDULABLE_PROCESSORS)
                    .filter(processor -> processor.isAuthorized(authorizer, RequestAction.WRITE, NiFiUserUtils.getNiFiUser()))
                    .forEach(processor -> componentIds.add(processor.getIdentifier()));

            // ensure authorized for each input port we will attempt to schedule
            group.findAllInputPorts().stream()
                    .filter(ScheduledState.RUNNING.equals(state) ? ProcessGroup.SCHEDULABLE_PORTS : ProcessGroup.UNSCHEDULABLE_PORTS)
                    .filter(inputPort -> inputPort.isAuthorized(authorizer, RequestAction.WRITE, NiFiUserUtils.getNiFiUser()))
                    .forEach(inputPort -> componentIds.add(inputPort.getIdentifier()));

            // ensure authorized for each output port we will attempt to schedule
            group.findAllOutputPorts().stream()
                    .filter(ScheduledState.RUNNING.equals(state) ? ProcessGroup.SCHEDULABLE_PORTS : ProcessGroup.UNSCHEDULABLE_PORTS)
                    .filter(outputPort -> outputPort.isAuthorized(authorizer, RequestAction.WRITE, NiFiUserUtils.getNiFiUser()))
                    .forEach(outputPort -> componentIds.add(outputPort.getIdentifier()));

            return componentIds;
        });

        // build the component mapping
        final Map<String, RevisionDTO> componentsToSchedule = new HashMap<>();
        revisions.forEach(revision -> {
            final RevisionDTO dto = new RevisionDTO();
            dto.setClientId(revision.getClientId());
            dto.setVersion(revision.getVersion());
            componentsToSchedule.put(revision.getComponentId(), dto);
        });

        // set the components and their current revision
        requestScheduleComponentsEntity.setComponents(componentsToSchedule);
    }
    if (isReplicateRequest()) {
        return replicate(HttpMethod.PUT, requestScheduleComponentsEntity);
    }

    final Map<String, RevisionDTO> requestComponentsToSchedule = requestScheduleComponentsEntity.getComponents();
    final Map<String, Revision> requestComponentRevisions = requestComponentsToSchedule.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, e -> getRevision(e.getValue(), e.getKey())));
    final Set<Revision> requestRevisions = new HashSet<>(requestComponentRevisions.values());

    return withWriteLock(
            serviceFacade,
            requestScheduleComponentsEntity,
            requestRevisions,
            lookup -> {
                // ensure access to the flow
                authorizeFlow();

                // ensure access to every component being scheduled
                requestComponentsToSchedule.keySet().forEach(componentId -> {
                    final Authorizable connectable = lookup.getLocalConnectable(componentId);
                    connectable.authorize(authorizer, RequestAction.WRITE, NiFiUserUtils.getNiFiUser());
                });
            },
            () -> serviceFacade.verifyScheduleComponents(id, state, requestComponentRevisions.keySet()),
            (revisions, scheduleComponentsEntity) -> {
                final ScheduledState scheduledState = ScheduledState.valueOf(scheduleComponentsEntity.getState());
                final Map<String, RevisionDTO> componentsToSchedule = scheduleComponentsEntity.getComponents();
                final Map<String, Revision> componentRevisions = componentsToSchedule.entrySet().stream()
                        .collect(Collectors.toMap(Map.Entry::getKey, e -> getRevision(e.getValue(), e.getKey())));

                // schedule or unschedule the requested components
                final ScheduleComponentsEntity entity = serviceFacade.scheduleComponents(id, scheduledState, componentRevisions);
                return generateOkResponse(entity).build();
            }
    );
}
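
For context, here is a minimal client-side sketch of a request to this endpoint. It is not part of the NiFi source; it assumes a JAX-RS client with a JSON provider (e.g., Jackson) on the classpath, and the base URL, process group id, component id, and revision values are placeholders.

import java.util.HashMap;
import java.util.Map;

import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

import org.apache.nifi.web.api.dto.RevisionDTO;
import org.apache.nifi.web.api.entity.ScheduleComponentsEntity;

public class ScheduleComponentsClientExample {

    public static void main(final String[] args) {
        final String groupId = "11111111-2222-3333-4444-555555555555";  // placeholder process group id

        // Revision of one component to schedule; the version must match the server's current revision.
        final RevisionDTO revision = new RevisionDTO();
        revision.setClientId("example-client");
        revision.setVersion(0L);

        final Map<String, RevisionDTO> components = new HashMap<>();
        components.put("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", revision);  // placeholder component id

        // The entity id must match the process group id in the URI; the state must be RUNNING or STOPPED.
        final ScheduleComponentsEntity entity = new ScheduleComponentsEntity();
        entity.setId(groupId);
        entity.setState("RUNNING");
        entity.setComponents(components);  // omit to schedule all authorized components in the group

        final Response response = ClientBuilder.newClient()
                .target("https://nifi.example.com:8443/nifi-api")  // placeholder base URL
                .path("flow/process-groups/" + groupId)
                .request(MediaType.APPLICATION_JSON)
                .put(Entity.json(entity));

        System.out.println("Schedule request returned HTTP " + response.getStatus());
    }
}

As the resource code above shows, leaving the components map unset causes the server to gather every schedulable component in the group that the caller is authorized to write.
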
Use of org.apache.nifi.web.api.entity.ScheduleComponentsEntity in project nifi by apache.

The class ClusterReplicationComponentLifecycle, method scheduleComponents:

@Override
public Set<AffectedComponentEntity> scheduleComponents(final URI exampleUri, final NiFiUser user, final String groupId,
        final Set<AffectedComponentEntity> components, final ScheduledState desiredState, final Pause pause) throws LifecycleManagementException {
    final Set<String> componentIds = components.stream().map(component -> component.getId()).collect(Collectors.toSet());
    final Map<String, AffectedComponentEntity> componentMap = components.stream()
            .collect(Collectors.toMap(AffectedComponentEntity::getId, Function.identity()));
    final Map<String, Revision> componentRevisionMap = getRevisions(groupId, componentIds);
    final Map<String, RevisionDTO> componentRevisionDtoMap = componentRevisionMap.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, entry -> dtoFactory.createRevisionDTO(entry.getValue())));

    final ScheduleComponentsEntity scheduleProcessorsEntity = new ScheduleComponentsEntity();
    scheduleProcessorsEntity.setComponents(componentRevisionDtoMap);
    scheduleProcessorsEntity.setId(groupId);
    scheduleProcessorsEntity.setState(desiredState.name());

    URI scheduleGroupUri;
    try {
        scheduleGroupUri = new URI(exampleUri.getScheme(), exampleUri.getUserInfo(), exampleUri.getHost(), exampleUri.getPort(),
                "/nifi-api/flow/process-groups/" + groupId, null, exampleUri.getFragment());
    } catch (URISyntaxException e) {
        throw new RuntimeException(e);
    }

    final Map<String, String> headers = new HashMap<>();
    headers.put("content-type", MediaType.APPLICATION_JSON);

    // Determine whether we should replicate only to the cluster coordinator, or if we should replicate directly to the cluster nodes themselves.
    try {
        final NodeResponse clusterResponse;
        if (getReplicationTarget() == ReplicationTarget.CLUSTER_NODES) {
            clusterResponse = getRequestReplicator().replicate(user, HttpMethod.PUT, scheduleGroupUri, scheduleProcessorsEntity, headers).awaitMergedResponse();
        } else {
            clusterResponse = getRequestReplicator().forwardToCoordinator(
                    getClusterCoordinatorNode(), user, HttpMethod.PUT, scheduleGroupUri, scheduleProcessorsEntity, headers).awaitMergedResponse();
        }

        final int scheduleComponentStatus = clusterResponse.getStatus();
        if (scheduleComponentStatus != Status.OK.getStatusCode()) {
            final String explanation = getResponseEntity(clusterResponse, String.class);
            throw new LifecycleManagementException("Failed to transition components to a state of " + desiredState + " due to " + explanation);
        }

        final boolean processorsTransitioned = waitForProcessorStatus(user, exampleUri, groupId, componentMap, desiredState, pause);
        if (!processorsTransitioned) {
            throw new LifecycleManagementException("Failed while waiting for components to transition to state of " + desiredState);
        }
    } catch (final InterruptedException ie) {
        Thread.currentThread().interrupt();
        throw new LifecycleManagementException("Interrupted while attempting to transition components to state of " + desiredState);
    }

    final Set<AffectedComponentEntity> updatedEntities = components.stream()
            .map(component -> AffectedComponentUtils.updateEntity(component, serviceFacade, dtoFactory, user))
            .collect(Collectors.toSet());
    return updatedEntities;
}
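
A hypothetical caller-side sketch of how this method is commonly used: stop the affected components, perform an update, then return them to RUNNING. The helper class and its Runnable parameter are illustrative, not part of NiFi; the sketch assumes it sits alongside ClusterReplicationComponentLifecycle so that ComponentLifecycle (the interface this class presumably implements), Pause, and LifecycleManagementException resolve without additional imports.

import java.net.URI;
import java.util.Set;

import org.apache.nifi.authorization.user.NiFiUser;
import org.apache.nifi.controller.ScheduledState;
import org.apache.nifi.web.api.entity.AffectedComponentEntity;

// Hypothetical helper, not part of NiFi: stop the affected components, run an update, then restart them.
public class StopStartLifecycleExample {

    public static void updateWithComponentsStopped(final ComponentLifecycle lifecycle, final URI exampleUri, final NiFiUser user,
            final String groupId, final Set<AffectedComponentEntity> components, final Pause pause, final Runnable update)
            throws LifecycleManagementException {

        // Stop everything the update will touch; the returned entities carry refreshed revisions.
        final Set<AffectedComponentEntity> stopped =
                lifecycle.scheduleComponents(exampleUri, user, groupId, components, ScheduledState.STOPPED, pause);

        try {
            update.run();
        } finally {
            // Restart the same components whether or not the update succeeded.
            lifecycle.scheduleComponents(exampleUri, user, groupId, stopped, ScheduledState.RUNNING, pause);
        }
    }
}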