Use of org.apache.nifi.cluster.coordination.ClusterCoordinator in project nifi by apache.
In class ProvenanceEventResource, method getProvenanceEvent:
/**
 * Gets the details for a provenance event.
 *
 * @param id The id of the event
 * @param clusterNodeId The id of the node in the cluster that the event/flowfile originated from. This is only required when clustered.
 * @return A provenanceEventEntity
 */
@GET
@Consumes(MediaType.WILDCARD)
@Produces(MediaType.APPLICATION_JSON)
@Path("{id}")
@ApiOperation(value = "Gets a provenance event", response = ProvenanceEventEntity.class,
        authorizations = { @Authorization(value = "Read Component Data - /data/{component-type}/{uuid}") })
@ApiResponses(value = {
        @ApiResponse(code = 400, message = "NiFi was unable to complete the request because it was invalid. The request should not be retried without modification."),
        @ApiResponse(code = 401, message = "Client could not be authenticated."),
        @ApiResponse(code = 403, message = "Client is not authorized to make this request."),
        @ApiResponse(code = 404, message = "The specified resource could not be found."),
        @ApiResponse(code = 409, message = "The request was valid but NiFi was not in the appropriate state to process it. Retrying the same request later may be successful.")
})
public Response getProvenanceEvent(
        @ApiParam(value = "The id of the node where this event exists if clustered.", required = false)
        @QueryParam("clusterNodeId") final String clusterNodeId,
        @ApiParam(value = "The provenance event id.", required = true)
        @PathParam("id") final LongParameter id) {

    // ensure the id is specified
    if (id == null) {
        throw new IllegalArgumentException("Provenance event id must be specified.");
    }

    // replicate if cluster manager
    if (isReplicateRequest()) {
        // since we're clustered we must specify the cluster node identifier
        if (clusterNodeId == null) {
            throw new IllegalArgumentException("The cluster node identifier must be specified.");
        }
        return replicate(HttpMethod.GET, clusterNodeId);
    }

    // get the provenance event
    final ProvenanceEventDTO event = serviceFacade.getProvenanceEvent(id.getLong());
    event.setClusterNodeId(clusterNodeId);

    // populate the cluster node address
    final ClusterCoordinator coordinator = getClusterCoordinator();
    if (coordinator != null) {
        final NodeIdentifier nodeId = coordinator.getNodeIdentifier(clusterNodeId);
        event.setClusterNodeAddress(nodeId.getApiAddress() + ":" + nodeId.getApiPort());
    }

    // create a response entity
    final ProvenanceEventEntity entity = new ProvenanceEventEntity();
    entity.setProvenanceEvent(event);

    // generate the response
    return generateOkResponse(entity).build();
}
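
The address lookup above assumes the coordinator recognizes the supplied clusterNodeId. As an illustration only (resolveNodeAddress is a hypothetical helper, not part of NiFi), the same ClusterCoordinator lookup could be wrapped with an explicit guard, since an unknown node id may yield no NodeIdentifier; the sketch assumes it lives in the same class, with the same imports, as the method above:

// Illustrative sketch only, not NiFi code: the same lookup as above, but guarding
// against a clusterNodeId the coordinator does not recognize.
private String resolveNodeAddress(final ClusterCoordinator coordinator, final String clusterNodeId) {
    if (coordinator == null || clusterNodeId == null) {
        return null;
    }
    final NodeIdentifier nodeId = coordinator.getNodeIdentifier(clusterNodeId);
    return nodeId == null ? null : nodeId.getApiAddress() + ":" + nodeId.getApiPort();
}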
In class TestSiteToSiteResource, method testPeersClustered:
@Test
public void testPeersClustered() throws Exception {
    final HttpServletRequest req = createCommonHttpServletRequest();
    final NiFiServiceFacade serviceFacade = mock(NiFiServiceFacade.class);

    // configure the resource as a clustered node
    final Map<String, String> clusterSettings = new HashMap<>();
    clusterSettings.put(NiFiProperties.CLUSTER_IS_NODE, "true");
    final SiteToSiteResource resource = getSiteToSiteResource(serviceFacade, clusterSettings);

    // build three mock nodes, each with a distinct site-to-site HTTP port and workload
    final ClusterCoordinator clusterCoordinator = mock(ClusterCoordinator.class);
    final Map<String, NodeWorkload> hostportWorkloads = new HashMap<>();
    final Map<NodeIdentifier, NodeWorkload> workloads = new HashMap<>();
    IntStream.range(1, 4).forEach(i -> {
        final String hostname = "node" + i;
        final int siteToSiteHttpApiPort = 8110 + i;
        final NodeIdentifier nodeId = new NodeIdentifier(hostname, hostname, 8080 + i, hostname, 8090 + i, hostname, 8100 + i, siteToSiteHttpApiPort, false);
        final NodeWorkload workload = new NodeWorkload();
        workload.setReportedTimestamp(System.currentTimeMillis() - i);
        workload.setFlowFileBytes(1024 * i);
        workload.setFlowFileCount(10 * i);
        workload.setActiveThreadCount(i);
        workload.setSystemStartTime(System.currentTimeMillis() - (1000 * i));
        workloads.put(nodeId, workload);
        hostportWorkloads.put(hostname + ":" + siteToSiteHttpApiPort, workload);
    });

    // the coordinator reports the per-node workload used to populate the peer list
    when(clusterCoordinator.getClusterWorkload()).thenReturn(workloads);
    resource.setClusterCoordinator(clusterCoordinator);

    final Response response = resource.getPeers(req);

    // each cluster node should appear as a peer carrying its reported flow file count
    PeersEntity resultEntity = (PeersEntity) response.getEntity();
    assertEquals(200, response.getStatus());
    assertEquals(3, resultEntity.getPeers().size());
    resultEntity.getPeers().stream().forEach(peerDTO -> {
        final NodeWorkload workload = hostportWorkloads.get(peerDTO.getHostname() + ":" + peerDTO.getPort());
        assertNotNull(workload);
        assertEquals(workload.getFlowFileCount(), peerDTO.getFlowFileCount());
    });
}
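
The getPeers call above relies on ClusterCoordinator.getClusterWorkload() to attach each node's reported flow file count to its peer entry. A minimal sketch of how a load-aware consumer might aggregate those figures, assuming the workloads map built in the test above and NodeWorkload getters that mirror the setters used there:

// Sketch only: aggregate the per-node workload figures returned by
// ClusterCoordinator.getClusterWorkload() (here, the mocked map from the test above).
final long totalFlowFiles = workloads.values().stream()
        .mapToLong(NodeWorkload::getFlowFileCount)
        .sum();
final long totalFlowFileBytes = workloads.values().stream()
        .mapToLong(NodeWorkload::getFlowFileBytes)
        .sum();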