use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
the class ProcessorsEndpointMerger method merge.
@Override
public final NodeResponse merge(final URI uri, final String method, final Set<NodeResponse> successfulResponses, final Set<NodeResponse> problematicResponses, final NodeResponse clientResponse) {
    if (!canHandle(uri, method)) {
        throw new IllegalArgumentException("Cannot use Endpoint Mapper of type " + getClass().getSimpleName() + " to map responses for URI " + uri + ", HTTP Method " + method);
    }

    final ProcessorsEntity responseEntity = clientResponse.getClientResponse().readEntity(ProcessorsEntity.class);
    final Set<ProcessorEntity> processorEntities = responseEntity.getProcessors();

    // map each processor id to that processor's entity as reported by each node
    final Map<String, Map<NodeIdentifier, ProcessorEntity>> entityMap = new HashMap<>();
    for (final NodeResponse nodeResponse : successfulResponses) {
        final ProcessorsEntity nodeResponseEntity = nodeResponse == clientResponse ? responseEntity : nodeResponse.getClientResponse().readEntity(ProcessorsEntity.class);
        final Set<ProcessorEntity> nodeProcessorEntities = nodeResponseEntity.getProcessors();

        for (final ProcessorEntity nodeProcessorEntity : nodeProcessorEntities) {
            final NodeIdentifier nodeId = nodeResponse.getNodeId();
            // look up the inner map by processor id, the same key it is stored under
            Map<NodeIdentifier, ProcessorEntity> innerMap = entityMap.get(nodeProcessorEntity.getId());
            if (innerMap == null) {
                innerMap = new HashMap<>();
                entityMap.put(nodeProcessorEntity.getId(), innerMap);
            }

            innerMap.put(nodeId, nodeProcessorEntity);
        }
    }

    ProcessorsEntityMerger.mergeProcessors(processorEntities, entityMap);

    // create a new client response
    return new NodeResponse(clientResponse, responseEntity);
}
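The two-level map built above (processor id, then node id, to entity) can also be populated with Map.computeIfAbsent, which keeps the lookup key and the storage key from drifting apart. A minimal, self-contained sketch, with plain Strings standing in for ProcessorEntity and NodeIdentifier; the class name and ids are hypothetical:

import java.util.HashMap;
import java.util.Map;

public class EntityMapSketch {
    public static void main(String[] args) {
        // outer key: entity id; inner key: node id
        final Map<String, Map<String, String>> entityMap = new HashMap<>();
        entityMap.computeIfAbsent("processor-1", id -> new HashMap<>()).put("node-a", "entity as seen by node-a");
        entityMap.computeIfAbsent("processor-1", id -> new HashMap<>()).put("node-b", "entity as seen by node-b");

        // both node entries land under the same processor id
        System.out.println(entityMap);
    }
}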
use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
the class ProvenanceQueryEndpointMerger method mergeResponses.
protected void mergeResponses(ProvenanceDTO clientDto, Map<NodeIdentifier, ProvenanceDTO> dtoMap, Set<NodeResponse> successfulResponses, Set<NodeResponse> problematicResponses) {
    final ProvenanceResultsDTO results = clientDto.getResults();
    final ProvenanceRequestDTO request = clientDto.getRequest();
    final List<ProvenanceEventDTO> allResults = new ArrayList<>(1024);
    final Set<String> errors = new HashSet<>();
    Date oldestEventDate = new Date();
    int percentageComplete = 0;
    boolean finished = true;
    long totalRecords = 0;

    for (final Map.Entry<NodeIdentifier, ProvenanceDTO> entry : dtoMap.entrySet()) {
        final NodeIdentifier nodeIdentifier = entry.getKey();
        final String nodeAddress = nodeIdentifier.getApiAddress() + ":" + nodeIdentifier.getApiPort();

        final ProvenanceDTO nodeDto = entry.getValue();
        final ProvenanceResultsDTO nodeResultDto = nodeDto.getResults();
        if (nodeResultDto != null) {
            if (nodeResultDto.getProvenanceEvents() != null) {
                // increment the total number of records
                totalRecords += nodeResultDto.getTotalCount();

                // populate the cluster identifiers
                for (final ProvenanceEventDTO eventDto : nodeResultDto.getProvenanceEvents()) {
                    // If the node id or address is already set, it was populated by the Cluster Coordinator
                    // when it federated the request, and we are now just receiving the response
                    // from the Cluster Coordinator; otherwise, populate it here.
                    if (eventDto.getClusterNodeId() == null || eventDto.getClusterNodeAddress() == null) {
                        eventDto.setClusterNodeId(nodeIdentifier.getId());
                        eventDto.setClusterNodeAddress(nodeAddress);
                        // prepend the node identifier to the event's id so that it is unique across the cluster
                        eventDto.setId(nodeIdentifier.getId() + eventDto.getId());
                    }

                    allResults.add(eventDto);
                }
            }

            if (nodeResultDto.getOldestEvent() != null && nodeResultDto.getOldestEvent().before(oldestEventDate)) {
                oldestEventDate = nodeResultDto.getOldestEvent();
            }

            if (nodeResultDto.getErrors() != null) {
                for (final String error : nodeResultDto.getErrors()) {
                    errors.add(nodeAddress + " -- " + error);
                }
            }
        }
        percentageComplete += nodeDto.getPercentCompleted();

        if (!nodeDto.isFinished()) {
            finished = false;
        }
    }

    percentageComplete /= dtoMap.size();

    // consider any problematic responses as errors
    for (final NodeResponse problematicResponse : problematicResponses) {
        final NodeIdentifier problemNode = problematicResponse.getNodeId();
        final String problemNodeAddress = problemNode.getApiAddress() + ":" + problemNode.getApiPort();
        errors.add(String.format("%s -- Request did not complete successfully (Status code: %s)", problemNodeAddress, problematicResponse.getStatus()));
    }
    // Since we get back up to the maximum number of results from each node, we need to sort those values and then
    // grab only the first X number of them. We sort by event time so that the newest events are included.
    // If two events have the same timestamp, we do a secondary sort based on Cluster Node Identifier. If those are
    // equal, we perform a tertiary sort based on the event id.
    Collections.sort(allResults, new Comparator<ProvenanceEventDTO>() {

        @Override
        public int compare(final ProvenanceEventDTO o1, final ProvenanceEventDTO o2) {
            final int eventTimeComparison = o1.getEventTime().compareTo(o2.getEventTime());
            if (eventTimeComparison != 0) {
                return -eventTimeComparison;
            }

            final String nodeId1 = o1.getClusterNodeId();
            final String nodeId2 = o2.getClusterNodeId();
            final int nodeIdComparison;
            if (nodeId1 == null && nodeId2 == null) {
                nodeIdComparison = 0;
            } else if (nodeId1 == null) {
                nodeIdComparison = 1;
            } else if (nodeId2 == null) {
                nodeIdComparison = -1;
            } else {
                nodeIdComparison = -nodeId1.compareTo(nodeId2);
            }

            if (nodeIdComparison != 0) {
                return nodeIdComparison;
            }

            return -Long.compare(o1.getEventId(), o2.getEventId());
        }
    });
    final int maxResults = request.getMaxResults().intValue();
    final List<ProvenanceEventDTO> selectedResults;
    if (allResults.size() < maxResults) {
        selectedResults = allResults;
    } else {
        selectedResults = allResults.subList(0, maxResults);
    }

    // include any errors
    if (!errors.isEmpty()) {
        results.setErrors(errors);
    }
    if (request.getMaxResults() != null && totalRecords >= request.getMaxResults()) {
        results.setTotalCount(request.getMaxResults().longValue());
        results.setTotal(FormatUtils.formatCount(request.getMaxResults().longValue()) + "+");
    } else {
        results.setTotal(FormatUtils.formatCount(totalRecords));
        results.setTotalCount(totalRecords);
    }

    results.setProvenanceEvents(selectedResults);
    results.setOldestEvent(oldestEventDate);
    results.setGenerated(new Date());
    clientDto.setPercentCompleted(percentageComplete);
    clientDto.setFinished(finished);
}
use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
the class RemoteProcessGroupStatusEndpointMerger method mergeResponses.
@Override
protected void mergeResponses(RemoteProcessGroupStatusEntity clientEntity, Map<NodeIdentifier, RemoteProcessGroupStatusEntity> entityMap, Set<NodeResponse> successfulResponses, Set<NodeResponse> problematicResponses) {
    final RemoteProcessGroupStatusDTO mergedRemoteProcessGroupStatus = clientEntity.getRemoteProcessGroupStatus();
    mergedRemoteProcessGroupStatus.setNodeSnapshots(new ArrayList<>());

    final NodeIdentifier selectedNodeId = entityMap.entrySet().stream()
        .filter(e -> e.getValue() == clientEntity)
        .map(e -> e.getKey())
        .findFirst()
        .orElse(null);
    // guard against a missing client entity rather than failing later with a NullPointerException
    if (selectedNodeId == null) {
        throw new IllegalArgumentException("Unable to find the entity for the node that the client is connected to");
    }

    final NodeRemoteProcessGroupStatusSnapshotDTO selectedNodeSnapshot = new NodeRemoteProcessGroupStatusSnapshotDTO();
    selectedNodeSnapshot.setStatusSnapshot(mergedRemoteProcessGroupStatus.getAggregateSnapshot().clone());
    selectedNodeSnapshot.setAddress(selectedNodeId.getApiAddress());
    selectedNodeSnapshot.setApiPort(selectedNodeId.getApiPort());
    selectedNodeSnapshot.setNodeId(selectedNodeId.getId());

    mergedRemoteProcessGroupStatus.getNodeSnapshots().add(selectedNodeSnapshot);
    // merge the other nodes
    for (final Map.Entry<NodeIdentifier, RemoteProcessGroupStatusEntity> entry : entityMap.entrySet()) {
        final NodeIdentifier nodeId = entry.getKey();
        final RemoteProcessGroupStatusEntity nodeRemoteProcessGroupStatusEntity = entry.getValue();
        final RemoteProcessGroupStatusDTO nodeRemoteProcessGroupStatus = nodeRemoteProcessGroupStatusEntity.getRemoteProcessGroupStatus();
        if (nodeRemoteProcessGroupStatus == mergedRemoteProcessGroupStatus) {
            continue;
        }

        mergeStatus(mergedRemoteProcessGroupStatus, clientEntity.getCanRead(), nodeRemoteProcessGroupStatus, nodeRemoteProcessGroupStatusEntity.getCanRead(), nodeId);
    }
}
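The stream above relies on reference equality between the client entity and one of the map values; written with orElseThrow, the lookup and the guard collapse into a single expression. A sketch with Strings standing in for the NiFi types (names and message are hypothetical):

import java.util.LinkedHashMap;
import java.util.Map;

public class SelectedNodeSketch {
    public static void main(String[] args) {
        final String clientEntity = "client entity";
        final Map<String, String> entityMap = new LinkedHashMap<>();
        entityMap.put("node-a", "some other entity");
        entityMap.put("node-b", clientEntity);

        final String selectedNodeId = entityMap.entrySet().stream()
            .filter(e -> e.getValue() == clientEntity) // reference equality, mirroring the merger
            .map(Map.Entry::getKey)
            .findFirst()
            .orElseThrow(() -> new IllegalArgumentException("No node produced the client entity"));

        System.out.println(selectedNodeId); // node-b
    }
}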
use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
the class RemoteProcessGroupsEndpointMerger method merge.
@Override
public NodeResponse merge(URI uri, String method, Set<NodeResponse> successfulResponses, Set<NodeResponse> problematicResponses, NodeResponse clientResponse) {
    if (!canHandle(uri, method)) {
        throw new IllegalArgumentException("Cannot use Endpoint Mapper of type " + getClass().getSimpleName() + " to map responses for URI " + uri + ", HTTP Method " + method);
    }

    final RemoteProcessGroupsEntity responseEntity = clientResponse.getClientResponse().readEntity(RemoteProcessGroupsEntity.class);
    final Set<RemoteProcessGroupEntity> rpgEntities = responseEntity.getRemoteProcessGroups();

    // map each remote process group id to that group's entity as reported by each node
    final Map<String, Map<NodeIdentifier, RemoteProcessGroupEntity>> entityMap = new HashMap<>();
    for (final NodeResponse nodeResponse : successfulResponses) {
        final RemoteProcessGroupsEntity nodeResponseEntity = nodeResponse == clientResponse ? responseEntity : nodeResponse.getClientResponse().readEntity(RemoteProcessGroupsEntity.class);
        final Set<RemoteProcessGroupEntity> nodeRpgEntities = nodeResponseEntity.getRemoteProcessGroups();

        for (final RemoteProcessGroupEntity nodeRpgEntity : nodeRpgEntities) {
            final NodeIdentifier nodeId = nodeResponse.getNodeId();
            // look up the inner map by remote process group id, the same key it is stored under
            Map<NodeIdentifier, RemoteProcessGroupEntity> innerMap = entityMap.get(nodeRpgEntity.getId());
            if (innerMap == null) {
                innerMap = new HashMap<>();
                entityMap.put(nodeRpgEntity.getId(), innerMap);
            }

            innerMap.put(nodeId, nodeRpgEntity);
        }
    }

    RemoteProcessGroupsEntityMerger.mergeRemoteProcessGroups(rpgEntities, entityMap);

    // create a new client response
    return new NodeResponse(clientResponse, responseEntity);
}
use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
the class TemplatesEndpointMerger method merge.
@Override
public final NodeResponse merge(final URI uri, final String method, final Set<NodeResponse> successfulResponses, final Set<NodeResponse> problematicResponses, final NodeResponse clientResponse) {
    if (!canHandle(uri, method)) {
        throw new IllegalArgumentException("Cannot use Endpoint Mapper of type " + getClass().getSimpleName() + " to map responses for URI " + uri + ", HTTP Method " + method);
    }

    final TemplatesEntity responseEntity = clientResponse.getClientResponse().readEntity(getEntityClass());

    // Find the templates that all nodes know about. We do this by mapping Template ID to Template and
    // then for each node, removing any template whose ID is not known to that node. After iterating over
    // all of the nodes, we are left with a Map whose contents are those Templates known by all nodes.
    Map<String, TemplateEntity> templatesById = null;
    for (final NodeResponse nodeResponse : successfulResponses) {
        final TemplatesEntity entity = nodeResponse == clientResponse ? responseEntity : nodeResponse.getClientResponse().readEntity(TemplatesEntity.class);
        final Set<TemplateEntity> templateEntities = entity.getTemplates();
        final Map<String, TemplateEntity> nodeTemplatesById = templateEntities.stream().collect(Collectors.toMap(ent -> ent.getId(), ent -> ent));

        if (templatesById == null) {
            // Create new HashMap so that the map that we have is modifiable.
            templatesById = new HashMap<>(nodeTemplatesById);
        } else {
            // Only keep templates that are known by this node.
            templatesById.keySet().retainAll(nodeTemplatesById.keySet());
        }
    }

    // Set the templates to the set of templates that all nodes know about
    responseEntity.setTemplates(new HashSet<>(templatesById.values()));

    // create a new client response
    return new NodeResponse(clientResponse, responseEntity);
}
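The retainAll pattern above is a map-keyed set intersection: seed the map with the first node's templates, then intersect its key set with each subsequent node's. A self-contained sketch with hypothetical template ids:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TemplateIntersectionSketch {
    public static void main(String[] args) {
        final List<Map<String, String>> perNodeTemplates = List.of(
            Map.of("t1", "Template One", "t2", "Template Two"),
            Map.of("t2", "Template Two", "t3", "Template Three"));

        Map<String, String> templatesById = null;
        for (final Map<String, String> nodeTemplates : perNodeTemplates) {
            if (templatesById == null) {
                templatesById = new HashMap<>(nodeTemplates); // modifiable copy of the first node's view
            } else {
                templatesById.keySet().retainAll(nodeTemplates.keySet()); // drop ids this node lacks
            }
        }

        System.out.println(templatesById.keySet()); // [t2]
    }
}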