Use of org.apache.nifi.cluster.protocol.NodeIdentifier in the Apache NiFi project:
class GroupStatusEndpointMerger, method mergeResponses.
@Override
protected void mergeResponses(ProcessGroupStatusEntity clientEntity, Map<NodeIdentifier, ProcessGroupStatusEntity> entityMap, Set<NodeResponse> successfulResponses, Set<NodeResponse> problematicResponses) {
    // Merges the per-node process-group statuses in entityMap into the client's entity,
    // recording one NodeProcessGroupStatusSnapshotDTO per node on the merged DTO.
    final ProcessGroupStatusDTO mergedProcessGroupStatus = clientEntity.getProcessGroupStatus();
    mergedProcessGroupStatus.setNodeSnapshots(new ArrayList<>());

    // Identify which node produced the client's entity; its aggregate snapshot seeds the result.
    final NodeIdentifier selectedNodeId = entityMap.entrySet().stream()
        .filter(e -> e.getValue() == clientEntity)
        .map(Map.Entry::getKey)
        .findFirst()
        .orElse(null);

    // Guard explicitly: the original dereferenced selectedNodeId unchecked and would throw an
    // undiagnosed NullPointerException if the client entity was not present in the map.
    if (selectedNodeId == null) {
        throw new IllegalArgumentException("Map of responses by Node Identifier does not contain the client entity's response");
    }

    final NodeProcessGroupStatusSnapshotDTO selectedNodeSnapshot = new NodeProcessGroupStatusSnapshotDTO();
    // Clone so later merging into the aggregate does not mutate this node's recorded snapshot.
    selectedNodeSnapshot.setStatusSnapshot(mergedProcessGroupStatus.getAggregateSnapshot().clone());
    selectedNodeSnapshot.setAddress(selectedNodeId.getApiAddress());
    selectedNodeSnapshot.setApiPort(selectedNodeId.getApiPort());
    selectedNodeSnapshot.setNodeId(selectedNodeId.getId());

    mergedProcessGroupStatus.getNodeSnapshots().add(selectedNodeSnapshot);

    // Fold every other node's status into the merged DTO (the client's own is already the base).
    for (final Map.Entry<NodeIdentifier, ProcessGroupStatusEntity> entry : entityMap.entrySet()) {
        final NodeIdentifier nodeId = entry.getKey();
        final ProcessGroupStatusEntity nodeProcessGroupStatusEntity = entry.getValue();
        final ProcessGroupStatusDTO nodeProcessGroupStatus = nodeProcessGroupStatusEntity.getProcessGroupStatus();
        if (nodeProcessGroupStatus == mergedProcessGroupStatus) {
            continue;
        }

        mergeStatus(mergedProcessGroupStatus, clientEntity.getCanRead(), nodeProcessGroupStatus, nodeProcessGroupStatusEntity.getCanRead(), nodeId);
    }
}
Use of org.apache.nifi.cluster.protocol.NodeIdentifier in the Apache NiFi project:
class LabelsEndpointMerger, method merge.
@Override
public NodeResponse merge(URI uri, String method, Set<NodeResponse> successfulResponses, Set<NodeResponse> problematicResponses, NodeResponse clientResponse) {
    // Merges the labels reported by every successful node response into the client's
    // LabelsEntity and returns a new NodeResponse wrapping the merged entity.
    if (!canHandle(uri, method)) {
        throw new IllegalArgumentException("Cannot use Endpoint Mapper of type " + getClass().getSimpleName() + " to map responses for URI " + uri + ", HTTP Method " + method);
    }

    final LabelsEntity responseEntity = clientResponse.getClientResponse().readEntity(LabelsEntity.class);
    final Set<LabelEntity> labelEntities = responseEntity.getLabels();

    // Index each node's copy of every label: outer key is the label id,
    // inner key is the node that reported that copy.
    final Map<String, Map<NodeIdentifier, LabelEntity>> entityMap = new HashMap<>();
    for (final NodeResponse nodeResponse : successfulResponses) {
        final LabelsEntity nodeResponseEntity;
        if (nodeResponse == clientResponse) {
            // The client's entity was already read above; re-reading the stream would fail.
            nodeResponseEntity = responseEntity;
        } else {
            nodeResponseEntity = nodeResponse.getClientResponse().readEntity(LabelsEntity.class);
        }

        for (final LabelEntity nodeLabelEntity : nodeResponseEntity.getLabels()) {
            entityMap.computeIfAbsent(nodeLabelEntity.getId(), id -> new HashMap<>())
                .put(nodeResponse.getNodeId(), nodeLabelEntity);
        }
    }

    LabelsEntityMerger.mergeLabels(labelEntities, entityMap);

    // create a new client response
    return new NodeResponse(clientResponse, responseEntity);
}
Use of org.apache.nifi.cluster.protocol.NodeIdentifier in the Apache NiFi project:
class ListFlowFilesEndpointMerger, method mergeResponses.
@Override
protected void mergeResponses(ListingRequestDTO clientDto, Map<NodeIdentifier, ListingRequestDTO> dtoMap, Set<NodeResponse> successfulResponses, Set<NodeResponse> problematicResponses) {
    // Merges the per-node FlowFile listing requests in dtoMap into the client's DTO:
    // combines queue counts, completion state, and the top-N flowfile summaries.

    // Order summaries by queue position; ties break on cluster node address, with
    // null addresses sorting last so addressed entries are preferred.
    final Comparator<FlowFileSummaryDTO> comparator = new Comparator<FlowFileSummaryDTO>() {
        @Override
        public int compare(final FlowFileSummaryDTO dto1, final FlowFileSummaryDTO dto2) {
            int positionCompare = dto1.getPosition().compareTo(dto2.getPosition());
            if (positionCompare != 0) {
                return positionCompare;
            }

            final String address1 = dto1.getClusterNodeAddress();
            final String address2 = dto2.getClusterNodeAddress();
            if (address1 == null && address2 == null) {
                return 0;
            }
            if (address1 == null) {
                return 1;
            }
            if (address2 == null) {
                return -1;
            }
            return address1.compareTo(address2);
        }
    };

    final NavigableSet<FlowFileSummaryDTO> flowFileSummaries = new TreeSet<>(comparator);

    ListFlowFileState state = null;
    int numStepsCompleted = 0;
    int numStepsTotal = 0;
    int objectCount = 0;
    long byteCount = 0;
    boolean finished = true;
    for (final Map.Entry<NodeIdentifier, ListingRequestDTO> entry : dtoMap.entrySet()) {
        final NodeIdentifier nodeIdentifier = entry.getKey();
        final String nodeAddress = nodeIdentifier.getApiAddress() + ":" + nodeIdentifier.getApiPort();

        final ListingRequestDTO nodeRequest = entry.getValue();

        numStepsTotal++;
        // getFinished() returns a nullable Boolean. The original incremented the counter
        // null-safely but then auto-unboxed it in "!nodeRequest.getFinished()", which would
        // NPE on a null value; a single null-safe branch handles both identically for
        // non-null values and safely for null.
        if (Boolean.TRUE.equals(nodeRequest.getFinished())) {
            numStepsCompleted++;
        } else {
            finished = false;
        }

        final QueueSizeDTO nodeQueueSize = nodeRequest.getQueueSize();
        objectCount += nodeQueueSize.getObjectCount();
        byteCount += nodeQueueSize.getByteCount();

        // Report the most recent update time seen across all nodes.
        if (nodeRequest.getLastUpdated().after(clientDto.getLastUpdated())) {
            clientDto.setLastUpdated(nodeRequest.getLastUpdated());
        }

        // Keep the state with the lowest ordinal value (the "least completed").
        // NOTE(review): 'state' is accumulated but never written back to clientDto in this
        // view of the file — confirm against the full source whether that is intentional.
        final ListFlowFileState nodeState = ListFlowFileState.valueOfDescription(nodeRequest.getState());
        if (state == null || state.compareTo(nodeState) > 0) {
            state = nodeState;
        }

        if (nodeRequest.getFlowFileSummaries() != null) {
            for (final FlowFileSummaryDTO summaryDTO : nodeRequest.getFlowFileSummaries()) {
                // Tag each summary with its originating node, unless the node already did.
                if (summaryDTO.getClusterNodeId() == null || summaryDTO.getClusterNodeAddress() == null) {
                    summaryDTO.setClusterNodeId(nodeIdentifier.getId());
                    summaryDTO.setClusterNodeAddress(nodeAddress);
                }

                flowFileSummaries.add(summaryDTO);

                // Keep the set from growing beyond our max
                if (flowFileSummaries.size() > clientDto.getMaxResults()) {
                    flowFileSummaries.pollLast();
                }
            }
        }

        // Surface any node's failure reason on the merged DTO (last writer wins).
        if (nodeRequest.getFailureReason() != null) {
            clientDto.setFailureReason(nodeRequest.getFailureReason());
        }
    }

    final List<FlowFileSummaryDTO> summaryDTOs = new ArrayList<>(flowFileSummaries);
    clientDto.setFlowFileSummaries(summaryDTOs);

    // depends on invariant if numStepsTotal is 0, so is numStepsCompleted, all steps being completed
    // would be 1. Note: since numStepsCompleted <= numStepsTotal, this integer division
    // yields 1 only when every node has finished and 0 otherwise.
    final int percentCompleted = (numStepsTotal == 0) ? 1 : numStepsCompleted / numStepsTotal;
    clientDto.setPercentCompleted(percentCompleted);
    clientDto.setFinished(finished);

    clientDto.getQueueSize().setByteCount(byteCount);
    clientDto.getQueueSize().setObjectCount(objectCount);
}
Use of org.apache.nifi.cluster.protocol.NodeIdentifier in the Apache NiFi project:
class OutputPortsEndpointMerger, method merge.
@Override
public final NodeResponse merge(final URI uri, final String method, final Set<NodeResponse> successfulResponses, final Set<NodeResponse> problematicResponses, final NodeResponse clientResponse) {
    // Merges the output ports reported by every successful node response into the client's
    // OutputPortsEntity and returns a new NodeResponse wrapping the merged entity.
    if (!canHandle(uri, method)) {
        throw new IllegalArgumentException("Cannot use Endpoint Mapper of type " + getClass().getSimpleName() + " to map responses for URI " + uri + ", HTTP Method " + method);
    }

    final OutputPortsEntity responseEntity = clientResponse.getClientResponse().readEntity(OutputPortsEntity.class);
    final Set<PortEntity> portEntities = responseEntity.getOutputPorts();

    // Index each node's copy of every port by port id, then by the reporting node.
    final Map<String, Map<NodeIdentifier, PortEntity>> entityMap = new HashMap<>();
    for (final NodeResponse nodeResponse : successfulResponses) {
        final OutputPortsEntity nodeResponseEntity = nodeResponse == clientResponse ? responseEntity : nodeResponse.getClientResponse().readEntity(OutputPortsEntity.class);
        final Set<PortEntity> nodePortEntities = nodeResponseEntity.getOutputPorts();

        for (final PortEntity nodePortEntity : nodePortEntities) {
            // BUG FIX: the inner map must be looked up by the port's String id — the map's
            // actual key type. The original called entityMap.get(nodeId) with a
            // NodeIdentifier, which never matched and caused each put to replace the inner
            // map, silently dropping entries collected from previously-visited nodes.
            Map<NodeIdentifier, PortEntity> innerMap = entityMap.get(nodePortEntity.getId());
            if (innerMap == null) {
                innerMap = new HashMap<>();
                entityMap.put(nodePortEntity.getId(), innerMap);
            }

            innerMap.put(nodeResponse.getNodeId(), nodePortEntity);
        }
    }

    PortsEntityMerger.mergePorts(portEntities, entityMap);

    // create a new client response
    return new NodeResponse(clientResponse, responseEntity);
}
Use of org.apache.nifi.cluster.protocol.NodeIdentifier in the Apache NiFi project:
class ProcessGroupsEndpointMerger, method merge.
@Override
public final NodeResponse merge(final URI uri, final String method, final Set<NodeResponse> successfulResponses, final Set<NodeResponse> problematicResponses, final NodeResponse clientResponse) {
    // Merges the process groups reported by every successful node response into the client's
    // ProcessGroupsEntity and returns a new NodeResponse wrapping the merged entity.
    if (!canHandle(uri, method)) {
        throw new IllegalArgumentException("Cannot use Endpoint Mapper of type " + getClass().getSimpleName() + " to map responses for URI " + uri + ", HTTP Method " + method);
    }

    final ProcessGroupsEntity responseEntity = clientResponse.getClientResponse().readEntity(ProcessGroupsEntity.class);
    final Set<ProcessGroupEntity> processGroupEntities = responseEntity.getProcessGroups();

    // Index each node's copy of every process group by group id, then by the reporting node.
    final Map<String, Map<NodeIdentifier, ProcessGroupEntity>> entityMap = new HashMap<>();
    for (final NodeResponse nodeResponse : successfulResponses) {
        final ProcessGroupsEntity nodeResponseEntity = nodeResponse == clientResponse ? responseEntity : nodeResponse.getClientResponse().readEntity(ProcessGroupsEntity.class);
        final Set<ProcessGroupEntity> nodeProcessGroupEntities = nodeResponseEntity.getProcessGroups();

        for (final ProcessGroupEntity nodeProcessGroupEntity : nodeProcessGroupEntities) {
            // BUG FIX: the inner map must be looked up by the group's String id — the map's
            // actual key type. The original called entityMap.get(nodeId) with a
            // NodeIdentifier, which never matched and caused each put to replace the inner
            // map, silently dropping entries collected from previously-visited nodes.
            Map<NodeIdentifier, ProcessGroupEntity> innerMap = entityMap.get(nodeProcessGroupEntity.getId());
            if (innerMap == null) {
                innerMap = new HashMap<>();
                entityMap.put(nodeProcessGroupEntity.getId(), innerMap);
            }

            innerMap.put(nodeResponse.getNodeId(), nodeProcessGroupEntity);
        }
    }

    ProcessGroupsEntityMerger.mergeProcessGroups(processGroupEntities, entityMap);

    // create a new client response
    return new NodeResponse(clientResponse, responseEntity);
}
Aggregations