use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
the class SearchUsersEndpointMerger method merge.
@Override
public final NodeResponse merge(final URI uri, final String method, final Set<NodeResponse> successfulResponses, final Set<NodeResponse> problematicResponses, final NodeResponse clientResponse) {
    if (!canHandle(uri, method)) {
        throw new IllegalArgumentException("Cannot use Endpoint Mapper of type " + getClass().getSimpleName() + " to map responses for URI " + uri + ", HTTP Method " + method);
    }
    final TenantsEntity responseEntity = clientResponse.getClientResponse().readEntity(TenantsEntity.class);
    final Collection<TenantEntity> userEntities = responseEntity.getUsers();
    final Collection<TenantEntity> userGroupEntities = responseEntity.getUserGroups();
    for (final NodeResponse nodeResponse : successfulResponses) {
        final TenantsEntity nodeResponseEntity = nodeResponse == clientResponse ? responseEntity : nodeResponse.getClientResponse().readEntity(TenantsEntity.class);
        // only retain users/groups that all nodes agree on
        userEntities.retainAll(nodeResponseEntity.getUsers());
        userGroupEntities.retainAll(nodeResponseEntity.getUserGroups());
    }
    // create a new client response
    return new NodeResponse(clientResponse, responseEntity);
}
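The merge strategy above is a plain set intersection: starting from the entities in the chosen client response, each node's entities are intersected in via retainAll, so only tenants that every node reported survive. A minimal, self-contained sketch of the same idea, using hypothetical string IDs in place of TenantEntity (this relies on the element type defining equals/hashCode, which TenantEntity presumably does by entity ID):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;

public class RetainAllSketch {
    public static void main(String[] args) {
        // entities from the node whose reply was chosen as the client response
        final Collection<String> merged = new ArrayList<>(Arrays.asList("alice", "bob", "carol"));
        // entities reported by the other nodes in the cluster
        final List<List<String>> otherNodes = Arrays.asList(
                Arrays.asList("alice", "bob"),
                Arrays.asList("alice", "bob", "dave"));
        // intersect: an entity survives only if every node reported it
        for (final List<String> nodeEntities : otherNodes) {
            merged.retainAll(nodeEntities);
        }
        System.out.println(merged); // [alice, bob]
    }
}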
use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
the class StatusHistoryEndpointMerger method merge.
@Override
public NodeResponse merge(URI uri, String method, Set<NodeResponse> successfulResponses, Set<NodeResponse> problematicResponses, NodeResponse clientResponse) {
    final Map<String, MetricDescriptor<?>> metricDescriptors = getStandardMetricDescriptors(uri);
    final StatusHistoryEntity responseEntity = clientResponse.getClientResponse().readEntity(StatusHistoryEntity.class);
    final Set<StatusDescriptorDTO> fieldDescriptors = new LinkedHashSet<>();
    boolean includeCounters = true;
    StatusHistoryDTO lastStatusHistory = null;
    final List<NodeStatusSnapshotsDTO> nodeStatusSnapshots = new ArrayList<>(successfulResponses.size());
    LinkedHashMap<String, String> noReadPermissionsComponentDetails = null;
    for (final NodeResponse nodeResponse : successfulResponses) {
        final StatusHistoryEntity nodeResponseEntity = nodeResponse == clientResponse ? responseEntity : nodeResponse.getClientResponse().readEntity(StatusHistoryEntity.class);
        final StatusHistoryDTO nodeStatus = nodeResponseEntity.getStatusHistory();
        lastStatusHistory = nodeStatus;
        if (noReadPermissionsComponentDetails == null && !nodeResponseEntity.getCanRead()) {
            // If component details from a history with no read permissions are encountered for the first time, hold on to them to be used in the merged response
            noReadPermissionsComponentDetails = nodeStatus.getComponentDetails();
        }
        if (!Boolean.TRUE.equals(nodeResponseEntity.getCanRead())) {
            includeCounters = false;
        }
        final NodeIdentifier nodeId = nodeResponse.getNodeId();
        final NodeStatusSnapshotsDTO nodeStatusSnapshot = new NodeStatusSnapshotsDTO();
        nodeStatusSnapshot.setNodeId(nodeId.getId());
        nodeStatusSnapshot.setAddress(nodeId.getApiAddress());
        nodeStatusSnapshot.setApiPort(nodeId.getApiPort());
        nodeStatusSnapshot.setStatusSnapshots(nodeStatus.getAggregateSnapshots());
        nodeStatusSnapshots.add(nodeStatusSnapshot);
        final List<StatusDescriptorDTO> descriptors = nodeStatus.getFieldDescriptors();
        if (descriptors != null) {
            fieldDescriptors.addAll(descriptors);
        }
    }
    // Include counter metrics only when every node reported the user as authorized; if any node indicates
    // the user is not authorized, we want to assume that the user is, in fact, not authorized.
    if (includeCounters) {
        for (final StatusDescriptorDTO descriptorDto : fieldDescriptors) {
            final String fieldName = descriptorDto.getField();
            if (!metricDescriptors.containsKey(fieldName)) {
                final ValueMapper<ProcessorStatus> valueMapper = s -> {
                    final Map<String, Long> counters = s.getCounters();
                    if (counters == null) {
                        return 0L;
                    }
                    return counters.getOrDefault(descriptorDto.getField(), 0L);
                };
                final MetricDescriptor<ProcessorStatus> metricDescriptor = new StandardMetricDescriptor<>(descriptorDto.getField(), descriptorDto.getLabel(), descriptorDto.getDescription(), Formatter.COUNT, valueMapper);
                metricDescriptors.put(fieldName, metricDescriptor);
            }
        }
    }
    final StatusHistoryDTO clusterStatusHistory = new StatusHistoryDTO();
    clusterStatusHistory.setAggregateSnapshots(mergeStatusHistories(nodeStatusSnapshots, metricDescriptors));
    clusterStatusHistory.setGenerated(new Date());
    clusterStatusHistory.setNodeSnapshots(nodeStatusSnapshots);
    if (lastStatusHistory != null) {
        clusterStatusHistory.setComponentDetails(noReadPermissionsComponentDetails == null ? lastStatusHistory.getComponentDetails() : noReadPermissionsComponentDetails);
    }
    clusterStatusHistory.setFieldDescriptors(new ArrayList<>(fieldDescriptors));
    final StatusHistoryEntity clusterEntity = new StatusHistoryEntity();
    clusterEntity.setStatusHistory(clusterStatusHistory);
    clusterEntity.setCanRead(noReadPermissionsComponentDetails == null);
    return new NodeResponse(clientResponse, clusterEntity);
}
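mergeStatusHistories is not shown in this excerpt; per metric, it folds the per-node snapshot series into a single cluster-level series. A rough, hypothetical sketch of that shape, assuming snapshots are aligned by timestamp and a counter-style metric is summed across nodes (the real code delegates the reduction to each MetricDescriptor, so treat this as illustrative only):

import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class SnapshotMergeSketch {
    // Sum one counter-style metric across nodes, aligning samples by timestamp.
    public static Map<Date, Long> mergeMetric(final List<Map<Date, Long>> perNodeSamples) {
        final Map<Date, Long> merged = new TreeMap<>();
        for (final Map<Date, Long> nodeSamples : perNodeSamples) {
            for (final Map.Entry<Date, Long> sample : nodeSamples.entrySet()) {
                // the first node's value seeds the slot; later nodes add to it
                merged.merge(sample.getKey(), sample.getValue(), Long::sum);
            }
        }
        return merged;
    }
}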
use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
the class UsersEndpointMerger method merge.
@Override
public final NodeResponse merge(final URI uri, final String method, final Set<NodeResponse> successfulResponses, final Set<NodeResponse> problematicResponses, final NodeResponse clientResponse) {
    if (!canHandle(uri, method)) {
        throw new IllegalArgumentException("Cannot use Endpoint Mapper of type " + getClass().getSimpleName() + " to map responses for URI " + uri + ", HTTP Method " + method);
    }
    final UsersEntity responseEntity = clientResponse.getClientResponse().readEntity(UsersEntity.class);
    final Collection<UserEntity> userEntities = responseEntity.getUsers();
    final Map<String, Map<NodeIdentifier, UserEntity>> entityMap = new HashMap<>();
    for (final NodeResponse nodeResponse : successfulResponses) {
        final UsersEntity nodeResponseEntity = nodeResponse == clientResponse ? responseEntity : nodeResponse.getClientResponse().readEntity(UsersEntity.class);
        final Collection<UserEntity> nodeUserEntities = nodeResponseEntity.getUsers();
        // only retain users that all nodes agree on
        userEntities.retainAll(nodeUserEntities);
        for (final UserEntity nodeUserEntity : nodeUserEntities) {
            final NodeIdentifier nodeId = nodeResponse.getNodeId();
            // entityMap is keyed by user ID, so the inner map must be looked up by the entity's ID
            Map<NodeIdentifier, UserEntity> innerMap = entityMap.get(nodeUserEntity.getId());
            if (innerMap == null) {
                innerMap = new HashMap<>();
                entityMap.put(nodeUserEntity.getId(), innerMap);
            }
            innerMap.put(nodeId, nodeUserEntity);
        }
    }
    UsersEntityMerger.mergeUsers(userEntities, entityMap);
    // create a new client response
    return new NodeResponse(clientResponse, responseEntity);
}
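The get/put-if-absent bookkeeping in the inner loop can be written more compactly with Map.computeIfAbsent. A small self-contained sketch of the same two-level index, with plain strings standing in for NodeIdentifier and UserEntity:

import java.util.HashMap;
import java.util.Map;

public class ComputeIfAbsentSketch {
    public static void main(String[] args) {
        // user ID -> (node ID -> that node's copy of the user)
        final Map<String, Map<String, String>> entityMap = new HashMap<>();
        index(entityMap, "node-1", "user-a", "a@node-1");
        index(entityMap, "node-2", "user-a", "a@node-2");
        index(entityMap, "node-2", "user-b", "b@node-2");
        // e.g. {user-a={node-1=a@node-1, node-2=a@node-2}, user-b={node-2=b@node-2}}
        System.out.println(entityMap);
    }

    private static void index(final Map<String, Map<String, String>> entityMap,
                              final String nodeId, final String userId, final String entity) {
        // one inner map per user ID, holding each node's copy of that user
        entityMap.computeIfAbsent(userId, id -> new HashMap<>()).put(nodeId, entity);
    }
}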
use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
the class NodeClusterCoordinator method afterRequest.
/**
 * Callback that is called after an HTTP Request has been replicated to
 * nodes in the cluster. This allows us to disconnect nodes that did not
 * complete the request, if applicable.
 */
@Override
public void afterRequest(final String uriPath, final String method, final Set<NodeResponse> nodeResponses) {
    // if we are not the active cluster coordinator, then we are not responsible for monitoring the responses,
    // as the cluster coordinator is responsible for performing the actual request replication.
    if (!isActiveClusterCoordinator()) {
        return;
    }
    final boolean mutableRequest = isMutableRequest(method);
    /*
     * Nodes that encountered issues handling the request are marked as
     * disconnected for mutable requests (e.g., post, put, delete). For
     * other requests (e.g., get, head), the nodes remain in their current
     * state even if they had problems handling the request.
     */
    if (mutableRequest) {
        final HttpResponseMapper responseMerger = new StandardHttpResponseMapper(nifiProperties);
        final Set<NodeResponse> problematicNodeResponses = responseMerger.getProblematicNodeResponses(nodeResponses);
        // all nodes failed
        final boolean allNodesFailed = problematicNodeResponses.size() == nodeResponses.size();
        // some nodes had a problematic response because of a missing counter; ensure they are not disconnected
        final boolean someNodesFailedMissingCounter = !problematicNodeResponses.isEmpty() && problematicNodeResponses.size() < nodeResponses.size() && isMissingCounter(problematicNodeResponses, uriPath);
        // ensure nodes stay connected in certain scenarios
        if (allNodesFailed) {
            logger.warn("All nodes failed to process {} request to URI {}. As a result, no node will be disconnected from the cluster", method, uriPath);
            return;
        }
        if (someNodesFailedMissingCounter) {
            return;
        }
        // disconnect problematic nodes
        if (!problematicNodeResponses.isEmpty() && problematicNodeResponses.size() < nodeResponses.size()) {
            final Set<NodeIdentifier> failedNodeIds = problematicNodeResponses.stream().map(response -> response.getNodeId()).collect(Collectors.toSet());
            logger.warn(String.format("The following nodes failed to process URI %s: %s. Requesting each node disconnect from the cluster.", uriPath, failedNodeIds));
            for (final NodeIdentifier nodeId : failedNodeIds) {
                requestNodeDisconnect(nodeId, DisconnectionCode.FAILED_TO_SERVICE_REQUEST, "Failed to process request " + method + " " + uriPath);
            }
        }
    }
}
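isMutableRequest is not shown in this excerpt; a plausible sketch, assuming the conventional rule that the write verbs named in the comment above (post, put, delete) are the mutable ones:

import java.util.Locale;

public final class MutableRequestCheck {
    private MutableRequestCheck() {
    }

    // Hypothetical helper: a request is mutable when its HTTP verb can change cluster state.
    public static boolean isMutableRequest(final String method) {
        if (method == null) {
            return false;
        }
        switch (method.toUpperCase(Locale.US)) {
            case "POST":
            case "PUT":
            case "DELETE":
                return true;
            default:
                return false;
        }
    }
}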
use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.
the class TestResponseUtils method testFindLongResponseTimes.
@Test
public void testFindLongResponseTimes() throws URISyntaxException {
    final Map<NodeIdentifier, NodeResponse> responses = new HashMap<>();
    final NodeIdentifier id1 = new NodeIdentifier("1", "localhost", 8000, "localhost", 8001, "localhost", 8002, 8003, false);
    final NodeIdentifier id2 = new NodeIdentifier("2", "localhost", 8200, "localhost", 8201, "localhost", 8202, 8203, false);
    final NodeIdentifier id3 = new NodeIdentifier("3", "localhost", 8300, "localhost", 8301, "localhost", 8302, 8303, false);
    final NodeIdentifier id4 = new NodeIdentifier("4", "localhost", 8400, "localhost", 8401, "localhost", 8402, 8403, false);
    final URI uri = new URI("localhost:8080");
    final Response clientResponse = mock(Response.class);
    // each response is constructed with and keyed by the node it belongs to
    responses.put(id1, new NodeResponse(id1, "GET", uri, clientResponse, TimeUnit.MILLISECONDS.toNanos(80), "1"));
    responses.put(id2, new NodeResponse(id2, "GET", uri, clientResponse, TimeUnit.MILLISECONDS.toNanos(92), "1"));
    responses.put(id3, new NodeResponse(id3, "GET", uri, clientResponse, TimeUnit.MILLISECONDS.toNanos(3), "1"));
    responses.put(id4, new NodeResponse(id4, "GET", uri, clientResponse, TimeUnit.MILLISECONDS.toNanos(120), "1"));
    final AsyncClusterResponse response = new AsyncClusterResponse() {
        @Override
        public String getRequestIdentifier() {
            return "1";
        }

        @Override
        public String getMethod() {
            return "GET";
        }

        @Override
        public String getURIPath() {
            return null;
        }

        @Override
        public Set<NodeIdentifier> getNodesInvolved() {
            return new HashSet<>(responses.keySet());
        }

        @Override
        public Set<NodeIdentifier> getCompletedNodeIdentifiers() {
            return getNodesInvolved();
        }

        @Override
        public boolean isComplete() {
            return true;
        }

        @Override
        public boolean isOlderThan(long time, TimeUnit timeUnit) {
            return true;
        }

        @Override
        public NodeResponse getMergedResponse() {
            return null;
        }

        @Override
        public NodeResponse awaitMergedResponse() throws InterruptedException {
            return null;
        }

        @Override
        public NodeResponse awaitMergedResponse(long timeout, TimeUnit timeUnit) throws InterruptedException {
            return null;
        }

        @Override
        public NodeResponse getNodeResponse(NodeIdentifier nodeId) {
            return responses.get(nodeId);
        }

        @Override
        public Set<NodeResponse> getCompletedNodeResponses() {
            return new HashSet<>(responses.values());
        }
    };
    // no node is dramatically slower than the rest, so nothing is flagged
    Set<NodeIdentifier> slowResponses = ResponseUtils.findLongResponseTimes(response, 1.5D);
    assertTrue(slowResponses.isEmpty());
    // replace node 4's response with one that took far longer than the others
    responses.put(id4, new NodeResponse(id4, "GET", uri, clientResponse, TimeUnit.MILLISECONDS.toNanos(2500), "1"));
    slowResponses = ResponseUtils.findLongResponseTimes(response, 1.5D);
    assertEquals(1, slowResponses.size());
    assertEquals(id4, slowResponses.iterator().next());
}
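ResponseUtils.findLongResponseTimes is not shown here, but the assertions above are consistent with a mean-plus-standard-deviation rule: a node is flagged when its response time exceeds the mean by more than the given multiple of the standard deviation (with the first data set, 120 ms falls under a threshold of roughly 139 ms; with 2500 ms substituted, it exceeds roughly 2255 ms). A hypothetical sketch of that rule:

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public final class LongResponseTimeSketch {
    // Flag nodes whose response time exceeds mean + multiple * stddev.
    // Illustrative only; the real ResponseUtils may differ in details.
    public static Set<String> findSlowNodes(final Map<String, Long> nanosByNode, final double stdDeviationMultiple) {
        final int count = nanosByNode.size();
        if (count == 0) {
            return new HashSet<>();
        }
        double mean = 0;
        for (final long nanos : nanosByNode.values()) {
            mean += nanos / (double) count;
        }
        double variance = 0;
        for (final long nanos : nanosByNode.values()) {
            final double diff = nanos - mean;
            variance += diff * diff / count;
        }
        final double threshold = mean + stdDeviationMultiple * Math.sqrt(variance);
        final Set<String> slow = new HashSet<>();
        for (final Map.Entry<String, Long> entry : nanosByNode.entrySet()) {
            if (entry.getValue() > threshold) {
                slow.add(entry.getKey());
            }
        }
        return slow;
    }
}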