
Example 36 with NodeResponse

use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.

the class SearchUsersEndpointMerger method merge.

@Override
public final NodeResponse merge(final URI uri, final String method, final Set<NodeResponse> successfulResponses, final Set<NodeResponse> problematicResponses, final NodeResponse clientResponse) {
    if (!canHandle(uri, method)) {
        throw new IllegalArgumentException("Cannot use Endpoint Mapper of type " + getClass().getSimpleName() + " to map responses for URI " + uri + ", HTTP Method " + method);
    }
    final TenantsEntity responseEntity = clientResponse.getClientResponse().readEntity(TenantsEntity.class);
    final Collection<TenantEntity> userEntities = responseEntity.getUsers();
    final Collection<TenantEntity> userGroupEntities = responseEntity.getUserGroups();
    for (final NodeResponse nodeResponse : successfulResponses) {
        final TenantsEntity nodeResponseEntity = nodeResponse == clientResponse ? responseEntity : nodeResponse.getClientResponse().readEntity(TenantsEntity.class);
        // only retain users/groups that all nodes agree on
        userEntities.retainAll(nodeResponseEntity.getUsers());
        userGroupEntities.retainAll(nodeResponseEntity.getUserGroups());
    }
    // create a new client response
    return new NodeResponse(clientResponse, responseEntity);
}
Also used : TenantsEntity(org.apache.nifi.web.api.entity.TenantsEntity) TenantEntity(org.apache.nifi.web.api.entity.TenantEntity) NodeResponse(org.apache.nifi.cluster.manager.NodeResponse)
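
A note on the mechanics: the merge relies on Collection.retainAll, so after the loop only users and groups reported by every successful node remain in the client response. A minimal standalone sketch of that intersection behavior, using plain JDK collections and hypothetical user names rather than the NiFi TenantEntity type:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;

public class RetainAllSketch {
    public static void main(String[] args) {
        // Hypothetical per-node user listings; "carol" is missing from node2's response.
        final Collection<String> merged = new ArrayList<>(Arrays.asList("alice", "bob", "carol"));
        final List<String> node2 = Arrays.asList("alice", "bob");
        final List<String> node3 = Arrays.asList("alice", "bob", "carol");

        // Same pattern as the merger: intersect the client response with each node's response.
        merged.retainAll(node2);
        merged.retainAll(node3);

        // Only entries that every node agrees on survive the merge.
        System.out.println(merged); // [alice, bob]
    }
}

Because retainAll is driven by equals, the outcome for the real TenantEntity objects depends on how entity equality is defined; the sketch above only illustrates the set-intersection shape of the merge.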

Example 37 with NodeResponse

use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.

the class StatusHistoryEndpointMerger method merge.

@Override
public NodeResponse merge(URI uri, String method, Set<NodeResponse> successfulResponses, Set<NodeResponse> problematicResponses, NodeResponse clientResponse) {
    final Map<String, MetricDescriptor<?>> metricDescriptors = getStandardMetricDescriptors(uri);
    final StatusHistoryEntity responseEntity = clientResponse.getClientResponse().readEntity(StatusHistoryEntity.class);
    final Set<StatusDescriptorDTO> fieldDescriptors = new LinkedHashSet<>();
    boolean includeCounters = true;
    StatusHistoryDTO lastStatusHistory = null;
    final List<NodeStatusSnapshotsDTO> nodeStatusSnapshots = new ArrayList<>(successfulResponses.size());
    LinkedHashMap<String, String> noReadPermissionsComponentDetails = null;
    for (final NodeResponse nodeResponse : successfulResponses) {
        final StatusHistoryEntity nodeResponseEntity = nodeResponse == clientResponse ? responseEntity : nodeResponse.getClientResponse().readEntity(StatusHistoryEntity.class);
        final StatusHistoryDTO nodeStatus = nodeResponseEntity.getStatusHistory();
        lastStatusHistory = nodeStatus;
        if (noReadPermissionsComponentDetails == null && !nodeResponseEntity.getCanRead()) {
            // If component details from a history with no read permissions are encountered for the first time, hold on to them to be used in the merged response
            noReadPermissionsComponentDetails = nodeStatus.getComponentDetails();
        }
        if (!Boolean.TRUE.equals(nodeResponseEntity.getCanRead())) {
            includeCounters = false;
        }
        final NodeIdentifier nodeId = nodeResponse.getNodeId();
        final NodeStatusSnapshotsDTO nodeStatusSnapshot = new NodeStatusSnapshotsDTO();
        nodeStatusSnapshot.setNodeId(nodeId.getId());
        nodeStatusSnapshot.setAddress(nodeId.getApiAddress());
        nodeStatusSnapshot.setApiPort(nodeId.getApiPort());
        nodeStatusSnapshot.setStatusSnapshots(nodeStatus.getAggregateSnapshots());
        nodeStatusSnapshots.add(nodeStatusSnapshot);
        final List<StatusDescriptorDTO> descriptors = nodeStatus.getFieldDescriptors();
        if (descriptors != null) {
            fieldDescriptors.addAll(descriptors);
        }
    }
    // Include counter fields only if every node indicated that the user is authorized to read the component; if any node reported that
    // the user is not authorized, we want to assume that the user is, in fact, not authorized.
    if (includeCounters) {
        for (final StatusDescriptorDTO descriptorDto : fieldDescriptors) {
            final String fieldName = descriptorDto.getField();
            if (!metricDescriptors.containsKey(fieldName)) {
                final ValueMapper<ProcessorStatus> valueMapper = s -> {
                    final Map<String, Long> counters = s.getCounters();
                    if (counters == null) {
                        return 0L;
                    }
                    return counters.getOrDefault(descriptorDto.getField(), 0L);
                };
                final MetricDescriptor<ProcessorStatus> metricDescriptor = new StandardMetricDescriptor<>(descriptorDto.getField(), descriptorDto.getLabel(), descriptorDto.getDescription(), Formatter.COUNT, valueMapper);
                metricDescriptors.put(fieldName, metricDescriptor);
            }
        }
    }
    final StatusHistoryDTO clusterStatusHistory = new StatusHistoryDTO();
    clusterStatusHistory.setAggregateSnapshots(mergeStatusHistories(nodeStatusSnapshots, metricDescriptors));
    clusterStatusHistory.setGenerated(new Date());
    clusterStatusHistory.setNodeSnapshots(nodeStatusSnapshots);
    if (lastStatusHistory != null) {
        clusterStatusHistory.setComponentDetails(noReadPermissionsComponentDetails == null ? lastStatusHistory.getComponentDetails() : noReadPermissionsComponentDetails);
    }
    clusterStatusHistory.setFieldDescriptors(new ArrayList<>(fieldDescriptors));
    final StatusHistoryEntity clusterEntity = new StatusHistoryEntity();
    clusterEntity.setStatusHistory(clusterStatusHistory);
    clusterEntity.setCanRead(noReadPermissionsComponentDetails == null);
    return new NodeResponse(clientResponse, clusterEntity);
}
Also used : LinkedHashSet(java.util.LinkedHashSet) NodeIdentifier(org.apache.nifi.cluster.protocol.NodeIdentifier) StatusHistoryEntity(org.apache.nifi.web.api.entity.StatusHistoryEntity) ValueMapper(org.apache.nifi.controller.status.history.ValueMapper) StatusSnapshotDTO(org.apache.nifi.web.api.dto.status.StatusSnapshotDTO) Date(java.util.Date) HashMap(java.util.HashMap) StandardMetricDescriptor(org.apache.nifi.controller.status.history.StandardMetricDescriptor) ArrayList(java.util.ArrayList) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map) ProcessorStatusDescriptor(org.apache.nifi.controller.status.history.ProcessorStatusDescriptor) URI(java.net.URI) RemoteProcessGroupStatusDescriptor(org.apache.nifi.controller.status.history.RemoteProcessGroupStatusDescriptor) NodeResponse(org.apache.nifi.cluster.manager.NodeResponse) ProcessorStatus(org.apache.nifi.controller.status.ProcessorStatus) Formatter(org.apache.nifi.controller.status.history.MetricDescriptor.Formatter) StatusHistoryUtil(org.apache.nifi.controller.status.history.StatusHistoryUtil) StatusSnapshot(org.apache.nifi.controller.status.history.StatusSnapshot) StandardStatusSnapshot(org.apache.nifi.controller.status.history.StandardStatusSnapshot) Set(java.util.Set) EndpointResponseMerger(org.apache.nifi.cluster.coordination.http.EndpointResponseMerger) List(java.util.List) TreeMap(java.util.TreeMap) MetricDescriptor(org.apache.nifi.controller.status.history.MetricDescriptor) ConnectionStatusDescriptor(org.apache.nifi.controller.status.history.ConnectionStatusDescriptor) ProcessGroupStatusDescriptor(org.apache.nifi.controller.status.history.ProcessGroupStatusDescriptor) StatusHistoryDTO(org.apache.nifi.web.api.dto.status.StatusHistoryDTO) NodeStatusSnapshotsDTO(org.apache.nifi.web.api.dto.status.NodeStatusSnapshotsDTO) Pattern(java.util.regex.Pattern) StatusDescriptorDTO(org.apache.nifi.web.api.dto.status.StatusDescriptorDTO)
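
Two null-safety idioms in this merger are easy to miss: !Boolean.TRUE.equals(canRead) treats an absent canRead flag the same as an explicit false, and getOrDefault(field, 0L) treats a node that never incremented a counter as having reported zero. A short standalone illustration of both, using a hypothetical counter name rather than a real NiFi counter:

import java.util.HashMap;
import java.util.Map;

public class MergerIdiomsSketch {
    public static void main(String[] args) {
        // Null-safe permission check: a missing canRead flag disables counters, just like false.
        final Boolean canReadMissing = null;
        final Boolean canReadDenied = Boolean.FALSE;
        System.out.println(!Boolean.TRUE.equals(canReadMissing)); // true -> includeCounters becomes false
        System.out.println(!Boolean.TRUE.equals(canReadDenied));  // true -> includeCounters becomes false

        // Null-safe counter lookup: an unknown counter contributes 0 to the aggregate.
        final Map<String, Long> counters = new HashMap<>();
        counters.put("SomeHypotheticalCounter", 42L);
        System.out.println(counters.getOrDefault("SomeHypotheticalCounter", 0L)); // 42
        System.out.println(counters.getOrDefault("AnotherCounter", 0L));          // 0
    }
}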

Example 38 with NodeResponse

use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.

the class UsersEndpointMerger method merge.

@Override
public final NodeResponse merge(final URI uri, final String method, final Set<NodeResponse> successfulResponses, final Set<NodeResponse> problematicResponses, final NodeResponse clientResponse) {
    if (!canHandle(uri, method)) {
        throw new IllegalArgumentException("Cannot use Endpoint Mapper of type " + getClass().getSimpleName() + " to map responses for URI " + uri + ", HTTP Method " + method);
    }
    final UsersEntity responseEntity = clientResponse.getClientResponse().readEntity(UsersEntity.class);
    final Collection<UserEntity> userEntities = responseEntity.getUsers();
    final Map<String, Map<NodeIdentifier, UserEntity>> entityMap = new HashMap<>();
    for (final NodeResponse nodeResponse : successfulResponses) {
        final UsersEntity nodeResponseEntity = nodeResponse == clientResponse ? responseEntity : nodeResponse.getClientResponse().readEntity(UsersEntity.class);
        final Collection<UserEntity> nodeUserEntities = nodeResponseEntity.getUsers();
        // only retain users that all nodes agree on
        userEntities.retainAll(nodeUserEntities);
        for (final UserEntity nodeUserEntity : nodeUserEntities) {
            final NodeIdentifier nodeId = nodeResponse.getNodeId();
            Map<NodeIdentifier, UserEntity> innerMap = entityMap.get(nodeUserEntity.getId());
            if (innerMap == null) {
                innerMap = new HashMap<>();
                entityMap.put(nodeUserEntity.getId(), innerMap);
            }
            innerMap.put(nodeResponse.getNodeId(), nodeUserEntity);
        }
    }
    UsersEntityMerger.mergeUsers(userEntities, entityMap);
    // create a new client response
    return new NodeResponse(clientResponse, responseEntity);
}
Also used : HashMap(java.util.HashMap) NodeIdentifier(org.apache.nifi.cluster.protocol.NodeIdentifier) NodeResponse(org.apache.nifi.cluster.manager.NodeResponse) UsersEntity(org.apache.nifi.web.api.entity.UsersEntity) Map(java.util.Map) UserEntity(org.apache.nifi.web.api.entity.UserEntity)
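
The entityMap built here has the shape "user id -> (node id -> that node's copy of the user)", which UsersEntityMerger.mergeUsers then reconciles into a single entity per user. The same grouping can be expressed more directly with Map.computeIfAbsent, which also makes it impossible to look the inner map up under the wrong key. A minimal sketch using plain strings in place of the NiFi entity and node types (all ids hypothetical):

import java.util.HashMap;
import java.util.Map;

public class GroupByEntityIdSketch {
    public static void main(String[] args) {
        // Hypothetical (nodeId, userId) pairs standing in for per-node UserEntity responses.
        final String[][] responses = {
                {"node-1", "user-a"},
                {"node-2", "user-a"},
                {"node-2", "user-b"}
        };

        // user id -> (node id -> that node's view of the user), the same shape as entityMap above.
        final Map<String, Map<String, String>> entityMap = new HashMap<>();
        for (final String[] response : responses) {
            final String nodeId = response[0];
            final String userId = response[1];
            entityMap.computeIfAbsent(userId, id -> new HashMap<>())
                     .put(nodeId, userId + " as seen by " + nodeId);
        }

        // e.g. {user-a={node-1=..., node-2=...}, user-b={node-2=...}} (HashMap ordering not guaranteed)
        System.out.println(entityMap);
    }
}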

Example 39 with NodeResponse

use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.

the class NodeClusterCoordinator method afterRequest.

/**
 * Callback that is called after an HTTP Request has been replicated to
 * nodes in the cluster. This allows us to disconnect nodes that did not
 * complete the request, if applicable.
 */
@Override
public void afterRequest(final String uriPath, final String method, final Set<NodeResponse> nodeResponses) {
    // If we are not the active cluster coordinator, we are not responsible for this callback,
    // as the cluster coordinator is responsible for performing the actual request replication.
    if (!isActiveClusterCoordinator()) {
        return;
    }
    final boolean mutableRequest = isMutableRequest(method);
    /*
         * Nodes that encountered issues handling the request are marked as
         * disconnected for mutable requests (e.g., post, put, delete). For
         * other requests (e.g., get, head), the nodes remain in their current
         * state even if they had problems handling the request.
         */
    if (mutableRequest) {
        final HttpResponseMapper responseMerger = new StandardHttpResponseMapper(nifiProperties);
        final Set<NodeResponse> problematicNodeResponses = responseMerger.getProblematicNodeResponses(nodeResponses);
        // all nodes failed
        final boolean allNodesFailed = problematicNodeResponses.size() == nodeResponses.size();
        // some nodes had a problematic response because of a missing counter, ensure they are not disconnected
        final boolean someNodesFailedMissingCounter = !problematicNodeResponses.isEmpty() && problematicNodeResponses.size() < nodeResponses.size() && isMissingCounter(problematicNodeResponses, uriPath);
        // ensure nodes stay connected in certain scenarios
        if (allNodesFailed) {
            logger.warn("All nodes failed to process URI {} {}. As a result, no node will be disconnected from cluster", method, uriPath);
            return;
        }
        if (someNodesFailedMissingCounter) {
            return;
        }
        // disconnect problematic nodes
        if (!problematicNodeResponses.isEmpty() && problematicNodeResponses.size() < nodeResponses.size()) {
            final Set<NodeIdentifier> failedNodeIds = problematicNodeResponses.stream().map(response -> response.getNodeId()).collect(Collectors.toSet());
            logger.warn(String.format("The following nodes failed to process URI %s '%s'.  Requesting each node disconnect from cluster.", uriPath, failedNodeIds));
            for (final NodeIdentifier nodeId : failedNodeIds) {
                requestNodeDisconnect(nodeId, DisconnectionCode.FAILED_TO_SERVICE_REQUEST, "Failed to process request " + method + " " + uriPath);
            }
        }
    }
}
Also used : NodeProtocolSender(org.apache.nifi.cluster.protocol.NodeProtocolSender) ConnectionResponseMessage(org.apache.nifi.cluster.protocol.message.ConnectionResponseMessage) NodeIdentifier(org.apache.nifi.cluster.protocol.NodeIdentifier) RequestCompletionCallback(org.apache.nifi.cluster.coordination.http.replication.RequestCompletionCallback) LoggerFactory(org.slf4j.LoggerFactory) FlowService(org.apache.nifi.services.FlowService) StringUtils(org.apache.commons.lang3.StringUtils) Map(java.util.Map) ReconnectionRequestMessage(org.apache.nifi.cluster.protocol.message.ReconnectionRequestMessage) NodeResponse(org.apache.nifi.cluster.manager.NodeResponse) ConnectionRequestMessage(org.apache.nifi.cluster.protocol.message.ConnectionRequestMessage) ProtocolHandler(org.apache.nifi.cluster.protocol.ProtocolHandler) NodeEvent(org.apache.nifi.cluster.event.NodeEvent) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HttpResponseMapper(org.apache.nifi.cluster.coordination.http.HttpResponseMapper) Set(java.util.Set) RevisionManager(org.apache.nifi.web.revision.RevisionManager) UUID(java.util.UUID) StandardHttpResponseMapper(org.apache.nifi.cluster.coordination.http.StandardHttpResponseMapper) Collectors(java.util.stream.Collectors) StandardDataFlow(org.apache.nifi.cluster.protocol.StandardDataFlow) ClusterCoordinationProtocolSenderListener(org.apache.nifi.cluster.protocol.impl.ClusterCoordinationProtocolSenderListener) List(java.util.List) ConnectionResponse(org.apache.nifi.cluster.protocol.ConnectionResponse) Pattern(java.util.regex.Pattern) ComponentRevision(org.apache.nifi.cluster.protocol.ComponentRevision) DataFlow(org.apache.nifi.cluster.protocol.DataFlow) ConnectionRequest(org.apache.nifi.cluster.protocol.ConnectionRequest) ClusterWorkloadResponseMessage(org.apache.nifi.cluster.protocol.message.ClusterWorkloadResponseMessage) LeaderElectionManager(org.apache.nifi.controller.leader.election.LeaderElectionManager) Event(org.apache.nifi.cluster.event.Event) HashMap(java.util.HashMap) NodeConnectionStatusResponseMessage(org.apache.nifi.cluster.protocol.message.NodeConnectionStatusResponseMessage) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) ConcurrentMap(java.util.concurrent.ConcurrentMap) HashSet(java.util.HashSet) CircularFifoQueue(org.apache.commons.collections4.queue.CircularFifoQueue) IllegalNodeDisconnectionException(org.apache.nifi.cluster.manager.exception.IllegalNodeDisconnectionException) ClusterCoordinator(org.apache.nifi.cluster.coordination.ClusterCoordinator) MessageType(org.apache.nifi.cluster.protocol.message.ProtocolMessage.MessageType) ClusterNodeFirewall(org.apache.nifi.cluster.firewall.ClusterNodeFirewall) NoClusterCoordinatorException(org.apache.nifi.cluster.exception.NoClusterCoordinatorException) Logger(org.slf4j.Logger) IOException(java.io.IOException) NodeStatusChangeMessage(org.apache.nifi.cluster.protocol.message.NodeStatusChangeMessage) DisconnectMessage(org.apache.nifi.cluster.protocol.message.DisconnectMessage) AtomicLong(java.util.concurrent.atomic.AtomicLong) FlowElection(org.apache.nifi.cluster.coordination.flow.FlowElection) ProtocolException(org.apache.nifi.cluster.protocol.ProtocolException) EventReporter(org.apache.nifi.events.EventReporter) NiFiProperties(org.apache.nifi.util.NiFiProperties) ClusterWorkloadRequestMessage(org.apache.nifi.cluster.protocol.message.ClusterWorkloadRequestMessage) Severity(org.apache.nifi.reporting.Severity) Collections(java.util.Collections) 
ProtocolMessage(org.apache.nifi.cluster.protocol.message.ProtocolMessage)
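
For mutable requests the disconnect policy boils down to three cases: if every node failed, nothing is disconnected; if the only failures were the known missing-counter case, nothing is disconnected; otherwise the failing minority is asked to leave the cluster. A pure-function sketch of that decision, using a hypothetical helper that is independent of the NiFi types:

public class DisconnectPolicySketch {

    // Mirrors the branches in afterRequest for a mutable request:
    // returns true only when a strict minority of nodes failed for a reason
    // other than the missing-counter case.
    static boolean shouldDisconnectProblemNodes(final int problematicCount,
                                                final int totalCount,
                                                final boolean failuresWereMissingCounter) {
        if (problematicCount == 0) {
            return false; // nothing failed
        }
        if (problematicCount == totalCount) {
            return false; // all nodes failed; disconnecting everyone would not help
        }
        if (failuresWereMissingCounter) {
            return false; // known benign failure mode, keep the nodes connected
        }
        return true; // a strict minority failed for some other reason
    }

    public static void main(String[] args) {
        System.out.println(shouldDisconnectProblemNodes(3, 3, false)); // false
        System.out.println(shouldDisconnectProblemNodes(1, 3, true));  // false
        System.out.println(shouldDisconnectProblemNodes(1, 3, false)); // true
    }
}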

Example 40 with NodeResponse

use of org.apache.nifi.cluster.manager.NodeResponse in project nifi by apache.

the class TestResponseUtils method testFindLongResponseTimes.

@Test
public void testFindLongResponseTimes() throws URISyntaxException {
    final Map<NodeIdentifier, NodeResponse> responses = new HashMap<>();
    final NodeIdentifier id1 = new NodeIdentifier("1", "localhost", 8000, "localhost", 8001, "localhost", 8002, 8003, false);
    final NodeIdentifier id2 = new NodeIdentifier("2", "localhost", 8200, "localhost", 8201, "localhost", 8202, 8203, false);
    final NodeIdentifier id3 = new NodeIdentifier("3", "localhost", 8300, "localhost", 8301, "localhost", 8302, 8303, false);
    final NodeIdentifier id4 = new NodeIdentifier("4", "localhost", 8400, "localhost", 8401, "localhost", 8402, 8403, false);
    final URI uri = new URI("localhost:8080");
    final Response clientResponse = mock(Response.class);
    responses.put(id1, new NodeResponse(id1, "GET", uri, clientResponse, TimeUnit.MILLISECONDS.toNanos(80), "1"));
    responses.put(id2, new NodeResponse(id2, "GET", uri, clientResponse, TimeUnit.MILLISECONDS.toNanos(92), "1"));
    responses.put(id3, new NodeResponse(id3, "GET", uri, clientResponse, TimeUnit.MILLISECONDS.toNanos(3), "1"));
    responses.put(id4, new NodeResponse(id4, "GET", uri, clientResponse, TimeUnit.MILLISECONDS.toNanos(120), "1"));
    final AsyncClusterResponse response = new AsyncClusterResponse() {

        @Override
        public String getRequestIdentifier() {
            return "1";
        }

        @Override
        public String getMethod() {
            return "GET";
        }

        @Override
        public String getURIPath() {
            return null;
        }

        @Override
        public Set<NodeIdentifier> getNodesInvolved() {
            return new HashSet<>(responses.keySet());
        }

        @Override
        public Set<NodeIdentifier> getCompletedNodeIdentifiers() {
            return getNodesInvolved();
        }

        @Override
        public boolean isComplete() {
            return true;
        }

        @Override
        public boolean isOlderThan(long time, TimeUnit timeUnit) {
            return true;
        }

        @Override
        public NodeResponse getMergedResponse() {
            return null;
        }

        @Override
        public NodeResponse awaitMergedResponse() throws InterruptedException {
            return null;
        }

        @Override
        public NodeResponse awaitMergedResponse(long timeout, TimeUnit timeUnit) throws InterruptedException {
            return null;
        }

        @Override
        public NodeResponse getNodeResponse(NodeIdentifier nodeId) {
            return responses.get(nodeId);
        }

        @Override
        public Set<NodeResponse> getCompletedNodeResponses() {
            return new HashSet<>(responses.values());
        }
    };
    Set<NodeIdentifier> slowResponses = ResponseUtils.findLongResponseTimes(response, 1.5D);
    assertTrue(slowResponses.isEmpty());
    responses.put(id4, new NodeResponse(id4, "GET", uri, clientResponse, TimeUnit.MILLISECONDS.toNanos(2500), "1"));
    slowResponses = ResponseUtils.findLongResponseTimes(response, 1.5D);
    assertEquals(1, slowResponses.size());
    assertEquals(id4, slowResponses.iterator().next());
}
Also used : Response(javax.ws.rs.core.Response) NodeResponse(org.apache.nifi.cluster.manager.NodeResponse) HashMap(java.util.HashMap) NodeIdentifier(org.apache.nifi.cluster.protocol.NodeIdentifier) TimeUnit(java.util.concurrent.TimeUnit) URI(java.net.URI) HashSet(java.util.HashSet) Test(org.junit.Test)
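
The test asserts that 80, 92, 3 and 120 ms are all "fast enough" for a 1.5 multiplier while 2500 ms is not, but the statistic ResponseUtils applies is not shown in this excerpt. One reading consistent with those expectations is an outlier check against the mean plus a multiple of the standard deviation of the per-node request durations; the sketch below implements that reading under a hypothetical findSlowNodes helper with plain string node ids, and is not the actual ResponseUtils.findLongResponseTimes implementation:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;

public class LongResponseTimeSketch {

    // Flags nodes whose request duration exceeds mean + multiple * standard deviation.
    // Illustration only; assumes at least one entry in nanosByNode.
    static Set<String> findSlowNodes(final Map<String, Long> nanosByNode, final double stdDeviationMultiple) {
        final double mean = nanosByNode.values().stream().mapToLong(Long::longValue).average().orElse(0);
        final double variance = nanosByNode.values().stream()
                .mapToDouble(nanos -> Math.pow(nanos - mean, 2))
                .sum() / nanosByNode.size();
        final double threshold = mean + stdDeviationMultiple * Math.sqrt(variance);

        final Set<String> slow = new HashSet<>();
        for (final Map.Entry<String, Long> entry : nanosByNode.entrySet()) {
            if (entry.getValue() > threshold) {
                slow.add(entry.getKey());
            }
        }
        return slow;
    }

    public static void main(String[] args) {
        final Map<String, Long> durations = new HashMap<>();
        durations.put("node-1", TimeUnit.MILLISECONDS.toNanos(80));
        durations.put("node-2", TimeUnit.MILLISECONDS.toNanos(92));
        durations.put("node-3", TimeUnit.MILLISECONDS.toNanos(3));
        durations.put("node-4", TimeUnit.MILLISECONDS.toNanos(120));
        System.out.println(findSlowNodes(durations, 1.5)); // []

        durations.put("node-4", TimeUnit.MILLISECONDS.toNanos(2500));
        System.out.println(findSlowNodes(durations, 1.5)); // [node-4]
    }
}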

Aggregations

NodeResponse (org.apache.nifi.cluster.manager.NodeResponse) 64
HashMap (java.util.HashMap) 44
NodeIdentifier (org.apache.nifi.cluster.protocol.NodeIdentifier) 44
Map (java.util.Map) 38
URI (java.net.URI) 32
Set (java.util.Set) 23
URISyntaxException (java.net.URISyntaxException) 17
ProcessorEntity (org.apache.nifi.web.api.entity.ProcessorEntity) 16
MultivaluedHashMap (javax.ws.rs.core.MultivaluedHashMap) 15
ApiOperation (io.swagger.annotations.ApiOperation) 12
ApiResponses (io.swagger.annotations.ApiResponses) 12
HashSet (java.util.HashSet) 12
Collectors (java.util.stream.Collectors) 12
Consumes (javax.ws.rs.Consumes) 12
GET (javax.ws.rs.GET) 12
Produces (javax.ws.rs.Produces) 12
Response (javax.ws.rs.core.Response) 12
ArrayList (java.util.ArrayList) 11
Pattern (java.util.regex.Pattern) 11
Path (javax.ws.rs.Path) 11