
Example 1 with IllegalClusterStateException

Use of org.apache.nifi.cluster.manager.exception.IllegalClusterStateException in project nifi by apache.

From the class ThreadPoolRequestReplicator, method replicate:

/**
 * Replicates the request to all nodes in the given set of node identifiers
 *
 * @param nodeIds             the NodeIdentifiers that identify which nodes to send the request to
 * @param method              the HTTP method to use
 * @param uri                 the URI to send the request to
 * @param entity              the entity to use
 * @param headers             the HTTP Headers
 * @param performVerification whether or not to verify that all nodes in the cluster are connected and that all nodes can perform the request. Ignored if the request is not mutable.
 * @param response            the response to update with the results
 * @param executionPhase      <code>true</code> if this is the execution phase, <code>false</code> otherwise
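 * @param merge               whether or not the individual node responses should be merged into a single response (passed through to the StandardAsyncClusterResponse)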
 * @param monitor             a monitor that will be notified when the request completes (successfully or otherwise)
 * @return an AsyncClusterResponse that can be used to obtain the result
 */
AsyncClusterResponse replicate(final Set<NodeIdentifier> nodeIds, final String method, final URI uri, final Object entity, final Map<String, String> headers, final boolean performVerification, StandardAsyncClusterResponse response, final boolean executionPhase, final boolean merge, final Object monitor) {
    try {
        // state validation
        Objects.requireNonNull(nodeIds);
        Objects.requireNonNull(method);
        Objects.requireNonNull(uri);
        Objects.requireNonNull(entity);
        Objects.requireNonNull(headers);
        if (nodeIds.isEmpty()) {
            throw new IllegalArgumentException("Cannot replicate request to 0 nodes");
        }
        // verify all of the nodes exist and are in the proper state
        for (final NodeIdentifier nodeId : nodeIds) {
            final NodeConnectionStatus status = clusterCoordinator.getConnectionStatus(nodeId);
            if (status == null) {
                throw new UnknownNodeException("Node " + nodeId + " does not exist in this cluster");
            }
            if (status.getState() != NodeConnectionState.CONNECTED) {
                throw new IllegalClusterStateException("Cannot replicate request to Node " + nodeId + " because the node is not connected");
            }
        }
        logger.debug("Replicating request {} {} with entity {} to {}; response is {}", method, uri, entity, nodeIds, response);
        // Update headers to indicate the current revision so that we can
        // prevent multiple users changing the flow at the same time
        final Map<String, String> updatedHeaders = new HashMap<>(headers);
        final String requestId = updatedHeaders.computeIfAbsent(REQUEST_TRANSACTION_ID_HEADER, key -> UUID.randomUUID().toString());
        long verifyClusterStateNanos = -1;
        if (performVerification) {
            final long start = System.nanoTime();
            verifyClusterState(method, uri.getPath());
            verifyClusterStateNanos = System.nanoTime() - start;
        }
        int numRequests = responseMap.size();
        if (numRequests >= maxConcurrentRequests) {
            numRequests = purgeExpiredRequests();
        }
        if (numRequests >= maxConcurrentRequests) {
            final Map<String, Long> countsByUri = responseMap.values().stream().collect(Collectors.groupingBy(StandardAsyncClusterResponse::getURIPath, Collectors.counting()));
            logger.error("Cannot replicate request {} {} because there are {} outstanding HTTP Requests already. Request Counts Per URI = {}", method, uri.getPath(), numRequests, countsByUri);
            throw new IllegalStateException("There are too many outstanding HTTP requests with a total " + numRequests + " outstanding requests");
        }
        // create a response object if one was not already passed to us
        if (response == null) {
            // create the request objects and replicate to all nodes.
            // When the request has completed, we need to ensure that we notify the monitor, if there is one.
            final CompletionCallback completionCallback = clusterResponse -> {
                try {
                    onCompletedResponse(requestId);
                } finally {
                    if (monitor != null) {
                        synchronized (monitor) {
                            monitor.notify();
                        }
                        logger.debug("Notified monitor {} because request {} {} has completed", monitor, method, uri);
                    }
                }
            };
            final Runnable responseConsumedCallback = () -> onResponseConsumed(requestId);
            response = new StandardAsyncClusterResponse(requestId, uri, method, nodeIds, responseMapper, completionCallback, responseConsumedCallback, merge);
            responseMap.put(requestId, response);
        }
        if (verifyClusterStateNanos > -1) {
            response.addTiming("Verify Cluster State", "All Nodes", verifyClusterStateNanos);
        }
        logger.debug("For Request ID {}, response object is {}", requestId, response);
        // if mutable request, we have to do a two-phase commit where we ask each node to verify
        // that the request can take place and then, if all nodes agree that it can, we can actually
        // issue the request. This is all handled by calling performVerification, which will replicate
        // the 'vote' request to all nodes and then if successful will call back into this method to
        // replicate the actual request.
        final boolean mutableRequest = isMutableRequest(method, uri.getPath());
        if (mutableRequest && performVerification) {
            logger.debug("Performing verification (first phase of two-phase commit) for Request ID {}", requestId);
            performVerification(nodeIds, method, uri, entity, updatedHeaders, response, merge, monitor);
            return response;
        } else if (mutableRequest) {
            response.setPhase(StandardAsyncClusterResponse.COMMIT_PHASE);
        }
        // Callback function for generating a NodeHttpRequestCallable that can be used to perform the work
        final StandardAsyncClusterResponse finalResponse = response;
        NodeRequestCompletionCallback nodeCompletionCallback = nodeResponse -> {
            logger.debug("Received response from {} for {} {}", nodeResponse.getNodeId(), method, uri.getPath());
            finalResponse.add(nodeResponse);
        };
        // instruct the node to actually perform the underlying action
        if (mutableRequest && executionPhase) {
            updatedHeaders.put(REQUEST_EXECUTION_HTTP_HEADER, "true");
        }
        // replicate the request to all nodes
        final Function<NodeIdentifier, NodeHttpRequest> requestFactory = nodeId -> new NodeHttpRequest(nodeId, method, createURI(uri, nodeId), entity, updatedHeaders, nodeCompletionCallback, finalResponse);
        submitAsyncRequest(nodeIds, uri.getScheme(), uri.getPath(), requestFactory, updatedHeaders);
        return response;
    } catch (final Throwable t) {
        if (monitor != null) {
            synchronized (monitor) {
                monitor.notify();
            }
            logger.debug("Notified monitor {} because request {} {} has failed with Throwable {}", monitor, method, uri, t);
        }
        if (response != null) {
            final RuntimeException failure = (t instanceof RuntimeException) ? (RuntimeException) t : new RuntimeException("Failed to submit Replication Request to background thread", t);
            response.setFailure(failure, new NodeIdentifier());
        }
        throw t;
    }
}
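A minimal, hedged usage sketch (not part of the NiFi sources) may help orient readers: it follows the public replicate(...) call pattern used in Examples 3 and 4 below and blocks for the merged result. The package names for RequestReplicator and AsyncClusterResponse, and the class name ReplicationSketch, are assumptions here; only the calls that appear in the examples on this page are relied on.

// Hedged usage sketch: replicate a request to a set of nodes and wait for the merged result.
// The two trailing boolean arguments mirror the call pattern seen in the examples on this
// page; their exact semantics should be checked against the NiFi version in use.
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

import javax.ws.rs.HttpMethod;

import org.apache.nifi.cluster.coordination.http.replication.AsyncClusterResponse;
import org.apache.nifi.cluster.coordination.http.replication.RequestReplicator;
import org.apache.nifi.cluster.manager.NodeResponse;
import org.apache.nifi.cluster.protocol.NodeIdentifier;

public class ReplicationSketch {

    public static NodeResponse replicateAndWait(final RequestReplicator replicator,
            final Set<NodeIdentifier> nodeIds, final URI uri, final Object entity) throws InterruptedException {

        final Map<String, String> headers = new HashMap<>();
        final AsyncClusterResponse clusterResponse =
                replicator.replicate(nodeIds, HttpMethod.POST, uri, entity, headers, true, true);
        // Blocks until every node has responded (or the request has failed) and the
        // individual NodeResponses have been merged into a single response.
        return clusterResponse.awaitMergedResponse();
    }
}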

Example 2 with IllegalClusterStateException

Use of org.apache.nifi.cluster.manager.exception.IllegalClusterStateException in project nifi by apache.

From the class ThreadPoolRequestReplicator, method performVerification:

private void performVerification(final Set<NodeIdentifier> nodeIds, final String method, final URI uri, final Object entity, final Map<String, String> headers, final StandardAsyncClusterResponse clusterResponse, final boolean merge, final Object monitor) {
    logger.debug("Verifying that mutable request {} {} can be made", method, uri.getPath());
    final Map<String, String> validationHeaders = new HashMap<>(headers);
    validationHeaders.put(REQUEST_VALIDATION_HTTP_HEADER, NODE_CONTINUE);
    final long startNanos = System.nanoTime();
    final int numNodes = nodeIds.size();
    final NodeRequestCompletionCallback completionCallback = new NodeRequestCompletionCallback() {

        final Set<NodeResponse> nodeResponses = Collections.synchronizedSet(new HashSet<>());

        @Override
        public void onCompletion(final NodeResponse nodeResponse) {
            // Add the node response to our collection. We later need to know whether or
            // not this is the last node response, so we add the response and then check
            // the size within a synchronized block to ensure that those two things happen
            // atomically. Otherwise, we could have multiple threads checking the sizes of
            // the sets at the same time, which could result in multiple threads performing
            // the 'all nodes are complete' logic.
            final boolean allNodesResponded;
            synchronized (nodeResponses) {
                nodeResponses.add(nodeResponse);
                allNodesResponded = nodeResponses.size() == numNodes;
            }
            try {
                final long nanos = System.nanoTime() - startNanos;
                clusterResponse.addTiming("Completed Verification", nodeResponse.getNodeId().toString(), nanos);
                // Once all of the node responses have been received, we can verify them
                // and, if all is good, replicate the original request to all of the nodes.
                if (allNodesResponded) {
                    clusterResponse.addTiming("Verification Completed", "All Nodes", nanos);
                    // Check if we have any requests that do not have a 150-Continue status code.
                    final long dissentingCount = nodeResponses.stream().filter(p -> p.getStatus() != NODE_CONTINUE_STATUS_CODE).count();
                    // If no node dissented, we can replicate the original request
                    // to all nodes and we are finished.
                    if (dissentingCount == 0) {
                        logger.debug("Received verification from all {} nodes that mutable request {} {} can be made", numNodes, method, uri.getPath());
                        replicate(nodeIds, method, uri, entity, headers, false, clusterResponse, true, merge, monitor);
                        return;
                    }
                    try {
                        final Map<String, String> cancelLockHeaders = new HashMap<>(headers);
                        cancelLockHeaders.put(REQUEST_TRANSACTION_CANCELATION_HTTP_HEADER, "true");
                        final Thread cancelLockThread = new Thread(new Runnable() {

                            @Override
                            public void run() {
                                logger.debug("Found {} dissenting nodes for {} {}; canceling claim request", dissentingCount, method, uri.getPath());
                                final Function<NodeIdentifier, NodeHttpRequest> requestFactory = nodeId -> new NodeHttpRequest(nodeId, method, createURI(uri, nodeId), entity, cancelLockHeaders, null, clusterResponse);
                                submitAsyncRequest(nodeIds, uri.getScheme(), uri.getPath(), requestFactory, cancelLockHeaders);
                            }
                        });
                        cancelLockThread.setName("Cancel Flow Locks");
                        cancelLockThread.start();
                        // Check that all nodes responded successfully.
                        for (final NodeResponse response : nodeResponses) {
                            if (response.getStatus() != NODE_CONTINUE_STATUS_CODE) {
                                final Response clientResponse = response.getClientResponse();
                                final String message;
                                if (clientResponse == null) {
                                    message = "Node " + response.getNodeId() + " is unable to fulfill this request due to: Unexpected Response Code " + response.getStatus();
                                    logger.info("Received a status of {} from {} for request {} {} when performing first stage of two-stage commit. The action will not occur", response.getStatus(), response.getNodeId(), method, uri.getPath());
                                } else {
                                    final String nodeExplanation = clientResponse.readEntity(String.class);
                                    message = "Node " + response.getNodeId() + " is unable to fulfill this request due to: " + nodeExplanation;
                                    logger.info("Received a status of {} from {} for request {} {} when performing first stage of two-stage commit. " + "The action will not occur. Node explanation: {}", response.getStatus(), response.getNodeId(), method, uri.getPath(), nodeExplanation);
                                }
                                // if a node reports forbidden, use that as the response failure
                                final RuntimeException failure;
                                if (response.getStatus() == Status.FORBIDDEN.getStatusCode()) {
                                    if (response.hasThrowable()) {
                                        failure = new AccessDeniedException(message, response.getThrowable());
                                    } else {
                                        failure = new AccessDeniedException(message);
                                    }
                                } else {
                                    if (response.hasThrowable()) {
                                        failure = new IllegalClusterStateException(message, response.getThrowable());
                                    } else {
                                        failure = new IllegalClusterStateException(message);
                                    }
                                }
                                clusterResponse.setFailure(failure, response.getNodeId());
                            }
                        }
                    } finally {
                        if (monitor != null) {
                            synchronized (monitor) {
                                monitor.notify();
                            }
                            logger.debug("Notified monitor {} because request {} {} has failed due to at least 1 dissenting node", monitor, method, uri);
                        }
                    }
                }
            } catch (final Exception e) {
                clusterResponse.add(new NodeResponse(nodeResponse.getNodeId(), method, uri, e));
                // If a failure occurs, ensure that the other nodes' responses are still added
                // to the Cluster Response so that the Cluster Response is complete.
                for (final NodeResponse otherResponse : nodeResponses) {
                    if (otherResponse.getNodeId().equals(nodeResponse.getNodeId())) {
                        continue;
                    }
                    clusterResponse.add(otherResponse);
                }
            }
        }
    };
    // Callback function for generating a NodeHttpRequestCallable that can be used to perform the work
    final Function<NodeIdentifier, NodeHttpRequest> requestFactory = nodeId -> new NodeHttpRequest(nodeId, method, createURI(uri, nodeId), entity, validationHeaders, completionCallback, clusterResponse);
    // replicate the 'verification request' to all nodes
    submitAsyncRequest(nodeIds, uri.getScheme(), uri.getPath(), requestFactory, validationHeaders);
}
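The anonymous NodeRequestCompletionCallback above adds each response to a synchronized set and checks the set's size inside the same synchronized block, so that exactly one thread observes the "all nodes responded" condition. The same pattern in isolation, as a small self-contained sketch (class and method names are illustrative, not from NiFi):

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class CompletionTracker<T> {
    private final int expected;
    private final Set<T> responses = Collections.synchronizedSet(new HashSet<>());

    public CompletionTracker(final int expected) {
        this.expected = expected;
    }

    /**
     * Records one response. Returns true for exactly one caller: the one whose
     * addition completes the set (assuming each distinct response is added once),
     * because the add and the size check happen atomically under the same lock.
     */
    public boolean onResponse(final T response) {
        synchronized (responses) {
            responses.add(response);
            return responses.size() == expected;
        }
    }
}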

Example 3 with IllegalClusterStateException

Use of org.apache.nifi.cluster.manager.exception.IllegalClusterStateException in project nifi by apache.

From the class TestThreadPoolRequestReplicator, method testOneNodeRejectsTwoPhaseCommit:

@Test(timeout = 15000)
public void testOneNodeRejectsTwoPhaseCommit() {
    final Set<NodeIdentifier> nodeIds = new HashSet<>();
    nodeIds.add(new NodeIdentifier("1", "localhost", 8100, "localhost", 8101, "localhost", 8102, 8103, false));
    nodeIds.add(new NodeIdentifier("2", "localhost", 8200, "localhost", 8201, "localhost", 8202, 8203, false));
    final ClusterCoordinator coordinator = createClusterCoordinator();
    final AtomicInteger requestCount = new AtomicInteger(0);
    final NiFiProperties props = NiFiProperties.createBasicNiFiProperties(null, null);
    final ThreadPoolRequestReplicator replicator = new ThreadPoolRequestReplicator(2, 5, 100, ClientBuilder.newClient(), coordinator, "1 sec", "1 sec", null, null, props) {

        @Override
        protected NodeResponse replicateRequest(final Invocation invocation, final NodeIdentifier nodeId, final String method, final URI uri, final String requestId, Map<String, String> givenHeaders, final StandardAsyncClusterResponse response) {
            // the resource builder will not expose its headers to us, so we are using Mockito's Whitebox class to extract them.
            final ClientRequest requestContext = (ClientRequest) Whitebox.getInternalState(invocation, "requestContext");
            final Object expectsHeader = requestContext.getHeaders().getFirst(ThreadPoolRequestReplicator.REQUEST_VALIDATION_HTTP_HEADER);
            final int requestIndex = requestCount.incrementAndGet();
            assertEquals(ThreadPoolRequestReplicator.NODE_CONTINUE, expectsHeader);
            if (requestIndex == 1) {
                final Response clientResponse = mock(Response.class);
                when(clientResponse.getStatus()).thenReturn(150);
                return new NodeResponse(nodeId, method, uri, clientResponse, -1L, requestId);
            } else {
                final IllegalClusterStateException explanation = new IllegalClusterStateException("Intentional Exception for Unit Testing");
                return new NodeResponse(nodeId, method, uri, explanation);
            }
        }
    };
    try {
        // set the user
        final Authentication authentication = new NiFiAuthenticationToken(new NiFiUserDetails(StandardNiFiUser.ANONYMOUS));
        SecurityContextHolder.getContext().setAuthentication(authentication);
        final AsyncClusterResponse clusterResponse = replicator.replicate(nodeIds, HttpMethod.POST, new URI("http://localhost:80/processors/1"), new ProcessorEntity(), new HashMap<>(), true, true);
        clusterResponse.awaitMergedResponse();
        Assert.fail("Expected to get an IllegalClusterStateException but did not");
    } catch (final IllegalClusterStateException e) {
        // Expected
    } catch (final Exception e) {
        Assert.fail(e.toString());
    } finally {
        replicator.shutdown();
    }
}
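The overridden replicateRequest above reaches into Jersey's Invocation via Mockito's Whitebox helper to read the private requestContext field. For reference, a minimal sketch of the same idea using plain java.lang.reflect (the helper class name is made up; it is not part of NiFi or Mockito):

import java.lang.reflect.Field;

public final class FieldPeeker {
    private FieldPeeker() {
    }

    /** Reads a private field by name, searching up the class hierarchy. */
    @SuppressWarnings("unchecked")
    public static <T> T getInternalState(final Object target, final String fieldName) {
        Class<?> clazz = target.getClass();
        while (clazz != null) {
            try {
                final Field field = clazz.getDeclaredField(fieldName);
                field.setAccessible(true);
                return (T) field.get(target);
            } catch (final NoSuchFieldException e) {
                clazz = clazz.getSuperclass();
            } catch (final IllegalAccessException e) {
                throw new IllegalStateException(e);
            }
        }
        throw new IllegalArgumentException("No field named " + fieldName + " on " + target.getClass());
    }
}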

Example 4 with IllegalClusterStateException

Use of org.apache.nifi.cluster.manager.exception.IllegalClusterStateException in project nifi by apache.

From the class StandardNiFiContentAccess, method getContent:

@Override
public DownloadableContent getContent(final ContentRequestContext request) {
    // if clustered, send request to cluster manager
    if (properties.isClustered() && clusterCoordinator != null && clusterCoordinator.isConnected()) {
        // get the URI
        URI dataUri;
        try {
            dataUri = new URI(request.getDataUri());
        } catch (final URISyntaxException use) {
            throw new ClusterRequestException(use);
        }
        // set the request parameters
        final MultivaluedMap<String, String> parameters = new MultivaluedHashMap();
        parameters.add(CLIENT_ID_PARAM, request.getClientId());
        // set the headers
        final Map<String, String> headers = new HashMap<>();
        // ensure we were able to detect the cluster node id
        if (request.getClusterNodeId() == null) {
            throw new IllegalArgumentException("Unable to determine the which node has the content.");
        }
        // get the target node and ensure it exists
        final NodeIdentifier nodeId = clusterCoordinator.getNodeIdentifier(request.getClusterNodeId());
        // replicate the request to the cluster coordinator, indicating the target node
        NodeResponse nodeResponse;
        try {
            headers.put(RequestReplicator.REPLICATION_TARGET_NODE_UUID_HEADER, nodeId.getId());
            final NodeIdentifier coordinatorNode = clusterCoordinator.getElectedActiveCoordinatorNode();
            if (coordinatorNode == null) {
                throw new NoClusterCoordinatorException();
            }
            final Set<NodeIdentifier> coordinatorNodes = Collections.singleton(coordinatorNode);
            nodeResponse = requestReplicator.replicate(coordinatorNodes, HttpMethod.GET, dataUri, parameters, headers, false, true).awaitMergedResponse();
        } catch (InterruptedException e) {
            throw new IllegalClusterStateException("Interrupted while waiting for a response from node");
        }
        final Response clientResponse = nodeResponse.getClientResponse();
        final MultivaluedMap<String, String> responseHeaders = clientResponse.getStringHeaders();
        // ensure an appropriate response
        if (Response.Status.NOT_FOUND.getStatusCode() == clientResponse.getStatusInfo().getStatusCode()) {
            throw new ResourceNotFoundException(clientResponse.readEntity(String.class));
        } else if (Response.Status.FORBIDDEN.getStatusCode() == clientResponse.getStatusInfo().getStatusCode() || Response.Status.UNAUTHORIZED.getStatusCode() == clientResponse.getStatusInfo().getStatusCode()) {
            throw new AccessDeniedException(clientResponse.readEntity(String.class));
        } else if (Response.Status.OK.getStatusCode() != clientResponse.getStatusInfo().getStatusCode()) {
            throw new IllegalStateException(clientResponse.readEntity(String.class));
        }
        // get the file name
        final String contentDisposition = responseHeaders.getFirst("Content-Disposition");
        final String filename = StringUtils.substringBetween(contentDisposition, "filename=\"", "\"");
        // get the content type
        final String contentType = responseHeaders.getFirst("Content-Type");
        // create the downloadable content
        return new DownloadableContent(filename, contentType, nodeResponse.getInputStream());
    } else {
        // example URIs:
        // http://localhost:8080/nifi-api/provenance/events/{id}/content/{input|output}
        // http://localhost:8080/nifi-api/flowfile-queues/{uuid}/flowfiles/{uuid}/content
        // get just the context path for comparison
        final String dataUri = StringUtils.substringAfter(request.getDataUri(), "/nifi-api");
        if (StringUtils.isBlank(dataUri)) {
            throw new IllegalArgumentException("The specified data reference URI is not valid.");
        }
        // flowfile listing content
        final Matcher flowFileMatcher = FLOWFILE_CONTENT_URI_PATTERN.matcher(dataUri);
        if (flowFileMatcher.matches()) {
            final String connectionId = flowFileMatcher.group(1);
            final String flowfileId = flowFileMatcher.group(2);
            return getFlowFileContent(connectionId, flowfileId, dataUri);
        }
        // provenance event content
        final Matcher provenanceMatcher = PROVENANCE_CONTENT_URI_PATTERN.matcher(dataUri);
        if (provenanceMatcher.matches()) {
            try {
                final Long eventId = Long.parseLong(provenanceMatcher.group(1));
                final ContentDirection direction = ContentDirection.valueOf(provenanceMatcher.group(2).toUpperCase());
                return getProvenanceEventContent(eventId, dataUri, direction);
            } catch (final IllegalArgumentException iae) {
                throw new IllegalArgumentException("The specified data reference URI is not valid.");
            }
        }
        // invalid uri
        throw new IllegalArgumentException("The specified data reference URI is not valid.");
    }
}
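The method above dispatches on two precompiled patterns, FLOWFILE_CONTENT_URI_PATTERN and PROVENANCE_CONTENT_URI_PATTERN, whose definitions are not part of this excerpt. Below is a hedged sketch of plausible shapes for them, inferred only from the example URIs in the comments; the real constants in StandardNiFiContentAccess may differ.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ContentUriPatternsSketch {

    // Assumed shape: /flowfile-queues/{connection-uuid}/flowfiles/{flowfile-uuid}/content
    static final Pattern FLOWFILE_CONTENT_URI_PATTERN = Pattern.compile(
            "/flowfile-queues/([a-f0-9\\-]{36})/flowfiles/([a-f0-9\\-]{36})/content");

    // Assumed shape: /provenance/events/{id}/content/{input|output}
    static final Pattern PROVENANCE_CONTENT_URI_PATTERN = Pattern.compile(
            "/provenance/events/([0-9]+)/content/(input|output)");

    public static void main(final String[] args) {
        final Matcher matcher = FLOWFILE_CONTENT_URI_PATTERN.matcher(
                "/flowfile-queues/11111111-1111-1111-1111-111111111111/flowfiles/22222222-2222-2222-2222-222222222222/content");
        if (matcher.matches()) {
            // group(1) is the connection id, group(2) the flowfile id, as in the real dispatch code
            System.out.println("connection=" + matcher.group(1) + " flowfile=" + matcher.group(2));
        }
    }
}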

Aggregations

URI (java.net.URI): 4 usages
URISyntaxException (java.net.URISyntaxException): 4 usages
HashMap (java.util.HashMap): 4 usages
Response (javax.ws.rs.core.Response): 4 usages
NodeResponse (org.apache.nifi.cluster.manager.NodeResponse): 4 usages
IllegalClusterStateException (org.apache.nifi.cluster.manager.exception.IllegalClusterStateException): 4 usages
NodeIdentifier (org.apache.nifi.cluster.protocol.NodeIdentifier): 4 usages
HashSet (java.util.HashSet): 3 usages
Map (java.util.Map): 3 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 3 usages
Invocation (javax.ws.rs.client.Invocation): 3 usages
ClusterCoordinator (org.apache.nifi.cluster.coordination.ClusterCoordinator): 3 usages
ConnectingNodeMutableRequestException (org.apache.nifi.cluster.manager.exception.ConnectingNodeMutableRequestException): 3 usages
DisconnectedNodeMutableRequestException (org.apache.nifi.cluster.manager.exception.DisconnectedNodeMutableRequestException): 3 usages
NiFiProperties (org.apache.nifi.util.NiFiProperties): 3 usages
Collections (java.util.Collections): 2 usages
List (java.util.List): 2 usages
LongSummaryStatistics (java.util.LongSummaryStatistics): 2 usages
Entry (java.util.Map.Entry): 2 usages
Objects (java.util.Objects): 2 usages