Use of com.nike.riposte.server.http.Endpoint in project riposte by Nike-Inc.
From the class ProxyRouterEndpointExecutionHandler, method doChannelRead:
@Override
public PipelineContinuationBehavior doChannelRead(ChannelHandlerContext ctx, Object msg) {
    HttpProcessingState state = ChannelAttributes.getHttpProcessingStateForChannel(ctx).get();
    Endpoint<?> endpoint = state.getEndpointForExecution();

    if (shouldHandleDoChannelReadMessage(msg, endpoint)) {
        ProxyRouterProcessingState proxyRouterState = getOrCreateProxyRouterProcessingState(ctx);
        ProxyRouterEndpoint endpointProxyRouter = ((ProxyRouterEndpoint) endpoint);
        RequestInfo<?> requestInfo = state.getRequestInfo();

        if (msg instanceof HttpRequest) {
            if (requestInfo instanceof RiposteInternalRequestInfo) {
                // Tell this RequestInfo that we'll be managing the release of content chunks, so that when
                // RequestInfo.releaseAllResources() is called we don't have extra reference count removals.
                ((RiposteInternalRequestInfo) requestInfo).contentChunksWillBeReleasedExternally();
            }

            // We're supposed to start streaming. There may be pre-endpoint-execution validation logic or other
            // work that needs to happen before the endpoint is executed, so set up the CompletableFuture for the
            // endpoint call to only execute if the pre-endpoint-execution validation/work chain is successful.
            CompletableFuture<DownstreamRequestFirstChunkInfo> firstChunkFuture =
                state.getPreEndpointExecutionWorkChain()
                     .thenCompose(functionWithTracingAndMdc(
                         aVoid -> endpointProxyRouter.getDownstreamRequestFirstChunkInfo(
                             requestInfo, longRunningTaskExecutor, ctx),
                         ctx));

            Long endpointTimeoutOverride = endpointProxyRouter.completableFutureTimeoutOverrideMillis();
            long callTimeoutValueToUse = (endpointTimeoutOverride == null)
                                         ? defaultCompletableFutureTimeoutMillis
                                         : endpointTimeoutOverride;

            // When the first chunk is ready, stream it downstream and set up what happens afterward.
            firstChunkFuture.whenComplete((downstreamRequestFirstChunkInfo, throwable) -> {
                Optional<ManualModeTask<HttpResponse>> circuitBreakerManualTask =
                    getCircuitBreaker(downstreamRequestFirstChunkInfo, ctx)
                        .map(CircuitBreaker::newManualModeTask);

                StreamingCallback callback = new StreamingCallbackForCtx(
                    ctx, circuitBreakerManualTask, endpointProxyRouter, requestInfo, proxyRouterState);

                if (throwable != null) {
                    // Something blew up trying to determine the first chunk info.
                    callback.unrecoverableErrorOccurred(throwable, true);
                }
                else if (!ctx.channel().isOpen()) {
                    // The channel was closed for some reason before we were able to start streaming.
                    String errorMsg = "The channel from the original caller was closed before we could begin the "
                                      + "downstream call.";
                    Exception channelClosedException = new RuntimeException(errorMsg);
                    runnableWithTracingAndMdc(() -> logger.warn(errorMsg), ctx).run();
                    callback.unrecoverableErrorOccurred(channelClosedException, true);
                }
                else {
                    try {
                        // Ok we have the first chunk info. Start by setting the downstream call info in the
                        // request info (i.e. for access logs if desired)
                        requestInfo.addRequestAttribute(
                            DOWNSTREAM_CALL_PATH_REQUEST_ATTR_KEY,
                            HttpUtils.extractPath(downstreamRequestFirstChunkInfo.firstChunk.uri()));

                        // Try our circuit breaker (if we have one).
                        Throwable circuitBreakerException = null;
                        try {
                            circuitBreakerManualTask.ifPresent(ManualModeTask::throwExceptionIfCircuitBreakerIsOpen);
                        }
                        catch (Throwable t) {
                            circuitBreakerException = t;
                        }

                        if (circuitBreakerException == null) {
                            // No circuit breaker, or the breaker is closed. We can now stream the first chunk info.
                            String downstreamHost = downstreamRequestFirstChunkInfo.host;
                            int downstreamPort = downstreamRequestFirstChunkInfo.port;
                            HttpRequest downstreamRequestFirstChunk = downstreamRequestFirstChunkInfo.firstChunk;
                            boolean isSecureHttpsCall = downstreamRequestFirstChunkInfo.isHttps;
                            boolean relaxedHttpsValidation = downstreamRequestFirstChunkInfo.relaxedHttpsValidation;
                            boolean performSubSpanAroundDownstreamCall =
                                downstreamRequestFirstChunkInfo.performSubSpanAroundDownstreamCall;
                            boolean addTracingHeadersToDownstreamCall =
                                downstreamRequestFirstChunkInfo.addTracingHeadersToDownstreamCall;

                            // Tell the proxyRouterState about the streaming callback so that
                            // callback.unrecoverableErrorOccurred(...) can be called in the case of an error
                            // on subsequent chunks.
                            proxyRouterState.setStreamingCallback(callback);

                            // Setup the streaming channel future with everything it needs to kick off the
                            // downstream request.
                            proxyRouterState.setStreamingStartTimeNanos(System.nanoTime());
                            CompletableFuture<StreamingChannel> streamingChannel =
                                streamingAsyncHttpClient.streamDownstreamCall(
                                    downstreamHost, downstreamPort, downstreamRequestFirstChunk, isSecureHttpsCall,
                                    relaxedHttpsValidation, callback, callTimeoutValueToUse,
                                    performSubSpanAroundDownstreamCall, addTracingHeadersToDownstreamCall,
                                    proxyRouterState, requestInfo, ctx);

                            // Tell the streaming channel future what to do when it completes.
                            streamingChannel = streamingChannel.whenComplete((sc, cause) -> {
                                if (cause == null) {
                                    // Successfully connected and sent the first chunk. We can now safely let
                                    // the remaining content chunks through for streaming.
                                    proxyRouterState.triggerChunkProcessing(sc);
                                }
                                else {
                                    // Something blew up while connecting to the downstream server.
                                    callback.unrecoverableErrorOccurred(cause, true);
                                }
                            });

                            // Set the streaming channel future on the state so it can be connected to.
                            proxyRouterState.setStreamingChannelCompletableFuture(streamingChannel);
                        }
                        else {
                            // Circuit breaker is tripped (or otherwise threw an unexpected exception). Immediately
                            // short circuit the error back to the client.
                            callback.unrecoverableErrorOccurred(circuitBreakerException, true);
                        }
                    }
                    catch (Throwable t) {
                        callback.unrecoverableErrorOccurred(t, true);
                    }
                }
            });
        }
        else if (msg instanceof HttpContent) {
            HttpContent msgContent = (HttpContent) msg;

            // Register the chunk-streaming behavior and subsequent cleanup for the given HttpContent, unless
            // the stream has already failed (in which case the chunk is simply released).
            if (!releaseContentChunkIfStreamAlreadyFailed(msgContent, proxyRouterState)) {
                registerChunkStreamingAction(proxyRouterState, msgContent, ctx);
            }
        }

        return PipelineContinuationBehavior.DO_NOT_FIRE_CONTINUE_EVENT;
    }

    return PipelineContinuationBehavior.CONTINUE;
}
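
The handler above only consumes the ProxyRouterEndpoint contract: it asks the endpoint for the downstream call's first-chunk info and then does all of the streaming itself. For orientation, here is a minimal sketch of such an endpoint. The host, port, and paths are placeholders, and the DownstreamRequestFirstChunkInfo constructor arguments (host, port, isHttps, firstChunk) are an assumption about its shape rather than a quote of the riposte API, so verify against the actual class before reusing this.

// Hypothetical proxy endpoint sketch (not taken from the riposte sources); imports omitted,
// matching the style of the surrounding snippets.
public class ExamplePassthroughEndpoint extends ProxyRouterEndpoint {

    @Override
    public Matcher requestMatcher() {
        // Route anything under /proxy/** to this endpoint.
        return Matcher.match("/proxy/**");
    }

    @Override
    public CompletableFuture<DownstreamRequestFirstChunkInfo> getDownstreamRequestFirstChunkInfo(
        RequestInfo<?> request, Executor longRunningTaskExecutor, ChannelHandlerContext ctx
    ) {
        // Build the first chunk of the downstream request using plain Netty types. A real endpoint
        // would typically copy the method, headers, and (possibly rewritten) path from the incoming
        // RequestInfo rather than hardcoding them.
        HttpRequest firstChunk = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/downstream/path");

        // Assumed constructor shape: (host, port, isHttps, firstChunk).
        return CompletableFuture.completedFuture(
            new DownstreamRequestFirstChunkInfo("internal-service.example.com", 8080, false, firstChunk)
        );
    }
}

Once this future completes, doChannelRead(...) above takes over: it wires up the StreamingCallback, consults the circuit breaker, and opens the streaming channel for the remaining content chunks.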
Use of com.nike.riposte.server.http.Endpoint in project riposte by Nike-Inc.
From the class PolymorphicSecurityValidatorTest, method constructorSingleValidator:
@Test
public void constructorSingleValidator() {
    Endpoint mockEndpoint = mock(Endpoint.class);
    RequestSecurityValidator innerValidator = mock(RequestSecurityValidator.class);
    doReturn(Collections.singletonList(mockEndpoint)).when(innerValidator).endpointsToValidate();

    List<RequestSecurityValidator> validatorList = new ArrayList<>();
    validatorList.add(innerValidator);

    Map<Endpoint<?>, List<RequestSecurityValidator>> validationMap =
        new PolymorphicSecurityValidator(validatorList).validationMap;

    assertThat(validationMap.size(), is(1));
    assertThat(validationMap.get(mockEndpoint).size(), is(1));
    assertThat(validationMap.get(mockEndpoint).get(0), is(innerValidator));
}
Use of com.nike.riposte.server.http.Endpoint in project riposte by Nike-Inc.
From the class PolymorphicSecurityValidatorTest, method constructorMultipleValidators:
@Test
public void constructorMultipleValidators() {
    Endpoint mockEndpoint = mock(Endpoint.class);
    RequestSecurityValidator innerVal1 = mock(RequestSecurityValidator.class);
    doReturn(Collections.singletonList(mockEndpoint)).when(innerVal1).endpointsToValidate();
    RequestSecurityValidator innerVal2 = mock(RequestSecurityValidator.class);
    doReturn(Collections.singletonList(mockEndpoint)).when(innerVal2).endpointsToValidate();

    List<RequestSecurityValidator> validatorList = new ArrayList<>();
    validatorList.add(innerVal1);
    validatorList.add(innerVal2);

    Map<Endpoint<?>, List<RequestSecurityValidator>> validationMap =
        new PolymorphicSecurityValidator(validatorList).validationMap;

    assertThat(validationMap.size(), is(1));
    assertThat(validationMap.get(mockEndpoint).size(), is(2));
    assertThat(validationMap.get(mockEndpoint).get(0), is(innerVal1));
    assertThat(validationMap.get(mockEndpoint).get(1), is(innerVal2));
}
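
Taken together, these two tests pin down the constructor's behavior: every endpoint an inner validator reports via endpointsToValidate() becomes a key in validationMap, and validators that share an endpoint are appended to the same list in registration order. A map-building loop of roughly the following shape would satisfy both tests (a sketch of the implied behavior, not the actual riposte implementation):

// Sketch of the grouping behavior the tests assert; not the riposte source itself.
private static Map<Endpoint<?>, List<RequestSecurityValidator>> buildValidationMap(
    List<RequestSecurityValidator> validators
) {
    Map<Endpoint<?>, List<RequestSecurityValidator>> map = new HashMap<>();
    if (validators == null) {
        return map;
    }

    for (RequestSecurityValidator validator : validators) {
        for (Endpoint<?> endpoint : validator.endpointsToValidate()) {
            // Validators registered for the same endpoint accumulate in insertion order, which is why
            // constructorMultipleValidators sees innerVal1 at index 0 and innerVal2 at index 1.
            map.computeIfAbsent(endpoint, k -> new ArrayList<>()).add(validator);
        }
    }
    return map;
}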
Use of com.nike.riposte.server.http.Endpoint in project riposte by Nike-Inc.
From the class CodahaleMetricsListenerTest, method initServerConfigMetrics_adds_expected_metrics:
@DataProvider(value = { "true", "false" })
@Test
public void initServerConfigMetrics_adds_expected_metrics(boolean includeServerConfigMetrics) {
    // given
    setupMetricRegistryAndCodahaleMetricsCollector();
    CodahaleMetricsListener instance = CodahaleMetricsListener
        .newBuilder(cmcMock)
        .withEndpointMetricsHandler(endpointMetricsHandlerMock)
        .withIncludeServerConfigMetrics(includeServerConfigMetrics)
        .build();
    verifyServerStatisticMetrics(instance);

    String expectedBossThreadsGaugeName = name(ServerConfig.class.getSimpleName(), "boss_threads");
    String expectedWorkerThreadsGaugeName = name(ServerConfig.class.getSimpleName(), "worker_threads");
    String expectedMaxRequestSizeInBytesGaugeName =
        name(ServerConfig.class.getSimpleName(), "max_request_size_in_bytes");
    String expectedEndpointsListGaugeName = name(ServerConfig.class.getSimpleName(), "endpoints");
    List<String> expectedEndpointsListValue = serverConfig
        .appEndpoints()
        .stream()
        .map(endpoint -> endpoint.getClass().getName()
                         + "-" + instance.getMatchingHttpMethodsAsCombinedString(endpoint)
                         + "-" + endpoint.requestMatcher().matchingPathTemplates())
        .collect(Collectors.toList());

    // when
    instance.initEndpointAndServerConfigMetrics(serverConfig);

    // then
    if (includeServerConfigMetrics) {
        // Metrics for server config values
        assertThat(registeredGauges).containsKey(expectedBossThreadsGaugeName);
        assertThat(registeredGauges.get(expectedBossThreadsGaugeName).getValue())
            .isEqualTo(serverConfig.numBossThreads());
        verify(cmcMock).registerNamedMetric(expectedBossThreadsGaugeName,
                                            registeredGauges.get(expectedBossThreadsGaugeName));

        assertThat(registeredGauges).containsKey(expectedWorkerThreadsGaugeName);
        assertThat(registeredGauges.get(expectedWorkerThreadsGaugeName).getValue())
            .isEqualTo(serverConfig.numWorkerThreads());
        verify(cmcMock).registerNamedMetric(expectedWorkerThreadsGaugeName,
                                            registeredGauges.get(expectedWorkerThreadsGaugeName));

        assertThat(registeredGauges).containsKey(expectedMaxRequestSizeInBytesGaugeName);
        assertThat(registeredGauges.get(expectedMaxRequestSizeInBytesGaugeName).getValue())
            .isEqualTo(serverConfig.maxRequestSizeInBytes());
        verify(cmcMock).registerNamedMetric(expectedMaxRequestSizeInBytesGaugeName,
                                            registeredGauges.get(expectedMaxRequestSizeInBytesGaugeName));

        assertThat(registeredGauges).containsKey(expectedEndpointsListGaugeName);
        assertThat(registeredGauges.get(expectedEndpointsListGaugeName).getValue())
            .isEqualTo(expectedEndpointsListValue);
        verify(cmcMock).registerNamedMetric(expectedEndpointsListGaugeName,
                                            registeredGauges.get(expectedEndpointsListGaugeName));
    }
    else {
        // No server config values should have been registered.
        verifyNoMoreInteractions(metricRegistryMock);
    }

    // In either case, the EndpointMetricsHandler should have been called to delegate setting up
    // endpoint-specific metrics.
    verify(endpointMetricsHandlerMock).setupEndpointsMetrics(serverConfig, metricRegistryMock);
}
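
The expected gauge names above are produced by Dropwizard's MetricRegistry.name(...) helper, which joins its arguments with dots, so the boss-thread gauge ends up registered as "ServerConfig.boss_threads". The snippet below illustrates that naming and registration pattern with plain Dropwizard Metrics, independent of the riposte listener (the value 3 mirrors the numBossThreads() used in these tests):

import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;

import static com.codahale.metrics.MetricRegistry.name;

public class ServerConfigGaugeExample {
    public static void main(String[] args) {
        MetricRegistry registry = new MetricRegistry();

        // name("ServerConfig", "boss_threads") produces the dotted metric name "ServerConfig.boss_threads".
        String bossThreadsGaugeName = name("ServerConfig", "boss_threads");

        // Register a gauge whose value is read lazily each time the metric is reported.
        registry.register(bossThreadsGaugeName, (Gauge<Integer>) () -> 3);

        System.out.println(registry.getGauges().get(bossThreadsGaugeName).getValue()); // prints 3
    }
}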
Use of com.nike.riposte.server.http.Endpoint in project riposte by Nike-Inc.
From the class CodahaleMetricsListenerTest, method beforeMethod:
@Before
public void beforeMethod() {
    setupMetricRegistryAndCodahaleMetricsCollector();

    endpointMetricsHandlerMock = mock(EndpointMetricsHandler.class);
    mockHistogramSupplier = () -> mock(Histogram.class);
    listener = new CodahaleMetricsListener(cmcMock, endpointMetricsHandlerMock, true, null, null,
                                           mockHistogramSupplier);

    serverConfig = new ServerConfig() {
        private final List<Endpoint<?>> endpoints = Arrays.asList(
            new DummyEndpoint(Matcher.match("/foo")),
            new DummyEndpoint(Matcher.match("/bar", HttpMethod.POST, HttpMethod.PUT)),
            new DummyEndpoint(Matcher.multiMatch(Arrays.asList("/multiFoo", "/multiBar"))),
            new DummyEndpoint(Matcher.multiMatch(Arrays.asList("/multiBaz", "/multiBat"),
                                                 HttpMethod.PATCH, HttpMethod.OPTIONS))
        );

        @Override
        @NotNull
        public Collection<@NotNull Endpoint<?>> appEndpoints() {
            return endpoints;
        }

        @Override
        public int numBossThreads() {
            return 3;
        }

        @Override
        public int numWorkerThreads() {
            return 42;
        }

        @Override
        public int maxRequestSizeInBytes() {
            return 42434445;
        }
    };
    listener.initEndpointAndServerConfigMetrics(serverConfig);

    requestInfoMock = mock(RequestInfo.class);
    responseInfoMock = mock(ResponseInfo.class);

    state = new HttpProcessingState();
    state.setRequestInfo(requestInfoMock);
    state.setResponseInfo(responseInfoMock, null);

    requestStartTimeNanos = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(42);
    state.setRequestStartTimeNanos(requestStartTimeNanos);
}
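
The DummyEndpoint used inside the anonymous ServerConfig is not shown in this excerpt; conceptually it only needs to hand back the Matcher it was constructed with. A possible stand-in is sketched below, assuming riposte's StandardEndpoint base class and ResponseInfo builder (both the base class and the exact execute signature should be checked against the version in use; the real test class may differ):

// Hypothetical stand-in for the DummyEndpoint referenced above; not the actual test class.
static class DummyEndpoint extends StandardEndpoint<Void, Void> {

    private final Matcher matcher;

    DummyEndpoint(Matcher matcher) {
        this.matcher = matcher;
    }

    @Override
    public Matcher requestMatcher() {
        // The matcher passed in the constructor (e.g. Matcher.match("/foo")) is what drives the
        // "endpoints" gauge value and the per-endpoint metrics set up by the listener.
        return matcher;
    }

    @Override
    public CompletableFuture<ResponseInfo<Void>> execute(RequestInfo<Void> request,
                                                         Executor longRunningTaskExecutor,
                                                         ChannelHandlerContext ctx) {
        // The metrics tests never execute the endpoint, so a canned empty response is enough.
        return CompletableFuture.completedFuture(ResponseInfo.<Void>newBuilder().build());
    }
}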