Use of com.linkedin.r2.message.stream.StreamResponseBuilder in project rest.li by linkedin.
The class TestJetty404, method setup.
@BeforeClass
public void setup() throws IOException {
  _clientFactory = new HttpClientFactory.Builder().build();
  _client = new TransportClientAdapter(_clientFactory.getClient(Collections.<String, String>emptyMap()), true);
  _server = new HttpServerFactory().createH2cServer(PORT, "/correct-path", 50, new TransportDispatcher() {

    @Override
    public void handleRestRequest(RestRequest req, Map<String, String> wireAttrs, RequestContext requestContext, TransportCallback<RestResponse> callback) {
      callback.onResponse(TransportResponseImpl.success(new RestResponseBuilder().build()));
    }

    @Override
    public void handleStreamRequest(StreamRequest req, Map<String, String> wireAttrs, RequestContext requestContext, TransportCallback<StreamResponse> callback) {
      req.getEntityStream().setReader(new DrainReader());
      callback.onResponse(TransportResponseImpl.success(new StreamResponseBuilder().build(EntityStreams.emptyStream())));
    }
  }, true);
  _server.start();
}
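The matching teardown is not part of this snippet; a minimal sketch of one, assuming the usual rest.li shutdown sequence (FutureCallback and None from com.linkedin.common, plus the HttpServer stop/waitForStop methods) rather than the test's actual code:

@AfterClass
public void tearDown() throws Exception {
  // Shut down the transport client, then the client factory, blocking on each callback (assumed sequence).
  FutureCallback<None> clientShutdown = new FutureCallback<>();
  _client.shutdown(clientShutdown);
  clientShutdown.get(30, TimeUnit.SECONDS);
  FutureCallback<None> factoryShutdown = new FutureCallback<>();
  _clientFactory.shutdown(factoryShutdown);
  factoryShutdown.get(30, TimeUnit.SECONDS);
  // Stop the server started in setup() and wait for it to finish stopping.
  if (_server != null) {
    _server.stop();
    _server.waitForStop();
  }
}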
Use of com.linkedin.r2.message.stream.StreamResponseBuilder in project rest.li by linkedin.
The class ServerStreamCompressionFilter, method onStreamResponse.
/**
 * Optionally compresses the outgoing response.
 */
@Override
public void onStreamResponse(final StreamResponse res, final RequestContext requestContext, final Map<String, String> wireAttrs, final NextFilter<StreamRequest, StreamResponse> nextFilter) {
  StreamResponse response = res;
  try {
    String responseCompression = (String) requestContext.getLocalAttr(HttpConstants.ACCEPT_ENCODING);
    if (responseCompression == null) {
      throw new CompressionException(HttpConstants.ACCEPT_ENCODING + " not in local attribute.");
    }
    List<AcceptEncoding> parsedEncodings = AcceptEncoding.parseAcceptEncodingHeader(responseCompression, _supportedEncoding);
    StreamEncodingType selectedEncoding = AcceptEncoding.chooseBest(parsedEncodings);
    // Check whether an acceptable encoding exists
    if (selectedEncoding == null) {
      // No acceptable encoding: respond 406 Not Acceptable
      response = new StreamResponseBuilder().setStatus(HttpConstants.NOT_ACCEPTABLE).build(EntityStreams.emptyStream());
    } else if (selectedEncoding != StreamEncodingType.IDENTITY) {
      final int threshold = (Integer) requestContext.getLocalAttr(HttpConstants.HEADER_RESPONSE_COMPRESSION_THRESHOLD);
      final StreamingCompressor compressor = selectedEncoding.getCompressor(_executor);
      PartialReader reader = new PartialReader(threshold, new Callback<EntityStream[]>() {

        @Override
        public void onError(Throwable ex) {
          nextFilter.onError(ex, requestContext, wireAttrs);
        }

        @Override
        public void onSuccess(EntityStream[] results) {
          if (results.length == 1) {
            // Entity stream is smaller than the threshold; pass it through uncompressed.
            StreamResponse response = res.builder().build(results[0]);
            nextFilter.onResponse(response, requestContext, wireAttrs);
          } else {
            EntityStream compressedStream = compressor.deflate(EntityStreams.newEntityStream(new CompositeWriter(results)));
            StreamResponseBuilder builder = res.builder();
            // Remove the original Content-Length header if present.
            if (builder.getHeader(HttpConstants.CONTENT_LENGTH) != null) {
              Map<String, String> headers = stripHeaders(builder.getHeaders(), HttpConstants.CONTENT_LENGTH);
              builder.setHeaders(headers);
            }
            StreamResponse response = builder.addHeaderValue(HttpConstants.CONTENT_ENCODING, compressor.getContentEncodingName()).build(compressedStream);
            nextFilter.onResponse(response, requestContext, wireAttrs);
          }
        }
      });
      res.getEntityStream().setReader(reader);
      return;
    }
  } catch (CompressionException e) {
    LOG.error(e.getMessage(), e.getCause());
  }
  nextFilter.onResponse(response, requestContext, wireAttrs);
}
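The encoding negotiation above reduces to two AcceptEncoding calls; a hedged sketch of that step in isolation, reusing the filter's _supportedEncoding field and only the API already shown in the method (a null result is what triggers the 406 branch):

// Sketch only: pick the best response encoding for a raw Accept-Encoding header value.
private StreamEncodingType negotiateEncoding(String acceptEncodingHeader) throws CompressionException {
  List<AcceptEncoding> parsedEncodings = AcceptEncoding.parseAcceptEncodingHeader(acceptEncodingHeader, _supportedEncoding);
  // Returns null when no acceptable encoding exists, which maps to HttpConstants.NOT_ACCEPTABLE above.
  return AcceptEncoding.chooseBest(parsedEncodings);
}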
Use of com.linkedin.r2.message.stream.StreamResponseBuilder in project rest.li by linkedin.
The class ServerStreamCompressionFilter, method onStreamRequest.
/**
 * Handles compression tasks for incoming requests.
 */
@Override
public void onStreamRequest(StreamRequest req, RequestContext requestContext, Map<String, String> wireAttrs, NextFilter<StreamRequest, StreamResponse> nextFilter) {
  try {
    // Check whether the request is compressed; if so, decompress it.
    String requestContentEncoding = req.getHeader(HttpConstants.CONTENT_ENCODING);
    if (requestContentEncoding != null) {
      // This must be a specific compression type other than *
      StreamEncodingType encoding = StreamEncodingType.get(requestContentEncoding.trim().toLowerCase());
      if (encoding == null || encoding == StreamEncodingType.ANY) {
        throw new CompressionException(CompressionConstants.UNSUPPORTED_ENCODING + requestContentEncoding);
      }
      // Process only the supported content-encoding types.
      StreamingCompressor compressor = encoding.getCompressor(_executor);
      if (compressor == null) {
        throw new CompressionException(CompressionConstants.UNKNOWN_ENCODING + encoding);
      }
      EntityStream uncompressedStream = compressor.inflate(req.getEntityStream());
      Map<String, String> headers = stripHeaders(req.getHeaders(), HttpConstants.CONTENT_ENCODING, HttpConstants.CONTENT_LENGTH);
      req = req.builder().setHeaders(headers).build(uncompressedStream);
    }
    // Determine the client's support for response compression and flag compression if needed.
    String responseCompression = req.getHeader(HttpConstants.ACCEPT_ENCODING);
    if (responseCompression == null) {
      // Per RFC 2616, section 14.3, if no Accept-Encoding field is present in a request,
      // the server SHOULD use the "identity" content-coding if it is available.
      responseCompression = StreamEncodingType.IDENTITY.getHttpName();
    }
    if (!responseCompression.equalsIgnoreCase(StreamEncodingType.IDENTITY.getHttpName())) {
      requestContext.putLocalAttr(HttpConstants.HEADER_RESPONSE_COMPRESSION_THRESHOLD, _serverCompressionHelper.getResponseCompressionThreshold(req));
    }
    requestContext.putLocalAttr(HttpConstants.ACCEPT_ENCODING, responseCompression);
    nextFilter.onRequest(req, requestContext, wireAttrs);
  } catch (CompressionException ex) {
    LOG.error(ex.getMessage(), ex.getCause());
    StreamResponse streamResponse = new StreamResponseBuilder().setStatus(HttpConstants.UNSUPPORTED_MEDIA_TYPE).build(EntityStreams.emptyStream());
    nextFilter.onError(new StreamException(streamResponse, ex), requestContext, wireAttrs);
  }
}
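Both filter methods call a stripHeaders helper whose body is not shown in these snippets; an illustrative sketch of what such a helper could look like (this is an assumption, not the rest.li implementation), bearing in mind that HTTP header names compare case-insensitively:

// Hypothetical helper: return a copy of the headers with the given names removed, case-insensitively.
private static Map<String, String> stripHeaders(Map<String, String> headers, String... names) {
  Map<String, String> copy = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
  copy.putAll(headers);
  for (String name : names) {
    copy.remove(name);
  }
  return copy;
}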
Use of com.linkedin.r2.message.stream.StreamResponseBuilder in project rest.li by linkedin.
The class TestBackupRequestsClient, method createAlwaysBackupClientWithHosts.
private BackupRequestsClient createAlwaysBackupClientWithHosts(List<String> uris, Deque<URI> hostsReceivingRequestList, int responseDelayNano, int backupDelayNano, boolean isD2Async) throws IOException {
  Map<URI, Map<Integer, PartitionData>> partitionDescriptions = new HashMap<>();
  uris.forEach(uri -> partitionDescriptions.put(URI.create(uri), Collections.singletonMap(0, new PartitionData(1))));
  StaticLoadBalancerState LbState = new StaticLoadBalancerState() {

    @Override
    public TrackerClient getClient(String serviceName, URI uri) {
      return new DegraderTrackerClientImpl(uri, partitionDescriptions.get(uri), null, SystemClock.instance(), null) {

        @Override
        public void restRequest(RestRequest request, RequestContext requestContext, Map<String, String> wireAttrs, TransportCallback<RestResponse> callback) {
          // Whenever a trackerClient is used to make a request, record its host name.
          hostsReceivingRequestList.add(uri);
          // Delay the response to allow the backup request to happen.
          _executor.schedule(() -> callback.onResponse(TransportResponseImpl.success(new RestResponseBuilder().build())), responseDelayNano, TimeUnit.NANOSECONDS);
        }

        @Override
        public void streamRequest(StreamRequest request, RequestContext requestContext, Map<String, String> wireAttrs, TransportCallback<StreamResponse> callback) {
          // Whenever a trackerClient is used to make a request, record its host name.
          hostsReceivingRequestList.add(uri);
          if (null != requestContext.getLocalAttr(R2Constants.BACKUP_REQUEST_BUFFERED_BODY)) {
            callback.onResponse(TransportResponseImpl.success(new StreamResponseBuilder().setHeader(BUFFERED_HEADER, String.valueOf(requestContext.getLocalAttr(R2Constants.BACKUP_REQUEST_BUFFERED_BODY) != null)).build(EntityStreams.emptyStream())));
            return;
          }
          request.getEntityStream().setReader(new DrainReader() {

            @Override
            public void onDone() {
              // Delay the response to allow the backup request to happen.
              _executor.schedule(() -> callback.onResponse(TransportResponseImpl.success(new StreamResponseBuilder().setHeader(BUFFERED_HEADER, String.valueOf(requestContext.getLocalAttr(R2Constants.BACKUP_REQUEST_BUFFERED_BODY) != null)).build(EntityStreams.emptyStream()))), responseDelayNano, TimeUnit.NANOSECONDS);
            }
          });
        }
      };
    }
  };
  LbState.TEST_URIS_PARTITIONDESCRIPTIONS.putAll(partitionDescriptions);
  LbState.TEST_SERVICE_BACKUP_REQUEST_PROPERTIES.add(createBackupRequestsConfiguration(5, "get"));
  LbState.refreshDefaultProperties();
  LoadBalancer loadBalancer = new SimpleLoadBalancer(LbState, _executor);
  DynamicClient dynamicClient = new DynamicClient(loadBalancer, null);
  return new BackupRequestsClient(dynamicClient, loadBalancer, _executor, null, 10, TimeUnit.SECONDS, isD2Async) {

    @Override
    Optional<TrackingBackupRequestsStrategy> getStrategyAfterUpdate(final String serviceName, final String operation) {
      // Always enable backup requests after backupDelayNano time.
      BackupRequestsStrategy alwaysBackup = new TestTrackingBackupRequestsStrategy.MockBackupRequestsStrategy(() -> Optional.of((long) backupDelayNano), () -> true);
      return Optional.of(new TrackingBackupRequestsStrategy(alwaysBackup));
    }
  };
}
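A hedged usage sketch of the helper above; the host URIs, the d2 service name, and the delays are placeholders, and the request wiring assumes the standard r2 StreamRequestBuilder / FutureCallback API rather than the actual test body:

// Build an always-backup client over two fake hosts and issue one streaming request.
Deque<URI> hostsReceivingRequest = new ConcurrentLinkedDeque<>();
BackupRequestsClient client = createAlwaysBackupClientWithHosts(
    Arrays.asList("http://host1:8080", "http://host2:8080"),
    hostsReceivingRequest,
    10_000_000,  // responseDelayNano: hold the primary response long enough for a backup to fire
    1_000_000,   // backupDelayNano: send the backup request after roughly 1 ms
    false);
StreamRequest request = new StreamRequestBuilder(URI.create("d2://testService")).build(EntityStreams.emptyStream());
FutureCallback<StreamResponse> callback = new FutureCallback<>();
client.streamRequest(request, new RequestContext(), callback);
StreamResponse response = callback.get(5, TimeUnit.SECONDS);
// hostsReceivingRequest now records which hosts saw the original and the backup request.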
Use of com.linkedin.r2.message.stream.StreamResponseBuilder in project rest.li by linkedin.
The class Http2FrameListener, method onHeadersRead.
@Override
public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, boolean endOfStream) throws Http2Exception {
  LOG.debug("Received HTTP/2 HEADERS frame, stream={}, end={}, headers={}, padding={}bytes", new Object[] { streamId, endOfStream, headers.size(), padding });
  // Ignores response for the upgrade request
  if (streamId == Http2CodecUtil.HTTP_UPGRADE_STREAM_ID) {
    return;
  }
  // Refactored duplicate code to new code pipeline.
  final StreamResponseBuilder builder = Http2MessageDecoders.ResponseDecoder.buildStreamResponse(headers);
  // Gets async pool handle from stream properties
  TimeoutAsyncPoolHandle<?> timeoutHandle = Http2PipelinePropertyUtil.remove(ctx, _connection, streamId, Http2ClientPipelineInitializer.CHANNEL_POOL_HANDLE_ATTR_KEY);
  if (timeoutHandle == null) {
    _lifecycleManager.onError(ctx, false, Http2Exception.connectionError(Http2Error.PROTOCOL_ERROR, "No channel pool handle is associated with this stream", streamId));
    return;
  }
  final StreamResponse response;
  if (endOfStream) {
    response = builder.build(EntityStreams.emptyStream());
    // Release the handle to put the channel back to the pool
    timeoutHandle.release();
  } else {
    // Associate an entity stream writer to the HTTP/2 stream
    final TimeoutBufferedWriter writer = new TimeoutBufferedWriter(ctx, streamId, _maxContentLength, timeoutHandle);
    if (_connection.stream(streamId).setProperty(_writerKey, writer) != null) {
      _lifecycleManager.onError(ctx, false, Http2Exception.connectionError(Http2Error.PROTOCOL_ERROR, "Another writer has already been associated with current stream ID", streamId));
      return;
    }
    // Prepares StreamResponse for the channel pipeline
    EntityStream entityStream = EntityStreams.newEntityStream(writer);
    response = builder.build(entityStream);
  }
  // Gets callback from stream properties
  TransportCallback<?> callback = Http2PipelinePropertyUtil.remove(ctx, _connection, streamId, Http2ClientPipelineInitializer.CALLBACK_ATTR_KEY);
  if (callback != null) {
    ctx.fireChannelRead(new ResponseWithCallback<Response, TransportCallback<?>>(response, callback));
  }
}
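The two branches above differ only in which EntityStream is handed to the builder returned by buildStreamResponse; for comparison, a minimal hedged sketch of building a StreamResponse around a fixed in-memory payload, using the stock ByteStringWriter instead of the TimeoutBufferedWriter:

// Build a streaming response whose entity is a single ByteString written by ByteStringWriter.
ByteString body = ByteString.copyString("hello", StandardCharsets.UTF_8);
StreamResponse response = new StreamResponseBuilder()
    .setStatus(200)
    .setHeader("Content-Type", "text/plain")
    .build(EntityStreams.newEntityStream(new ByteStringWriter(body)));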