Use of com.linkedin.r2.filter.compression.ClientStreamCompressionFilter in project rest.li by linkedin.
The class TestRequestCompression, method requestCompressionData.
@DataProvider
public Object[][] requestCompressionData() {
  StreamEncodingType[] encodings = new StreamEncodingType[] {
      StreamEncodingType.GZIP, StreamEncodingType.DEFLATE,
      StreamEncodingType.SNAPPY_FRAMED, StreamEncodingType.BZIP2 };
  String[] protocols = new String[] { HttpProtocolVersion.HTTP_1_1.name(), HttpProtocolVersion.HTTP_2.name() };
  Object[][] args = new Object[encodings.length * protocols.length][2];
  int cur = 0;
  for (StreamEncodingType requestEncoding : encodings) {
    for (String protocol : protocols) {
      StreamFilter clientCompressionFilter = new ClientStreamCompressionFilter(
          requestEncoding,
          new CompressionConfig(THRESHOLD),
          null,
          new CompressionConfig(THRESHOLD),
          Arrays.asList(new String[] { "*" }),
          _executor);
      TransportClientFactory factory = new HttpClientFactory.Builder()
          .setFilterChain(FilterChains.createStreamChain(clientCompressionFilter))
          .build();
      HashMap<String, String> properties = new HashMap<>();
      properties.put(HttpClientFactory.HTTP_PROTOCOL_VERSION, protocol);
      Client client = new TransportClientAdapter(factory.getClient(properties), true);
      args[cur][0] = client;
      args[cur][1] = URI.create("/" + requestEncoding.getHttpName());
      cur++;
      _clientFactories.add(factory);
      _clients.add(client);
    }
  }
  return args;
}
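The test method that consumes this provider is not shown here. A minimal sketch of what it might look like follows; the method name, PORT constant, payload size, and assertions are illustrative assumptions rather than code from the source.

// Illustrative sketch only: testClientCompression, PORT, and the payload are assumptions.
@Test(dataProvider = "requestCompressionData")
public void testClientCompression(Client client, URI uri) throws Exception {
  byte[] payload = new byte[THRESHOLD * 2];   // large enough to exceed the compression threshold
  Arrays.fill(payload, (byte) 'A');
  URI requestUri = URI.create("http://localhost:" + PORT).resolve(uri);
  StreamRequest request = new StreamRequestBuilder(requestUri)
      .setMethod(RestMethod.POST)
      .build(EntityStreams.newEntityStream(new ByteStringWriter(ByteString.copy(payload))));
  FutureCallback<StreamResponse> callback = new FutureCallback<>();
  client.streamRequest(request, callback);
  StreamResponse response = callback.get(30, TimeUnit.SECONDS);
  // The server-side handler (not shown) would verify the Content-Encoding of the request.
  Assert.assertEquals(response.getStatus(), RestStatus.OK);
}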
Use of com.linkedin.r2.filter.compression.ClientStreamCompressionFilter in project rest.li by linkedin.
The class TestClientStreamCompressionFilter, method testRequestCompressionRules.
@Test(dataProvider = "requestData")
public void testRequestCompressionRules(CompressionConfig requestCompressionConfig,
    CompressionOption requestCompressionOverride, boolean headerShouldBePresent, String operation)
    throws CompressionException, URISyntaxException, InterruptedException, ExecutionException, TimeoutException {
  Executor executor = Executors.newCachedThreadPool();
  ClientStreamCompressionFilter clientCompressionFilter = new ClientStreamCompressionFilter(
      StreamEncodingType.GZIP.getHttpName(),
      requestCompressionConfig,
      ACCEPT_COMPRESSIONS,
      new CompressionConfig(Integer.MAX_VALUE),
      Arrays.asList(ClientCompressionHelper.COMPRESS_ALL_RESPONSES_INDICATOR),
      executor);
  // The entity should be compressible for this test.
  int original = 100;
  byte[] entity = new byte[original];
  Arrays.fill(entity, (byte) 'A');
  StreamRequest streamRequest = new StreamRequestBuilder(new URI(URI))
      .setMethod(RestMethod.POST)
      .build(EntityStreams.newEntityStream(new ByteStringWriter(ByteString.copy(entity))));
  int compressed = EncodingType.GZIP.getCompressor().deflate(new ByteArrayInputStream(entity)).length;
  RequestContext context = new RequestContext();
  if (operation != null) {
    context.putLocalAttr(R2Constants.OPERATION, operation);
  }
  context.putLocalAttr(R2Constants.REQUEST_COMPRESSION_OVERRIDE, requestCompressionOverride);
  int entityLength = headerShouldBePresent ? compressed : original;
  HeaderCaptureFilter captureFilter = new HeaderCaptureFilter(HttpConstants.CONTENT_ENCODING, headerShouldBePresent, entityLength);
  clientCompressionFilter.onStreamRequest(streamRequest, context, Collections.<String, String>emptyMap(), captureFilter);
  FutureCallback<ByteString> callback = new FutureCallback<ByteString>();
  FullEntityReader reader = new FullEntityReader(callback);
  captureFilter.getEntityStream().setReader(reader);
  ByteString entityRead = callback.get(10, TimeUnit.SECONDS);
  Assert.assertEquals(entityRead.length(), entityLength);
}
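The "requestData" provider itself is not reproduced here. A rough sketch of the kind of rows it could supply, assuming threshold-based compression decisions and an explicit override option, is shown below; the threshold values, operation strings, and exact combinations are assumptions.

// Hypothetical sketch of the "requestData" provider this test consumes; values are illustrative.
@DataProvider
public Object[][] requestData() {
  return new Object[][] {
      // threshold below the 100-byte entity, no override: request is compressed, header expected
      { new CompressionConfig(10), null, true, null },
      // threshold above the entity size, no override: compression skipped, header absent
      { new CompressionConfig(Integer.MAX_VALUE), null, false, null },
      // explicit override forces compression regardless of threshold
      { new CompressionConfig(Integer.MAX_VALUE), CompressionOption.FORCE_ON, true, "action:doSomething" },
      // explicit override disables compression even though the threshold would allow it
      { new CompressionConfig(10), CompressionOption.FORCE_OFF, false, "action:doSomething" }
  };
}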
Use of com.linkedin.r2.filter.compression.ClientStreamCompressionFilter in project rest.li by linkedin.
The class TestClientStreamCompressionFilter, method testCompressionOperations.
@Test(dataProvider = "operationsData")
public void testCompressionOperations(String compressionConfig, String[] operations, boolean headerShouldBePresent)
    throws URISyntaxException {
  StreamRequest streamRequest = new StreamRequestBuilder(new URI(URI)).build(EntityStreams.emptyStream());
  ClientStreamCompressionFilter clientCompressionFilter = new ClientStreamCompressionFilter(
      StreamEncodingType.IDENTITY.getHttpName(),
      new CompressionConfig(Integer.MAX_VALUE),
      ACCEPT_COMPRESSIONS,
      new CompressionConfig(Integer.MAX_VALUE),
      Arrays.asList(compressionConfig.split(",")),
      Executors.newCachedThreadPool());
  for (String operation : operations) {
    RequestContext context = new RequestContext();
    context.putLocalAttr(R2Constants.OPERATION, operation);
    clientCompressionFilter.onStreamRequest(streamRequest, context, Collections.<String, String>emptyMap(),
        new HeaderCaptureFilter(HttpConstants.ACCEPT_ENCODING, headerShouldBePresent));
  }
}
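For orientation, the "operationsData" provider pairs a comma-separated operations filter with a set of operations and the expected Accept-Encoding outcome. A sketch of plausible rows follows; the filter strings and operation names are assumptions, not taken from the source.

// Hypothetical sketch of the "operationsData" provider; entries are illustrative.
@DataProvider
public Object[][] operationsData() {
  return new Object[][] {
      // wildcard config: every operation should trigger the Accept-Encoding header
      { "*", new String[] { "get", "batch_get", "finder:search" }, true },
      // explicit list: only the listed operations should trigger the header
      { "get, batch_get", new String[] { "get", "batch_get" }, true },
      // operations outside the configured list: header should be absent
      { "get", new String[] { "delete", "update" }, false }
  };
}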
Use of com.linkedin.r2.filter.compression.ClientStreamCompressionFilter in project rest.li by linkedin.
The class HttpClientFactory, method getClient.
/**
* Create a new {@link TransportClient} with the specified properties,
* {@link SSLContext} and {@link SSLParameters}
*
* @param properties map of properties for the {@link TransportClient}
* @param sslContext {@link SSLContext} to be used for requests over SSL/TLS.
* @param sslParameters {@link SSLParameters} to configure secure connections.
* @return an appropriate {@link TransportClient} instance, as specified by the properties.
*/
private TransportClient getClient(Map<String, ? extends Object> properties, SSLContext sslContext, SSLParameters sslParameters) {
  LOG.info("Getting a client with configuration {} and SSLContext {}", properties, sslContext);
  TransportClient client = getRawClient(properties, sslContext, sslParameters);
  List<String> httpRequestServerSupportedEncodings =
      ConfigValueExtractor.buildList(properties.remove(HTTP_REQUEST_CONTENT_ENCODINGS), LIST_SEPARATOR);
  // In the old model, responses were compressed according to the type of method, so clients would send
  // the Accept-Encoding header if the method was in HTTP_RESPONSE_COMPRESSION_OPERATIONS.
  // In the new model, responses are compressed according to their size, so clients send the Accept-Encoding header
  // if the server has enabled response compression by setting HTTP_USE_RESPONSE_COMPRESSION to true.
  // Until all servers migrate to the new model, clients understand both models and send the
  // Accept-Encoding header if either the old or the new criterion is satisfied.
  List<String> httpResponseCompressionOperations =
      ConfigValueExtractor.buildList(properties.remove(HTTP_RESPONSE_COMPRESSION_OPERATIONS), LIST_SEPARATOR);
  String useResponseCompressionProperty = (String) properties.get(HTTP_USE_RESPONSE_COMPRESSION);
  if (useResponseCompressionProperty != null && Boolean.parseBoolean(useResponseCompressionProperty)) {
    httpResponseCompressionOperations.add(ClientCompressionHelper.COMPRESS_ALL_RESPONSES_INDICATOR);
  }
  FilterChain filters = _filters;
  if (_useClientCompression) {
    List<String> responseEncodings = null;
    if (properties.containsKey(HTTP_RESPONSE_CONTENT_ENCODINGS)) {
      responseEncodings = ConfigValueExtractor.buildList(properties.remove(HTTP_RESPONSE_CONTENT_ENCODINGS), LIST_SEPARATOR);
    }
    String httpServiceName = (String) properties.get(HTTP_SERVICE_NAME);
    EncodingType restRequestContentEncoding = getRestRequestContentEncoding(httpRequestServerSupportedEncodings);
    StreamEncodingType streamRequestContentEncoding = getStreamRequestContentEncoding(httpRequestServerSupportedEncodings);
    if (restRequestContentEncoding != EncodingType.IDENTITY || !httpResponseCompressionOperations.isEmpty()) {
      filters = filters.addLastRest(new ClientCompressionFilter(
          restRequestContentEncoding,
          getRestRequestCompressionConfig(httpServiceName, restRequestContentEncoding),
          buildRestAcceptEncodingSchemaNames(responseEncodings),
          _responseCompressionConfigs.get(httpServiceName),
          httpResponseCompressionOperations));
    }
    if (streamRequestContentEncoding != StreamEncodingType.IDENTITY || !httpResponseCompressionOperations.isEmpty()) {
      CompressionConfig compressionConfig = getStreamRequestCompressionConfig(httpServiceName, streamRequestContentEncoding);
      filters = filters.addLast(new ClientStreamCompressionFilter(
          streamRequestContentEncoding,
          compressionConfig,
          buildStreamAcceptEncodingSchemas(responseEncodings),
          _responseCompressionConfigs.get(httpServiceName),
          httpResponseCompressionOperations,
          _compressionExecutor));
    }
  }
  Integer queryPostThreshold = chooseNewOverDefault(getIntValue(properties, HTTP_QUERY_POST_THRESHOLD), Integer.MAX_VALUE);
  ClientQueryTunnelFilter clientQueryTunnelFilter = new ClientQueryTunnelFilter(queryPostThreshold);
  filters = filters.addLastRest(clientQueryTunnelFilter);
  filters = filters.addLast(clientQueryTunnelFilter);
  // Add the disruptor filter to the end of the filter chain to get the most accurate simulation of disruption.
  Integer requestTimeout = chooseNewOverDefault(getIntValue(properties, HTTP_REQUEST_TIMEOUT), DEFAULT_REQUEST_TIMEOUT);
  DisruptFilter disruptFilter = new DisruptFilter(_executor, _eventLoopGroup, requestTimeout);
  filters = filters.addLastRest(disruptFilter);
  filters = filters.addLast(disruptFilter);
  client = new FilterChainClient(client, filters);
  client = new FactoryClient(client);
  synchronized (_mutex) {
    if (!_running) {
      throw new IllegalStateException("Factory is shutting down");
    }
    _clientsOutstanding++;
    return client;
  }
}
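The compression behavior above is driven entirely by the properties map. A minimal sketch of a caller enabling request and response compression might look like the following; the property values ("gzip,snappy-framed", "exampleService", and so on) are illustrative assumptions, and only the property keys are taken from the code above.

// Illustrative sketch: property values are assumptions, keys come from the getClient code above.
HttpClientFactory factory = new HttpClientFactory.Builder().build();
Map<String, String> properties = new HashMap<>();
// Encodings the server is known to accept for request bodies; the filter picks one of these.
properties.put(HttpClientFactory.HTTP_REQUEST_CONTENT_ENCODINGS, "gzip,snappy-framed");
// Encodings this client is willing to accept in responses (advertised via Accept-Encoding).
properties.put(HttpClientFactory.HTTP_RESPONSE_CONTENT_ENCODINGS, "gzip");
// New-model switch: request compressed responses regardless of operation.
properties.put(HttpClientFactory.HTTP_USE_RESPONSE_COMPRESSION, "true");
properties.put(HttpClientFactory.HTTP_SERVICE_NAME, "exampleService");
Client client = new TransportClientAdapter(factory.getClient(properties), true);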
Use of com.linkedin.r2.filter.compression.ClientStreamCompressionFilter in project rest.li by linkedin.
The class TestCompressionEcho, method compressionEchoData.
@DataProvider
public Object[][] compressionEchoData() {
  StreamEncodingType[] encodings = new StreamEncodingType[] {
      StreamEncodingType.GZIP, StreamEncodingType.DEFLATE,
      StreamEncodingType.SNAPPY_FRAMED, StreamEncodingType.BZIP2, StreamEncodingType.IDENTITY };
  Object[][] args = new Object[2 * encodings.length * encodings.length][2];
  int cur = 0;
  // test data large enough to trigger compression
  for (StreamEncodingType requestEncoding : encodings) {
    for (StreamEncodingType acceptEncoding : encodings) {
      StreamFilter clientCompressionFilter = new ClientStreamCompressionFilter(
          requestEncoding,
          new CompressionConfig(THRESHOLD),
          new StreamEncodingType[] { acceptEncoding },
          new CompressionConfig(THRESHOLD),
          Arrays.asList(new String[] { "*" }),
          _executor);
      TransportClientFactory factory = new HttpClientFactory.Builder()
          .setFilterChain(FilterChains.createStreamChain(clientCompressionFilter))
          .build();
      Client client = new TransportClientAdapter(factory.getClient(getClientProperties()), true);
      args[cur][0] = client;
      args[cur][1] = LARGE_BYTES_NUM;
      cur++;
      _clientFactories.add(factory);
      _clients.add(client);
    }
  }
  // test data that won't trigger compression
  for (StreamEncodingType requestEncoding : encodings) {
    for (StreamEncodingType acceptEncoding : encodings) {
      StreamFilter clientCompressionFilter = new ClientStreamCompressionFilter(
          requestEncoding,
          new CompressionConfig(THRESHOLD),
          new StreamEncodingType[] { acceptEncoding },
          new CompressionConfig(THRESHOLD),
          Arrays.asList(new String[] { "*" }),
          _executor);
      TransportClientFactory factory = new HttpClientFactory.Builder()
          .setFilterChain(FilterChains.createStreamChain(clientCompressionFilter))
          .build();
      Client client = new TransportClientAdapter(factory.getClient(getClientProperties()), true);
      args[cur][0] = client;
      args[cur][1] = SMALL_BYTES_NUM;
      cur++;
      _clientFactories.add(factory);
      _clients.add(client);
    }
  }
  return args;
}
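A test consuming this provider would presumably echo a payload of the given size through the server and assert it arrives back intact. The following is a rough sketch under that assumption; the method name, PORT, the "/echo" path, and the payload contents are illustrative, not from the source.

// Rough sketch only: testResponseCompression, PORT, "/echo", and the payload are assumptions.
@Test(dataProvider = "compressionEchoData")
public void testResponseCompression(Client client, long bytes) throws Exception {
  byte[] payload = new byte[(int) bytes];
  Arrays.fill(payload, (byte) 'A');   // compressible content, so the large payload exceeds THRESHOLD
  StreamRequest request = new StreamRequestBuilder(URI.create("http://localhost:" + PORT + "/echo"))
      .setMethod(RestMethod.POST)
      .build(EntityStreams.newEntityStream(new ByteStringWriter(ByteString.copy(payload))));
  FutureCallback<StreamResponse> callback = new FutureCallback<>();
  client.streamRequest(request, callback);
  StreamResponse response = callback.get(60, TimeUnit.SECONDS);
  // Read the echoed entity back and verify the round trip was lossless.
  FutureCallback<ByteString> reader = new FutureCallback<>();
  response.getEntityStream().setReader(new FullEntityReader(reader));
  Assert.assertEquals(reader.get(60, TimeUnit.SECONDS).copyBytes(), payload);
}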