Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpContent in project ambry by LinkedIn.
From the class CopyForcingByteBuf, method setDigestAfterReadTest.
// digestIncorrectUsageTest() helpers.
/**
 * Verifies that {@link NettyRequest#setDigestAlgorithm(String)} fails with an
 * {@link IllegalStateException} once {@link NettyRequest#readInto(AsyncWritableChannel, Callback)}
 * has already been invoked on the request.
 * @throws NoSuchAlgorithmException
 * @throws RestServiceException
 */
private void setDigestAfterReadTest() throws NoSuchAlgorithmException, RestServiceException {
  List<HttpContent> contents = new ArrayList<>();
  generateContent(contents);
  Channel mockChannel = new MockChannel();
  NettyRequest request = createNettyRequest(HttpMethod.POST, "/", null, mockChannel);
  ByteBufferAsyncWritableChannel sink = new ByteBufferAsyncWritableChannel();
  ReadIntoCallback readCallback = new ReadIntoCallback();
  // Start the read first; any subsequent digest configuration must be rejected.
  request.readInto(sink, readCallback);
  try {
    request.setDigestAlgorithm("MD5");
    fail("Setting a digest algorithm should have failed because readInto() has already been called");
  } catch (IllegalStateException e) {
    // expected. Nothing to do.
  }
  sink.close();
  closeRequestAndValidate(request, mockChannel);
}
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpContent in project ambry by LinkedIn.
From the class CopyForcingByteBuf, method setBadAlgorithmTest.
/**
 * Verifies that {@link NettyRequest#setDigestAlgorithm(String)} rejects an algorithm name that is
 * not recognized (i.e. throws {@link NoSuchAlgorithmException}).
 * @throws RestServiceException
 */
private void setBadAlgorithmTest() throws RestServiceException {
  List<HttpContent> contents = new ArrayList<>();
  generateContent(contents);
  Channel mockChannel = new MockChannel();
  NettyRequest request = createNettyRequest(HttpMethod.POST, "/", null, mockChannel);
  try {
    request.setDigestAlgorithm("NonExistentAlgorithm");
    fail("Setting a digest algorithm should have failed because the algorithm isn't valid");
  } catch (NoSuchAlgorithmException e) {
    // expected. Nothing to do.
  }
  closeRequestAndValidate(request, mockChannel);
}
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpContent in project ambry by LinkedIn.
From the class CopyForcingByteBuf, method doBackPressureTest.
/**
 * Does the backpressure test by ensuring that {@link Channel#read()} isn't called when the number of bytes buffered
 * is above the {@link NettyRequest#bufferWatermark}. Also ensures that {@link Channel#read()} is called correctly
 * when the number of buffered bytes falls below the {@link NettyRequest#bufferWatermark}.
 * @param digestAlgorithm the digest algorithm to use. Can be empty or {@code null} if digest checking is not
 *                        required.
 * @param content the complete content.
 * @param httpContents {@code content} in parts and as {@link HttpContent}. Should contain all the data in
 *                     {@code content}.
 * @param numChunksToAddBeforeRead the number of {@link HttpContent} to add before making the
 *                                 {@link NettyRequest#readInto(AsyncWritableChannel, Callback)} call.
 * @param method Http Method
 * @throws Exception
 */
private void doBackPressureTest(String digestAlgorithm, ByteBuffer content, List<HttpContent> httpContents, int numChunksToAddBeforeRead, HttpMethod method) throws Exception {
  if (numChunksToAddBeforeRead < 0 || numChunksToAddBeforeRead > httpContents.size()) {
    throw new IllegalArgumentException("Illegal value of numChunksToAddBeforeRead");
  }
  MockChannel channel = new MockChannel();
  final NettyRequest nettyRequest = createNettyRequest(method, "/", null, channel);
  byte[] wholeDigest = null;
  if (digestAlgorithm != null && !digestAlgorithm.isEmpty()) {
    // Pre-compute the digest of the entire content up front so it can later be compared against
    // the digest that NettyRequest computes incrementally, chunk by chunk.
    MessageDigest digest = MessageDigest.getInstance(digestAlgorithm);
    digest.update(content);
    wholeDigest = digest.digest();
    // digest.update() advanced the buffer's position; rewind so the same data can be re-read below.
    content.rewind();
    nettyRequest.setDigestAlgorithm(digestAlgorithm);
  }
  // Counts Channel.read() invocations observed via the MockChannel callback; this is the signal
  // used to assert whether backpressure suspended or resumed channel reads.
  final AtomicInteger queuedReads = new AtomicInteger(0);
  ByteBufferAsyncWritableChannel writeChannel = new ByteBufferAsyncWritableChannel();
  ReadIntoCallback callback = new ReadIntoCallback();
  channel.setChannelReadCallback(new MockChannel.ChannelReadCallback() {
    @Override
    public void onRead() {
      queuedReads.incrementAndGet();
    }
  });
  int addedCount = 0;           // number of chunks handed to the request so far
  Future<Long> future = null;   // result of readInto(); started lazily per numChunksToAddBeforeRead
  boolean suspended = false;    // true when buffered bytes reached the watermark (reads should stop)
  int bytesToVerify = 0;        // bytes buffered since the last drain, to be verified on drain
  while (addedCount < httpContents.size()) {
    if (suspended) {
      // Over the watermark: NettyRequest must NOT have requested more data from the channel.
      assertEquals("There should have been no reads queued when over buffer watermark", 0, queuedReads.get());
      if (future == null) {
        future = nettyRequest.readInto(writeChannel, callback);
      }
      // Drain the buffered bytes; each chunk consumed should re-trigger exactly one channel read.
      int chunksRead = readAndVerify(bytesToVerify, writeChannel, content);
      assertEquals("Number of reads triggered is not as expected", chunksRead, queuedReads.get());
      // collapse many reads into one
      queuedReads.set(1);
      bytesToVerify = 0;
      suspended = false;
    } else {
      // Under the watermark: exactly one read should be outstanding; consume it before adding more.
      assertEquals("There should have been only one read queued", 1, queuedReads.get());
      queuedReads.set(0);
      if (future == null && addedCount == numChunksToAddBeforeRead) {
        future = nettyRequest.readInto(writeChannel, callback);
      }
      final HttpContent httpContent = httpContents.get(addedCount);
      bytesToVerify += (httpContent.content().readableBytes());
      // Crossing the watermark with this chunk should suspend further channel reads.
      suspended = bytesToVerify >= NettyRequest.bufferWatermark;
      addedCount++;
      nettyRequest.addContent(httpContent);
      // NettyRequest retains each chunk it buffers, so the refCnt goes from 1 to 2.
      assertEquals("Reference count is not as expected", 2, httpContent.refCnt());
    }
  }
  if (future == null) {
    // numChunksToAddBeforeRead == httpContents.size(): the read starts only after all content is in.
    future = nettyRequest.readInto(writeChannel, callback);
  }
  // Drain whatever remained buffered after the last chunk was added.
  readAndVerify(bytesToVerify, writeChannel, content);
  verifyRefCnts(httpContents);
  writeChannel.close();
  callback.awaitCallback();
  if (callback.exception != null) {
    throw callback.exception;
  }
  long futureBytesRead = future.get(1, TimeUnit.SECONDS);
  assertEquals("Total bytes read does not match (callback)", content.limit(), callback.bytesRead);
  assertEquals("Total bytes read does not match (future)", content.limit(), futureBytesRead);
  // check twice to make sure the same digest is returned every time
  for (int i = 0; i < 2; i++) {
    assertArrayEquals("Part by part digest should match digest of whole", wholeDigest, nettyRequest.getDigest());
  }
  closeRequestAndValidate(nettyRequest, channel);
}
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpContent in project ambry by LinkedIn.
From the class CopyForcingByteBuf, method closeTest.
/**
 * Tests that {@link NettyRequest#close()} leaves any added {@link HttpContent} the way it was before it was added.
 * (i.e no reference count changes).
 * @throws RestServiceException
 */
@Test
public void closeTest() throws RestServiceException {
  Channel mockChannel = new MockChannel();
  NettyRequest request = createNettyRequest(HttpMethod.POST, "/", null, mockChannel);
  Queue<HttpContent> addedChunks = new LinkedBlockingQueue<>();
  int chunkCount = 5;
  for (int i = 0; i < chunkCount; i++) {
    // Each chunk wraps 1024 random bytes and starts with a reference count of 1.
    ByteBuffer data = ByteBuffer.wrap(TestUtils.getRandomBytes(1024));
    HttpContent chunk = new DefaultHttpContent(Unpooled.wrappedBuffer(data));
    request.addContent(chunk);
    addedChunks.add(chunk);
  }
  closeRequestAndValidate(request, mockChannel);
  // After close, every chunk must be back at its original reference count.
  HttpContent chunk;
  while ((chunk = addedChunks.poll()) != null) {
    assertEquals("Reference count of http content has changed", 1, chunk.refCnt());
  }
}
Use of org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpContent in project ambry by LinkedIn.
From the class CopyForcingByteBuf, method sizeInHeaderLessThanContentTest.
/**
 * Tests the reaction of NettyRequest when the actual content size is more than the size declared in
 * the headers (i.e. the Content-Length header under-reports the content).
 * @throws Exception
 */
private void sizeInHeaderLessThanContentTest() throws Exception {
  List<HttpContent> chunks = new ArrayList<>();
  ByteBuffer fullContent = generateContent(chunks);
  int lastChunkSize = chunks.get(chunks.size() - 1).content().readableBytes();
  // Declare a length that falls 1 byte short of even the content minus its last chunk.
  long declaredLength = fullContent.limit() - lastChunkSize - 1;
  HttpHeaders headers = new DefaultHttpHeaders();
  headers.set(HttpHeaderNames.CONTENT_LENGTH, declaredLength);
  doHeaderAndContentSizeMismatchTest(headers, chunks);
}
Aggregations