Use of com.linkedin.r2.transport.common.Server in project rest.li by LinkedIn — from the class TestMIMEChainingSinglePart, method testSinglePartDataSource.
// Verifies that a single part mime reader can be used as a data source to the writer.
// To make the test easier to write, we simply chain back to the client in the form of simulating a response.
@Test(dataProvider = "chunkSizes")
public void testSinglePartDataSource(final int chunkSize) throws Exception {
    final List<MultiPartMIMEDataSourceWriter> dataSources = generateInputStreamDataSources(chunkSize, _scheduledExecutorService);
    final MultiPartMIMEWriter writer = new MultiPartMIMEWriter.Builder().appendDataSources(dataSources).build();

    // Mock a StreamRequest that exposes the writer's entity stream and a matching multipart Content-Type header.
    final StreamRequest streamRequest = mock(StreamRequest.class);
    when(streamRequest.getEntityStream()).thenReturn(writer.getEntityStream());
    final String contentTypeHeader = "multipart/mixed; boundary=" + writer.getBoundary();
    when(streamRequest.getHeader(MultiPartMIMEUtils.CONTENT_TYPE_HEADER)).thenReturn(contentTypeHeader);

    // Client side preparation to read the part back on the callback.
    // Note the chunk size will carry over since the client is controlling how much data he gets back
    // based on the chunk size he writes.
    MIMETestUtils.MultiPartMIMEFullReaderCallback clientReceiver = new MIMETestUtils.MultiPartMIMEFullReaderCallback();
    Callback<StreamResponse> callback = generateSuccessChainCallback(clientReceiver);

    // Server side start.
    MultiPartMIMEReader reader = MultiPartMIMEReader.createAndAcquireStream(streamRequest);
    final CountDownLatch latch = new CountDownLatch(1);
    ServerMultiPartMIMEReaderSinglePartSenderCallback serverSender = new ServerMultiPartMIMEReaderSinglePartSenderCallback(latch, callback);
    reader.registerReaderCallback(serverSender);

    // Fail fast on timeout. Previously the boolean result of await() was ignored, so a timeout
    // would fall through to the assertions below and fail with a misleading message about
    // half-populated callback state instead of reporting the timeout itself.
    Assert.assertTrue(latch.await(_testTimeout, TimeUnit.MILLISECONDS),
        "Timed out waiting for the chained MIME exchange to complete");

    // Verify client: it should have received exactly the one part (BODY_A) the server sent out.
    Assert.assertEquals(clientReceiver.getSinglePartMIMEReaderCallbacks().size(), 1);
    Assert.assertEquals(clientReceiver.getSinglePartMIMEReaderCallbacks().get(0).getFinishedData(), BODY_A.getPartData());
    Assert.assertEquals(clientReceiver.getSinglePartMIMEReaderCallbacks().get(0).getHeaders(), BODY_A.getPartHeaders());

    // Verify server: it should have consumed the remaining three parts (BODY_B, BODY_C, BODY_D) itself.
    List<MIMETestUtils.SinglePartMIMEFullReaderCallback> singlePartMIMEReaderCallbacks = serverSender.getSinglePartMIMEReaderCallbacks();
    Assert.assertEquals(singlePartMIMEReaderCallbacks.size(), 3);
    Assert.assertEquals(singlePartMIMEReaderCallbacks.get(0).getFinishedData(), BODY_B.getPartData());
    Assert.assertEquals(singlePartMIMEReaderCallbacks.get(0).getHeaders(), BODY_B.getPartHeaders());
    Assert.assertEquals(singlePartMIMEReaderCallbacks.get(1).getFinishedData(), BODY_C.getPartData());
    Assert.assertEquals(singlePartMIMEReaderCallbacks.get(1).getHeaders(), BODY_C.getPartHeaders());
    Assert.assertEquals(singlePartMIMEReaderCallbacks.get(2).getFinishedData(), BODY_D.getPartData());
    Assert.assertEquals(singlePartMIMEReaderCallbacks.get(2).getHeaders(), BODY_D.getPartHeaders());
}
Use of com.linkedin.r2.transport.common.Server in project rest.li by LinkedIn — from the class TestMIMEChainingAlternate, method testAlternateSinglePartDataSource.
// This test has the server alternate between consuming a part and sending a part as a data source
// to a writer.
// Since we have four parts, the server will consume the 2nd and 4th and send out the 1st and 3rd.
// To make the test easier we will have two callbacks to send to the server to indicate
// the presence of each data source.
// This violates the typical client/server http pattern, but accomplishes the purpose of this test
// and it makes it easier to write.
@Test(dataProvider = "chunkSizes")
public void testAlternateSinglePartDataSource(final int chunkSize) throws Exception {
    final List<MultiPartMIMEDataSourceWriter> dataSources = generateInputStreamDataSources(chunkSize, _scheduledExecutorService);
    final MultiPartMIMEWriter writer = new MultiPartMIMEWriter.Builder().appendDataSources(dataSources).build();

    // Mock a StreamRequest that exposes the writer's entity stream and a matching multipart Content-Type header.
    final StreamRequest streamRequest = mock(StreamRequest.class);
    when(streamRequest.getEntityStream()).thenReturn(writer.getEntityStream());
    final String contentTypeHeader = "multipart/mixed; boundary=" + writer.getBoundary();
    when(streamRequest.getHeader(MultiPartMIMEUtils.CONTENT_TYPE_HEADER)).thenReturn(contentTypeHeader);

    // Client side preparation to read the part back on the callback.
    // We have two callbacks here since we will get two responses.
    // Note the chunk size will carry over since the client is controlling how much data he gets back
    // based on the chunk size he writes.
    MIMETestUtils.MultiPartMIMEFullReaderCallback clientReceiverA = new MIMETestUtils.MultiPartMIMEFullReaderCallback();
    MIMETestUtils.MultiPartMIMEFullReaderCallback clientReceiverB = new MIMETestUtils.MultiPartMIMEFullReaderCallback();
    Callback<StreamResponse> callbackA = generateSuccessChainCallback(clientReceiverA);
    Callback<StreamResponse> callbackB = generateSuccessChainCallback(clientReceiverB);

    // Server side start.
    MultiPartMIMEReader reader = MultiPartMIMEReader.createAndAcquireStream(streamRequest);
    final CountDownLatch latch = new CountDownLatch(1);
    ServerMultiPartMIMEAlternatorCallback serverSender = new ServerMultiPartMIMEAlternatorCallback(latch, callbackA, callbackB);
    reader.registerReaderCallback(serverSender);

    // Fail fast on timeout. Previously the boolean result of await() was ignored, so a timeout
    // would fall through to the assertions below and fail with a misleading message about
    // half-populated callback state instead of reporting the timeout itself.
    Assert.assertTrue(latch.await(_testTimeout, TimeUnit.MILLISECONDS),
        "Timed out waiting for the chained MIME exchange to complete");

    // Verify client: the server sent out the 1st part (BODY_A) and the 3rd part (BODY_C).
    Assert.assertEquals(clientReceiverA.getSinglePartMIMEReaderCallbacks().size(), 1);
    Assert.assertEquals(clientReceiverA.getSinglePartMIMEReaderCallbacks().get(0).getFinishedData(), BODY_A.getPartData());
    Assert.assertEquals(clientReceiverA.getSinglePartMIMEReaderCallbacks().get(0).getHeaders(), BODY_A.getPartHeaders());
    Assert.assertEquals(clientReceiverB.getSinglePartMIMEReaderCallbacks().size(), 1);
    Assert.assertEquals(clientReceiverB.getSinglePartMIMEReaderCallbacks().get(0).getFinishedData(), BODY_C.getPartData());
    Assert.assertEquals(clientReceiverB.getSinglePartMIMEReaderCallbacks().get(0).getHeaders(), BODY_C.getPartHeaders());

    // Verify server: it consumed the 2nd part (BODY_B) and the 4th part (BODY_D) itself.
    Assert.assertEquals(serverSender.getSinglePartMIMEReaderCallbacks().size(), 2);
    Assert.assertEquals(serverSender.getSinglePartMIMEReaderCallbacks().get(0).getFinishedData(), BODY_B.getPartData());
    Assert.assertEquals(serverSender.getSinglePartMIMEReaderCallbacks().get(0).getHeaders(), BODY_B.getPartHeaders());
    Assert.assertEquals(serverSender.getSinglePartMIMEReaderCallbacks().get(1).getFinishedData(), BODY_D.getPartData());
    Assert.assertEquals(serverSender.getSinglePartMIMEReaderCallbacks().get(1).getHeaders(), BODY_D.getPartHeaders());
}
Use of com.linkedin.r2.transport.common.Server in project rest.li by LinkedIn — from the class AbstractAsyncR2Servlet, method service.
/**
 * Asynchronous servlet entry point. Parses the incoming request, starts a servlet
 * AsyncContext, and dispatches to the transport dispatcher; the response is written
 * back on a container thread via ctx.start() when the transport callback fires.
 * A malformed request URI is answered immediately with a 400 and no async cycle is started.
 */
@Override
public void service(final HttpServletRequest req, final HttpServletResponse resp) throws ServletException, IOException {
RequestContext requestContext = ServletHelper.readRequestContext(req);
RestRequest restRequest;
try {
restRequest = readFromServletRequest(req);
} catch (URISyntaxException e) {
// Bad request URI: report 400 to the client and bail out before going async.
writeToServletError(resp, RestStatus.BAD_REQUEST, e.toString());
return;
}
// Enter the servlet async cycle; _timeout bounds how long the container waits for complete().
final AsyncContext ctx = req.startAsync(req, resp);
ctx.setTimeout(_timeout);
ctx.addListener(new AsyncListener() {
@Override
public void onTimeout(AsyncEvent event) throws IOException {
// Container-level async timeout: answer with a 500 and end the async cycle ourselves.
AsyncContext ctx = event.getAsyncContext();
writeToServletError((HttpServletResponse) ctx.getResponse(), RestStatus.INTERNAL_SERVER_ERROR, "Server Timeout");
ctx.complete();
}
@Override
public void onStartAsync(AsyncEvent event) throws IOException {
// Nothing to do here
}
@Override
public void onError(AsyncEvent event) throws IOException {
writeToServletError((HttpServletResponse) event.getSuppliedResponse(), RestStatus.INTERNAL_SERVER_ERROR, "Server Error");
ctx.complete();
}
@Override
public void onComplete(AsyncEvent event) throws IOException {
// If the response-writing Runnable (below) hit an IOException, it stashed it in a request
// attribute; rethrow it here so the container observes the failure on a servlet thread.
Object exception = req.getAttribute(TRANSPORT_CALLBACK_IOEXCEPTION);
if (exception != null)
throw new IOException((IOException) exception);
}
});
TransportCallback<RestResponse> callback = new TransportCallback<RestResponse>() {
@Override
public void onResponse(final TransportResponse<RestResponse> response) {
// TransportCallback is usually invoked by non-servlet threads; hence we cannot assume that it's ok to
// do blocking IO there. As a result, we should use AsyncContext.start() to do blocking IO using the
// container/servlet threads. This still maintains the advantage of Async, meaning servlet thread is not
// blocking-wait when the response is not ready.
ctx.start(new Runnable() {
@Override
public void run() {
try {
writeToServletResponse(response, (HttpServletResponse) ctx.getResponse());
} catch (IOException e) {
// Can't throw from a Runnable: record the failure for onComplete() to rethrow.
req.setAttribute(TRANSPORT_CALLBACK_IOEXCEPTION, e);
} finally {
// Always end the async cycle, whether the write succeeded or not.
ctx.complete();
}
}
});
}
};
getDispatcher().handleRequest(restRequest, requestContext, callback);
}
Use of com.linkedin.r2.transport.common.Server in project rest.li by LinkedIn — from the class AbstractR2Servlet, method service.
/**
 * Synchronous servlet entry point. Parses the incoming request, dispatches it to the
 * transport dispatcher, then blocks the servlet thread on a latch until the transport
 * callback supplies the response or the configured {@code _timeout} elapses.
 * A malformed request URI is answered immediately with a 400.
 *
 * @throws ServletException if the servlet thread is interrupted while waiting for the response
 */
@Override
protected void service(final HttpServletRequest req, final HttpServletResponse resp) throws ServletException, IOException {
    RequestContext requestContext = ServletHelper.readRequestContext(req);
    RestRequest restRequest;
    try {
        restRequest = readFromServletRequest(req);
    } catch (URISyntaxException e) {
        // Bad request URI: report 400 to the client rather than propagating.
        writeToServletError(resp, RestStatus.BAD_REQUEST, e.toString());
        return;
    }

    // The dispatcher completes asynchronously on another thread; the callback publishes the
    // response through 'result' and releases this servlet thread via the latch.
    final AtomicReference<TransportResponse<RestResponse>> result = new AtomicReference<>();
    final CountDownLatch latch = new CountDownLatch(1);
    TransportCallback<RestResponse> callback = new TransportCallback<RestResponse>() {
        @Override
        public void onResponse(TransportResponse<RestResponse> response) {
            result.set(response);
            latch.countDown();
        }
    };

    getDispatcher().handleRequest(restRequest, requestContext, callback);

    try {
        if (latch.await(_timeout, TimeUnit.MILLISECONDS)) {
            writeToServletResponse(result.get(), resp);
        } else {
            writeToServletError(resp, RestStatus.INTERNAL_SERVER_ERROR, "Server Timeout after " + _timeout + "ms.");
        }
    } catch (InterruptedException e) {
        // Fix: restore the interrupt status before translating to ServletException so callers
        // up the stack (and the container) can still observe that the thread was interrupted.
        Thread.currentThread().interrupt();
        throw new ServletException("Interrupted!", e);
    }
}
Use of com.linkedin.r2.transport.common.Server in project rest.li by LinkedIn — from the class SyncIOHandler, method eventLoop.
/**
 * Single-threaded event loop bridging R2's asynchronous streaming callbacks onto the
 * servlet's blocking request/response streams. Pulls events off _eventQueue until
 * shouldContinue() reports both request reading and response writing are finished,
 * or a ForceExit event is observed.
 *
 * @throws TimeoutException if no event arrives before the remaining _timeout budget expires
 * @throws ServletException if the request writer aborted for a reason other than reader cancellation
 */
private void eventLoop() throws ServletException, IOException, InterruptedException, TimeoutException {
final long startTime = System.currentTimeMillis();
byte[] buf = new byte[DEFAULT_DATA_CHUNK_SIZE];
while (shouldContinue() && !_forceExit) {
// _timeout is a total budget for the whole loop, not per event: shrink the poll wait
// by the time already spent.
long timeSpent = System.currentTimeMillis() - startTime;
long maxWaitTime = timeSpent < _timeout ? _timeout - timeSpent : 0;
Event event = _eventQueue.poll(maxWaitTime, TimeUnit.MILLISECONDS);
if (event == null) {
throw new TimeoutException("Timeout after " + _timeout + " milliseconds.");
}
switch(event.getEventType()) {
case ResponseDataAvailable:
{
// A chunk of response data is ready: write it to the servlet output stream
// (blocking is fine on this thread) and request the next chunk.
ByteString data = (ByteString) event.getData();
data.write(_os);
_rh.request(1);
break;
}
case WriteRequestPossible:
{
// The writer can accept data: copy from the servlet input stream until the
// write handle's capacity is exhausted or the request body ends (EOF).
while (_wh.remaining() > 0) {
final int actualLen = _is.read(buf);
if (actualLen < 0) {
_wh.done();
_requestReadFinished = true;
break;
}
_wh.write(ByteString.copy(buf, 0, actualLen));
}
break;
}
case FullResponseReceived:
{
// Entire response has been streamed: close the output stream and mark writing done.
_os.close();
_responseWriteFinished = true;
break;
}
case ResponseDataError:
{
// Response stream failed mid-flight; treat writing as finished so the loop can exit.
// NOTE(review): the error itself appears to be handled elsewhere — this only unblocks the loop.
_os.close();
_responseWriteFinished = true;
break;
}
case WriteRequestAborted:
{
if (event.getData() instanceof AbortedException) {
// reader cancels, we'll drain the stream on behalf of reader
// we don't directly drain it here because we'd like to give other events
// some opportunities to be executed; e.g. return an error response
_eventQueue.add(Event.DrainRequestEvent);
} else {
// TODO: do we want to be smarter and return server error response?
throw new ServletException((Throwable) event.getData());
}
break;
}
case DrainRequest:
{
// Discard up to 10 chunks of the abandoned request body per pass.
for (int i = 0; i < 10; i++) {
final int actualLen = _is.read(buf);
if (actualLen < 0) {
_requestReadFinished = true;
break;
}
}
if (!_requestReadFinished) {
// add self back to event queue and give others a chance to run
_eventQueue.add(Event.DrainRequestEvent);
}
break;
}
case ForceExit:
{
// Cooperative shutdown: the loop condition observes this flag on the next iteration.
_forceExit = true;
break;
}
default:
throw new IllegalStateException("Unknown event type:" + event.getEventType());
}
}
}
Aggregations