use of org.eclipse.jetty.util.Callback in project jetty.project by eclipse.
the class HttpClientTest method testSmallAsyncContent.
@Test
public void testSmallAsyncContent() throws Exception {
start(new AbstractHandler() {
@Override
public void handle(String target, org.eclipse.jetty.server.Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException {
ServletOutputStream output = response.getOutputStream();
output.write(65);
output.flush();
output.write(66);
}
});
final AtomicInteger contentCount = new AtomicInteger();
final AtomicReference<Callback> callbackRef = new AtomicReference<>();
final AtomicReference<CountDownLatch> contentLatch = new AtomicReference<>(new CountDownLatch(1));
final CountDownLatch completeLatch = new CountDownLatch(1);
client.newRequest("localhost", connector.getLocalPort()).scheme(scheme).onResponseContentAsync(new Response.AsyncContentListener() {
@Override
public void onContent(Response response, ByteBuffer content, Callback callback) {
contentCount.incrementAndGet();
callbackRef.set(callback);
contentLatch.get().countDown();
}
}).send(new Response.CompleteListener() {
@Override
public void onComplete(Result result) {
completeLatch.countDown();
}
});
Assert.assertTrue(contentLatch.get().await(5, TimeUnit.SECONDS));
Callback callback = callbackRef.get();
// Wait a while to be sure that the parsing does not proceed.
TimeUnit.MILLISECONDS.sleep(1000);
Assert.assertEquals(1, contentCount.get());
// Succeed the content callback to proceed with parsing.
callbackRef.set(null);
contentLatch.set(new CountDownLatch(1));
callback.succeeded();
Assert.assertTrue(contentLatch.get().await(5, TimeUnit.SECONDS));
callback = callbackRef.get();
// Wait a while to be sure that the parsing does not proceed.
TimeUnit.MILLISECONDS.sleep(1000);
Assert.assertEquals(2, contentCount.get());
Assert.assertEquals(1, completeLatch.getCount());
// Succeed the content callback to proceed with parsing.
callbackRef.set(null);
contentLatch.set(new CountDownLatch(1));
callback.succeeded();
Assert.assertTrue(completeLatch.await(5, TimeUnit.SECONDS));
Assert.assertEquals(2, contentCount.get());
}
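The Callback handed to the AsyncContentListener is what throttles the response: the client parses the next chunk only after callback.succeeded() is called, which is exactly what the assertions above verify. A minimal sketch of the same idea with lambdas, assuming an application executor and a hypothetical process(...) method (imports omitted, as in the listings above):
// Defer content handling to another thread; succeeding the Callback
// is what allows the client to parse the next chunk.
client.newRequest("localhost", connector.getLocalPort())
    .scheme(scheme)
    .onResponseContentAsync((response, content, callback) -> executor.execute(() -> {
        process(content);     // hypothetical application logic
        callback.succeeded(); // demand the next chunk only after processing
    }))
    .send(result -> System.out.println("complete: " + result.isSucceeded()));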
use of org.eclipse.jetty.util.Callback in project jetty.project by eclipse.
the class ResourceService method sendData.
/* ------------------------------------------------------------ */
protected boolean sendData(HttpServletRequest request, HttpServletResponse response, boolean include, final HttpContent content, Enumeration<String> reqRanges) throws IOException {
final long content_length = content.getContentLengthValue();
// Get the output stream (or writer)
OutputStream out = null;
boolean written;
try {
out = response.getOutputStream();
// has something already been written to the response?
written = out instanceof HttpOutput ? ((HttpOutput) out).isWritten() : true;
} catch (IllegalStateException e) {
out = new WriterOutputStream(response.getWriter());
// there may be data in the writer's buffer, so assume written
written = true;
}
if (LOG.isDebugEnabled())
LOG.debug(String.format("sendData content=%s out=%s async=%b", content, out, request.isAsyncSupported()));
if (reqRanges == null || !reqRanges.hasMoreElements() || content_length < 0) {
// if there were no ranges, send entire entity
if (include) {
// write without headers
content.getResource().writeTo(out, 0, content_length);
} else if (written || !(out instanceof HttpOutput)) {
// we can't do a bypass write because of wrapping, so write normally
putHeaders(response, content, written ? -1 : 0);
ByteBuffer buffer = content.getIndirectBuffer();
if (buffer != null)
BufferUtil.writeTo(buffer, out);
else
content.getResource().writeTo(out, 0, content_length);
} else {
// do a bypass write: first write the headers
putHeaders(response, content, 0);
// write the content asynchronously if supported
if (request.isAsyncSupported() && content.getContentLengthValue() > response.getBufferSize()) {
final AsyncContext context = request.startAsync();
context.setTimeout(0);
((HttpOutput) out).sendContent(content, new Callback() {
@Override
public void succeeded() {
context.complete();
content.release();
}
@Override
public void failed(Throwable x) {
if (x instanceof IOException)
LOG.debug(x);
else
LOG.warn(x);
context.complete();
content.release();
}
@Override
public String toString() {
return String.format("ResourceService@%x$CB", ResourceService.this.hashCode());
}
});
return false;
}
// otherwise write content blocking
((HttpOutput) out).sendContent(content);
}
} else {
// Parse the satisfiable ranges
List<InclusiveByteRange> ranges = InclusiveByteRange.satisfiableRanges(reqRanges, content_length);
// if there are no satisfiable ranges, send 416 response
if (ranges == null || ranges.size() == 0) {
putHeaders(response, content, 0);
response.setStatus(HttpServletResponse.SC_REQUESTED_RANGE_NOT_SATISFIABLE);
response.setHeader(HttpHeader.CONTENT_RANGE.asString(), InclusiveByteRange.to416HeaderRangeString(content_length));
content.getResource().writeTo(out, 0, content_length);
return true;
}
// if there is only a single valid range (must be satisfiable since we're here now),
// send that range with a 206 response
if (ranges.size() == 1) {
InclusiveByteRange singleSatisfiableRange = ranges.get(0);
long singleLength = singleSatisfiableRange.getSize(content_length);
putHeaders(response, content, singleLength);
response.setStatus(HttpServletResponse.SC_PARTIAL_CONTENT);
if (!response.containsHeader(HttpHeader.DATE.asString()))
response.addDateHeader(HttpHeader.DATE.asString(), System.currentTimeMillis());
response.setHeader(HttpHeader.CONTENT_RANGE.asString(), singleSatisfiableRange.toHeaderRangeString(content_length));
content.getResource().writeTo(out, singleSatisfiableRange.getFirst(content_length), singleLength);
return true;
}
// multiple non-overlapping valid ranges cause a multipart
// 206 response which does not require an overall
// content-length header
putHeaders(response, content, -1);
String mimetype = (content == null ? null : content.getContentTypeValue());
if (mimetype == null)
LOG.warn("Unknown mimetype for " + request.getRequestURI());
MultiPartOutputStream multi = new MultiPartOutputStream(out);
response.setStatus(HttpServletResponse.SC_PARTIAL_CONTENT);
if (!response.containsHeader(HttpHeader.DATE.asString()))
response.addDateHeader(HttpHeader.DATE.asString(), System.currentTimeMillis());
// If the request has a "Request-Range" header then we need to
// send an old style multipart/x-byteranges Content-Type. This
// keeps Netscape and Acrobat happy. This is what Apache does.
String ctp;
if (request.getHeader(HttpHeader.REQUEST_RANGE.asString()) != null)
ctp = "multipart/x-byteranges; boundary=";
else
ctp = "multipart/byteranges; boundary=";
response.setContentType(ctp + multi.getBoundary());
InputStream in = content.getResource().getInputStream();
long pos = 0;
// calculate the content-length
int length = 0;
String[] header = new String[ranges.size()];
for (int i = 0; i < ranges.size(); i++) {
InclusiveByteRange ibr = ranges.get(i);
header[i] = ibr.toHeaderRangeString(content_length);
length += ((i > 0) ? 2 : 0) + 2 + multi.getBoundary().length() + 2 + (mimetype == null ? 0 : HttpHeader.CONTENT_TYPE.asString().length() + 2 + mimetype.length()) + 2 + HttpHeader.CONTENT_RANGE.asString().length() + 2 + header[i].length() + 2 + 2 + (ibr.getLast(content_length) - ibr.getFirst(content_length)) + 1;
}
length += 2 + 2 + multi.getBoundary().length() + 2 + 2;
response.setContentLength(length);
for (int i = 0; i < ranges.size(); i++) {
InclusiveByteRange ibr = ranges.get(i);
multi.startPart(mimetype, new String[] { HttpHeader.CONTENT_RANGE + ": " + header[i] });
long start = ibr.getFirst(content_length);
long size = ibr.getSize(content_length);
if (in != null) {
// Handle non cached resource
if (start < pos) {
in.close();
in = content.getResource().getInputStream();
pos = 0;
}
if (pos < start) {
in.skip(start - pos);
pos = start;
}
IO.copy(in, multi, size);
pos += size;
} else
// Handle cached resource
content.getResource().writeTo(multi, start, size);
}
if (in != null)
in.close();
multi.close();
}
return true;
}
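The asynchronous branch above is Jetty's general pattern for writing large content without blocking: start async, hand the content and a Callback to HttpOutput.sendContent, and complete the AsyncContext from the callback. A stripped-down sketch of the same pattern in an ordinary servlet (class name and payload are illustrative, imports omitted):
public class AsyncSendServlet extends HttpServlet {
    @Override
    protected void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
        AsyncContext context = request.startAsync();
        context.setTimeout(0);
        // Under Jetty the ServletOutputStream is an HttpOutput, which also
        // accepts a ByteBuffer plus a Callback.
        HttpOutput out = (HttpOutput) response.getOutputStream();
        ByteBuffer buffer = ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));
        out.sendContent(buffer, new Callback() {
            @Override
            public void succeeded() {
                context.complete();
            }

            @Override
            public void failed(Throwable x) {
                context.complete();
            }
        });
    }
}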
use of org.eclipse.jetty.util.Callback in project jetty.project by eclipse.
the class FlowControlStalledTest method testSessionStalledIsInvokedOnlyOnce.
@Test
public void testSessionStalledIsInvokedOnlyOnce() throws Exception {
AtomicReference<CountDownLatch> stallLatch = new AtomicReference<>(new CountDownLatch(1));
CountDownLatch unstallLatch = new CountDownLatch(1);
start(() -> new BufferingFlowControlStrategy(0.5f) {
@Override
public void onSessionStalled(ISession session) {
super.onSessionStalled(session);
stallLatch.get().countDown();
}
@Override
protected void onSessionUnstalled(ISession session) {
super.onSessionUnstalled(session);
unstallLatch.countDown();
}
}, new ServerSessionListener.Adapter() {
@Override
public Stream.Listener onNewStream(Stream stream, HeadersFrame frame) {
MetaData.Request request = (MetaData.Request) frame.getMetaData();
MetaData.Response response = new MetaData.Response(HttpVersion.HTTP_2, HttpStatus.OK_200, new HttpFields());
if (request.getURIString().endsWith("/stall")) {
stream.headers(new HeadersFrame(stream.getId(), response, null, false), new Callback() {
@Override
public void succeeded() {
// Send a large chunk of data so the session gets stalled.
ByteBuffer data = ByteBuffer.allocate(FlowControlStrategy.DEFAULT_WINDOW_SIZE + 1);
stream.data(new DataFrame(stream.getId(), data, true), NOOP);
}
});
} else {
stream.headers(new HeadersFrame(stream.getId(), response, null, true), Callback.NOOP);
}
return null;
}
});
// Use a large stream window so that only the session gets stalled.
client.setInitialStreamRecvWindow(5 * FlowControlStrategy.DEFAULT_WINDOW_SIZE);
Session session = newClient(new Session.Listener.Adapter() {
@Override
public Map<Integer, Integer> onPreface(Session session) {
Map<Integer, Integer> settings = new HashMap<>();
settings.put(SettingsFrame.INITIAL_WINDOW_SIZE, client.getInitialStreamRecvWindow());
return settings;
}
});
CountDownLatch latch = new CountDownLatch(1);
Queue<Callback> callbacks = new ArrayDeque<>();
MetaData.Request request = newRequest("GET", "/stall", new HttpFields());
session.newStream(new HeadersFrame(request, null, true), new Promise.Adapter<>(), new Stream.Listener.Adapter() {
@Override
public void onData(Stream stream, DataFrame frame, Callback callback) {
callbacks.offer(callback);
if (frame.isEndStream())
latch.countDown();
}
});
Assert.assertTrue(stallLatch.get().await(5, TimeUnit.SECONDS));
// The session is now stalled; check that writing a second stream
// does not result in the session being notified again of being stalled.
stallLatch.set(new CountDownLatch(1));
request = newRequest("GET", "/", new HttpFields());
session.newStream(new HeadersFrame(request, null, true), new Promise.Adapter<>(), new Stream.Listener.Adapter());
Assert.assertFalse(stallLatch.get().await(1, TimeUnit.SECONDS));
// Consume all data.
while (!latch.await(10, TimeUnit.MILLISECONDS)) {
Callback callback = callbacks.poll();
if (callback != null)
callback.succeeded();
}
// Make sure the unstall callback is invoked.
Assert.assertTrue(unstallLatch.await(5, TimeUnit.SECONDS));
}
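What makes this test work is that an un-succeeded Callback in onData withholds the flow-control window update, so the sender stalls once its send window is exhausted; draining the callback queue later releases the credit again. A compact sketch of that back-pressure technique on its own (the pending queue and the point at which it is drained are illustrative):
// Defer DATA callbacks; succeeding them later releases flow-control credit.
Queue<Callback> pending = new ConcurrentLinkedQueue<>();
Stream.Listener listener = new Stream.Listener.Adapter() {
    @Override
    public void onData(Stream stream, DataFrame frame, Callback callback) {
        pending.offer(callback); // not succeeded yet: no window update is sent
    }
};
// Later, when the application can accept more data:
Callback callback;
while ((callback = pending.poll()) != null)
    callback.succeeded();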
use of org.eclipse.jetty.util.Callback in project jetty.project by eclipse.
the class FlowControlStalledTest method testStreamStalledIsInvokedOnlyOnce.
@Test
public void testStreamStalledIsInvokedOnlyOnce() throws Exception {
AtomicReference<CountDownLatch> stallLatch = new AtomicReference<>(new CountDownLatch(1));
CountDownLatch unstallLatch = new CountDownLatch(1);
start(() -> new BufferingFlowControlStrategy(0.5f) {
@Override
public void onStreamStalled(IStream stream) {
super.onStreamStalled(stream);
stallLatch.get().countDown();
}
@Override
protected void onStreamUnstalled(IStream stream) {
super.onStreamUnstalled(stream);
unstallLatch.countDown();
}
}, new ServerSessionListener.Adapter() {
@Override
public Stream.Listener onNewStream(Stream stream, HeadersFrame frame) {
MetaData.Request request = (MetaData.Request) frame.getMetaData();
MetaData.Response response = new MetaData.Response(HttpVersion.HTTP_2, HttpStatus.OK_200, new HttpFields());
if (request.getURIString().endsWith("/stall")) {
stream.headers(new HeadersFrame(stream.getId(), response, null, false), new Callback() {
@Override
public void succeeded() {
// Send a large chunk of data so the stream gets stalled.
ByteBuffer data = ByteBuffer.allocate(FlowControlStrategy.DEFAULT_WINDOW_SIZE + 1);
stream.data(new DataFrame(stream.getId(), data, true), NOOP);
}
});
} else {
stream.headers(new HeadersFrame(stream.getId(), response, null, true), Callback.NOOP);
}
return null;
}
});
// Use a large session window so that only the stream gets stalled.
client.setInitialSessionRecvWindow(5 * FlowControlStrategy.DEFAULT_WINDOW_SIZE);
Session client = newClient(new Session.Listener.Adapter());
CountDownLatch latch = new CountDownLatch(1);
Queue<Callback> callbacks = new ArrayDeque<>();
MetaData.Request request = newRequest("GET", "/stall", new HttpFields());
client.newStream(new HeadersFrame(request, null, true), new Promise.Adapter<>(), new Stream.Listener.Adapter() {
@Override
public void onData(Stream stream, DataFrame frame, Callback callback) {
callbacks.offer(callback);
if (frame.isEndStream())
latch.countDown();
}
});
Assert.assertTrue(stallLatch.get().await(5, TimeUnit.SECONDS));
// The first stream is now stalled; check that writing a second stream
// does not result in the first stream being notified again of being stalled.
stallLatch.set(new CountDownLatch(1));
request = newRequest("GET", "/", new HttpFields());
client.newStream(new HeadersFrame(request, null, true), new Promise.Adapter<>(), new Stream.Listener.Adapter());
Assert.assertFalse(stallLatch.get().await(1, TimeUnit.SECONDS));
// Consume all data.
while (!latch.await(10, TimeUnit.MILLISECONDS)) {
Callback callback = callbacks.poll();
if (callback != null)
callback.succeeded();
}
// Make sure the unstall callback is invoked.
Assert.assertTrue(unstallLatch.await(5, TimeUnit.SECONDS));
}
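This stream-level test is the mirror image of the session-level one: only the choice of which receive window is enlarged changes, so that the other window becomes the bottleneck. A minimal sketch of configuring both windows on an HTTP2Client (values are arbitrary):
HTTP2Client http2Client = new HTTP2Client();
// Connection-wide (session) receive window.
http2Client.setInitialSessionRecvWindow(5 * FlowControlStrategy.DEFAULT_WINDOW_SIZE);
// Per-stream receive window.
http2Client.setInitialStreamRecvWindow(FlowControlStrategy.DEFAULT_WINDOW_SIZE);
http2Client.start();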
use of org.eclipse.jetty.util.Callback in project jetty.project by eclipse.
the class FlowControlStrategyTest method testFlowControlWithConcurrentSettings.
@Test
public void testFlowControlWithConcurrentSettings() throws Exception {
// Initial window is 64 KiB. We allow the client to send 1024 B
// then we change the window to 512 B. At this point, the client
// must stop sending data (although the initial window allows it).
final int size = 512;
// We expect 3 data frames: the first of 1024 bytes, then 2 of 512 bytes
// each, sent after the flow control window has been reduced.
final CountDownLatch dataLatch = new CountDownLatch(3);
final AtomicReference<Callback> callbackRef = new AtomicReference<>();
start(new ServerSessionListener.Adapter() {
@Override
public Stream.Listener onNewStream(Stream stream, HeadersFrame requestFrame) {
HttpFields fields = new HttpFields();
MetaData.Response response = new MetaData.Response(HttpVersion.HTTP_2, 200, fields);
HeadersFrame responseFrame = new HeadersFrame(stream.getId(), response, null, true);
stream.headers(responseFrame, Callback.NOOP);
return new Stream.Listener.Adapter() {
private final AtomicInteger dataFrames = new AtomicInteger();
@Override
public void onData(Stream stream, DataFrame frame, Callback callback) {
dataLatch.countDown();
int dataFrameCount = dataFrames.incrementAndGet();
if (dataFrameCount == 1) {
callbackRef.set(callback);
Map<Integer, Integer> settings = new HashMap<>();
settings.put(SettingsFrame.INITIAL_WINDOW_SIZE, size);
stream.getSession().settings(new SettingsFrame(settings, false), Callback.NOOP);
// Do not succeed the callback here.
} else if (dataFrameCount > 1) {
// Consume the data.
callback.succeeded();
}
}
};
}
});
// Two SETTINGS frames, the initial one and the one we send from the server.
final CountDownLatch settingsLatch = new CountDownLatch(2);
Session session = newClient(new Session.Listener.Adapter() {
@Override
public void onSettings(Session session, SettingsFrame frame) {
settingsLatch.countDown();
}
});
MetaData.Request request = newRequest("POST", new HttpFields());
FuturePromise<Stream> promise = new FuturePromise<>();
session.newStream(new HeadersFrame(request, null, false), promise, new Stream.Listener.Adapter());
Stream stream = promise.get(5, TimeUnit.SECONDS);
// Send first chunk that exceeds the window.
Callback.Completable completable = new Callback.Completable();
stream.data(new DataFrame(stream.getId(), ByteBuffer.allocate(size * 2), false), completable);
settingsLatch.await(5, TimeUnit.SECONDS);
completable.thenRun(() -> {
// Send the second chunk of data, must not arrive since we're flow control stalled on the client.
stream.data(new DataFrame(stream.getId(), ByteBuffer.allocate(size * 2), true), Callback.NOOP);
});
Assert.assertFalse(dataLatch.await(1, TimeUnit.SECONDS));
// Consume the data that arrived at the server; this will resume flow control on the client.
callbackRef.get().succeeded();
Assert.assertTrue(dataLatch.await(5, TimeUnit.SECONDS));
}
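Callback.Completable, used above, is a CompletableFuture that also implements Callback: succeeded() completes it normally and failed(Throwable) completes it exceptionally, which is why thenRun can be used to send the second DATA frame only after the first write has succeeded. A minimal sketch of chaining two writes with it (the stream is assumed to exist already):
Callback.Completable first = new Callback.Completable();
stream.data(new DataFrame(stream.getId(), ByteBuffer.allocate(1024), false), first);
// The second frame is written only after the first write has succeeded.
first.thenRun(() ->
    stream.data(new DataFrame(stream.getId(), ByteBuffer.allocate(1024), true), Callback.NOOP));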