Use of co.cask.http.ChunkResponder in project cdap by caskdata.
Example: class StreamFetchHandler, method fetch.
/**
* Handler for the HTTP API {@code /streams/[stream_name]/events?start=[start_ts]&end=[end_ts]&limit=[event_limit]}
* <p>
* Responds with:
* <ul>
* <li>404 if stream does not exist</li>
* <li>204 if no event in the given start/end time range exists</li>
* <li>200 if there are one or more events</li>
* </ul>
* </p>
* <p>
* Response body is a JSON array of StreamEvent objects.
* </p>
*
* @see StreamEventTypeAdapter StreamEventTypeAdapter for the format of the StreamEvent object
*/
@GET
@Path("/{stream}/events")
public void fetch(HttpRequest request, final HttpResponder responder,
                  @PathParam("namespace-id") String namespaceId, @PathParam("stream") String stream,
                  @QueryParam("start") @DefaultValue("0") String start,
                  @QueryParam("end") @DefaultValue("9223372036854775807") String end,
                  @QueryParam("limit") @DefaultValue("2147483647") final int limitEvents) throws Exception {
  long startTime = TimeMathParser.parseTime(start, TimeUnit.MILLISECONDS);
  long endTime = TimeMathParser.parseTime(end, TimeUnit.MILLISECONDS);
  StreamId streamId = new StreamId(namespaceId, stream);
  if (!verifyGetEventsRequest(streamId, startTime, endTime, limitEvents, responder)) {
    return;
  }
  // Make sure the user has READ permission on the stream, since getConfig doesn't perform that check.
  authorizationEnforcer.enforce(streamId, authenticationContext.getPrincipal(), Action.READ);
  final StreamConfig streamConfig = streamAdmin.getConfig(streamId);
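  // Clamp the requested range to [now - TTL, now]: events older than the stream's TTL are no longer retained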
  long now = System.currentTimeMillis();
  startTime = Math.max(startTime, now - streamConfig.getTTL());
  endTime = Math.min(endTime, now);
  final long streamStartTime = startTime;
  final long streamEndTime = endTime;
  impersonator.doAs(streamId, new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      int limit = limitEvents;
      // Create the stream event reader
      try (FileReader<StreamEventOffset, Iterable<StreamFileOffset>> reader =
             createReader(streamConfig, streamStartTime)) {
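        // Only accept events whose timestamp falls within the requested time range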
        TimeRangeReadFilter readFilter = new TimeRangeReadFilter(streamStartTime, streamEndTime);
        List<StreamEvent> events = Lists.newArrayListWithCapacity(100);
        // Reads the first batch of events from the stream.
        int eventsRead = readEvents(reader, events, limit, readFilter);
        // If no events were read, return 204 No Content
        if (eventsRead <= 0) {
          responder.sendStatus(HttpResponseStatus.NO_CONTENT);
          return null;
        }
        // Send a chunked response, since we don't want to buffer all events in memory
        // just to determine the Content-Length
        ChunkResponder chunkResponder = responder.sendChunkStart(
          HttpResponseStatus.OK,
          new DefaultHttpHeaders().set(HttpHeaderNames.CONTENT_TYPE, "application/json; charset=utf-8"));
        ByteBuf buffer = Unpooled.buffer();
        JsonWriter jsonWriter = new JsonWriter(
          new OutputStreamWriter(new ByteBufOutputStream(buffer), StandardCharsets.UTF_8));
        // The response body is a JSON array of stream events
        jsonWriter.beginArray();
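        // Stream events in batches until the limit is reached or the stream is exhausted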
        while (limit > 0 && eventsRead > 0) {
          limit -= eventsRead;
          for (StreamEvent event : events) {
            GSON.toJson(event, StreamEvent.class, jsonWriter);
            jsonWriter.flush();
            // If the chunk size limit is exceeded, send a new chunk.
            if (buffer.readableBytes() >= CHUNK_SIZE) {
              // If the connection is closed, sendChunk will throw an IOException.
              // No need to handle the exception here, as it will just be propagated back
              // to the netty-http library, which will handle it.
              // The buffer must be copied because it gets reused, and sending a chunk is an async operation.
              chunkResponder.sendChunk(buffer.copy());
              buffer.clear();
            }
          }
          events.clear();
          if (limit > 0) {
            eventsRead = readEvents(reader, events, limit, readFilter);
          }
        }
        jsonWriter.endArray();
        jsonWriter.close();
        // Send the final chunk if it still has data
        if (buffer.isReadable()) {
          // No need to copy the last chunk, since the buffer will not be reused
          chunkResponder.sendChunk(buffer);
        }
        Closeables.closeQuietly(chunkResponder);
      }
      return null;
    }
  });
}
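
For illustration, here is a minimal sketch of a client for this endpoint, using only the JDK. The host, port, namespace, and stream name are placeholder assumptions (not taken from the snippet), and "end=now" assumes the relative time expressions that TimeMathParser accepts:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class StreamEventsClient {
  public static void main(String[] args) throws Exception {
    // Placeholder host, port, namespace, and stream name; adjust for a real CDAP instance.
    URL url = new URL("http://localhost:11015/v3/namespaces/default/streams/myStream/events"
                        + "?start=0&end=now&limit=100");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try {
      int code = conn.getResponseCode();
      if (code == 204) {
        // 204: no events in the requested time range
        System.out.println("No events in the requested time range");
      } else if (code == 200) {
        // 200: body is a JSON array of stream events, streamed back in chunks
        try (BufferedReader reader = new BufferedReader(
               new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
          String line;
          while ((line = reader.readLine()) != null) {
            System.out.println(line);
          }
        }
      } else {
        // e.g. 404 if the stream does not exist
        System.out.println("Unexpected response: " + code);
      }
    } finally {
      conn.disconnect();
    }
  }
}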
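The readEvents helper called above is not part of this snippet. As a rough sketch of what such a helper might look like (assuming the FileReader.read overload that takes a ReadFilter, as declared in CDAP's co.cask.cdap.data.file.FileReader interface), it reads a bounded batch of events without blocking and reports how many were read:

// Hypothetical sketch; the real readEvents in StreamFetchHandler is not shown in this snippet.
private int readEvents(FileReader<StreamEventOffset, Iterable<StreamFileOffset>> reader,
                       List<StreamEvent> events, int limit, ReadFilter readFilter) throws Exception {
  // Read at most 100 events per batch (matching the list's initial capacity above),
  // without waiting for more data, applying the time-range filter while reading
  return reader.read(events, Math.min(limit, 100), 0, TimeUnit.SECONDS, readFilter);
}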