Use of javax.ws.rs.core.StreamingOutput in project druid by druid-io.
The class QueryResource, method doPost.
@POST
@Produces({MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE})
@Consumes({MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE, APPLICATION_SMILE})
public Response doPost(
    final InputStream in,
    @QueryParam("pretty") final String pretty,
    // used to get the request content type, Accept header, remote address and auth-related headers
    @Context final HttpServletRequest req
) throws IOException
{
  final QueryLifecycle queryLifecycle = queryLifecycleFactory.factorize();
  Query<?> query = null;
  final ResourceIOReaderWriter ioReaderWriter = createResourceIOReaderWriter(req, pretty != null);
  final String currThreadName = Thread.currentThread().getName();
  try {
    queryLifecycle.initialize(readQuery(req, in, ioReaderWriter));
    query = queryLifecycle.getQuery();
    final String queryId = query.getId();
    final String queryThreadName = StringUtils.format(
        "%s[%s_%s_%s]",
        currThreadName,
        query.getType(),
        query.getDataSource().getTableNames(),
        queryId
    );
    Thread.currentThread().setName(queryThreadName);
    if (log.isDebugEnabled()) {
      log.debug("Got query [%s]", query);
    }
    final Access authResult = queryLifecycle.authorize(req);
    if (!authResult.isAllowed()) {
      throw new ForbiddenException(authResult.toString());
    }
    final QueryLifecycle.QueryResponse queryResponse = queryLifecycle.execute();
    final Sequence<?> results = queryResponse.getResults();
    final ResponseContext responseContext = queryResponse.getResponseContext();
    final String prevEtag = getPreviousEtag(req);
    if (prevEtag != null && prevEtag.equals(responseContext.getEntityTag())) {
      queryLifecycle.emitLogsAndMetrics(null, req.getRemoteAddr(), -1);
      successfulQueryCount.incrementAndGet();
      return Response.notModified().build();
    }
    final Yielder<?> yielder = Yielders.each(results);
    try {
      boolean shouldFinalize = QueryContexts.isFinalize(query, true);
      boolean serializeDateTimeAsLong = QueryContexts.isSerializeDateTimeAsLong(query, false)
          || (!shouldFinalize && QueryContexts.isSerializeDateTimeAsLongInner(query, false));
      final ObjectWriter jsonWriter = ioReaderWriter.getResponseWriter().newOutputWriter(
          queryLifecycle.getToolChest(),
          queryLifecycle.getQuery(),
          serializeDateTimeAsLong
      );
      Response.ResponseBuilder responseBuilder = Response.ok(
          new StreamingOutput() {
            @Override
            public void write(OutputStream outputStream) throws WebApplicationException
            {
              Exception e = null;
              CountingOutputStream os = new CountingOutputStream(outputStream);
              try {
                // the json serializer will always close the yielder
                jsonWriter.writeValue(os, yielder);
                // Some types of OutputStream suppress flush errors in the .close() method.
                os.flush();
                os.close();
              }
              catch (Exception ex) {
                e = ex;
                log.noStackTrace().error(ex, "Unable to send query response.");
                throw new RuntimeException(ex);
              }
              finally {
                Thread.currentThread().setName(currThreadName);
                queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), os.getCount());
                if (e == null) {
                  successfulQueryCount.incrementAndGet();
                } else {
                  failedQueryCount.incrementAndGet();
                }
              }
            }
          },
          ioReaderWriter.getResponseWriter().getResponseType()
      ).header("X-Druid-Query-Id", queryId);
      transferEntityTag(responseContext, responseBuilder);
      DirectDruidClient.removeMagicResponseContextFields(responseContext);
      // Limit the response-context header, see https://github.com/apache/druid/issues/2331
      // Note that Response.ResponseBuilder.header(String key, Object value).build() calls value.toString()
      // and encodes the string using ASCII, so 1 char = 1 byte
      final ResponseContext.SerializationResult serializationResult =
          responseContext.serializeWith(jsonMapper, responseContextConfig.getMaxResponseContextHeaderSize());
      if (serializationResult.isTruncated()) {
        final String logToPrint = StringUtils.format(
            "Response Context truncated for id [%s]. Full context is [%s].",
            queryId,
            serializationResult.getFullResult()
        );
        if (responseContextConfig.shouldFailOnTruncatedResponseContext()) {
          log.error(logToPrint);
          throw new QueryInterruptedException(
              new TruncatedResponseContextException(
                  "Serialized response context exceeds the max size[%s]",
                  responseContextConfig.getMaxResponseContextHeaderSize()
              ),
              selfNode.getHostAndPortToUse()
          );
        } else {
          log.warn(logToPrint);
        }
      }
      return responseBuilder.header(HEADER_RESPONSE_CONTEXT, serializationResult.getResult()).build();
    }
    catch (QueryException e) {
      // make sure to close the yielder if anything happened before starting to serialize the response
      yielder.close();
      throw e;
    }
    catch (Exception e) {
      // make sure to close the yielder if anything happened before starting to serialize the response
      yielder.close();
      throw new RuntimeException(e);
    }
    finally {
      // do not close the yielder here, since we do not want to close it before
      // StreamingOutput has iterated over all the results
    }
  }
  catch (QueryInterruptedException e) {
    interruptedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotError(e);
  }
  catch (QueryTimeoutException timeout) {
    timedOutQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(timeout, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotTimeout(timeout);
  }
  catch (QueryCapacityExceededException cap) {
    failedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(cap, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotLimited(cap);
  }
  catch (QueryUnsupportedException unsupported) {
    failedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(unsupported, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotUnsupported(unsupported);
  }
  catch (BadJsonQueryException | ResourceLimitExceededException e) {
    interruptedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);
    return ioReaderWriter.getResponseWriter().gotBadQuery(e);
  }
  catch (ForbiddenException e) {
    // rethrow; the JAX-RS layer translates ForbiddenException into the proper error response
    throw e;
  }
  catch (Exception e) {
    failedQueryCount.incrementAndGet();
    queryLifecycle.emitLogsAndMetrics(e, req.getRemoteAddr(), -1);
    log.noStackTrace()
       .makeAlert(e, "Exception handling request")
       .addData("query", query != null ? jsonMapper.writeValueAsString(query) : "unparseable query")
       .addData("peer", req.getRemoteAddr())
       .emit();
    return ioReaderWriter.getResponseWriter().gotError(e);
  }
  finally {
    Thread.currentThread().setName(currThreadName);
  }
}
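The method above interleaves lifecycle bookkeeping with the streaming itself, which can obscure the core StreamingOutput pattern: build the Response around a body that serializes lazily, count the bytes as they go out, and flush explicitly so errors are not swallowed by close(). A stripped-down sketch of just that pattern, assuming a plain Jackson ObjectMapper and Guava's CountingOutputStream in place of Druid's lifecycle classes (the class and method names here are hypothetical):

import java.io.OutputStream;

import javax.ws.rs.core.Response;
import javax.ws.rs.core.StreamingOutput;

import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.io.CountingOutputStream;

public final class StreamingResponses
{
  // Leave the target stream open after writeValue so we control flush ordering ourselves.
  private static final ObjectMapper MAPPER =
      new ObjectMapper().disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET);

  // "results" stands in for Druid's Yielder; the byte count would feed emitLogsAndMetrics.
  public static Response streamAsJson(Object results)
  {
    StreamingOutput body = output -> {
      CountingOutputStream counting = new CountingOutputStream(output);
      MAPPER.writeValue(counting, results); // runs only after the status line and headers are sent
      counting.flush();                     // surface flush errors rather than losing them in close()
      // counting.getCount() is the response size in bytes
    };
    return Response.ok(body).build();
  }
}

Because write() runs after the headers are committed, a failure mid-stream can no longer change the status code; that is why doPost records success or failure in the finally block of write() instead of mapping late errors to HTTP statuses.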
Use of javax.ws.rs.core.StreamingOutput in project druid by druid-io.
The class ShuffleResource, method getPartition.
@GET
@Path("/task/{supervisorTaskId}/{subTaskId}/partition")
@Produces(MediaType.APPLICATION_OCTET_STREAM)
public Response getPartition(
    @PathParam("supervisorTaskId") String supervisorTaskId,
    @PathParam("subTaskId") String subTaskId,
    @QueryParam("startTime") String startTime,
    @QueryParam("endTime") String endTime,
    @QueryParam("bucketId") int bucketId
)
{
  final Interval interval = new Interval(DateTimes.of(startTime), DateTimes.of(endTime));
  final Optional<ByteSource> partitionFile =
      intermediaryDataManager.findPartitionFile(supervisorTaskId, subTaskId, interval, bucketId);
  if (!partitionFile.isPresent()) {
    final String errorMessage = StringUtils.format(
        "Can't find the partition for supervisorTask[%s], subTask[%s], interval[%s], and bucketId[%s]",
        supervisorTaskId,
        subTaskId,
        interval,
        bucketId
    );
    return Response.status(Status.NOT_FOUND).entity(errorMessage).build();
  } else {
    try {
      long size = partitionFile.get().size();
      shuffleMetrics.ifPresent(metrics -> metrics.shuffleRequested(supervisorTaskId, size));
    }
    catch (IOException ioException) {
      log.error(
          "Failed to get length of file for supervisorTask[%s], subTask[%s], interval[%s], and bucketId[%s]",
          supervisorTaskId,
          subTaskId,
          interval,
          bucketId
      );
    }
    return Response.ok((StreamingOutput) output -> partitionFile.get().copyTo(output)).build();
  }
}
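The cast in the return statement is what gives the lambda its target type: Response.ok takes a plain Object, so without the cast to StreamingOutput the lambda would not compile, and the cast also makes it obvious to readers that the entity is streamed rather than serialized eagerly. A compact illustration of the same shape with an ordinary file (a hypothetical helper, not from the snippet; java.nio is my substitution for Guava's ByteSource.copyTo):

import java.nio.file.Files;
import java.nio.file.Path;

import javax.ws.rs.core.Response;
import javax.ws.rs.core.StreamingOutput;

public final class FileStreaming
{
  // The file is copied to the client in chunks, never fully buffered in memory.
  public static Response streamFile(Path file)
  {
    return Response.ok((StreamingOutput) output -> Files.copy(file, output)).build();
  }
}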
Use of javax.ws.rs.core.StreamingOutput in project textdb by TextDB.
The class DownloadFileResource, method downloadExcelFile.
private Response downloadExcelFile(TupleSourceOperator tupleSource) {
    ExcelSink excelSink = new ExcelSinkPredicate().newOperator();
    excelSink.setInputOperator(tupleSource);
    excelSink.open();
    excelSink.collectAllTuples();
    excelSink.close();
    StreamingOutput fileStream = new StreamingOutput() {

        @Override
        public void write(OutputStream output) throws IOException, WebApplicationException {
            byte[] data = Files.readAllBytes(excelSink.getFilePath());
            output.write(data);
            output.flush();
        }
    };
    return Response.ok(fileStream, MediaType.APPLICATION_OCTET_STREAM)
            .header("content-disposition", "attachment; filename=result.xlsx")
            .build();
}
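Note that Files.readAllBytes materializes the whole workbook in memory before writing, so the StreamingOutput here only defers the copy, it does not bound memory. For small exports that is simpler and fine; if memory mattered, a chunked copy would keep the same interface. A hedged drop-in variant (my substitution, using Files.copy instead of readAllBytes):

import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;

import javax.ws.rs.core.StreamingOutput;

final class ChunkedFileOutput {

    // Same contract as the anonymous class above, but copies in chunks
    // instead of loading the whole file as a byte[].
    static StreamingOutput of(Path filePath) {
        return (OutputStream output) -> {
            Files.copy(filePath, output);
            output.flush();
        };
    }
}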
Use of javax.ws.rs.core.StreamingOutput in project openolat by klemens.
The class ForumWebService, method getAttachment.
/**
 * Retrieves an attachment of the message
 * @response.representation.200.mediaType application/octet-stream
 * @response.representation.200.doc The attachment as a stream of bytes
 * @response.representation.404.doc The message or the attachment not found
 * @param messageKey The key of the message
 * @param filename The name of the attachment
 * @param request The REST request
 * @return The attachment
 */
@GET
@Path("posts/{messageKey}/attachments/{filename}")
@Produces({ "*/*", MediaType.APPLICATION_OCTET_STREAM })
public Response getAttachment(@PathParam("messageKey") Long messageKey,
        @PathParam("filename") String filename, @Context Request request) {
    // load the message
    Message mess = fom.loadMessage(messageKey);
    if (mess == null) {
        return Response.serverError().status(Status.NOT_FOUND).build();
    }
    if (!forum.equalsByPersistableKey(mess.getForum())) {
        return Response.serverError().status(Status.CONFLICT).build();
    }
    VFSContainer container = fom.getMessageContainer(mess.getForum().getKey(), mess.getKey());
    VFSItem item = container.resolve(filename);
    if (item instanceof LocalFileImpl) {
        // local file -> the length is given to the client, which is good
        Date lastModified = new Date(item.getLastModified());
        Response.ResponseBuilder response = request.evaluatePreconditions(lastModified);
        if (response == null) {
            File attachment = ((LocalFileImpl) item).getBasefile();
            String mimeType = WebappHelper.getMimeType(attachment.getName());
            if (mimeType == null) {
                mimeType = "application/octet-stream";
            }
            response = Response.ok(attachment).lastModified(lastModified).type(mimeType).cacheControl(cc);
        }
        return response.build();
    } else if (item instanceof VFSLeaf) {
        // stream -> the length is not given to the client, which is not nice
        Date lastModified = new Date(item.getLastModified());
        Response.ResponseBuilder response = request.evaluatePreconditions(lastModified);
        if (response == null) {
            StreamingOutput attachment = new VFSStreamingOutput((VFSLeaf) item);
            String mimeType = WebappHelper.getMimeType(item.getName());
            if (mimeType == null) {
                mimeType = "application/octet-stream";
            }
            response = Response.ok(attachment).lastModified(lastModified).type(mimeType).cacheControl(cc);
        }
        return response.build();
    }
    return Response.serverError().status(Status.NOT_FOUND).build();
}
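VFSStreamingOutput itself is not shown in the snippet. Assuming it simply pipes the leaf's input stream into the response, an adapter along these lines would fit; this is a sketch under that assumption, not OpenOLAT's actual class:

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.StreamingOutput;

import org.olat.core.util.vfs.VFSLeaf;

// Sketch of a StreamingOutput adapter over a VFSLeaf source; the real
// VFSStreamingOutput may differ.
final class LeafStreamingOutput implements StreamingOutput {

    private final VFSLeaf leaf;

    LeafStreamingOutput(VFSLeaf leaf) {
        this.leaf = leaf;
    }

    @Override
    public void write(OutputStream output) throws IOException, WebApplicationException {
        try (InputStream in = leaf.getInputStream()) {
            in.transferTo(output); // JDK 9+; copies in fixed-size chunks
        }
    }
}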
Use of javax.ws.rs.core.StreamingOutput in project plugin-vm by ligoj.
The class VmResourceTest, method downloadNodeSchedulesReport.
@Test
public void downloadNodeSchedulesReport() throws Exception {
    final VmResource resource = new VmResource();
    applicationContext.getAutowireCapableBeanFactory().autowireBean(resource);
    resource.locator = mockServicePluginLocator;
    final AtomicReference<VmOperation> operation = new AtomicReference<>(null);
    // The third call is skipped
    Mockito.doNothing().when(mockVmTool).execute(ArgumentMatchers.argThat(new ArgumentMatcher<VmExecution>() {

        @Override
        public boolean matches(final VmExecution argument) {
            argument.setOperation(operation.get());
            return true;
        }
    }));

    // Report without executions
    ByteArrayOutputStream output = new ByteArrayOutputStream();
    ((StreamingOutput) resource.downloadNodeSchedulesReport("service:vm:test", "file1").getEntity()).write(output);
    List<String> lines = IOUtils.readLines(new ByteArrayInputStream(output.toByteArray()), StandardCharsets.UTF_8);
    Assertions.assertEquals(2, lines.size());
    Assertions.assertEquals(
            "subscription;project;projectKey;projectName;node;cron;operation;lastDateHMS;lastTimestamp;lastOperation;vm;lastTrigger;lastSucceed;lastStatusText;lastErrorText;nextDateHMS;nextTimestamp",
            lines.get(0));

    // No last execution available
    Assertions.assertTrue(lines.get(1).matches(
            "\\d+;\\d+;gfi-gstack;gStack;service:vm:test:test;0 0 0 1 1 \\? 2050;OFF;;;;;;;;;2050/01/01 00:00:00;2524604400000"),
            "Was : " + lines.get(1));
    output.close();

    // Manual execution
    operation.set(VmOperation.OFF);
    Integer id = resource.execute(subscription, VmOperation.OFF);
    vmExecutionRepository.findOneExpected(id).setStatusText("status1");

    // Manual execution by schedule, bypassing the security check
    securityHelper.setUserName(SecurityHelper.SYSTEM_USERNAME);
    final Subscription entity = subscriptionRepository.findOneExpected(subscription);
    operation.set(VmOperation.SHUTDOWN);
    id = resource.execute(entity, VmOperation.ON);
    vmExecutionRepository.findOneExpected(id).setVm("vm1");

    // This call will be skipped
    operation.set(null);
    Assertions.assertNull(resource.execute(entity, VmOperation.REBOOT));

    // Restore the current user
    initSpringSecurityContext(getAuthenticationName());

    // The report contains only the last executions (OFF/SHUTDOWN/[REBOOT = skipped])
    output = new ByteArrayOutputStream();
    ((StreamingOutput) resource.downloadNodeSchedulesReport("service:vm:test", "file1").getEntity()).write(output);
    lines = IOUtils.readLines(new ByteArrayInputStream(output.toByteArray()), StandardCharsets.UTF_8);
    Assertions.assertEquals(2, lines.size());
    Assertions.assertTrue(lines.get(1).matches(
            "\\d+;\\d+;gfi-gstack;gStack;service:vm:test:test;0 0 0 1 1 \\? 2050;OFF;.+;.+;SHUTDOWN;vm1;_system;true;;;2050/01/01 00:00:00;2524604400000"),
            "Was : " + lines.get(1));

    // Next execution where the schedule CRON has been updated
    vmScheduleRepository.findBy("subscription.id", subscription).setCron("INVALID");
    operation.set(VmOperation.SHUTDOWN);
    id = resource.execute(entity, VmOperation.ON);
    vmExecutionRepository.findOneExpected(id).setVm("vm1");
    output = new ByteArrayOutputStream();
    ((StreamingOutput) resource.downloadNodeSchedulesReport("service:vm:test", "file1").getEntity()).write(output);
    lines = IOUtils.readLines(new ByteArrayInputStream(output.toByteArray()), StandardCharsets.UTF_8);
    Assertions.assertEquals(2, lines.size());
    Assertions.assertTrue(lines.get(1).matches(
            "\\d+;\\d+;gfi-gstack;gStack;service:vm:test:test;INVALID;OFF;.+;.+;SHUTDOWN;vm1;fdaugan;true;;;ERROR;ERROR"));

    // Add another schedule to the same subscription, with an execution
    final VmSchedule schedule = new VmSchedule();
    schedule.setOperation(VmOperation.ON);
    schedule.setCron("0 0 0 1 1 ? 2049");
    schedule.setSubscription(entity);
    vmScheduleRepository.saveAndFlush(schedule);
    final VmExecution execution = new VmExecution();
    execution.setDate(new Date());
    execution.setSubscription(entity);
    execution.setTrigger("_system");
    execution.setOperation(VmOperation.ON);
    execution.setSucceed(true);
    vmExecutionRepository.saveAndFlush(execution);
    output = new ByteArrayOutputStream();
    ((StreamingOutput) resource.downloadNodeSchedulesReport("service:vm:test:test", "file1").getEntity()).write(output);
    lines = IOUtils.readLines(new ByteArrayInputStream(output.toByteArray()), StandardCharsets.UTF_8);
    Assertions.assertEquals(3, lines.size());
    Assertions.assertTrue(lines.get(2).matches(
            "\\d+;\\d+;gfi-gstack;gStack;service:vm:test:test;0 0 0 1 1 \\? 2049;ON;.+;.+;ON;;_system;true;;;2049/01/01 00:00:00;2493068400000"),
            "Was : " + lines.get(2));
}
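The testing technique here is reusable: cast the response entity back to StreamingOutput and write it into an in-memory buffer, so the streamed content can be asserted on without standing up an HTTP container. A minimal helper distilling that step (the class and method names are hypothetical):

import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

import javax.ws.rs.core.Response;
import javax.ws.rs.core.StreamingOutput;

final class StreamingAssertions {

    // Drains a streaming JAX-RS response into a String for assertions,
    // exactly as the test above does with its CSV report.
    static String drain(Response response) throws Exception {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        ((StreamingOutput) response.getEntity()).write(buffer);
        return buffer.toString(StandardCharsets.UTF_8.name());
    }
}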