Use of io.airlift.slice.OutputStreamSliceOutput in project presto by prestodb.
Class FileSingleStreamSpiller, method writePages:
private void writePages(Iterator<Page> pageIterator)
{
    checkState(writable, "Spilling no longer allowed. The spiller has been made non-writable on first read for subsequent reads to be consistent");
    checkState(!committed, "Spilling no longer allowed. Spill file is already committed");
    try (SliceOutput output = new OutputStreamSliceOutput(targetFile.newOutputStream(APPEND), BUFFER_SIZE)) {
        while (pageIterator.hasNext()) {
            Page page = pageIterator.next();
            spilledPagesInMemorySize += page.getSizeInBytes();
            // page serialization requires page.getSizeInBytes() + Integer.BYTES to fit in an integer
            splitPage(page, DEFAULT_MAX_PAGE_SIZE_IN_BYTES).stream().map(serde::serialize).forEach(serializedPage -> {
                long pageSize = serializedPage.getSizeInBytes();
                localSpillContext.updateBytes(pageSize);
                spillerStats.addToTotalSpilledBytes(pageSize);
                writeSerializedPage(output, serializedPage);
            });
        }
    }
    catch (UncheckedIOException | IOException e) {
        throw new PrestoException(GENERIC_INTERNAL_ERROR, "Failed to spill pages", e);
    }
}
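The pattern above, wrapping a buffered file stream in OutputStreamSliceOutput inside try-with-resources and streaming length-prefixed records through it, reduces to the following standalone sketch. Only the OutputStreamSliceOutput and SliceOutput APIs come from the snippet; SpillSketch, the 64 KB buffer size, and the int-length-plus-bytes record layout are illustrative assumptions, not Presto's actual page wire format.

import io.airlift.slice.OutputStreamSliceOutput;
import io.airlift.slice.SliceOutput;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;

import static java.nio.file.StandardOpenOption.APPEND;
import static java.nio.file.StandardOpenOption.CREATE;

public final class SpillSketch
{
    private static final int BUFFER_SIZE = 64 * 1024; // hypothetical buffer size

    // Appends length-prefixed records to a spill file, mirroring the
    // try-with-resources pattern above. The record layout (int length
    // followed by the raw bytes) is an assumption for illustration only.
    public static void spill(Path targetFile, Iterable<byte[]> records)
            throws IOException
    {
        try (OutputStream fileOut = Files.newOutputStream(targetFile, CREATE, APPEND);
                SliceOutput output = new OutputStreamSliceOutput(fileOut, BUFFER_SIZE)) {
            for (byte[] record : records) {
                output.writeInt(record.length); // length prefix must fit in an int
                output.writeBytes(record);
            }
        } // closing the SliceOutput flushes its buffer and closes the file
    }
}

Because the spiller owns the file stream, closing the wrapper here is correct; the next example shows the opposite case, where the stream belongs to someone else.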
Use of io.airlift.slice.OutputStreamSliceOutput in project presto by prestodb.
Class PagesResponseWriter, method writeTo:
@Override
public void writeTo(
        List<SerializedPage> serializedPages,
        Class<?> type,
        Type genericType,
        Annotation[] annotations,
        MediaType mediaType,
        MultivaluedMap<String, Object> httpHeaders,
        OutputStream output)
        throws IOException, WebApplicationException
{
    try {
        SliceOutput sliceOutput = new OutputStreamSliceOutput(output);
        writeSerializedPages(sliceOutput, serializedPages);
        // We use flush instead of close, because the underlying stream would be closed and that is not allowed.
        sliceOutput.flush();
    }
    catch (UncheckedIOException e) {
        // This is not a "server" problem so we don't want to log this
        if (!(e.getCause() instanceof EOFException)) {
            throw e;
        }
    }
}
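The key detail in writeTo is flushing rather than closing: the JAX-RS container owns the response stream, and closing it through the wrapper is not allowed. A minimal sketch of that pattern, assuming a hypothetical writeChunks helper with a ByteArrayOutputStream standing in for the container-owned stream:

import io.airlift.slice.OutputStreamSliceOutput;
import io.airlift.slice.SliceOutput;
import io.airlift.slice.Slices;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.List;

public final class ResponseWriterSketch
{
    // Writes UTF-8 chunks to a stream owned by someone else (the servlet
    // container, in the writeTo method above). The wrapper is flushed but
    // never closed, because closing it would close the underlying stream.
    static void writeChunks(OutputStream containerStream, List<String> chunks)
            throws IOException
    {
        SliceOutput sliceOutput = new OutputStreamSliceOutput(containerStream);
        for (String chunk : chunks) {
            sliceOutput.writeBytes(Slices.utf8Slice(chunk));
        }
        sliceOutput.flush(); // flush, not close
    }

    public static void main(String[] args)
            throws IOException
    {
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        writeChunks(sink, List.of("hello, ", "world"));
        System.out.println(sink); // prints: hello, world
    }
}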
Use of io.airlift.slice.OutputStreamSliceOutput in project presto by prestodb.
Class FileFragmentResultCacheManager, method cachePages:
private void cachePages(CacheKey key, Path path, List<Page> pages, long resultSize)
{
    try {
        Files.createFile(path);
        try (SliceOutput output = new OutputStreamSliceOutput(newOutputStream(path, APPEND))) {
            writePages(pagesSerdeFactory.createPagesSerde(), output, pages.iterator());
            long resultPhysicalBytes = output.size();
            cache.put(key, new CacheEntry(path, resultPhysicalBytes));
            fragmentCacheStats.incrementCacheEntries();
            fragmentCacheStats.addCacheSizeInBytes(resultPhysicalBytes);
        }
        catch (UncheckedIOException | IOException e) {
            log.warn(e, "%s encountered an error while writing to path %s", Thread.currentThread().getName(), path);
            tryDeleteFile(path);
        }
    }
    catch (UncheckedIOException | IOException e) {
        log.warn(e, "%s encountered an error while writing to path %s", Thread.currentThread().getName(), path);
        tryDeleteFile(path);
    }
    finally {
        fragmentCacheStats.addInFlightBytes(-resultSize);
    }
}
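cachePages combines three ideas: create the file up front so duplicate entries fail fast, read output.size() to learn the physical bytes written, and delete the partial file on any failure so a broken entry is never left behind. A self-contained sketch of that write-or-clean-up pattern, with CacheFileSketch and writeCacheFile as hypothetical names:

import io.airlift.slice.OutputStreamSliceOutput;
import io.airlift.slice.SliceOutput;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;

import static java.nio.file.StandardOpenOption.APPEND;

public final class CacheFileSketch
{
    // Creates a cache file, writes the payload, and returns the physical
    // size reported by SliceOutput.size(). On failure the partial file is
    // deleted, following the recovery strategy of cachePages above (which
    // logs and swallows the error instead of rethrowing it).
    static long writeCacheFile(Path path, byte[] payload)
            throws IOException
    {
        Files.createFile(path); // fails if the entry already exists
        try (SliceOutput output = new OutputStreamSliceOutput(Files.newOutputStream(path, APPEND))) {
            output.writeBytes(payload);
            return output.size(); // bytes pushed through this SliceOutput
        }
        catch (UncheckedIOException | IOException e) {
            Files.deleteIfExists(path); // never leave a partial cache file behind
            throw e;
        }
    }
}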
Use of io.airlift.slice.OutputStreamSliceOutput in project presto by prestodb.
Class RcFileTester, method writeRcFileColumnNew:
private static DataSize writeRcFileColumnNew(File outputFile, Format format, Compression compression, Type type, Iterator<?> values, Map<String, String> metadata)
        throws Exception
{
    OutputStreamSliceOutput output = new OutputStreamSliceOutput(new FileOutputStream(outputFile));
    AircompressorCodecFactory codecFactory = new AircompressorCodecFactory(new HadoopCodecFactory(RcFileTester.class.getClassLoader()));
    RcFileWriter writer = new RcFileWriter(
            output,
            ImmutableList.of(type),
            format.getVectorEncoding(),
            compression.getCodecName(),
            codecFactory,
            metadata,
            // use a smaller size to create more row groups
            new DataSize(100, KILOBYTE),
            new DataSize(200, KILOBYTE),
            true);
    BlockBuilder blockBuilder = type.createBlockBuilder(null, 1024);
    while (values.hasNext()) {
        Object value = values.next();
        writeValue(type, blockBuilder, value);
    }
    writer.write(new Page(blockBuilder.build()));
    writer.close();
    writer.validate(new FileRcFileDataSource(outputFile));
    return new DataSize(output.size(), BYTE);
}
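The test never inspects the file directly: the returned DataSize comes straight from output.size(), which counts every byte pushed through the OutputStreamSliceOutput. A minimal sketch of that measurement pattern (OutputSizeSketch and writeAndMeasure are hypothetical names; unlike the test above, where the writer closes the output, this version closes it with try-with-resources):

import io.airlift.slice.OutputStreamSliceOutput;
import io.airlift.units.DataSize;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

import static io.airlift.units.DataSize.Unit.BYTE;

public final class OutputSizeSketch
{
    // Writes a payload to a file and reports the physical size via
    // OutputStreamSliceOutput.size(), the same way writeRcFileColumnNew
    // derives its returned DataSize.
    static DataSize writeAndMeasure(File outputFile, byte[] payload)
            throws IOException
    {
        try (OutputStreamSliceOutput output = new OutputStreamSliceOutput(new FileOutputStream(outputFile))) {
            output.writeBytes(payload);
            return new DataSize(output.size(), BYTE);
        }
    }
}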