Use of io.trino.orc.stream.OrcDataReader in project trino by trinodb.
The class AbstractOrcDataSource, method readLargeDiskRanges.
private <K> Map<K, OrcDataReader> readLargeDiskRanges(Map<K, DiskRange> diskRanges) {
    if (diskRanges.isEmpty()) {
        return ImmutableMap.of();
    }

    ImmutableMap.Builder<K, OrcDataReader> slices = ImmutableMap.builder();
    for (Entry<K, DiskRange> entry : diskRanges.entrySet()) {
        DiskRange diskRange = entry.getValue();
        slices.put(entry.getKey(), new DiskOrcDataReader(diskRange));
    }
    return slices.buildOrThrow();
}
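Each large range is wrapped in a DiskOrcDataReader without any bytes being read up front; the data is fetched only when the reader is actually consumed. Callers do not invoke this private helper directly; they go through the data source's public readFully, roughly as in this sketch (the keys, offsets, lengths, and import package names are assumptions for illustration, and dataSource can be any concrete OrcDataSource):

import com.google.common.collect.ImmutableMap;
import io.trino.orc.DiskRange;
import io.trino.orc.OrcDataSource;
import io.trino.orc.stream.OrcDataReader;

import java.io.IOException;
import java.util.Map;

public class ReadRangesSketch {
    // Sketch only: keys and ranges are made up; real callers typically key the map by stream identifiers.
    static Map<String, OrcDataReader> readTwoRanges(OrcDataSource dataSource) throws IOException {
        Map<String, DiskRange> ranges = ImmutableMap.of(
                "stripe-footer", new DiskRange(1_000_000, 64 * 1024),
                "row-index", new DiskRange(2_000_000, 8 * 1024));
        // the data source decides internally which ranges are read eagerly and which lazily
        return dataSource.readFully(ranges);
    }
}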
Use of io.trino.orc.stream.OrcDataReader in project trino by trinodb.
The class MemoryOrcDataSource, method readFully.
@Override
public final <K> Map<K, OrcDataReader> readFully(Map<K, DiskRange> diskRanges) {
    requireNonNull(diskRanges, "diskRanges is null");
    if (diskRanges.isEmpty()) {
        return ImmutableMap.of();
    }

    ImmutableMap.Builder<K, OrcDataReader> slices = ImmutableMap.builder();
    for (Entry<K, DiskRange> entry : diskRanges.entrySet()) {
        DiskRange diskRange = entry.getValue();
        Slice slice = readFully(diskRange.getOffset(), diskRange.getLength());
        // retained memory is reported by this data source, so it should not be declared in the reader
        slices.put(entry.getKey(), new MemoryOrcDataReader(id, slice, 0));
    }
    return slices.buildOrThrow();
}
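Because a MemoryOrcDataSource already holds the whole file in memory and reports that retained memory itself, each reader is created with a retained size of zero. A minimal construction sketch, assuming the data source exposes a public constructor taking an OrcDataSourceId and a Slice (the import package names are also assumptions):

import io.airlift.slice.Slice;
import io.airlift.slice.Slices;
import io.trino.orc.MemoryOrcDataSource;
import io.trino.orc.OrcDataSourceId;

public class MemorySourceSketch {
    // Sketch only: assumes MemoryOrcDataSource(OrcDataSourceId, Slice) is a public constructor.
    static MemoryOrcDataSource fromBytes(byte[] orcFileBytes) {
        Slice data = Slices.wrappedBuffer(orcFileBytes);
        // every readFully call above just slices views of this in-memory buffer
        return new MemoryOrcDataSource(new OrcDataSourceId("in-memory"), data);
    }
}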
Use of io.trino.orc.stream.OrcDataReader in project trino by trinodb.
The class AbstractOrcDataSource, method readSmallDiskRanges.
private <K> Map<K, OrcDataReader> readSmallDiskRanges(Map<K, DiskRange> diskRanges) throws IOException {
    if (diskRanges.isEmpty()) {
        return ImmutableMap.of();
    }

    Iterable<DiskRange> mergedRanges = mergeAdjacentDiskRanges(diskRanges.values(), options.getMaxMergeDistance(), options.getMaxBufferSize());

    ImmutableMap.Builder<K, OrcDataReader> slices = ImmutableMap.builder();
    if (options.isLazyReadSmallRanges()) {
        for (DiskRange mergedRange : mergedRanges) {
            LazyBufferLoader mergedRangeLazyLoader = new LazyBufferLoader(mergedRange);
            for (Entry<K, DiskRange> diskRangeEntry : diskRanges.entrySet()) {
                DiskRange diskRange = diskRangeEntry.getValue();
                if (mergedRange.contains(diskRange)) {
                    slices.put(diskRangeEntry.getKey(), new MergedOrcDataReader(id, diskRange, mergedRangeLazyLoader));
                }
            }
        }
    }
    else {
        Map<DiskRange, Slice> buffers = new LinkedHashMap<>();
        for (DiskRange mergedRange : mergedRanges) {
            // read full range in one request
            Slice buffer = readFully(mergedRange.getOffset(), mergedRange.getLength());
            buffers.put(mergedRange, buffer);
        }

        for (Entry<K, DiskRange> entry : diskRanges.entrySet()) {
            slices.put(entry.getKey(), new MemoryOrcDataReader(id, getDiskRangeSlice(entry.getValue(), buffers), entry.getValue().getLength()));
        }
    }

    Map<K, OrcDataReader> sliceStreams = slices.buildOrThrow();
    verify(sliceStreams.keySet().equals(diskRanges.keySet()));
    return sliceStreams;
}
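The key step here is mergeAdjacentDiskRanges, which coalesces nearby small ranges so that each merged range can be fetched with a single read. The sketch below is a hypothetical, simplified version of that idea, not the actual Trino implementation; it assumes only the getOffset and getLength accessors that appear in the snippets above and a DiskRange(long offset, int length) constructor:

import io.trino.orc.DiskRange;

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class MergeSketch {
    // Hypothetical simplification: sort by offset, then combine neighbors whose gap is at most
    // maxMergeDistance, as long as the combined range stays within maxBufferSize.
    static List<DiskRange> merge(List<DiskRange> ranges, long maxMergeDistance, long maxBufferSize) {
        List<DiskRange> sorted = new ArrayList<>(ranges);
        sorted.sort(Comparator.comparingLong(DiskRange::getOffset));

        List<DiskRange> merged = new ArrayList<>();
        DiskRange current = null;
        for (DiskRange next : sorted) {
            if (current == null) {
                current = next;
                continue;
            }
            long currentEnd = current.getOffset() + current.getLength();
            long nextEnd = next.getOffset() + next.getLength();
            long combinedLength = Math.max(currentEnd, nextEnd) - current.getOffset();
            if (next.getOffset() - currentEnd <= maxMergeDistance && combinedLength <= maxBufferSize) {
                // close enough and small enough: extend the current merged range
                current = new DiskRange(current.getOffset(), Math.toIntExact(combinedLength));
            }
            else {
                merged.add(current);
                current = next;
            }
        }
        if (current != null) {
            merged.add(current);
        }
        return merged;
    }
}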
Use of io.trino.orc.stream.OrcDataReader in project trino by trinodb.
The class CachingOrcDataSource, method readFully.
@Override
public <K> Map<K, OrcDataReader> readFully(Map<K, DiskRange> diskRanges) throws IOException {
    ImmutableMap.Builder<K, OrcDataReader> builder = ImmutableMap.builder();
    // serving the ranges in any order will not result in eviction of cache entries that could otherwise have served any of the DiskRanges provided
    for (Map.Entry<K, DiskRange> entry : diskRanges.entrySet()) {
        DiskRange diskRange = entry.getValue();
        Slice buffer = readFully(diskRange.getOffset(), diskRange.getLength());
        builder.put(entry.getKey(), new MemoryOrcDataReader(dataSource.getId(), buffer, buffer.length()));
    }
    return builder.buildOrThrow();
}
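Unlike MemoryOrcDataSource above, the caching data source does not account for the copied buffers itself, so each reader is constructed with buffer.length() as its retained size. A small sketch of that accounting distinction, assuming (as the two snippets suggest) that MemoryOrcDataReader's third constructor argument is the retained size in bytes and that the class lives in io.trino.orc.stream:

import io.airlift.slice.Slice;
import io.trino.orc.OrcDataSourceId;
import io.trino.orc.stream.MemoryOrcDataReader;

public class RetainedSizeSketch {
    // The owning data source already reports this memory, so the reader declares none of it.
    static MemoryOrcDataReader readerForAccountedSlice(OrcDataSourceId id, Slice slice) {
        return new MemoryOrcDataReader(id, slice, 0);
    }

    // The copy belongs to the reader alone, so it reports the full buffer as retained memory.
    static MemoryOrcDataReader readerForFreshCopy(OrcDataSourceId id, Slice copy) {
        return new MemoryOrcDataReader(id, copy, copy.length());
    }
}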