Use of io.trino.plugin.raptor.legacy.storage.StoragePageSink in the project trino by trinodb: the method compact of the class ShardCompactor.
/**
 * Rewrites the given set of shards into freshly written shards inside the supplied
 * transaction, producing one combined output stream for all input shards.
 *
 * @param transactionId transaction under which the new shards are written
 * @param bucketNumber  bucket the shards belong to, if the table is bucketed
 * @param uuids         identifiers of the shards to compact
 * @param columns       column metadata used to read the inputs and write the output
 * @return info for the newly created shards
 * @throws IOException if reading or writing shard data fails; the page sink is
 *         rolled back before the exception propagates
 */
public List<ShardInfo> compact(long transactionId, OptionalInt bucketNumber, Set<UUID> uuids, List<ColumnInfo> columns)
        throws IOException
{
    long startNanos = System.nanoTime();

    // Split the column metadata into the parallel id/type lists the storage layer expects
    List<Long> ids = new ArrayList<>();
    List<Type> types = new ArrayList<>();
    for (ColumnInfo column : columns) {
        ids.add(column.getColumnId());
        types.add(column.getType());
    }

    StoragePageSink sink = storageManager.createStoragePageSink(transactionId, bucketNumber, ids, types, false);

    List<ShardInfo> newShards;
    try {
        newShards = compact(sink, bucketNumber, uuids, ids, types);
    }
    catch (IOException | RuntimeException e) {
        // Undo any partially written output before surfacing the failure
        sink.rollback();
        throw e;
    }

    updateStats(uuids.size(), newShards.size(), nanosSince(startNanos).toMillis());
    return newShards;
}
Use of io.trino.plugin.raptor.legacy.storage.StoragePageSink in the project trino by trinodb: the method compactSorted of the class ShardCompactor.
// Compacts the given shards while preserving sort order: performs a k-way merge of the
// input shards (each already sorted on the sort columns) via a priority queue, writing
// the merged rows into a single output page sink.
// Throws IOException if reading or writing shard data fails; the output sink is rolled
// back on any failure, and all open row sources are closed in the finally block.
public List<ShardInfo> compactSorted(long transactionId, OptionalInt bucketNumber, Set<UUID> uuids, List<ColumnInfo> columns, List<Long> sortColumnIds, List<SortOrder> sortOrders) throws IOException {
checkArgument(sortColumnIds.size() == sortOrders.size(), "sortColumnIds and sortOrders must be of the same size");
long start = System.nanoTime();
// Parallel id/type lists derived from the column metadata.
List<Long> columnIds = columns.stream().map(ColumnInfo::getColumnId).collect(toList());
List<Type> columnTypes = columns.stream().map(ColumnInfo::getType).collect(toList());
checkArgument(columnIds.containsAll(sortColumnIds), "sortColumnIds must be a subset of columnIds");
// Translate sort column ids into positional indexes within the output row.
List<Integer> sortIndexes = sortColumnIds.stream().map(columnIds::indexOf).collect(toList());
// Min-heap ordered by each source's current row, so poll() always yields the source
// holding the globally next row in sort order.
Queue<SortedRowSource> rowSources = new PriorityQueue<>();
StoragePageSink outputPageSink = storageManager.createStoragePageSink(transactionId, bucketNumber, columnIds, columnTypes, false);
try {
// Open one sorted row source per input shard and seed the merge queue.
for (UUID uuid : uuids) {
ConnectorPageSource pageSource = storageManager.getPageSource(uuid, bucketNumber, columnIds, columnTypes, TupleDomain.all(), orcReaderOptions);
SortedRowSource rowSource = new SortedRowSource(pageSource, columnTypes, sortIndexes, sortOrders, typeOperators);
rowSources.add(rowSource);
}
// Classic k-way merge: repeatedly take the source with the smallest current row,
// emit that row, then re-insert the source so the heap re-orders it by its next row.
while (!rowSources.isEmpty()) {
SortedRowSource rowSource = rowSources.poll();
if (!rowSource.hasNext()) {
// rowSource is empty, close it
rowSource.close();
continue;
}
outputPageSink.appendRow(rowSource.next());
// Flush eagerly once the sink's buffer fills, bounding memory use.
if (outputPageSink.isFull()) {
outputPageSink.flush();
}
rowSources.add(rowSource);
}
outputPageSink.flush();
// Commit inside the try block so a commit failure also triggers rollback below.
List<ShardInfo> shardInfos = getFutureValue(outputPageSink.commit());
updateStats(uuids.size(), shardInfos.size(), nanosSince(start).toMillis());
return shardInfos;
} catch (IOException | RuntimeException e) {
// Discard any partially written output before propagating the failure.
outputPageSink.rollback();
throw e;
} finally {
// Close whatever sources are still queued (exhausted ones were closed above).
rowSources.forEach(SortedRowSource::closeQuietly);
}
}
Use of io.trino.plugin.raptor.legacy.storage.StoragePageSink in the project trino by trinodb: the method createShards of the class TestShardCompactor.
/**
 * Writes {@code shardCount} batches of generated test pages through a fresh page sink,
 * flushing after each batch so every batch becomes its own shard, and returns the
 * committed shard infos.
 *
 * @param storageManager storage manager used to create the page sink
 * @param columnIds      ids of the columns to write
 * @param columnTypes    types of the columns to write
 * @param shardCount     number of shards (flushed page batches) to produce
 * @return info for the shards created by the commit
 */
private static List<ShardInfo> createShards(StorageManager storageManager, List<Long> columnIds, List<Type> columnTypes, int shardCount)
{
    StoragePageSink pageSink = createStoragePageSink(storageManager, columnIds, columnTypes);
    int remaining = shardCount;
    while (remaining > 0) {
        pageSink.appendPages(createPages(columnTypes));
        // Each flush seals the buffered pages into a separate shard
        pageSink.flush();
        remaining--;
    }
    return getFutureValue(pageSink.commit());
}
Aggregations