Usage of io.trino.spi.Page in the trinodb/trino project: the getNextPage method of class InformationSchemaPageSource.
/**
 * Returns the next projected page of results, or {@code null} when none is
 * currently available or the source is finished.
 */
@Override
public Page getNextPage() {
    // Nothing more to produce once the source is exhausted.
    if (isFinished()) {
        return null;
    }
    // Lazily materialize the result pages on first access.
    if (pages.isEmpty()) {
        buildPages();
    }
    Page next = pages.poll();
    if (next == null) {
        return null;
    }
    // The page leaves the queue, so release its accounted retained memory.
    memoryUsageBytes -= next.getRetainedSizeInBytes();
    // Apply the column projection and account the produced output bytes.
    Page projected = projection.apply(next);
    completedBytes += projected.getSizeInBytes();
    return projected;
}
Usage of io.trino.spi.Page in the trinodb/trino project: the pageSource method of class RuleStatsSystemTable.
/**
 * Builds a single-page source containing one row per optimizer rule with its
 * recorded invocation statistics. Only valid on the coordinator, where the
 * rule stats recorder is present.
 */
@Override
public ConnectorPageSource pageSource(ConnectorTransactionHandle transactionHandle, ConnectorSession session, TupleDomain<Integer> constraint) {
    // Rule stats are recorded only on the coordinator.
    checkState(ruleStatsRecorder.isPresent(), "Rule stats system table can return results only on coordinator");
    Map<Class<?>, RuleStats> statsByRule = ruleStatsRecorder.get().getStats();
    int rowCount = statsByRule.size();
    // One block builder per output column, keyed by column name.
    Map<String, BlockBuilder> buildersByColumn = ruleStatsTable.getColumns().stream()
            .collect(toImmutableMap(
                    ColumnMetadata::getName,
                    column -> column.getType().createBlockBuilder(null, rowCount)));
    for (Map.Entry<Class<?>, RuleStats> entry : statsByRule.entrySet()) {
        RuleStats ruleStats = entry.getValue();
        VARCHAR.writeString(buildersByColumn.get("rule_name"), entry.getKey().getSimpleName());
        BIGINT.writeLong(buildersByColumn.get("invocations"), ruleStats.getInvocations());
        BIGINT.writeLong(buildersByColumn.get("matches"), ruleStats.getHits());
        BIGINT.writeLong(buildersByColumn.get("failures"), ruleStats.getFailures());
        DOUBLE.writeDouble(buildersByColumn.get("average_time"), ruleStats.getTime().getAvg());
        // Percentiles are written as a single map entry of (percentile, value) pairs.
        BlockBuilder percentilesWriter = buildersByColumn.get("time_distribution_percentiles").beginBlockEntry();
        for (Map.Entry<Double, Double> percentile : ruleStats.getTime().getPercentiles().entrySet()) {
            DOUBLE.writeDouble(percentilesWriter, percentile.getKey());
            DOUBLE.writeDouble(percentilesWriter, percentile.getValue());
        }
        buildersByColumn.get("time_distribution_percentiles").closeEntry();
    }
    // Assemble the finished blocks in declared column order and wrap them in one page.
    Block[] columnBlocks = ruleStatsTable.getColumns().stream()
            .map(column -> buildersByColumn.get(column.getName()).build())
            .toArray(Block[]::new);
    return new FixedPageSource(ImmutableList.of(new Page(rowCount, columnBlocks)));
}
Usage of io.trino.spi.Page in the trinodb/trino project: the partitionBySingleRleValue method of class PagePartitioner.
// Handles the case where every partition-function argument block is run-length
// encoded: all rows share the same argument values, so the partition is computed
// once and every position from `position` onward is assigned in bulk.
// NOTE(review): assumes the caller has already handled positions before
// `position` (e.g. the replicated "any row") — confirm against partitionPositions().
private void partitionBySingleRleValue(Page page, int position, Page partitionFunctionArgs, IntArrayList[] partitionPositions) {
// copy all positions because all hash function args are the same for every position
if (nullChannel != -1 && page.getBlock(nullChannel).isNull(0)) {
verify(page.getBlock(nullChannel) instanceof RunLengthEncodedBlock, "null channel is not RunLengthEncodedBlock", page.getBlock(nullChannel));
// all positions are null
// Null rows are replicated to every partition.
int[] allPositions = integersInRange(position, page.getPositionCount());
for (IntList partitionPosition : partitionPositions) {
// addElements(index, array) inserts at `index` — presumably each list currently
// has exactly `position` elements, so this is an append; verify with callers.
partitionPosition.addElements(position, allPositions);
}
} else {
// extract rle page to prevent JIT profile pollution
Page rlePage = extractRlePage(partitionFunctionArgs);
// One evaluation at position 0 suffices since all rows carry identical args.
int partition = partitionFunction.getPartition(rlePage, 0);
IntArrayList positions = partitionPositions[partition];
for (int i = position; i < page.getPositionCount(); i++) {
positions.add(i);
}
}
}
Usage of io.trino.spi.Page in the trinodb/trino project: the partitionPositions method of class PagePartitioner.
/**
 * Computes, for each output partition, the list of row positions of {@code page}
 * that belong to it, choosing the cheapest strategy based on how the
 * partition-function argument blocks are encoded.
 */
private IntArrayList[] partitionPositions(Page page) {
    verify(page.getPositionCount() > 0, "position count is 0");
    IntArrayList[] partitionPositions = initPositions(page);
    // When "any row" replication is requested and has not happened yet,
    // row 0 is sent to every partition once; partitioning then starts at row 1.
    int startPosition = 0;
    if (replicatesAnyRow && !hasAnyRowBeenReplicated) {
        for (IntList partitionPosition : partitionPositions) {
            partitionPosition.add(0);
        }
        hasAnyRowBeenReplicated = true;
        startPosition = 1;
    }
    Page partitionFunctionArgs = getPartitionFunctionArguments(page);
    if (partitionFunctionArgs.getChannelCount() > 0 && onlyRleBlocks(partitionFunctionArgs)) {
        // At least one RLE block is required here: with zero argument blocks the
        // partition function may return a different value per invocation
        // (e.g. RoundRobinBucketFunction), so the single-value fast path is invalid.
        partitionBySingleRleValue(page, startPosition, partitionFunctionArgs, partitionPositions);
    } else if (partitionFunctionArgs.getChannelCount() == 1 && isDictionaryProcessingFaster(partitionFunctionArgs.getBlock(0))) {
        partitionBySingleDictionary(page, startPosition, partitionFunctionArgs, partitionPositions);
    } else {
        // Generic fallback: evaluate the partition function per position.
        partitionGeneric(page, startPosition, aPosition -> partitionFunction.getPartition(partitionFunctionArgs, aPosition), partitionPositions);
    }
    return partitionPositions;
}
Usage of io.trino.spi.Page in the trinodb/trino project: the flush method of class PagePartitioner.
/**
 * Serializes and enqueues buffered pages to the output buffer.
 *
 * @param force when {@code true}, every non-empty partition builder is flushed;
 *              otherwise only builders that report themselves full are flushed
 */
public void flush(boolean force) {
    try (PagesSerde.PagesSerdeContext context = serde.newContext()) {
        for (int partition = 0; partition < pageBuilders.length; partition++) {
            PageBuilder builder = pageBuilders[partition];
            // Skip partitions with nothing buffered, or not yet full unless forced.
            if (builder.isEmpty() || (!force && !builder.isFull())) {
                continue;
            }
            Page output = builder.build();
            builder.reset();
            // Record output stats before handing the page to the buffer.
            operatorContext.recordOutput(output.getSizeInBytes(), output.getPositionCount());
            outputBuffer.enqueue(partition, splitAndSerializePage(context, output));
            pagesAdded.incrementAndGet();
            rowsAdded.addAndGet(output.getPositionCount());
        }
    }
}
Aggregations