Use of io.trino.spi.connector.ConnectorPageSource in project trino by trinodb.
From class IcebergPageSourceProvider, method createPageSource:
@Override
public ConnectorPageSource createPageSource(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorSplit connectorSplit,
        ConnectorTableHandle connectorTable,
        List<ColumnHandle> columns,
        DynamicFilter dynamicFilter)
{
    IcebergSplit split = (IcebergSplit) connectorSplit;
    IcebergTableHandle table = (IcebergTableHandle) connectorTable;
    List<IcebergColumnHandle> icebergColumns = columns.stream()
            .map(IcebergColumnHandle.class::cast)
            .collect(toImmutableList());
    Map<Integer, Optional<String>> partitionKeys = split.getPartitionKeys();
    List<IcebergColumnHandle> regularColumns = columns.stream()
            .map(IcebergColumnHandle.class::cast)
            .filter(column -> !partitionKeys.containsKey(column.getId()))
            .collect(toImmutableList());
    TupleDomain<IcebergColumnHandle> effectivePredicate = table.getUnenforcedPredicate()
            .intersect(dynamicFilter.getCurrentPredicate().transformKeys(IcebergColumnHandle.class::cast))
            .simplify(ICEBERG_DOMAIN_COMPACTION_THRESHOLD);
    HdfsContext hdfsContext = new HdfsContext(session);
    ReaderPageSource dataPageSource = createDataPageSource(
            session,
            hdfsContext,
            new Path(split.getPath()),
            split.getStart(),
            split.getLength(),
            split.getFileSize(),
            split.getFileFormat(),
            regularColumns,
            effectivePredicate,
            table.getNameMappingJson().map(NameMappingParser::fromJson));
    Optional<ReaderProjectionsAdapter> projectionsAdapter = dataPageSource.getReaderColumns()
            .map(readerColumns -> new ReaderProjectionsAdapter(
                    regularColumns,
                    readerColumns,
                    column -> ((IcebergColumnHandle) column).getType(),
                    IcebergPageSourceProvider::applyProjection));
    return new IcebergPageSource(icebergColumns, partitionKeys, dataPageSource.get(), projectionsAdapter);
}
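The ConnectorPageSource returned here is driven by the engine in a simple poll loop: getNextPage() may return null while the source is temporarily not ready, and the loop ends only when isFinished() reports true. A minimal sketch of that consumption loop (the PageSourceDrainer class, drainTo helper, and PageConsumer interface are hypothetical, for illustration only; Trino's real driver loop lives in the operator pipeline):

import java.io.IOException;
import io.trino.spi.Page;
import io.trino.spi.connector.ConnectorPageSource;

final class PageSourceDrainer
{
    interface PageConsumer
    {
        void accept(Page page);
    }

    // Poll the source until it reports isFinished(); a null page means
    // "not ready yet", not end of data. A real driver would yield to the
    // scheduler instead of spinning on null.
    static void drainTo(ConnectorPageSource pageSource, PageConsumer consumer)
            throws IOException
    {
        try (pageSource) {
            while (!pageSource.isFinished()) {
                Page page = pageSource.getNextPage();
                if (page != null) {
                    consumer.accept(page.getLoadedPage());
                }
            }
        }
    }
}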
Use of io.trino.spi.connector.ConnectorPageSource in project trino by trinodb.
From class RaptorPageSourceProvider, method createPageSource:
@Override
public ConnectorPageSource createPageSource(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorSplit split,
        ConnectorTableHandle table,
        List<ColumnHandle> columns,
        DynamicFilter dynamicFilter)
{
    RaptorSplit raptorSplit = (RaptorSplit) split;
    RaptorTableHandle raptorTable = (RaptorTableHandle) table;
    OptionalInt bucketNumber = raptorSplit.getBucketNumber();
    TupleDomain<RaptorColumnHandle> predicate = raptorTable.getConstraint();
    OrcReaderOptions options = new OrcReaderOptions()
            .withMaxMergeDistance(getReaderMaxMergeDistance(session))
            .withMaxBufferSize(getReaderMaxReadSize(session))
            .withStreamBufferSize(getReaderStreamBufferSize(session))
            .withTinyStripeThreshold(getReaderTinyStripeThreshold(session))
            .withLazyReadSmallRanges(isReaderLazyReadSmallRanges(session));
    OptionalLong transactionId = raptorSplit.getTransactionId();
    if (raptorSplit.getShardUuids().size() == 1) {
        UUID shardUuid = raptorSplit.getShardUuids().iterator().next();
        return createPageSource(shardUuid, bucketNumber, columns, predicate, options, transactionId);
    }
    Iterator<ConnectorPageSource> iterator = raptorSplit.getShardUuids().stream()
            .map(shardUuid -> createPageSource(shardUuid, bucketNumber, columns, predicate, options, transactionId))
            .iterator();
    return new ConcatPageSource(iterator);
}
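ConcatPageSource itself is Raptor-internal. A simplified sketch of the concatenation idea, written as an assumption rather than Raptor's actual implementation (recent SPI versions name the memory accessor getMemoryUsage; older ones used getSystemMemoryUsage):

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Iterator;
import io.trino.spi.Page;
import io.trino.spi.connector.ConnectorPageSource;

// Exhaust each delegate page source in turn and forward its pages.
final class SimpleConcatPageSource
        implements ConnectorPageSource
{
    private final Iterator<ConnectorPageSource> sources;
    private ConnectorPageSource current;
    private long completedBytes;
    private long readTimeNanos;

    SimpleConcatPageSource(Iterator<ConnectorPageSource> sources)
    {
        this.sources = sources;
    }

    @Override
    public Page getNextPage()
    {
        while (current != null || sources.hasNext()) {
            if (current == null) {
                current = sources.next();
            }
            Page page = current.getNextPage();
            if (page != null) {
                return page;
            }
            if (!current.isFinished()) {
                return null; // current delegate is temporarily not ready
            }
            // roll the finished delegate's stats into the running totals
            completedBytes += current.getCompletedBytes();
            readTimeNanos += current.getReadTimeNanos();
            closeSafely(current);
            current = null;
        }
        return null;
    }

    @Override
    public boolean isFinished()
    {
        return current == null && !sources.hasNext();
    }

    @Override
    public long getCompletedBytes()
    {
        return completedBytes + (current == null ? 0 : current.getCompletedBytes());
    }

    @Override
    public long getReadTimeNanos()
    {
        return readTimeNanos + (current == null ? 0 : current.getReadTimeNanos());
    }

    @Override
    public long getMemoryUsage()
    {
        return current == null ? 0 : current.getMemoryUsage();
    }

    @Override
    public void close()
    {
        if (current != null) {
            closeSafely(current);
            current = null;
        }
        sources.forEachRemaining(SimpleConcatPageSource::closeSafely);
    }

    private static void closeSafely(ConnectorPageSource source)
    {
        try {
            source.close();
        }
        catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
}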
Use of io.trino.spi.connector.ConnectorPageSource in project trino by trinodb.
From class ThriftTpchService, method getRowsSync:
private TrinoThriftPageResult getRowsSync(TrinoThriftId splitId, List<String> outputColumns, long maxBytes, TrinoThriftNullableToken nextToken)
{
    SplitInfo splitInfo = SPLIT_INFO_CODEC.fromJson(splitId.getId());
    checkArgument(maxBytes >= DEFAULT_MAX_PAGE_SIZE_IN_BYTES, "requested maxBytes is too small");
    ConnectorPageSource pageSource;
    if (!splitInfo.isIndexSplit()) {
        // normal scan
        pageSource = createPageSource(splitInfo, outputColumns);
    }
    else {
        // index lookup
        pageSource = createLookupPageSource(splitInfo, outputColumns);
    }
    return getRowsInternal(pageSource, splitInfo.getTableName(), outputColumns, nextToken.getToken());
}
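getRowsInternal (not shown above) drains the page source under the maxBytes budget. A hedged sketch of that kind of byte-budgeted loop; the PageBudgetReader class, method name, and return shape are assumptions, not the Thrift connector's actual code:

import java.util.ArrayList;
import java.util.List;
import io.trino.spi.Page;
import io.trino.spi.connector.ConnectorPageSource;

final class PageBudgetReader
{
    // Pull pages until the byte budget is spent or the source is finished.
    static List<Page> readUpTo(ConnectorPageSource pageSource, long maxBytes)
    {
        List<Page> pages = new ArrayList<>();
        long bytes = 0;
        while (bytes < maxBytes && !pageSource.isFinished()) {
            Page page = pageSource.getNextPage();
            if (page == null) {
                continue; // not ready yet; a real server would yield rather than spin
            }
            page = page.getLoadedPage();
            pages.add(page);
            bytes += page.getSizeInBytes();
        }
        return pages;
    }
}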
Use of io.trino.spi.connector.ConnectorPageSource in project trino by trinodb.
From class AbstractOperatorBenchmark, method createTableScanOperator:
protected final OperatorFactory createTableScanOperator(int operatorId, PlanNodeId planNodeId, String tableName, String... columnNames)
{
    checkArgument(session.getCatalog().isPresent(), "catalog not set");
    checkArgument(session.getSchema().isPresent(), "schema not set");
    // look up the table
    Metadata metadata = localQueryRunner.getMetadata();
    QualifiedObjectName qualifiedTableName = new QualifiedObjectName(session.getCatalog().get(), session.getSchema().get(), tableName);
    TableHandle tableHandle = metadata.getTableHandle(session, qualifiedTableName).orElse(null);
    checkArgument(tableHandle != null, "Table '%s' does not exist", qualifiedTableName);
    // look up the columns
    Map<String, ColumnHandle> allColumnHandles = metadata.getColumnHandles(session, tableHandle);
    ImmutableList.Builder<ColumnHandle> columnHandlesBuilder = ImmutableList.builder();
    for (String columnName : columnNames) {
        ColumnHandle columnHandle = allColumnHandles.get(columnName);
        checkArgument(columnHandle != null, "Table '%s' does not have a column '%s'", tableName, columnName);
        columnHandlesBuilder.add(columnHandle);
    }
    List<ColumnHandle> columnHandles = columnHandlesBuilder.build();
    // get the split for this table
    Split split = getLocalQuerySplit(session, tableHandle);
    return new OperatorFactory()
    {
        @Override
        public Operator createOperator(DriverContext driverContext)
        {
            OperatorContext operatorContext = driverContext.addOperatorContext(operatorId, planNodeId, "BenchmarkSource");
            ConnectorPageSource pageSource = localQueryRunner.getPageSourceManager()
                    .createPageSource(session, split, tableHandle, columnHandles, DynamicFilter.EMPTY);
            return new PageSourceOperator(pageSource, operatorContext);
        }

        @Override
        public void noMoreOperators()
        {
        }

        @Override
        public OperatorFactory duplicate()
        {
            throw new UnsupportedOperationException();
        }
    };
}
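A usage sketch, assuming a benchmark subclass in the style of AbstractSimpleOperatorBenchmark; the table and column names assume the TPC-H lineitem schema and are illustrative only:

@Override
protected List<? extends OperatorFactory> createOperatorFactories()
{
    // Scan two columns of lineitem; the factory above builds a fresh
    // ConnectorPageSource for each driver it creates.
    OperatorFactory tableScan = createTableScanOperator(0, new PlanNodeId("test"), "lineitem", "orderkey", "extendedprice");
    return ImmutableList.of(tableScan);
}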
Use of io.trino.spi.connector.ConnectorPageSource in project trino by trinodb.
From class AbstractTestHive, method testPartitionSchemaNonCanonical:
// TODO coercion of non-canonical values should be supported
@Test(enabled = false)
public void testPartitionSchemaNonCanonical()
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle table = getTableHandle(metadata, tablePartitionSchemaChangeNonCanonical);
        ColumnHandle column = metadata.getColumnHandles(session, table).get("t_boolean");
        Constraint constraint = new Constraint(TupleDomain.fromFixedValues(ImmutableMap.of(column, NullableValue.of(BOOLEAN, false))));
        table = applyFilter(metadata, table, constraint);
        HivePartition partition = getOnlyElement(((HiveTableHandle) table).getPartitions().orElseThrow(AssertionError::new));
        assertEquals(getPartitionId(partition), "t_boolean=0");
        ConnectorSplitSource splitSource = getSplits(splitManager, transaction, session, table);
        ConnectorSplit split = getOnlyElement(getAllSplits(splitSource));
        ImmutableList<ColumnHandle> columnHandles = ImmutableList.of(column);
        try (ConnectorPageSource ignored = pageSourceProvider.createPageSource(
                transaction.getTransactionHandle(), session, split, table, columnHandles, DynamicFilter.EMPTY)) {
            fail("expected exception");
        }
        catch (TrinoException e) {
            assertEquals(e.getErrorCode(), HIVE_INVALID_PARTITION_VALUE.toErrorCode());
        }
    }
}
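The same expectation can be written more compactly with AssertJ's assertThatThrownBy (from org.assertj.core.api.Assertions, not Trino-specific); a sketch assuming the identical createPageSource call as in the test above:

assertThatThrownBy(() -> {
    try (ConnectorPageSource ignored = pageSourceProvider.createPageSource(
            transaction.getTransactionHandle(), session, split, table, columnHandles, DynamicFilter.EMPTY)) {
        // the provider is expected to fail before returning a usable source
    }
})
        .isInstanceOfSatisfying(TrinoException.class, e ->
                assertEquals(e.getErrorCode(), HIVE_INVALID_PARTITION_VALUE.toErrorCode()));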