Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
The class TestRaptorConnector, method createTable.
private long createTable(String name)
{
    // Create the table in its own transaction
    ConnectorTransactionHandle transaction = beginTransaction();
    connector.getMetadata(SESSION, transaction).createTable(
            SESSION,
            new ConnectorTableMetadata(
                    new SchemaTableName("test", name),
                    ImmutableList.of(new ColumnMetadata("id", BIGINT))),
            false);
    connector.commit(transaction);
    // Look up the table handle in a second transaction to obtain the Raptor table ID
    transaction = beginTransaction();
    ConnectorTableHandle tableHandle = getTableHandle(connector.getMetadata(SESSION, transaction), name);
    connector.commit(transaction);
    return ((RaptorTableHandle) tableHandle).getTableId();
}
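The helpers beginTransaction() and getTableHandle(...) used above are not part of the snippet. A minimal sketch of what they might look like, assuming the test opens transactions on the same connector field and creates all tables in the "test" schema; the isolation level and boolean flags below are assumptions, not taken from the original test:

private ConnectorTransactionHandle beginTransaction()
{
    // Isolation level and flags are assumptions; the real test may use different values
    return connector.beginTransaction(READ_COMMITTED, false, false);
}

private ConnectorTableHandle getTableHandle(ConnectorMetadata metadata, String name)
{
    // Tables in these tests are created in the "test" schema
    return metadata.getTableHandle(SESSION, new SchemaTableName("test", name));
}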
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
The class TestRaptorConnector, method assertSplitShard.
private void assertSplitShard(Type temporalType, String min, String max, int expectedSplits)
        throws Exception
{
    ConnectorSession session = TestingConnectorSession.builder()
            .setPropertyMetadata(new RaptorSessionProperties(new StorageManagerConfig()).getSessionProperties())
            .build();

    // Create a table with "time" as the temporal column
    ConnectorTransactionHandle transaction = beginTransaction();
    connector.getMetadata(SESSION, transaction).createTable(
            SESSION,
            new ConnectorTableMetadata(
                    new SchemaTableName("test", "test"),
                    ImmutableList.of(new ColumnMetadata("id", BIGINT), new ColumnMetadata("time", temporalType)),
                    ImmutableMap.of(TEMPORAL_COLUMN_PROPERTY, "time")),
            false);
    connector.commit(transaction);

    ConnectorTransactionHandle txn1 = beginTransaction();
    ConnectorTableHandle handle1 = getTableHandle(connector.getMetadata(SESSION, txn1), "test");
    ConnectorInsertTableHandle insertTableHandle = connector.getMetadata(SESSION, txn1).beginInsert(session, handle1);
    ConnectorPageSink raptorPageSink = connector.getPageSinkProvider().createPageSink(txn1, session, insertTableHandle);

    // Build one row for each boundary value of the temporal column
    Object timestamp1 = null;
    Object timestamp2 = null;
    if (temporalType.equals(TIMESTAMP_MILLIS)) {
        timestamp1 = SqlTimestamp.newInstance(3, castToShortTimestamp(TIMESTAMP_MILLIS.getPrecision(), min), 0);
        timestamp2 = SqlTimestamp.newInstance(3, castToShortTimestamp(TIMESTAMP_MILLIS.getPrecision(), max), 0);
    }
    else if (temporalType.equals(DATE)) {
        timestamp1 = new SqlDate(parseDate(min));
        timestamp2 = new SqlDate(parseDate(max));
    }
    Page inputPage = MaterializedResult.resultBuilder(session, ImmutableList.of(BIGINT, temporalType))
            .row(1L, timestamp1)
            .row(2L, timestamp2)
            .build().toPage();

    // Writing the page should split the data into the expected number of shards
    raptorPageSink.appendPage(inputPage);
    Collection<Slice> shards = raptorPageSink.finish().get();
    assertEquals(shards.size(), expectedSplits);

    connector.getMetadata(session, txn1).dropTable(session, handle1);
    connector.commit(txn1);
}
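A hypothetical caller of assertSplitShard might look like the following; the test name, boundary values, and expected split counts are purely illustrative and are not taken from the original test:

@Test
public void testShardSplitBoundaries()
        throws Exception
{
    // Illustrative only: rows whose temporal values fall in different ranges
    // are expected to be written to separate shards
    assertSplitShard(DATE, "2019-01-02", "2019-02-02", 2);
    assertSplitShard(TIMESTAMP_MILLIS, "2019-01-02 03:04:05.123", "2019-02-02 03:04:05.123", 2);
}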
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
The class TestTpcdsMetadataStatistics, method testTableStatsDetails.
@Test
public void testTableStatsDetails()
{
    SchemaTableName schemaTableName = new SchemaTableName("sf1", Table.CALL_CENTER.getName());
    ConnectorTableHandle tableHandle = metadata.getTableHandle(session, schemaTableName);
    TableStatistics tableStatistics = metadata.getTableStatistics(session, tableHandle, alwaysTrue());
    estimateAssertion.assertClose(tableStatistics.getRowCount(), Estimate.of(6), "Row count does not match");

    // all columns have stats
    Map<String, ColumnHandle> columnHandles = metadata.getColumnHandles(session, tableHandle);
    for (ColumnHandle column : columnHandles.values()) {
        assertTrue(tableStatistics.getColumnStatistics().containsKey(column));
        assertNotNull(tableStatistics.getColumnStatistics().get(column));
    }

    // identifier
    assertColumnStatistics(
            tableStatistics.getColumnStatistics().get(columnHandles.get(CallCenterColumn.CC_CALL_CENTER_SK.getName())),
            ColumnStatistics.builder().setNullsFraction(Estimate.of(0)).setDistinctValuesCount(Estimate.of(6)).setRange(new DoubleRange(1, 6)).build());

    // varchar
    assertColumnStatistics(
            tableStatistics.getColumnStatistics().get(columnHandles.get(CallCenterColumn.CC_CALL_CENTER_ID.getName())),
            ColumnStatistics.builder().setNullsFraction(Estimate.of(0)).setDistinctValuesCount(Estimate.of(3)).setDataSize(Estimate.of(48.0)).build());

    // char
    assertColumnStatistics(
            tableStatistics.getColumnStatistics().get(columnHandles.get(CallCenterColumn.CC_ZIP.getName())),
            ColumnStatistics.builder().setNullsFraction(Estimate.of(0)).setDistinctValuesCount(Estimate.of(1)).setDataSize(Estimate.of(5.0)).build());

    // decimal
    assertColumnStatistics(
            tableStatistics.getColumnStatistics().get(columnHandles.get(CallCenterColumn.CC_GMT_OFFSET.getName())),
            ColumnStatistics.builder().setNullsFraction(Estimate.of(0)).setDistinctValuesCount(Estimate.of(1)).setRange(new DoubleRange(-5, -5)).build());

    // date
    assertColumnStatistics(
            tableStatistics.getColumnStatistics().get(columnHandles.get(CallCenterColumn.CC_REC_START_DATE.getName())),
            ColumnStatistics.builder().setNullsFraction(Estimate.of(0)).setDistinctValuesCount(Estimate.of(4)).setRange(new DoubleRange(10227L, 11688L)).build());

    // only null values
    assertColumnStatistics(
            tableStatistics.getColumnStatistics().get(columnHandles.get(CallCenterColumn.CC_CLOSED_DATE_SK.getName())),
            ColumnStatistics.builder().setNullsFraction(Estimate.of(1)).setDistinctValuesCount(Estimate.of(0)).build());
}
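The assertColumnStatistics helper is not shown in the snippet. A plausible sketch, assuming it compares the individual estimates with the same estimateAssertion used for the row count above (the exact tolerance handling is an assumption):

private void assertColumnStatistics(ColumnStatistics actual, ColumnStatistics expected)
{
    // Compare each statistic; estimateAssertion tolerates small rounding differences (assumed behavior)
    estimateAssertion.assertClose(actual.getNullsFraction(), expected.getNullsFraction(), "Nulls fraction does not match");
    estimateAssertion.assertClose(actual.getDistinctValuesCount(), expected.getDistinctValuesCount(), "Distinct values count does not match");
    estimateAssertion.assertClose(actual.getDataSize(), expected.getDataSize(), "Data size does not match");
    assertEquals(actual.getRange(), expected.getRange());
}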
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
The class TestTpchMetadata, method testPartTypeAndPartContainerPredicatePushdown.
@Test
public void testPartTypeAndPartContainerPredicatePushdown()
{
    TpchTableHandle tableHandle = tpchMetadata.getTableHandle(session, new SchemaTableName("sf1", PART.getTableName()));
    TupleDomain<ColumnHandle> domain;
    ConstraintApplicationResult<ConnectorTableHandle> result;

    // A "type" value that exists: the filter is fully pushed into the table handle
    domain = fixedValueTupleDomain(tpchMetadata, PartColumn.TYPE, utf8Slice("SMALL BRUSHED COPPER"));
    result = tpchMetadata.applyFilter(session, tableHandle,
            new Constraint(domain, convertToPredicate(domain, PartColumn.TYPE), Set.of(tpchMetadata.toColumnHandle(PartColumn.TYPE)))).get();
    assertTupleDomainEquals(result.getRemainingFilter(), TupleDomain.all(), session);
    assertTupleDomainEquals(
            filterOutColumnFromPredicate(((TpchTableHandle) result.getHandle()).getConstraint(), tpchMetadata.toColumnHandle(PartColumn.CONTAINER)),
            domain, session);

    // A "type" value that does not exist: the pushed-down constraint collapses to none()
    domain = fixedValueTupleDomain(tpchMetadata, PartColumn.TYPE, utf8Slice("UNKNOWN"));
    result = tpchMetadata.applyFilter(session, tableHandle,
            new Constraint(domain, convertToPredicate(domain, PartColumn.TYPE), Set.of(tpchMetadata.toColumnHandle(PartColumn.TYPE)))).get();
    assertTupleDomainEquals(result.getRemainingFilter(), TupleDomain.all(), session);
    assertTupleDomainEquals(((TpchTableHandle) result.getHandle()).getConstraint(), TupleDomain.none(), session);

    // A "container" value that exists
    domain = fixedValueTupleDomain(tpchMetadata, PartColumn.CONTAINER, utf8Slice("SM BAG"));
    result = tpchMetadata.applyFilter(session, tableHandle,
            new Constraint(domain, convertToPredicate(domain, PartColumn.CONTAINER), Set.of(tpchMetadata.toColumnHandle(PartColumn.CONTAINER)))).get();
    assertTupleDomainEquals(result.getRemainingFilter(), TupleDomain.all(), session);
    assertTupleDomainEquals(
            filterOutColumnFromPredicate(((TpchTableHandle) result.getHandle()).getConstraint(), tpchMetadata.toColumnHandle(PartColumn.TYPE)),
            domain, session);

    // A "container" value that does not exist
    domain = fixedValueTupleDomain(tpchMetadata, PartColumn.CONTAINER, utf8Slice("UNKNOWN"));
    result = tpchMetadata.applyFilter(session, tableHandle,
            new Constraint(domain, convertToPredicate(domain, PartColumn.CONTAINER), Set.of(tpchMetadata.toColumnHandle(PartColumn.CONTAINER)))).get();
    assertTupleDomainEquals(result.getRemainingFilter(), TupleDomain.all(), session);
    assertTupleDomainEquals(((TpchTableHandle) result.getHandle()).getConstraint(), TupleDomain.none(), session);

    // Both columns constrained to existing values: the full domain ends up in the handle
    domain = fixedValueTupleDomain(tpchMetadata, PartColumn.TYPE, utf8Slice("SMALL BRUSHED COPPER"), PartColumn.CONTAINER, utf8Slice("SM BAG"));
    result = tpchMetadata.applyFilter(session, tableHandle,
            new Constraint(domain, convertToPredicate(domain, PartColumn.CONTAINER), Set.of(tpchMetadata.toColumnHandle(PartColumn.CONTAINER)))).get();
    assertTupleDomainEquals(result.getRemainingFilter(), TupleDomain.all(), session);
    assertTupleDomainEquals(((TpchTableHandle) result.getHandle()).getConstraint(), domain, session);

    // Both columns constrained to unknown values: the pushed-down constraint is none()
    domain = fixedValueTupleDomain(tpchMetadata, PartColumn.TYPE, utf8Slice("UNKNOWN"), PartColumn.CONTAINER, utf8Slice("UNKNOWN"));
    result = tpchMetadata.applyFilter(session, tableHandle,
            new Constraint(domain, convertToPredicate(domain, PartColumn.TYPE, PartColumn.CONTAINER), Set.of(tpchMetadata.toColumnHandle(PartColumn.TYPE), tpchMetadata.toColumnHandle(PartColumn.CONTAINER)))).get();
    assertTupleDomainEquals(result.getRemainingFilter(), TupleDomain.all(), session);
    assertTupleDomainEquals(((TpchTableHandle) result.getHandle()).getConstraint(), TupleDomain.none(), session);
}
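Two helpers used above, fixedValueTupleDomain and filterOutColumnFromPredicate, are defined elsewhere in TestTpchMetadata. The first presumably builds a TupleDomain binding each given column to a single fixed value; the second strips one column's domain from a predicate before comparison. A rough sketch of the latter, using only SPI calls (the implementation in the actual test may differ):

private static TupleDomain<ColumnHandle> filterOutColumnFromPredicate(TupleDomain<ColumnHandle> predicate, ColumnHandle column)
{
    if (predicate.isNone()) {
        return predicate;
    }
    // Rebuild the predicate without the given column's domain
    Map<ColumnHandle, Domain> domains = new HashMap<>(predicate.getDomains().orElseThrow());
    domains.remove(column);
    return TupleDomain.withColumnDomains(domains);
}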
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
The class IcebergMetadata, method getTableProperties.
@Override
public ConnectorTableProperties getTableProperties(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    IcebergTableHandle table = (IcebergTableHandle) tableHandle;

    if (table.getSnapshotId().isEmpty()) {
        // A table without a snapshot has no data, so report TupleDomain.none() as the predicate
        return new ConnectorTableProperties(TupleDomain.none(), Optional.empty(), Optional.empty(), Optional.empty(), ImmutableList.of());
    }

    Table icebergTable = catalog.loadTable(session, table.getSchemaTableName());

    // Extract identity partition fields that are present in all partition specs, for creating the discrete predicates.
    Set<Integer> partitionSourceIds = identityPartitionColumnsInAllSpecs(icebergTable);

    TupleDomain<IcebergColumnHandle> enforcedPredicate = table.getEnforcedPredicate();

    DiscretePredicates discretePredicates = null;
    if (!partitionSourceIds.isEmpty()) {
        // Extract identity partition columns
        Map<Integer, IcebergColumnHandle> columns = getColumns(icebergTable.schema(), typeManager).stream()
                .filter(column -> partitionSourceIds.contains(column.getId()))
                .collect(toImmutableMap(IcebergColumnHandle::getId, Function.identity()));

        Supplier<List<FileScanTask>> lazyFiles = Suppliers.memoize(() -> {
            TableScan tableScan = icebergTable.newScan()
                    .useSnapshot(table.getSnapshotId().get())
                    .filter(toIcebergExpression(enforcedPredicate))
                    .includeColumnStats();
            try (CloseableIterable<FileScanTask> iterator = tableScan.planFiles()) {
                return ImmutableList.copyOf(iterator);
            }
            catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        });

        Iterable<FileScanTask> files = () -> lazyFiles.get().iterator();

        Iterable<TupleDomain<ColumnHandle>> discreteTupleDomain = Iterables.transform(files, fileScan -> {
            // Extract partition values in the data file
            Map<Integer, Optional<String>> partitionColumnValueStrings = getPartitionKeys(fileScan);
            Map<ColumnHandle, NullableValue> partitionValues = partitionSourceIds.stream()
                    .filter(partitionColumnValueStrings::containsKey)
                    .collect(toImmutableMap(columns::get, columnId -> {
                        IcebergColumnHandle column = columns.get(columnId);
                        Object prestoValue = deserializePartitionValue(
                                column.getType(),
                                partitionColumnValueStrings.get(columnId).orElse(null),
                                column.getName());
                        return NullableValue.of(column.getType(), prestoValue);
                    }));
            return TupleDomain.fromFixedValues(partitionValues);
        });

        discretePredicates = new DiscretePredicates(
                columns.values().stream().map(ColumnHandle.class::cast).collect(toImmutableList()),
                discreteTupleDomain);
    }

    return new ConnectorTableProperties(
            // ... over all tableScan.planFiles() and caching partition values in table handle
            enforcedPredicate.transformKeys(ColumnHandle.class::cast),
            // TODO: implement table partitioning
            Optional.empty(),
            Optional.empty(),
            Optional.ofNullable(discretePredicates),
            ImmutableList.of());
}
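The identityPartitionColumnsInAllSpecs helper referenced above is defined elsewhere in IcebergMetadata. A sketch of the idea, assuming it keeps only source field IDs that appear as identity-transform partition fields in every partition spec the table has ever had (the actual implementation may differ in details):

private static Set<Integer> identityPartitionColumnsInAllSpecs(Table table)
{
    // Identity partition fields of the current (default) spec
    Set<Integer> sourceIds = new HashSet<>();
    for (PartitionField field : table.spec().fields()) {
        if (field.transform().isIdentity()) {
            sourceIds.add(field.sourceId());
        }
    }
    // Keep only those that are also identity fields in every other spec
    for (PartitionSpec spec : table.specs().values()) {
        Set<Integer> identityFieldsInSpec = spec.fields().stream()
                .filter(field -> field.transform().isIdentity())
                .map(PartitionField::sourceId)
                .collect(Collectors.toSet());
        sourceIds.retainAll(identityFieldsInSpec);
    }
    return sourceIds;
}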