Use of io.trino.spi.connector.SortingProperty in project trino by trinodb.
The class AbstractTestHive, method doTestBucketedSortedTableEvolution:
private void doTestBucketedSortedTableEvolution(SchemaTableName tableName) throws Exception {
    int rowCount = 100;
    // create a table and populate it with 3 partitions that have different sort orders but the same bucketing
    createEmptyTable(
            tableName,
            ORC,
            ImmutableList.of(new Column("id", HIVE_LONG, Optional.empty()), new Column("name", HIVE_STRING, Optional.empty())),
            ImmutableList.of(new Column("pk", HIVE_STRING, Optional.empty())),
            Optional.of(new HiveBucketProperty(
                    ImmutableList.of("id"),
                    BUCKETING_V1,
                    4,
                    ImmutableList.of(new SortingColumn("id", ASCENDING), new SortingColumn("name", ASCENDING)))));
    // write a 4-bucket partition sorted by id, name
    MaterializedResult.Builder sortedByIdNameBuilder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
    IntStream.range(0, rowCount).forEach(i -> sortedByIdNameBuilder.row((long) i, String.valueOf(i), "sorted_by_id_name"));
    insertData(tableName, sortedByIdNameBuilder.build());
    // write a 4-bucket partition sorted by name
    alterBucketProperty(tableName, Optional.of(new HiveBucketProperty(
            ImmutableList.of("id"), BUCKETING_V1, 4, ImmutableList.of(new SortingColumn("name", ASCENDING)))));
    MaterializedResult.Builder sortedByNameBuilder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
    IntStream.range(0, rowCount).forEach(i -> sortedByNameBuilder.row((long) i, String.valueOf(i), "sorted_by_name"));
    insertData(tableName, sortedByNameBuilder.build());
    // write a 4-bucket partition sorted by id
    alterBucketProperty(tableName, Optional.of(new HiveBucketProperty(
            ImmutableList.of("id"), BUCKETING_V1, 4, ImmutableList.of(new SortingColumn("id", ASCENDING)))));
    MaterializedResult.Builder sortedByIdBuilder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
    IntStream.range(0, rowCount).forEach(i -> sortedByIdBuilder.row((long) i, String.valueOf(i), "sorted_by_id"));
    insertData(tableName, sortedByIdBuilder.build());
    ConnectorTableHandle tableHandle;
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        metadata.beginQuery(session);
        tableHandle = getTableHandle(metadata, tableName);
        // read the entire table
        List<ColumnHandle> columnHandles = metadata.getColumnHandles(session, tableHandle).values().stream()
                .collect(toImmutableList());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertEquals(result.getRowCount(), 300);
    }
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession(ImmutableMap.of("propagate_table_scan_sorting_properties", true));
        metadata.beginQuery(session);
        Map<String, ColumnHandle> columnHandles = metadata.getColumnHandles(session, tableHandle);
        // verify the local sorting property
        ConnectorTableProperties properties = metadata.getTableProperties(session, tableHandle);
        assertEquals(properties.getLocalProperties(), ImmutableList.of(new SortingProperty<>(columnHandles.get("id"), ASC_NULLS_FIRST)));
        // reading the entire table should fail with an exception, because one partition's sorting is incompatible
        assertThatThrownBy(() -> readTable(transaction, tableHandle, ImmutableList.copyOf(columnHandles.values()), session, TupleDomain.all(), OptionalInt.empty(), Optional.empty()))
                .isInstanceOf(TrinoException.class)
                .hasMessage("Hive table (%s) sorting by [id] is not compatible with partition (pk=sorted_by_name) sorting by [name]." +
                        " This restriction can be avoided by disabling propagate_table_scan_sorting_properties.", tableName);
        // read only the partitions whose sorting is compatible with the table sorting
        MaterializedResult result = readTable(
                transaction,
                tableHandle,
                ImmutableList.copyOf(columnHandles.values()),
                session,
                TupleDomain.withColumnDomains(ImmutableMap.of(
                        columnHandles.get("pk"),
                        Domain.create(ValueSet.of(VARCHAR, utf8Slice("sorted_by_id_name"), utf8Slice("sorted_by_id")), false))),
                OptionalInt.empty(),
                Optional.empty());
        assertEquals(result.getRowCount(), 200);
    }
}
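The failure asserted in the second transaction comes down to a prefix rule: a partition can satisfy the table-level sort guarantee only if the table's sort columns are a leading prefix of the partition's sort columns. A minimal sketch of that rule, using a hypothetical isSortingCompatible helper (the real check lives inside the Hive connector's split loading, not in this test):

private static boolean isSortingCompatible(List<SortingColumn> tableSort, List<SortingColumn> partitionSort) {
    // hypothetical helper, not Trino API: the table's advertised sort columns
    // must be a leading prefix of the partition's, otherwise reading that
    // partition would break the table-level SortingProperty guarantee
    if (partitionSort.size() < tableSort.size()) {
        return false;
    }
    return partitionSort.subList(0, tableSort.size()).equals(tableSort);
}

Under this rule the partitions sorted by [id, name] and [id] are compatible with a table sorted by [id], while the partition sorted by [name] is not, which matches the 200 rows read above.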
Use of io.trino.spi.connector.SortingProperty in project trino by trinodb.
The class PhoenixMetadata, method getTableProperties:
@Override
public ConnectorTableProperties getTableProperties(ConnectorSession session, ConnectorTableHandle table) {
    JdbcTableHandle tableHandle = (JdbcTableHandle) table;
    List<LocalProperty<ColumnHandle>> sortingProperties = tableHandle.getSortOrder()
            .map(properties -> properties.stream()
                    .map(item -> (LocalProperty<ColumnHandle>) new SortingProperty<ColumnHandle>(item.getColumn(), item.getSortOrder()))
                    .collect(toImmutableList()))
            .orElse(ImmutableList.of());
    return new ConnectorTableProperties(TupleDomain.all(), Optional.empty(), Optional.empty(), Optional.empty(), sortingProperties);
}
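On the consuming side, the sorting a connector advertises can be read back from the returned properties. A minimal sketch, assuming metadata, session, and tableHandle are already in scope (as in the AbstractTestHive example above):

ConnectorTableProperties properties = metadata.getTableProperties(session, tableHandle);
for (LocalProperty<ColumnHandle> property : properties.getLocalProperties()) {
    if (property instanceof SortingProperty) {
        // SortingProperty exposes the sorted column and its SortOrder (e.g. ASC_NULLS_FIRST)
        SortingProperty<ColumnHandle> sorting = (SortingProperty<ColumnHandle>) property;
        System.out.println(sorting.getColumn() + " " + sorting.getOrder());
    }
}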
Use of io.trino.spi.connector.SortingProperty in project trino by trinodb.
The class TpchMetadata, method getTableProperties:
@Override
public ConnectorTableProperties getTableProperties(ConnectorSession session, ConnectorTableHandle table) {
    TpchTableHandle tableHandle = (TpchTableHandle) table;
    Optional<ConnectorTablePartitioning> tablePartitioning = Optional.empty();
    Optional<Set<ColumnHandle>> partitioningColumns = Optional.empty();
    List<LocalProperty<ColumnHandle>> localProperties = ImmutableList.of();
    Map<String, ColumnHandle> columns = getColumnHandles(session, tableHandle);
    if (partitioningEnabled && tableHandle.getTableName().equals(TpchTable.ORDERS.getTableName())) {
        ColumnHandle orderKeyColumn = columns.get(columnNaming.getName(OrderColumn.ORDER_KEY));
        tablePartitioning = Optional.of(new ConnectorTablePartitioning(
                new TpchPartitioningHandle(TpchTable.ORDERS.getTableName(), calculateTotalRows(OrderGenerator.SCALE_BASE, tableHandle.getScaleFactor())),
                ImmutableList.of(orderKeyColumn)));
        partitioningColumns = Optional.of(ImmutableSet.of(orderKeyColumn));
        localProperties = ImmutableList.of(new SortingProperty<>(orderKeyColumn, SortOrder.ASC_NULLS_FIRST));
    } else if (partitioningEnabled && tableHandle.getTableName().equals(TpchTable.LINE_ITEM.getTableName())) {
        ColumnHandle orderKeyColumn = columns.get(columnNaming.getName(LineItemColumn.ORDER_KEY));
        tablePartitioning = Optional.of(new ConnectorTablePartitioning(
                new TpchPartitioningHandle(TpchTable.ORDERS.getTableName(), calculateTotalRows(OrderGenerator.SCALE_BASE, tableHandle.getScaleFactor())),
                ImmutableList.of(orderKeyColumn)));
        partitioningColumns = Optional.of(ImmutableSet.of(orderKeyColumn));
        localProperties = ImmutableList.of(
                new SortingProperty<>(orderKeyColumn, SortOrder.ASC_NULLS_FIRST),
                new SortingProperty<>(columns.get(columnNaming.getName(LineItemColumn.LINE_NUMBER)), SortOrder.ASC_NULLS_FIRST));
    }
    TupleDomain<ColumnHandle> constraint = tableHandle.getConstraint();
    if (predicatePushdownEnabled && constraint.isAll()) {
        if (tableHandle.getTableName().equals(TpchTable.ORDERS.getTableName())) {
            constraint = toTupleDomain(ImmutableMap.of(toColumnHandle(OrderColumn.ORDER_STATUS), orderStatusNullableValues));
        } else if (tableHandle.getTableName().equals(TpchTable.PART.getTableName())) {
            constraint = toTupleDomain(ImmutableMap.of(
                    toColumnHandle(PartColumn.CONTAINER), partContainerNullableValues,
                    toColumnHandle(PartColumn.TYPE), partTypeNullableValues));
        }
    }
    return new ConnectorTableProperties(constraint, tablePartitioning, partitioningColumns, Optional.empty(), localProperties);
}
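Both getTableProperties implementations above use the same five-argument ConnectorTableProperties constructor. For orientation, a sketch of the neutral case in which a connector advertises nothing, with each argument labeled in the order used above:

@Override
public ConnectorTableProperties getTableProperties(ConnectorSession session, ConnectorTableHandle table) {
    return new ConnectorTableProperties(
            TupleDomain.all(),   // enforced predicate: none
            Optional.empty(),    // table partitioning
            Optional.empty(),    // stream partitioning columns
            Optional.empty(),    // discrete predicates
            ImmutableList.of()); // local properties, e.g. SortingProperty
}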