Use of io.trino.spi.connector.ConnectorTableLayout in project trino by trinodb.
From class AbstractTestHive, method insertPartitionedBucketedTableLayout. The test creates an empty partitioned, bucketed Hive table and verifies the insert layout and bucket-to-node mapping reported for it:
protected void insertPartitionedBucketedTableLayout(boolean transactional)
        throws Exception
{
    SchemaTableName tableName = temporaryTable("empty_partitioned_table");
    try {
        // Create an empty ORC table partitioned on column2 and bucketed on column1
        Column partitioningColumn = new Column("column2", HIVE_LONG, Optional.empty());
        List<Column> columns = ImmutableList.of(
                new Column("column1", HIVE_STRING, Optional.empty()),
                partitioningColumn);
        HiveBucketProperty bucketProperty = new HiveBucketProperty(ImmutableList.of("column1"), BUCKETING_V1, 4, ImmutableList.of());
        createEmptyTable(tableName, ORC, columns, ImmutableList.of(partitioningColumn), Optional.of(bucketProperty), transactional);

        try (Transaction transaction = newTransaction()) {
            ConnectorMetadata metadata = transaction.getMetadata();
            ConnectorSession session = newSession();
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);

            // The insert layout must expose the bucketing as a partitioning handle
            // and list both the bucketing and the partitioning columns
            Optional<ConnectorTableLayout> insertLayout = metadata.getInsertLayout(session, tableHandle);
            assertTrue(insertLayout.isPresent());
            ConnectorPartitioningHandle partitioningHandle = new HivePartitioningHandle(
                    bucketProperty.getBucketingVersion(),
                    bucketProperty.getBucketCount(),
                    ImmutableList.of(HIVE_STRING),
                    OptionalInt.empty(),
                    true);
            assertEquals(insertLayout.get().getPartitioning(), Optional.of(partitioningHandle));
            assertEquals(insertLayout.get().getPartitionColumns(), ImmutableList.of("column1", "column2"));

            // The node partitioning provider reports 32 buckets with a fixed bucket-to-node mapping
            ConnectorBucketNodeMap connectorBucketNodeMap = nodePartitioningProvider.getBucketNodeMap(transaction.getTransactionHandle(), session, partitioningHandle);
            assertEquals(connectorBucketNodeMap.getBucketCount(), 32);
            assertTrue(connectorBucketNodeMap.hasFixedMapping());
            assertEquals(connectorBucketNodeMap.getFixedMapping().size(), 32);
        }
    }
    finally {
        dropTable(tableName);
    }
}
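For orientation, here is a minimal sketch of how a connector's getInsertLayout could produce such a layout. This is not the Hive connector's actual implementation: MyTableHandle, its accessors, and MyPartitioningHandle are hypothetical, and the sketch assumes the ConnectorTableLayout constructor taking a partitioning handle and the partition column names, which matches the two getters asserted above.

@Override
public Optional<ConnectorTableLayout> getInsertLayout(ConnectorSession session, ConnectorTableHandle tableHandle)
{
    // Hypothetical handle type; a real connector casts to its own handle class
    MyTableHandle handle = (MyTableHandle) tableHandle;
    if (handle.getBucketColumns().isEmpty()) {
        // No bucketing: no preferred write layout
        return Optional.empty();
    }
    // Expose the bucketing as a partitioning handle so the engine can route
    // rows to writers by bucket; report bucket columns before partition
    // columns, as in the assertion on getPartitionColumns() above
    ConnectorPartitioningHandle partitioning = new MyPartitioningHandle(handle.getBucketCount());
    List<String> columns = ImmutableList.<String>builder()
            .addAll(handle.getBucketColumns())
            .addAll(handle.getPartitionColumns())
            .build();
    return Optional.of(new ConnectorTableLayout(partitioning, columns));
}

Returning Optional.empty() tells the engine it may write with any data distribution; returning a layout constrains inserts to the declared partitioning.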
From class AbstractTestHive, method testCreatePartitionedBucketedTableLayout. The test asks ConnectorMetadata for the layout of a new partitioned, bucketed table and checks the partitioning it reports:
@Test
public void testCreatePartitionedBucketedTableLayout()
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        // Request the layout for a new table partitioned on column2 and bucketed on column1
        Optional<ConnectorTableLayout> newTableLayout = metadata.getNewTableLayout(session, new ConnectorTableMetadata(
                new SchemaTableName("schema", "table"),
                ImmutableList.of(
                        new ColumnMetadata("column1", BIGINT),
                        new ColumnMetadata("column2", BIGINT)),
                ImmutableMap.of(
                        PARTITIONED_BY_PROPERTY, ImmutableList.of("column2"),
                        BUCKETED_BY_PROPERTY, ImmutableList.of("column1"),
                        BUCKET_COUNT_PROPERTY, 10,
                        SORTED_BY_PROPERTY, ImmutableList.of())));
        assertTrue(newTableLayout.isPresent());
        ConnectorPartitioningHandle partitioningHandle = new HivePartitioningHandle(BUCKETING_V1, 10, ImmutableList.of(HIVE_LONG), OptionalInt.empty(), true);
        assertEquals(newTableLayout.get().getPartitioning(), Optional.of(partitioningHandle));
        assertEquals(newTableLayout.get().getPartitionColumns(), ImmutableList.of("column1", "column2"));

        // The provider reports 32 buckets with a fixed bucket-to-node mapping for this handle
        ConnectorBucketNodeMap connectorBucketNodeMap = nodePartitioningProvider.getBucketNodeMap(transaction.getTransactionHandle(), session, partitioningHandle);
        assertEquals(connectorBucketNodeMap.getBucketCount(), 32);
        assertTrue(connectorBucketNodeMap.hasFixedMapping());
        assertEquals(connectorBucketNodeMap.getFixedMapping().size(), 32);
    }
}
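The last three assertions exercise ConnectorBucketNodeMap. As a hedged sketch of where such a fixed mapping could come from, a connector's ConnectorNodePartitioningProvider might build one as below. The method signature matches the call made by the test; MyPartitioningHandle and the injected nodeManager field are assumptions, while ConnectorBucketNodeMap.createBucketNodeMap, Node, and NodeManager are real SPI types.

// Sketch only; assumes this provider holds an injected io.trino.spi.NodeManager
@Override
public ConnectorBucketNodeMap getBucketNodeMap(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle)
{
    // Assume the handle carries its bucket count, as HivePartitioningHandle does
    int bucketCount = ((MyPartitioningHandle) partitioningHandle).getBucketCount();

    // Sort nodes so every invocation computes the same mapping
    List<Node> nodes = nodeManager.getWorkerNodes().stream()
            .sorted(Comparator.comparing(Node::getNodeIdentifier))
            .collect(toImmutableList());

    // Assign buckets to nodes round-robin, yielding a fixed bucket-to-node mapping,
    // so hasFixedMapping() is true and getFixedMapping() has bucketCount entries
    ImmutableList.Builder<Node> bucketToNode = ImmutableList.builder();
    for (int bucket = 0; bucket < bucketCount; bucket++) {
        bucketToNode.add(nodes.get(bucket % nodes.size()));
    }
    return ConnectorBucketNodeMap.createBucketNodeMap(bucketToNode.build());
}

A fixed mapping pins each bucket to a specific node; a connector that instead returns createBucketNodeMap(bucketCount) leaves node assignment to the engine, and hasFixedMapping() is then false.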
From class TestRaptorMetadata, method testCreateBucketedTableAsSelect. The test creates a bucketed Raptor table through the new-table layout and verifies the bucketing metadata recorded for it:
@Test
public void testCreateBucketedTableAsSelect()
{
    assertNull(metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS));
    ConnectorTableMetadata ordersTable = getOrdersTable(ImmutableMap.of(
            BUCKET_COUNT_PROPERTY, 32,
            BUCKETED_ON_PROPERTY, ImmutableList.of("orderkey", "custkey")));

    // The new-table layout exposes the bucketing columns and a Raptor partitioning handle
    ConnectorTableLayout layout = metadata.getNewTableLayout(SESSION, ordersTable).get();
    assertEquals(layout.getPartitionColumns(), ImmutableList.of("orderkey", "custkey"));
    assertTrue(layout.getPartitioning().isPresent());
    assertInstanceOf(layout.getPartitioning().get(), RaptorPartitioningHandle.class);
    RaptorPartitioningHandle partitioning = (RaptorPartitioningHandle) layout.getPartitioning().get();
    assertEquals(partitioning.getDistributionId(), 1);

    // Create the table using the layout returned above
    ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(SESSION, ordersTable, Optional.of(layout));
    metadata.finishCreateTable(SESSION, outputHandle, ImmutableList.of(), ImmutableList.of());

    ConnectorTableHandle tableHandle = metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS);
    assertInstanceOf(tableHandle, RaptorTableHandle.class);
    RaptorTableHandle raptorTableHandle = (RaptorTableHandle) tableHandle;
    assertEquals(raptorTableHandle.getTableId(), 1);
    long tableId = raptorTableHandle.getTableId();

    // Verify the bucket columns and distribution recorded in the metadata store
    MetadataDao metadataDao = dbi.onDemand(MetadataDao.class);
    assertTableColumnsEqual(metadataDao.listBucketColumns(tableId), ImmutableList.of(
            new TableColumn(DEFAULT_TEST_ORDERS, "orderkey", BIGINT, 1, 0, OptionalInt.of(0), OptionalInt.empty(), false),
            new TableColumn(DEFAULT_TEST_ORDERS, "custkey", BIGINT, 2, 1, OptionalInt.of(1), OptionalInt.empty(), false)));
    assertEquals(raptorTableHandle.getBucketCount(), OptionalInt.of(32));
    assertEquals(getTableDistributionId(tableId), Long.valueOf(1));

    metadata.dropTable(SESSION, tableHandle);
}
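The assertEquals comparisons on getPartitioning() in these tests rely on the partitioning handle implementing value equality. ConnectorPartitioningHandle itself declares no required methods, so a minimal handle in the spirit of RaptorPartitioningHandle can be a small JSON-serializable value class. The sketch below is hypothetical, not Raptor's actual code.

public final class SimplePartitioningHandle
        implements ConnectorPartitioningHandle
{
    private final long distributionId;

    @JsonCreator
    public SimplePartitioningHandle(@JsonProperty("distributionId") long distributionId)
    {
        this.distributionId = distributionId;
    }

    @JsonProperty
    public long getDistributionId()
    {
        return distributionId;
    }

    // Value equality lets assertEquals(layout.getPartitioning(), ...) compare handles
    @Override
    public boolean equals(Object obj)
    {
        return obj instanceof SimplePartitioningHandle other
                && distributionId == other.distributionId;
    }

    @Override
    public int hashCode()
    {
        return Long.hashCode(distributionId);
    }
}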