Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb.
From class AbstractTestHive, method testGetPartitionNamesUnpartitioned.
@Test
public void testGetPartitionNamesUnpartitioned() {
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableUnpartitioned);
        tableHandle = applyFilter(metadata, tableHandle, Constraint.alwaysTrue());
        ConnectorTableProperties properties = metadata.getTableProperties(newSession(), tableHandle);
        assertExpectedTableProperties(properties, new ConnectorTableProperties());
        assertExpectedPartitions(tableHandle, tableUnpartitionedPartitions);
    }
}
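The applyFilter helper used above is defined elsewhere in AbstractTestHive. A minimal sketch, assuming it simply delegates to ConnectorMetadata.applyFilter and unwraps the pushed-down handle (newSession() is the same test helper used above):

// Hedged sketch of the applyFilter test helper: push the constraint into the
// connector and return the narrowed table handle. Assumes pushdown succeeds;
// the test fails otherwise.
protected ConnectorTableHandle applyFilter(ConnectorMetadata metadata, ConnectorTableHandle tableHandle, Constraint constraint) {
    return metadata.applyFilter(newSession(), tableHandle, constraint)
            .map(ConstraintApplicationResult::getHandle)
            .orElseThrow(AssertionError::new);
}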
Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb.
From class AbstractTestHive, method testGetPartitionsWithBindings.
@Test
public void testGetPartitionsWithBindings() {
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tablePartitionFormat);
        Constraint constraint = new Constraint(TupleDomain.withColumnDomains(ImmutableMap.of(intColumn, Domain.singleValue(BIGINT, 5L))));
        tableHandle = applyFilter(metadata, tableHandle, constraint);
        ConnectorTableProperties properties = metadata.getTableProperties(newSession(), tableHandle);
        assertExpectedTableProperties(properties, tablePartitionFormatProperties);
        assertExpectedPartitions(tableHandle, tablePartitionFormatPartitions);
    }
}
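TupleDomain composes one Domain per column, so richer predicates are built the same way as the single-value binding above. An illustrative sketch (dsColumn is a hypothetical ColumnHandle; intColumn is the one from the test):

// Illustrative only: combine a range domain with a single-value domain in one
// Constraint. Domain.create takes a ValueSet plus a null-allowed flag.
Constraint constraint = new Constraint(TupleDomain.withColumnDomains(ImmutableMap.of(
        intColumn, Domain.create(ValueSet.ofRanges(Range.greaterThan(BIGINT, 2L)), false),
        dsColumn, Domain.singleValue(VARCHAR, utf8Slice("2012-12-29")))));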
Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb.
From class AbstractTestHive, method doTestBucketedTableEvolution.
private void doTestBucketedTableEvolution(HiveStorageFormat storageFormat, SchemaTableName tableName) throws Exception {
    int rowCount = 100;

    // Produce a table with 8 buckets.
    // The table has 3 partitions written with 3 different bucket counts (4, 8, 16).
    createEmptyTable(
            tableName,
            storageFormat,
            ImmutableList.of(new Column("id", HIVE_LONG, Optional.empty()), new Column("name", HIVE_STRING, Optional.empty())),
            ImmutableList.of(new Column("pk", HIVE_STRING, Optional.empty())),
            Optional.of(new HiveBucketProperty(ImmutableList.of("id"), BUCKETING_V1, 4, ImmutableList.of())));

    // write a 4-bucket partition
    MaterializedResult.Builder bucket4Builder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
    IntStream.range(0, rowCount).forEach(i -> bucket4Builder.row((long) i, String.valueOf(i), "four"));
    insertData(tableName, bucket4Builder.build());

    // write a 16-bucket partition
    alterBucketProperty(tableName, Optional.of(new HiveBucketProperty(ImmutableList.of("id"), BUCKETING_V1, 16, ImmutableList.of())));
    MaterializedResult.Builder bucket16Builder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
    IntStream.range(0, rowCount).forEach(i -> bucket16Builder.row((long) i, String.valueOf(i), "sixteen"));
    insertData(tableName, bucket16Builder.build());

    // write an 8-bucket partition
    alterBucketProperty(tableName, Optional.of(new HiveBucketProperty(ImmutableList.of("id"), BUCKETING_V1, 8, ImmutableList.of())));
    MaterializedResult.Builder bucket8Builder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
    IntStream.range(0, rowCount).forEach(i -> bucket8Builder.row((long) i, String.valueOf(i), "eight"));
    insertData(tableName, bucket8Builder.build());

    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);

        // read entire table
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertBucketTableEvolutionResult(result, columnHandles, ImmutableSet.of(0, 1, 2, 3, 4, 5, 6, 7), rowCount);

        // read single bucket (table/logical bucket)
        result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.fromFixedValues(ImmutableMap.of(bucketColumnHandle(), NullableValue.of(INTEGER, 6L))), OptionalInt.empty(), Optional.empty());
        assertBucketTableEvolutionResult(result, columnHandles, ImmutableSet.of(6), rowCount);

        // read single bucket, without selecting the bucketing column (i.e. the id column)
        columnHandles = metadata.getColumnHandles(session, tableHandle).values().stream()
                .filter(columnHandle -> !"id".equals(((HiveColumnHandle) columnHandle).getName()))
                .collect(toImmutableList());
        result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.fromFixedValues(ImmutableMap.of(bucketColumnHandle(), NullableValue.of(INTEGER, 6L))), OptionalInt.empty(), Optional.empty());
        assertBucketTableEvolutionResult(result, columnHandles, ImmutableSet.of(6), rowCount);
    }
}
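Reading logical bucket 6 returns consistent rows even though the three partitions were written with different bucket counts, because the counts are all powers of two. Assuming Hive bucketing v1 assigns a row to (hash & Integer.MAX_VALUE) % bucketCount, the low bits of the hash decide the bucket, so one logical bucket maps cleanly onto the physical buckets of each partition:

// Arithmetic sketch (assumption: bucketing v1 uses hash mod bucketCount).
int logicalBucket = 6;                                  // requested against the 8-bucket layout
int bucketIn4 = logicalBucket % 4;                      // -> bucket 2 of the 4-bucket partition
int[] bucketsIn16 = {logicalBucket, logicalBucket + 8}; // -> buckets 6 and 14 of the 16-bucket partition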
Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb.
From class AbstractTestHiveFileSystem, method createTable.
private void createTable(SchemaTableName tableName, HiveStorageFormat storageFormat) throws Exception {
    List<ColumnMetadata> columns = ImmutableList.of(new ColumnMetadata("id", BIGINT));
    MaterializedResult data = MaterializedResult.resultBuilder(newSession(), BIGINT)
            .row(1L)
            .row(3L)
            .row(2L)
            .build();

    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        // begin creating the table
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, columns, createTableProperties(storageFormat));
        ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty(), NO_RETRIES);

        // write the records
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, outputHandle);
        sink.appendPage(data.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());

        // commit the table
        metadata.finishCreateTable(session, outputHandle, fragments, ImmutableList.of());
        transaction.commit();

        // Hack to work around the metastore not being configured for S3 or another file system.
        // The metastore tries to validate the location when creating the table, which fails
        // without explicit configuration for the file system. We work around that by using a
        // dummy location when creating the table and update it here to the correct location.
        metastoreClient.updateTableLocation(
                database,
                tableName.getTableName(),
                locationService.getTableWriteInfo(((HiveOutputTableHandle) outputHandle).getLocationHandle(), false).getTargetPath().toString());
    }

    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        // load the new table
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());

        // verify the metadata
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, getTableHandle(metadata, tableName));
        assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), columns);

        // verify the data
        metadata.beginQuery(session);
        ConnectorSplitSource splitSource = getSplits(splitManager, transaction, session, tableHandle);
        ConnectorSplit split = getOnlyElement(getAllSplits(splitSource));
        try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, tableHandle, columnHandles, DynamicFilter.EMPTY)) {
            MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));
            assertEqualsIgnoreOrder(result.getMaterializedRows(), data.getMaterializedRows());
        }
        metadata.cleanupQuery(session);
    }
}
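getSplits and getAllSplits above are also test helpers. A plausible sketch of the draining loop, assuming the batch-oriented ConnectorSplitSource API (the exact getNextBatch signature has varied across SPI versions) and the same getFutureValue utility seen above:

// Hedged sketch: drain every split from a ConnectorSplitSource.
protected static List<ConnectorSplit> getAllSplits(ConnectorSplitSource splitSource) {
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    while (!splitSource.isFinished()) {
        splits.addAll(getFutureValue(splitSource.getNextBatch(1000)).getSplits());
    }
    return splits.build();
}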
Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb.
From class AbstractTestHiveLocal, method assertReadReturnsRowCount.
private void assertReadReturnsRowCount(HiveStorageFormat storageFormat, SchemaTableName tableName, int rowCount) throws Exception {
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEquals(result.getRowCount(), rowCount);
    }
}
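readTable ties the earlier pieces together: push the predicate down, enumerate splits, materialize every page source, and concatenate the rows. A condensed sketch, assuming the applyFilter and getAllSplits helpers sketched above and the same splitManager/pageSourceProvider fields (the storage-format assertion is elided):

// Hedged sketch of the readTable helper used throughout these tests.
protected MaterializedResult readTable(
        Transaction transaction,
        ConnectorTableHandle tableHandle,
        List<ColumnHandle> columnHandles,
        ConnectorSession session,
        TupleDomain<ColumnHandle> tupleDomain,
        OptionalInt expectedSplitCount,
        Optional<HiveStorageFormat> expectedStorageFormat)
        throws Exception {
    tableHandle = applyFilter(transaction.getMetadata(), tableHandle, new Constraint(tupleDomain));
    List<ConnectorSplit> splits = getAllSplits(getSplits(splitManager, transaction, session, tableHandle));
    expectedSplitCount.ifPresent(count -> assertEquals(splits.size(), count));
    ImmutableList.Builder<MaterializedRow> allRows = ImmutableList.builder();
    for (ConnectorSplit split : splits) {
        try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(
                transaction.getTransactionHandle(), session, split, tableHandle, columnHandles, DynamicFilter.EMPTY)) {
            allRows.addAll(materializeSourceDataStream(session, pageSource, getTypes(columnHandles)).getMaterializedRows());
        }
    }
    return new MaterializedResult(allRows.build(), getTypes(columnHandles));
}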