Use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.
Example from class AbstractTestHive, method testIllegalStorageFormatDuringTableScan.
/**
 * During a table scan, an illegal storage format on one specific table should not fail the whole scan
*/
@Test
public void testIllegalStorageFormatDuringTableScan() {
SchemaTableName schemaTableName = temporaryTable("test_illegal_storage_format");
try (Transaction transaction = newTransaction()) {
ConnectorSession session = newSession();
List<Column> columns = ImmutableList.of(new Column("pk", HIVE_STRING, Optional.empty()));
String tableOwner = session.getUser();
String schemaName = schemaTableName.getSchemaName();
String tableName = schemaTableName.getTableName();
LocationHandle locationHandle = locationService.forNewTable(transaction.getMetastore(), session, schemaName, tableName, Optional.empty());
Path targetPath = locationService.getQueryWriteInfo(locationHandle).getTargetPath();
// create a table whose storage format is null
Table.Builder tableBuilder = Table.builder()
        .setDatabaseName(schemaName)
        .setTableName(tableName)
        .setOwner(Optional.of(tableOwner))
        .setTableType(TableType.MANAGED_TABLE.name())
        .setParameters(ImmutableMap.of(PRESTO_VERSION_NAME, TEST_SERVER_VERSION, PRESTO_QUERY_ID_NAME, session.getQueryId()))
        .setDataColumns(columns)
        .withStorage(storage -> storage
                .setLocation(targetPath.toString())
                .setStorageFormat(StorageFormat.createNullable(null, null, null))
                .setSerdeParameters(ImmutableMap.of()));
PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(tableOwner, session.getUser());
transaction.getMetastore().createTable(session, tableBuilder.build(), principalPrivileges, Optional.empty(), Optional.empty(), true, EMPTY_TABLE_STATISTICS, false);
transaction.commit();
}
// make sure the table can still be retrieved instead of throwing an exception
try (Transaction transaction = newTransaction()) {
ConnectorMetadata metadata = transaction.getMetadata();
Map<SchemaTableName, List<ColumnMetadata>> allColumns = listTableColumns(metadata, newSession(), new SchemaTablePrefix(schemaTableName.getSchemaName()));
assertTrue(allColumns.containsKey(schemaTableName));
} finally {
dropTable(schemaTableName);
}
}
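For orientation, the shape of the test reduces to the sketch below. The helpers createTableWithNullStorageFormat, listAllColumns and dropTable are hypothetical stand-ins for the metastore and transaction plumbing in AbstractTestHive, so treat this as an outline of the pattern rather than runnable Trino code.
// Outline only; the helpers are hypothetical stand-ins for AbstractTestHive plumbing.
@Test
public void illegalStorageFormatDoesNotFailListing() {
    SchemaTableName table = temporaryTable("test_illegal_storage_format");
    try {
        // hypothetical helper: creates the table with StorageFormat.createNullable(null, null, null)
        createTableWithNullStorageFormat(table);
        // hypothetical helper: lists columns for every table under the schema prefix
        Map<SchemaTableName, List<ColumnMetadata>> columns = listAllColumns(table.getSchemaName());
        // the broken table is still listed; its missing storage format must not abort the listing
        assertTrue(columns.containsKey(table));
    }
    finally {
        dropTable(table);
    }
}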
Use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.
Example from class AbstractTestHive, method testDisallowQueryingOfIcebergTables.
@Test
public void testDisallowQueryingOfIcebergTables() {
ConnectorSession session = newSession();
SchemaTableName tableName = temporaryTable("trino_iceberg_table");
Table.Builder table = Table.builder()
        .setDatabaseName(tableName.getSchemaName())
        .setTableName(tableName.getTableName())
        .setOwner(Optional.of(session.getUser()))
        .setTableType(MANAGED_TABLE.name())
        .setPartitionColumns(List.of(new Column("a_partition_column", HIVE_INT, Optional.empty())))
        .setDataColumns(List.of(new Column("a_column", HIVE_STRING, Optional.empty())))
        .setParameter(ICEBERG_TABLE_TYPE_NAME, ICEBERG_TABLE_TYPE_VALUE);
table.getStorageBuilder()
        .setStorageFormat(fromHiveStorageFormat(PARQUET))
        .setLocation(getTableDefaultLocation(
                metastoreClient.getDatabase(tableName.getSchemaName()).orElseThrow(),
                new HdfsContext(session.getIdentity()),
                hdfsEnvironment,
                tableName.getSchemaName(),
                tableName.getTableName()).toString());
metastoreClient.createTable(table.build(), NO_PRIVILEGES);
try {
// Verify that the table, which was created as an Iceberg table, can't be queried in Hive
try (Transaction transaction = newTransaction()) {
ConnectorMetadata metadata = transaction.getMetadata();
metadata.beginQuery(session);
assertThatThrownBy(() -> getTableHandle(metadata, tableName)).hasMessage(format("Cannot query Iceberg table '%s'", tableName));
}
// Verify that table handles for the hidden `$properties` and `$partitions` Hive system tables can't be obtained for Iceberg tables
try (Transaction transaction = newTransaction()) {
ConnectorMetadata metadata = transaction.getMetadata();
metadata.beginQuery(session);
SchemaTableName propertiesTableName = new SchemaTableName(tableName.getSchemaName(), format("%s$properties", tableName.getTableName()));
assertThat(metadata.getSystemTable(newSession(), propertiesTableName)).isEmpty();
SchemaTableName partitionsTableName = new SchemaTableName(tableName.getSchemaName(), format("%s$partitions", tableName.getTableName()));
assertThat(metadata.getSystemTable(newSession(), partitionsTableName)).isEmpty();
}
} finally {
// Clean up
metastoreClient.dropTable(tableName.getSchemaName(), tableName.getTableName(), true);
}
}
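The test relies on the Hive connector recognizing Iceberg tables by a metastore table parameter, which is what the ICEBERG_TABLE_TYPE_NAME / ICEBERG_TABLE_TYPE_VALUE constants set on the table builder above. To my understanding this amounts to roughly table_type=ICEBERG, compared case-insensitively; a minimal sketch of such a check, assuming those parameter values, could look like the following. It is illustrative only, not the connector's actual implementation.
import java.util.Map;

final class IcebergTableCheck {
    private IcebergTableCheck() {}

    // Assumed parameter name and value; the real constants live in the Hive connector.
    private static final String TABLE_TYPE_PROPERTY = "table_type";
    private static final String ICEBERG_TABLE_TYPE = "ICEBERG";

    static boolean isIcebergTable(Map<String, String> tableParameters) {
        return ICEBERG_TABLE_TYPE.equalsIgnoreCase(tableParameters.get(TABLE_TYPE_PROPERTY));
    }
}
Once a table is detected as Iceberg, getTableHandle raises the "Cannot query Iceberg table" error asserted above, and the $properties and $partitions system tables are not exposed for it.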
Use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.
Example from class AbstractTestHive, method doTestBucketedTableEvolution.
private void doTestBucketedTableEvolution(HiveStorageFormat storageFormat, SchemaTableName tableName) throws Exception {
int rowCount = 100;
// Produce a table whose table-level bucket count is 8.
// The table has 3 partitions written with 3 different bucket counts (4, 8, 16).
createEmptyTable(
        tableName,
        storageFormat,
        ImmutableList.of(new Column("id", HIVE_LONG, Optional.empty()), new Column("name", HIVE_STRING, Optional.empty())),
        ImmutableList.of(new Column("pk", HIVE_STRING, Optional.empty())),
        Optional.of(new HiveBucketProperty(ImmutableList.of("id"), BUCKETING_V1, 4, ImmutableList.of())));
// write a 4-bucket partition
MaterializedResult.Builder bucket4Builder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
IntStream.range(0, rowCount).forEach(i -> bucket4Builder.row((long) i, String.valueOf(i), "four"));
insertData(tableName, bucket4Builder.build());
// write a 16-bucket partition
alterBucketProperty(tableName, Optional.of(new HiveBucketProperty(ImmutableList.of("id"), BUCKETING_V1, 16, ImmutableList.of())));
MaterializedResult.Builder bucket16Builder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
IntStream.range(0, rowCount).forEach(i -> bucket16Builder.row((long) i, String.valueOf(i), "sixteen"));
insertData(tableName, bucket16Builder.build());
// write an 8-bucket partition
alterBucketProperty(tableName, Optional.of(new HiveBucketProperty(ImmutableList.of("id"), BUCKETING_V1, 8, ImmutableList.of())));
MaterializedResult.Builder bucket8Builder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR, VARCHAR);
IntStream.range(0, rowCount).forEach(i -> bucket8Builder.row((long) i, String.valueOf(i), "eight"));
insertData(tableName, bucket8Builder.build());
try (Transaction transaction = newTransaction()) {
ConnectorMetadata metadata = transaction.getMetadata();
ConnectorSession session = newSession();
metadata.beginQuery(session);
ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
// read entire table
List<ColumnHandle> columnHandles = ImmutableList.<ColumnHandle>builder().addAll(metadata.getColumnHandles(session, tableHandle).values()).build();
MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
assertBucketTableEvolutionResult(result, columnHandles, ImmutableSet.of(0, 1, 2, 3, 4, 5, 6, 7), rowCount);
// read single bucket (table/logical bucket)
result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.fromFixedValues(ImmutableMap.of(bucketColumnHandle(), NullableValue.of(INTEGER, 6L))), OptionalInt.empty(), Optional.empty());
assertBucketTableEvolutionResult(result, columnHandles, ImmutableSet.of(6), rowCount);
// read single bucket, without selecting the bucketing column (i.e. id column)
columnHandles = ImmutableList.<ColumnHandle>builder()
        .addAll(metadata.getColumnHandles(session, tableHandle).values().stream()
                .filter(columnHandle -> !"id".equals(((HiveColumnHandle) columnHandle).getName()))
                .collect(toImmutableList()))
        .build();
result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.fromFixedValues(ImmutableMap.of(bucketColumnHandle(), NullableValue.of(INTEGER, 6L))), OptionalInt.empty(), Optional.empty());
assertBucketTableEvolutionResult(result, columnHandles, ImmutableSet.of(6), rowCount);
}
}
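The "evolution" being exercised is that a single logical bucket of the 8-bucket table maps onto well-defined physical buckets of the partitions written with 4 and 16 buckets. Assuming Hive bucketing v1 computes the bucket of a BIGINT key as (hash & Integer.MAX_VALUE) % bucketCount with hash = (int) (value ^ (value >>> 32)) (an assumption about the hashing details, not something stated in this test), the arithmetic can be checked with a few lines of plain Java:
public final class BucketEvolutionSketch {
    // Assumed Hive bucketing v1 behaviour for a single BIGINT bucketing column.
    static int bucket(long id, int bucketCount) {
        int hash = (int) (id ^ (id >>> 32));
        return (hash & Integer.MAX_VALUE) % bucketCount;
    }

    public static void main(String[] args) {
        // Rows of logical bucket 6 (of 8) always land in bucket 2 of a 4-bucket partition
        // and in bucket 6 or 14 of a 16-bucket partition, because the bucket counts are powers of two.
        for (long id = 0; id < 100; id++) {
            if (bucket(id, 8) == 6) {
                System.out.printf("id=%d -> bucket4=%d bucket8=%d bucket16=%d%n",
                        id, bucket(id, 4), bucket(id, 8), bucket(id, 16));
            }
        }
    }
}
Under that assumption, reading logical bucket 6 above returns the same rows no matter which physical bucket count a partition was written with, which is what assertBucketTableEvolutionResult verifies.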
Use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.
Example from class AbstractTestHiveFileFormats, method createTestFileTrino.
public static FileSplit createTestFileTrino(
        String filePath,
        HiveStorageFormat storageFormat,
        HiveCompressionCodec compressionCodec,
        List<TestColumn> testColumns,
        ConnectorSession session,
        int numRows,
        HiveFileWriterFactory fileWriterFactory) {
// filter out partition keys, which are not written to the file
testColumns = testColumns.stream().filter(column -> !column.isPartitionKey()).collect(toImmutableList());
List<Type> types = testColumns.stream()
        .map(TestColumn::getType)
        .map(HiveType::valueOf)
        .map(type -> type.getType(TESTING_TYPE_MANAGER))
        .collect(toList());
PageBuilder pageBuilder = new PageBuilder(types);
for (int rowNumber = 0; rowNumber < numRows; rowNumber++) {
pageBuilder.declarePosition();
for (int columnNumber = 0; columnNumber < testColumns.size(); columnNumber++) {
serializeObject(types.get(columnNumber), pageBuilder.getBlockBuilder(columnNumber), testColumns.get(columnNumber).getWriteValue(), testColumns.get(columnNumber).getObjectInspector(), false);
}
}
Page page = pageBuilder.build();
JobConf jobConf = new JobConf();
configureCompression(jobConf, compressionCodec);
Properties tableProperties = new Properties();
tableProperties.setProperty("columns", testColumns.stream().map(TestColumn::getName).collect(Collectors.joining(",")));
tableProperties.setProperty("columns.types", testColumns.stream().map(TestColumn::getType).collect(Collectors.joining(",")));
Optional<FileWriter> fileWriter = fileWriterFactory.createFileWriter(
        new Path(filePath),
        testColumns.stream().map(TestColumn::getName).collect(toList()),
        StorageFormat.fromHiveStorageFormat(storageFormat),
        tableProperties,
        jobConf,
        session,
        OptionalInt.empty(),
        NO_ACID_TRANSACTION,
        false,
        WriterKind.INSERT);
FileWriter hiveFileWriter = fileWriter.orElseThrow(() -> new IllegalArgumentException("fileWriterFactory"));
hiveFileWriter.appendRows(page);
hiveFileWriter.commit();
return new FileSplit(new Path(filePath), 0, new File(filePath).length(), new String[0]);
}
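The core of the method is the PageBuilder loop: one declarePosition() per row, then one value written into each column's BlockBuilder. A self-contained sketch of that pattern with two fixed columns, without the Hive writer-factory plumbing and not taken from AbstractTestHiveFileFormats, looks like this:
import io.airlift.slice.Slices;
import io.trino.spi.Page;
import io.trino.spi.PageBuilder;
import io.trino.spi.type.Type;

import java.util.List;

import static io.trino.spi.type.BigintType.BIGINT;
import static io.trino.spi.type.VarcharType.VARCHAR;

final class PageBuilderSketch {
    private PageBuilderSketch() {}

    static Page buildPage(int numRows) {
        List<Type> types = List.of(BIGINT, VARCHAR);
        PageBuilder pageBuilder = new PageBuilder(types);
        for (int row = 0; row < numRows; row++) {
            pageBuilder.declarePosition();
            // write one value per column for this position
            BIGINT.writeLong(pageBuilder.getBlockBuilder(0), row);
            VARCHAR.writeSlice(pageBuilder.getBlockBuilder(1), Slices.utf8Slice("row-" + row));
        }
        return pageBuilder.build();
    }
}
The resulting Page is what createTestFileTrino hands to the FileWriter via appendRows before committing and returning the FileSplit.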
Use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.
Example from class AbstractTestHiveFileSystem, method createTable.
private void createTable(SchemaTableName tableName, HiveStorageFormat storageFormat) throws Exception {
List<ColumnMetadata> columns = ImmutableList.<ColumnMetadata>builder().add(new ColumnMetadata("id", BIGINT)).build();
MaterializedResult data = MaterializedResult.resultBuilder(newSession(), BIGINT).row(1L).row(3L).row(2L).build();
try (Transaction transaction = newTransaction()) {
ConnectorMetadata metadata = transaction.getMetadata();
ConnectorSession session = newSession();
// begin creating the table
ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, columns, createTableProperties(storageFormat));
ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty(), NO_RETRIES);
// write the records
ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, outputHandle);
sink.appendPage(data.toPage());
Collection<Slice> fragments = getFutureValue(sink.finish());
// commit the table
metadata.finishCreateTable(session, outputHandle, fragments, ImmutableList.of());
transaction.commit();
// Hack to work around the metastore not being configured for S3 or another file system.
// The metastore tries to validate the location when creating the table, which fails
// without explicit file system configuration. We work around that by using a dummy
// location when creating the table and updating it here to the correct location.
metastoreClient.updateTableLocation(
        database,
        tableName.getTableName(),
        locationService.getTableWriteInfo(((HiveOutputTableHandle) outputHandle).getLocationHandle(), false).getTargetPath().toString());
}
try (Transaction transaction = newTransaction()) {
ConnectorMetadata metadata = transaction.getMetadata();
ConnectorSession session = newSession();
// load the new table
ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
// verify the metadata
ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, getTableHandle(metadata, tableName));
assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), columns);
// verify the data
metadata.beginQuery(session);
ConnectorSplitSource splitSource = getSplits(splitManager, transaction, session, tableHandle);
ConnectorSplit split = getOnlyElement(getAllSplits(splitSource));
try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, tableHandle, columnHandles, DynamicFilter.EMPTY)) {
MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));
assertEqualsIgnoreOrder(result.getMaterializedRows(), data.getMaterializedRows());
}
metadata.cleanupQuery(session);
}
}
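Condensed, the write-then-read lifecycle this test drives through the SPI is the sequence below. The metadata, pageSinkProvider, transaction and session objects are assumed to come from a harness like AbstractTestHiveFileSystem, so read this as a sketch of the call order rather than a standalone test.
// Write path (first transaction)
ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty(), NO_RETRIES);
ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, outputHandle);
sink.appendPage(data.toPage());                               // stage the rows
Collection<Slice> fragments = getFutureValue(sink.finish());  // flush writers, collect commit fragments
metadata.finishCreateTable(session, outputHandle, fragments, ImmutableList.of());
transaction.commit();

// Read path (second transaction)
ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
ConnectorSplit split = getOnlyElement(getAllSplits(getSplits(splitManager, transaction, session, tableHandle)));
try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(
        transaction.getTransactionHandle(), session, split, tableHandle, columnHandles, DynamicFilter.EMPTY)) {
    // materialize and compare ignoring order, since the connector does not guarantee row order
    assertEqualsIgnoreOrder(
            materializeSourceDataStream(session, pageSource, getTypes(columnHandles)).getMaterializedRows(),
            data.getMaterializedRows());
}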