Use of io.trino.plugin.hive.metastore.PrincipalPrivileges in project trino by trinodb: class AbstractTestHive, method alterBucketProperty.
private void alterBucketProperty(SchemaTableName schemaTableName, Optional<HiveBucketProperty> bucketProperty)
{
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        String tableOwner = session.getUser();
        String schemaName = schemaTableName.getSchemaName();
        String tableName = schemaTableName.getTableName();

        Optional<Table> table = transaction.getMetastore().getTable(schemaName, tableName);
        Table.Builder tableBuilder = Table.builder(table.get());
        tableBuilder.getStorageBuilder().setBucketProperty(bucketProperty);

        PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(tableOwner, session.getUser());
        transaction.getMetastore().replaceTable(schemaName, tableName, tableBuilder.build(), principalPrivileges);
        transaction.commit();
    }
}
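For reference, a caller might exercise this helper as in the minimal sketch below. The schema/table names and the HiveBucketProperty constructor arguments (bucketing columns, bucketing version, bucket count, sort columns) are assumptions for illustration, not taken from the test class.

// hypothetical: rebucket the table by "id" into 8 buckets, with no sort columns
alterBucketProperty(
        new SchemaTableName("test_schema", "test_table"),
        Optional.of(new HiveBucketProperty(ImmutableList.of("id"), BUCKETING_V1, 8, ImmutableList.of())));

// hypothetical: remove the bucketing property entirely
alterBucketProperty(new SchemaTableName("test_schema", "test_table"), Optional.empty());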
Use of io.trino.plugin.hive.metastore.PrincipalPrivileges in project trino by trinodb: class AbstractTestHive, method doTestMismatchSchemaTable.
protected void doTestMismatchSchemaTable(SchemaTableName schemaTableName, HiveStorageFormat storageFormat, List<ColumnMetadata> tableBefore, MaterializedResult dataBefore, List<ColumnMetadata> tableAfter, MaterializedResult dataAfter)
        throws Exception
{
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();

    doCreateEmptyTable(schemaTableName, storageFormat, tableBefore);

    // insert the data
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, schemaTableName);

        ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES);
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle);
        sink.appendPage(dataBefore.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
        transaction.commit();
    }

    // load the table and verify the data
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, schemaTableName);
        List<ColumnHandle> columnHandles = metadata.getColumnHandles(session, tableHandle).values().stream()
                .filter(columnHandle -> !((HiveColumnHandle) columnHandle).isHidden())
                .collect(toList());

        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertEqualsIgnoreOrder(result.getMaterializedRows(), dataBefore.getMaterializedRows());
        transaction.commit();
    }

    // alter the table schema
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(session);
        Table oldTable = transaction.getMetastore().getTable(schemaName, tableName).get();
        List<Column> dataColumns = tableAfter.stream()
                .filter(columnMetadata -> !columnMetadata.getName().equals("ds"))
                .map(columnMetadata -> new Column(columnMetadata.getName(), toHiveType(columnMetadata.getType()), Optional.empty()))
                .collect(toList());
        Table.Builder newTable = Table.builder(oldTable)
                .setDataColumns(dataColumns);
        transaction.getMetastore().replaceTable(schemaName, tableName, newTable.build(), principalPrivileges);
        transaction.commit();
    }

    // load the altered table and verify the data
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, schemaTableName);
        List<ColumnHandle> columnHandles = metadata.getColumnHandles(session, tableHandle).values().stream()
                .filter(columnHandle -> !((HiveColumnHandle) columnHandle).isHidden())
                .collect(toList());

        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertEqualsIgnoreOrder(result.getMaterializedRows(), dataAfter.getMaterializedRows());
        transaction.commit();
    }

    // insertions to the partitions with type mismatches should fail
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, schemaTableName);

        ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES);
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle);
        sink.appendPage(dataAfter.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
        transaction.commit();
        fail("expected exception");
    }
    catch (TrinoException e) {
        // expected
        assertEquals(e.getErrorCode(), HIVE_PARTITION_SCHEMA_MISMATCH.toErrorCode());
    }
}
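A driver for this helper could look like the sketch below. The column names, types, and storage format are invented for illustration; dataBefore and dataAfter stand in for MaterializedResult fixtures whose rows match the two schemas.

// hypothetical driver: the altered schema changes a column's type, so the final
// insert through the new schema should fail with HIVE_PARTITION_SCHEMA_MISMATCH
List<ColumnMetadata> before = ImmutableList.of(
        new ColumnMetadata("id", BIGINT),
        new ColumnMetadata("value", REAL),
        new ColumnMetadata("ds", VARCHAR));
List<ColumnMetadata> after = ImmutableList.of(
        new ColumnMetadata("id", BIGINT),
        new ColumnMetadata("value", DOUBLE),
        new ColumnMetadata("ds", VARCHAR));
doTestMismatchSchemaTable(temporaryTable("mismatch_schema"), HiveStorageFormat.PARQUET, before, dataBefore, after, dataAfter);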
Use of io.trino.plugin.hive.metastore.PrincipalPrivileges in project trino by trinodb: class AbstractTestHive, method testTableCreationIgnoreExisting.
@Test
public void testTableCreationIgnoreExisting()
{
    List<Column> columns = ImmutableList.of(new Column("dummy", HiveType.valueOf("uniontype<smallint,tinyint>"), Optional.empty()));
    SchemaTableName schemaTableName = temporaryTable("create");
    ConnectorSession session = newSession();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    PrincipalPrivileges privileges = testingPrincipalPrivilege(session);
    Path targetPath;
    try {
        try (Transaction transaction = newTransaction()) {
            LocationService locationService = getLocationService();
            LocationHandle locationHandle = locationService.forNewTable(transaction.getMetastore(), session, schemaName, tableName, Optional.empty());
            targetPath = locationService.getQueryWriteInfo(locationHandle).getTargetPath();

            Table table = createSimpleTable(schemaTableName, columns, session, targetPath, "q1");
            transaction.getMetastore().createTable(session, table, privileges, Optional.empty(), Optional.empty(), false, EMPTY_TABLE_STATISTICS, false);
            Optional<Table> tableHandle = transaction.getMetastore().getTable(schemaName, tableName);
            assertTrue(tableHandle.isPresent());
            transaction.commit();
        }

        // try creating it again from another transaction with ignoreExisting=false
        try (Transaction transaction = newTransaction()) {
            Table table = createSimpleTable(schemaTableName, columns, session, targetPath.suffix("_2"), "q2");
            transaction.getMetastore().createTable(session, table, privileges, Optional.empty(), Optional.empty(), false, EMPTY_TABLE_STATISTICS, false);
            transaction.commit();
            fail("Expected exception");
        }
        catch (TrinoException e) {
            assertInstanceOf(e, TableAlreadyExistsException.class);
        }

        // try creating it again from another transaction with ignoreExisting=true
        try (Transaction transaction = newTransaction()) {
            Table table = createSimpleTable(schemaTableName, columns, session, targetPath.suffix("_3"), "q3");
            transaction.getMetastore().createTable(session, table, privileges, Optional.empty(), Optional.empty(), true, EMPTY_TABLE_STATISTICS, false);
            transaction.commit();
        }

        // at this point the table should exist, now try creating the table again with a different table definition
        columns = ImmutableList.of(new Column("new_column", HiveType.valueOf("string"), Optional.empty()));
        try (Transaction transaction = newTransaction()) {
            Table table = createSimpleTable(schemaTableName, columns, session, targetPath.suffix("_4"), "q4");
            transaction.getMetastore().createTable(session, table, privileges, Optional.empty(), Optional.empty(), true, EMPTY_TABLE_STATISTICS, false);
            transaction.commit();
            fail("Expected exception");
        }
        catch (TrinoException e) {
            assertEquals(e.getErrorCode(), TRANSACTION_CONFLICT.toErrorCode());
            assertEquals(e.getMessage(), format("Table already exists with a different schema: '%s'", schemaTableName.getTableName()));
        }
    }
    finally {
        dropTable(schemaTableName);
    }
}
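createSimpleTable is a private helper defined elsewhere in AbstractTestHive. Below is a plausible sketch of what such a helper assembles; the exact fields, storage format, and parameters are assumptions, not the verbatim helper.

// hypothetical sketch of a createSimpleTable-style helper: builds a managed table at the
// given path, tagged with the query id so conflicting definitions can be detected later
private static Table createSimpleTable(SchemaTableName schemaTableName, List<Column> columns, ConnectorSession session, Path targetPath, String queryId)
{
    return Table.builder()
            .setDatabaseName(schemaTableName.getSchemaName())
            .setTableName(schemaTableName.getTableName())
            .setOwner(Optional.of(session.getUser()))
            .setTableType(MANAGED_TABLE.name())
            .setParameters(ImmutableMap.of(PRESTO_QUERY_ID_NAME, queryId))
            .setDataColumns(columns)
            .withStorage(storage -> storage
                    .setLocation(targetPath.toString())
                    .setStorageFormat(fromHiveStorageFormat(HiveStorageFormat.ORC)))
            .build();
}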
Use of io.trino.plugin.hive.metastore.PrincipalPrivileges in project trino by trinodb: class DeltaLakeMetadata, method createTable.
@Override
public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, boolean ignoreExisting)
{
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    Database schema = metastore.getDatabase(schemaName).orElseThrow(() -> new SchemaNotFoundException(schemaName));

    boolean external = true;
    String location = getLocation(tableMetadata.getProperties());
    if (location == null) {
        Optional<String> schemaLocation = getSchemaLocation(schema);
        if (schemaLocation.isEmpty()) {
            throw new TrinoException(NOT_SUPPORTED, "The 'location' property must be specified either for the table or the schema");
        }
        location = new Path(schemaLocation.get(), tableName).toString();
        checkPathContainsNoFiles(session, new Path(location));
        external = false;
    }

    Path targetPath = new Path(location);
    ensurePathExists(session, targetPath);
    Path deltaLogDirectory = getTransactionLogDir(targetPath);
    Optional<Long> checkpointInterval = DeltaLakeTableProperties.getCheckpointInterval(tableMetadata.getProperties());

    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(new HdfsContext(session), targetPath);
        if (!fileSystem.exists(deltaLogDirectory)) {
            validateTableColumns(tableMetadata);
            List<String> partitionColumns = getPartitionedBy(tableMetadata.getProperties());
            List<DeltaLakeColumnHandle> deltaLakeColumns = tableMetadata.getColumns().stream()
                    .map(column -> toColumnHandle(column, partitionColumns))
                    .collect(toImmutableList());
            TransactionLogWriter transactionLogWriter = transactionLogWriterFactory.newWriterWithoutTransactionIsolation(session, targetPath.toString());
            appendInitialTableEntries(transactionLogWriter, deltaLakeColumns, partitionColumns, buildDeltaMetadataConfiguration(checkpointInterval), CREATE_TABLE_OPERATION, session, nodeVersion, nodeId);

            setRollback(() -> deleteRecursivelyIfExists(new HdfsContext(session), hdfsEnvironment, deltaLogDirectory));
            transactionLogWriter.flush();
        }
    }
    catch (IOException e) {
        throw new TrinoException(DELTA_LAKE_BAD_WRITE, "Unable to access file system for: " + location, e);
    }

    Table.Builder tableBuilder = Table.builder()
            .setDatabaseName(schemaName)
            .setTableName(tableName)
            .setOwner(Optional.of(session.getUser()))
            .setTableType(external ? EXTERNAL_TABLE.name() : MANAGED_TABLE.name())
            .setDataColumns(DUMMY_DATA_COLUMNS)
            .setParameters(deltaTableProperties(session, location, external));
    setDeltaStorageFormat(tableBuilder, location, targetPath);
    Table table = tableBuilder.build();

    PrincipalPrivileges principalPrivileges = buildInitialPrivilegeSet(table.getOwner().orElseThrow());
    metastore.createTable(session, table, principalPrivileges);
}
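buildInitialPrivilegeSet comes from the Hive connector and grants the new table's owner the full initial privilege set. A sketch consistent with that behavior is shown below; it is paraphrased, not the verbatim implementation, and assumes static imports of PrincipalType.USER and the HivePrivilegeInfo.HivePrivilege values.

// sketch: grant the owner SELECT, INSERT, UPDATE, and DELETE with grant option,
// with the owner acting as both grantor and grantee; no role privileges
public static PrincipalPrivileges buildInitialPrivilegeSet(String tableOwner)
{
    HivePrincipal owner = new HivePrincipal(USER, tableOwner);
    return new PrincipalPrivileges(
            ImmutableMultimap.<String, HivePrivilegeInfo>builder()
                    .put(tableOwner, new HivePrivilegeInfo(SELECT, true, owner, owner))
                    .put(tableOwner, new HivePrivilegeInfo(INSERT, true, owner, owner))
                    .put(tableOwner, new HivePrivilegeInfo(UPDATE, true, owner, owner))
                    .put(tableOwner, new HivePrivilegeInfo(DELETE, true, owner, owner))
                    .build(),
            ImmutableMultimap.of());
}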
Use of io.trino.plugin.hive.metastore.PrincipalPrivileges in project trino by trinodb: class TrinoHiveCatalog, method createMaterializedView.
@Override
public void createMaterializedView(ConnectorSession session, SchemaTableName schemaViewName, ConnectorMaterializedViewDefinition definition, boolean replace, boolean ignoreExisting)
{
    Optional<io.trino.plugin.hive.metastore.Table> existing = metastore.getTable(schemaViewName.getSchemaName(), schemaViewName.getTableName());

    // It's a create command where the materialized view already exists and 'if not exists' clause is not specified
    if (!replace && existing.isPresent()) {
        if (ignoreExisting) {
            return;
        }
        throw new TrinoException(ALREADY_EXISTS, "Materialized view already exists: " + schemaViewName);
    }

    // Generate a storage table name and create a storage table. The properties in the definition are table properties for the
    // storage table as indicated in the materialized view definition.
    String storageTableName = "st_" + randomUUID().toString().replace("-", "");
    Map<String, Object> storageTableProperties = new HashMap<>(definition.getProperties());
    storageTableProperties.putIfAbsent(FILE_FORMAT_PROPERTY, DEFAULT_FILE_FORMAT_DEFAULT);

    SchemaTableName storageTable = new SchemaTableName(schemaViewName.getSchemaName(), storageTableName);
    List<ColumnMetadata> columns = definition.getColumns().stream()
            .map(column -> new ColumnMetadata(column.getName(), typeManager.getType(column.getType())))
            .collect(toImmutableList());

    ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(storageTable, columns, storageTableProperties, Optional.empty());
    Transaction transaction = IcebergUtil.newCreateTableTransaction(this, tableMetadata, session);
    transaction.newAppend().commit();
    transaction.commitTransaction();

    // Create a view indicating the storage table
    Map<String, String> viewProperties = ImmutableMap.<String, String>builder()
            .put(PRESTO_QUERY_ID_NAME, session.getQueryId())
            .put(STORAGE_TABLE, storageTableName)
            .put(PRESTO_VIEW_FLAG, "true")
            .put(TRINO_CREATED_BY, TRINO_CREATED_BY_VALUE)
            .put(TABLE_COMMENT, ICEBERG_MATERIALIZED_VIEW_COMMENT)
            .buildOrThrow();

    Column dummyColumn = new Column("dummy", HIVE_STRING, Optional.empty());
    io.trino.plugin.hive.metastore.Table.Builder tableBuilder = io.trino.plugin.hive.metastore.Table.builder()
            .setDatabaseName(schemaViewName.getSchemaName())
            .setTableName(schemaViewName.getTableName())
            .setOwner(isUsingSystemSecurity ? Optional.empty() : Optional.of(session.getUser()))
            .setTableType(VIRTUAL_VIEW.name())
            .setDataColumns(ImmutableList.of(dummyColumn))
            .setPartitionColumns(ImmutableList.of())
            .setParameters(viewProperties)
            .withStorage(storage -> storage.setStorageFormat(VIEW_STORAGE_FORMAT))
            .withStorage(storage -> storage.setLocation(""))
            .setViewOriginalText(Optional.of(encodeMaterializedViewData(fromConnectorMaterializedViewDefinition(definition))))
            .setViewExpandedText(Optional.of("/* Presto Materialized View */"));
    io.trino.plugin.hive.metastore.Table table = tableBuilder.build();
    PrincipalPrivileges principalPrivileges = isUsingSystemSecurity ? NO_PRIVILEGES : buildInitialPrivilegeSet(session.getUser());

    if (existing.isPresent() && replace) {
        // drop the current storage table
        String oldStorageTable = existing.get().getParameters().get(STORAGE_TABLE);
        if (oldStorageTable != null) {
            metastore.dropTable(schemaViewName.getSchemaName(), oldStorageTable, true);
        }
        // Replace the existing view definition
        metastore.replaceTable(schemaViewName.getSchemaName(), schemaViewName.getTableName(), table, principalPrivileges);
        return;
    }
    // create the view definition
    metastore.createTable(table, principalPrivileges);
}
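The NO_PRIVILEGES constant used when system security is enabled is simply an empty grant set; a minimal sketch of the equivalent construction, assuming the two-multimap PrincipalPrivileges constructor (user privileges, role privileges):

// hypothetical equivalent of NO_PRIVILEGES: no user grants and no role grants,
// leaving access control entirely to the engine's system security
PrincipalPrivileges noPrivileges = new PrincipalPrivileges(
        ImmutableMultimap.of(),  // user privileges
        ImmutableMultimap.of()); // role privileges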