Use of io.trino.plugin.hive.metastore.PrincipalPrivileges.NO_PRIVILEGES in project trino by trinodb.
Class AbstractMetastoreTableOperations, method commitNewTable:
@Override
protected final void commitNewTable(TableMetadata metadata) {
    String newMetadataLocation = writeNewMetadata(metadata, version + 1);
    Table.Builder builder = Table.builder()
            .setDatabaseName(database)
            .setTableName(tableName)
            .setOwner(owner)
            .setTableType(TableType.EXTERNAL_TABLE.name())
            .setDataColumns(toHiveColumns(metadata.schema().columns()))
            .withStorage(storage -> storage.setLocation(metadata.location()))
            .withStorage(storage -> storage.setStorageFormat(STORAGE_FORMAT))
            .setParameter("EXTERNAL", "TRUE")
            .setParameter(TABLE_TYPE_PROP, ICEBERG_TABLE_TYPE_VALUE)
            .setParameter(METADATA_LOCATION_PROP, newMetadataLocation);

    String tableComment = metadata.properties().get(TABLE_COMMENT);
    if (tableComment != null) {
        builder.setParameter(TABLE_COMMENT, tableComment);
    }

    Table table = builder.build();
    PrincipalPrivileges privileges = owner.map(MetastoreUtil::buildInitialPrivilegeSet).orElse(NO_PRIVILEGES);
    metastore.createTable(table, privileges);
}
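In Trino, NO_PRIVILEGES is an empty PrincipalPrivileges instance used as a fallback whenever there is no owner to grant initial privileges to. Below is a minimal, self-contained sketch of that sentinel-plus-Optional pattern; the PrincipalPrivileges class shown here, its fields, and the ownedBy factory are simplified stand-ins for illustration, not Trino's real API:

import java.util.Map;
import java.util.Optional;

// Simplified stand-in for io.trino.plugin.hive.metastore.PrincipalPrivileges.
final class PrincipalPrivileges {
    // Shared empty sentinel, analogous to PrincipalPrivileges.NO_PRIVILEGES in Trino.
    static final PrincipalPrivileges NO_PRIVILEGES = new PrincipalPrivileges(Map.of());

    private final Map<String, String> grants;

    PrincipalPrivileges(Map<String, String> grants) {
        this.grants = Map.copyOf(grants); // immutable defensive copy
    }

    static PrincipalPrivileges ownedBy(String owner) {
        // In Trino, MetastoreUtil.buildInitialPrivilegeSet grants the table owner an
        // initial privilege set; this sketch just records the owner.
        return new PrincipalPrivileges(Map.of("owner", owner));
    }
}

class Demo {
    public static void main(String[] args) {
        Optional<String> owner = Optional.empty();
        // Same shape as the code above: map the owner if present, else fall back to the sentinel.
        PrincipalPrivileges privileges = owner
                .map(PrincipalPrivileges::ownedBy)
                .orElse(PrincipalPrivileges.NO_PRIVILEGES);
        System.out.println(privileges == PrincipalPrivileges.NO_PRIVILEGES); // true
    }
}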
Use of io.trino.plugin.hive.metastore.PrincipalPrivileges.NO_PRIVILEGES in project trino by trinodb.
Class FileMetastoreTableOperations, method commitToExistingTable:
@Override
protected void commitToExistingTable(TableMetadata base, TableMetadata metadata) {
    Table currentTable = getTable();

    checkState(currentMetadataLocation != null, "No current metadata location for existing table");
    String metadataLocation = currentTable.getParameters().get(METADATA_LOCATION_PROP);
    if (!currentMetadataLocation.equals(metadataLocation)) {
        throw new CommitFailedException("Metadata location [%s] is not same as table metadata location [%s] for %s",
                currentMetadataLocation, metadataLocation, getSchemaTableName());
    }

    String newMetadataLocation = writeNewMetadata(metadata, version + 1);

    Table table = Table.builder(currentTable)
            .setDataColumns(toHiveColumns(metadata.schema().columns()))
            .withStorage(storage -> storage.setLocation(metadata.location()))
            .setParameter(METADATA_LOCATION_PROP, newMetadataLocation)
            .setParameter(PREVIOUS_METADATA_LOCATION_PROP, currentMetadataLocation)
            .build();

    // TODO: privileges should not be replaced for an alter
    PrincipalPrivileges privileges = table.getOwner().map(MetastoreUtil::buildInitialPrivilegeSet).orElse(NO_PRIVILEGES);
    metastore.replaceTable(database, tableName, table, privileges);
}
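The location check above is what gives the file-based metastore commit its optimistic-concurrency guarantee: the new metadata file is only published if the table's metadata pointer still matches the location this writer started from. Here is a self-contained sketch of that compare-and-swap pattern, with all class and method names invented for illustration:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Minimal model of the optimistic-locking pattern above: a commit succeeds only if
// the metadata pointer in the "metastore" still matches the location the writer
// started from. Names here are illustrative, not Trino's API.
class MetadataPointerStore {
    private final Map<String, String> locations = new ConcurrentHashMap<>();

    void create(String table, String location) {
        locations.put(table, location);
    }

    // Analogous to the check in commitToExistingTable: reject the commit if another
    // writer moved the pointer since we read it.
    synchronized void commit(String table, String expectedLocation, String newLocation) {
        String current = locations.get(table);
        if (!expectedLocation.equals(current)) {
            throw new IllegalStateException(
                    "Metadata location [" + expectedLocation + "] is not same as table metadata location [" + current + "]");
        }
        locations.put(table, newLocation);
    }
}

class CommitDemo {
    public static void main(String[] args) {
        MetadataPointerStore store = new MetadataPointerStore();
        store.create("db.t", "v1.metadata.json");

        store.commit("db.t", "v1.metadata.json", "v2.metadata.json"); // succeeds
        try {
            // A second writer that also read v1 now loses the race.
            store.commit("db.t", "v1.metadata.json", "v2b.metadata.json");
        }
        catch (IllegalStateException e) {
            System.out.println("Concurrent commit rejected: " + e.getMessage());
        }
    }
}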
Use of io.trino.plugin.hive.metastore.PrincipalPrivileges.NO_PRIVILEGES in project trino by trinodb.
Class TrinoHiveCatalog, method createMaterializedView:
@Override
public void createMaterializedView(ConnectorSession session, SchemaTableName schemaViewName, ConnectorMaterializedViewDefinition definition, boolean replace, boolean ignoreExisting) {
    Optional<io.trino.plugin.hive.metastore.Table> existing = metastore.getTable(schemaViewName.getSchemaName(), schemaViewName.getTableName());

    // It's a create command where the materialized view already exists and 'if not exists' clause is not specified
    if (!replace && existing.isPresent()) {
        if (ignoreExisting) {
            return;
        }
        throw new TrinoException(ALREADY_EXISTS, "Materialized view already exists: " + schemaViewName);
    }

    // Generate a storage table name and create a storage table. The properties in the definition are table properties for the
    // storage table as indicated in the materialized view definition.
    String storageTableName = "st_" + randomUUID().toString().replace("-", "");
    Map<String, Object> storageTableProperties = new HashMap<>(definition.getProperties());
    storageTableProperties.putIfAbsent(FILE_FORMAT_PROPERTY, DEFAULT_FILE_FORMAT_DEFAULT);

    SchemaTableName storageTable = new SchemaTableName(schemaViewName.getSchemaName(), storageTableName);
    List<ColumnMetadata> columns = definition.getColumns().stream()
            .map(column -> new ColumnMetadata(column.getName(), typeManager.getType(column.getType())))
            .collect(toImmutableList());

    ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(storageTable, columns, storageTableProperties, Optional.empty());
    Transaction transaction = IcebergUtil.newCreateTableTransaction(this, tableMetadata, session);
    transaction.newAppend().commit();
    transaction.commitTransaction();

    // Create a view indicating the storage table
    Map<String, String> viewProperties = ImmutableMap.<String, String>builder()
            .put(PRESTO_QUERY_ID_NAME, session.getQueryId())
            .put(STORAGE_TABLE, storageTableName)
            .put(PRESTO_VIEW_FLAG, "true")
            .put(TRINO_CREATED_BY, TRINO_CREATED_BY_VALUE)
            .put(TABLE_COMMENT, ICEBERG_MATERIALIZED_VIEW_COMMENT)
            .buildOrThrow();

    Column dummyColumn = new Column("dummy", HIVE_STRING, Optional.empty());

    io.trino.plugin.hive.metastore.Table.Builder tableBuilder = io.trino.plugin.hive.metastore.Table.builder()
            .setDatabaseName(schemaViewName.getSchemaName())
            .setTableName(schemaViewName.getTableName())
            .setOwner(isUsingSystemSecurity ? Optional.empty() : Optional.of(session.getUser()))
            .setTableType(VIRTUAL_VIEW.name())
            .setDataColumns(ImmutableList.of(dummyColumn))
            .setPartitionColumns(ImmutableList.of())
            .setParameters(viewProperties)
            .withStorage(storage -> storage.setStorageFormat(VIEW_STORAGE_FORMAT))
            .withStorage(storage -> storage.setLocation(""))
            .setViewOriginalText(Optional.of(encodeMaterializedViewData(fromConnectorMaterializedViewDefinition(definition))))
            .setViewExpandedText(Optional.of("/* Presto Materialized View */"));
    io.trino.plugin.hive.metastore.Table table = tableBuilder.build();
    PrincipalPrivileges principalPrivileges = isUsingSystemSecurity ? NO_PRIVILEGES : buildInitialPrivilegeSet(session.getUser());

    if (existing.isPresent() && replace) {
        // drop the current storage table
        String oldStorageTable = existing.get().getParameters().get(STORAGE_TABLE);
        if (oldStorageTable != null) {
            metastore.dropTable(schemaViewName.getSchemaName(), oldStorageTable, true);
        }
        // Replace the existing view definition
        metastore.replaceTable(schemaViewName.getSchemaName(), schemaViewName.getTableName(), table, principalPrivileges);
        return;
    }
    // create the view definition
    metastore.createTable(table, principalPrivileges);
}
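A Trino materialized view on Iceberg is thus stored as two metastore objects: a hidden Iceberg storage table that holds the materialized rows, and a Hive virtual view whose parameters record the link to it. The sketch below models just that naming and linking step; the literal map keys are simplified stand-ins for whatever values the STORAGE_TABLE and PRESTO_VIEW_FLAG constants carry in Trino:

import java.util.HashMap;
import java.util.Map;
import static java.util.UUID.randomUUID;

// Sketch of the two-object layout used above: the view's parameters point at a hidden
// storage table that holds the materialized rows.
class MaterializedViewLayout {
    public static void main(String[] args) {
        // Storage table name: "st_" + a random UUID with the dashes stripped, exactly as in the code above.
        String storageTableName = "st_" + randomUUID().toString().replace("-", "");

        Map<String, String> viewProperties = new HashMap<>();
        viewProperties.put("storage_table", storageTableName); // the STORAGE_TABLE link (key is illustrative)
        viewProperties.put("presto_view", "true");             // marks the object as a view (key is illustrative)

        System.out.println("view -> " + viewProperties);
        // On a replace, the old storage table is looked up through this parameter and dropped,
        // which is what the existing.get().getParameters().get(STORAGE_TABLE) branch above does.
    }
}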
Use of io.trino.plugin.hive.metastore.PrincipalPrivileges.NO_PRIVILEGES in project trino by trinodb.
Class HiveMetadata, method finishCreateTable:
@Override
public Optional<ConnectorOutputMetadata> finishCreateTable(ConnectorSession session, ConnectorOutputTableHandle tableHandle, Collection<Slice> fragments, Collection<ComputedStatistics> computedStatistics) {
    HiveOutputTableHandle handle = (HiveOutputTableHandle) tableHandle;

    List<PartitionUpdate> partitionUpdates = fragments.stream()
            .map(Slice::getBytes)
            .map(partitionUpdateCodec::fromJson)
            .collect(toImmutableList());

    WriteInfo writeInfo = locationService.getQueryWriteInfo(handle.getLocationHandle());
    Table table = buildTableObject(
            session.getQueryId(),
            handle.getSchemaName(),
            handle.getTableName(),
            handle.getTableOwner(),
            handle.getInputColumns(),
            handle.getTableStorageFormat(),
            handle.getPartitionedBy(),
            handle.getBucketProperty(),
            handle.getAdditionalTableParameters(),
            Optional.of(writeInfo.getTargetPath()),
            handle.isExternal(),
            prestoVersion,
            accessControlMetadata.isUsingSystemSecurity());
    PrincipalPrivileges principalPrivileges = accessControlMetadata.isUsingSystemSecurity() ? NO_PRIVILEGES : buildInitialPrivilegeSet(handle.getTableOwner());

    partitionUpdates = PartitionUpdate.mergePartitionUpdates(partitionUpdates);

    if (handle.getBucketProperty().isPresent() && isCreateEmptyBucketFiles(session)) {
        List<PartitionUpdate> partitionUpdatesForMissingBuckets = computePartitionUpdatesForMissingBuckets(session, handle, table, true, partitionUpdates);
        // replace partitionUpdates before creating the empty files so that those files will be cleaned up if we end up rolling back
        partitionUpdates = PartitionUpdate.mergePartitionUpdates(concat(partitionUpdates, partitionUpdatesForMissingBuckets));
        for (PartitionUpdate partitionUpdate : partitionUpdatesForMissingBuckets) {
            Optional<Partition> partition = table.getPartitionColumns().isEmpty() ? Optional.empty() : Optional.of(buildPartitionObject(session, table, partitionUpdate));
            createEmptyFiles(session, partitionUpdate.getWritePath(), table, partition, partitionUpdate.getFileNames());
        }
        if (handle.isTransactional()) {
            AcidTransaction transaction = handle.getTransaction();
            List<String> partitionNames = partitionUpdates.stream().map(PartitionUpdate::getName).collect(toImmutableList());
            metastore.addDynamicPartitions(handle.getSchemaName(), handle.getTableName(), partitionNames, transaction.getAcidTransactionId(), transaction.getWriteId(), AcidOperation.CREATE_TABLE);
        }
    }

    Map<String, Type> columnTypes = handle.getInputColumns().stream()
            .collect(toImmutableMap(HiveColumnHandle::getName, column -> column.getHiveType().getType(typeManager)));
    Map<List<String>, ComputedStatistics> partitionComputedStatistics = createComputedStatisticsToPartitionMap(computedStatistics, handle.getPartitionedBy(), columnTypes);

    PartitionStatistics tableStatistics;
    if (table.getPartitionColumns().isEmpty()) {
        HiveBasicStatistics basicStatistics = partitionUpdates.stream()
                .map(PartitionUpdate::getStatistics)
                .reduce((first, second) -> reduce(first, second, ADD))
                .orElse(createZeroStatistics());
        tableStatistics = createPartitionStatistics(basicStatistics, columnTypes, getColumnStatistics(partitionComputedStatistics, ImmutableList.of()));
    }
    else {
        tableStatistics = new PartitionStatistics(createEmptyStatistics(), ImmutableMap.of());
    }

    if (handle.getPartitionedBy().isEmpty()) {
        List<String> fileNames;
        if (partitionUpdates.isEmpty()) {
            // creating empty table via CTAS ... WITH NO DATA
            fileNames = ImmutableList.of();
        }
        else {
            fileNames = getOnlyElement(partitionUpdates).getFileNames();
        }
        metastore.createTable(session, table, principalPrivileges, Optional.of(writeInfo.getWritePath()), Optional.of(fileNames), false, tableStatistics, handle.isRetriesEnabled());
    }
    else {
        metastore.createTable(session, table, principalPrivileges, Optional.of(writeInfo.getWritePath()), Optional.empty(), false, tableStatistics, false);
    }

    if (!handle.getPartitionedBy().isEmpty()) {
        if (isRespectTableFormat(session)) {
            verify(handle.getPartitionStorageFormat() == handle.getTableStorageFormat());
        }
        for (PartitionUpdate update : partitionUpdates) {
            Partition partition = buildPartitionObject(session, table, update);
            PartitionStatistics partitionStatistics = createPartitionStatistics(update.getStatistics(), columnTypes, getColumnStatistics(partitionComputedStatistics, partition.getValues()));
            metastore.addPartition(session, handle.getSchemaName(), handle.getTableName(), partition, update.getWritePath(), Optional.of(update.getFileNames()), partitionStatistics, handle.isRetriesEnabled());
        }
    }

    return Optional.of(new HiveWrittenPartitions(partitionUpdates.stream()
            .map(PartitionUpdate::getName)
            .collect(toImmutableList())));
}
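For the unpartitioned case, the table-level statistics are produced by folding the basic statistics of every PartitionUpdate with an ADD reduction, falling back to zero statistics when a CTAS ... WITH NO DATA produced no updates. Below is a self-contained sketch of that fold, using a simplified record in place of Trino's HiveBasicStatistics:

import java.util.List;

// Simplified stand-in for Trino's HiveBasicStatistics; the add method plays the role
// of Statistics.reduce(first, second, ADD) in the code above.
record BasicStats(long fileCount, long rowCount, long inMemorySize, long onDiskSize) {
    static final BasicStats ZERO = new BasicStats(0, 0, 0, 0);

    BasicStats add(BasicStats other) {
        return new BasicStats(
                fileCount + other.fileCount,
                rowCount + other.rowCount,
                inMemorySize + other.inMemorySize,
                onDiskSize + other.onDiskSize);
    }
}

class StatsDemo {
    public static void main(String[] args) {
        List<BasicStats> updates = List.of(
                new BasicStats(2, 1_000, 4_096, 8_192),
                new BasicStats(1, 500, 2_048, 4_096));

        // Same shape as the stream in finishCreateTable: reduce with ADD, else zero stats.
        BasicStats total = updates.stream()
                .reduce(BasicStats::add)
                .orElse(BasicStats.ZERO); // a CTAS ... WITH NO DATA yields no updates
        System.out.println(total); // BasicStats[fileCount=3, rowCount=1500, inMemorySize=6144, onDiskSize=12288]
    }
}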