Use of io.trino.plugin.hive.metastore.PrincipalPrivileges in project trino by trinodb.
In class HiveMetadata, method createView.
@Override
public void createView(ConnectorSession session, SchemaTableName viewName, ConnectorViewDefinition definition, boolean replace) {
    if (accessControlMetadata.isUsingSystemSecurity()) {
        definition = definition.withoutOwner();
    }
    Map<String, String> properties = ImmutableMap.<String, String>builder()
            .put(TABLE_COMMENT, PRESTO_VIEW_COMMENT)
            .put(PRESTO_VIEW_FLAG, "true")
            .put(TRINO_CREATED_BY, "Trino Hive connector")
            .put(PRESTO_VERSION_NAME, prestoVersion)
            .put(PRESTO_QUERY_ID_NAME, session.getQueryId())
            .buildOrThrow();
    Column dummyColumn = new Column("dummy", HIVE_STRING, Optional.empty());
    Table.Builder tableBuilder = Table.builder()
            .setDatabaseName(viewName.getSchemaName())
            .setTableName(viewName.getTableName())
            .setOwner(accessControlMetadata.isUsingSystemSecurity() ? Optional.empty() : Optional.ofNullable(session.getUser()))
            .setTableType(TableType.VIRTUAL_VIEW.name())
            .setDataColumns(ImmutableList.of(dummyColumn))
            .setPartitionColumns(ImmutableList.of())
            .setParameters(properties)
            .setViewOriginalText(Optional.of(encodeViewData(definition)))
            .setViewExpandedText(Optional.of(PRESTO_VIEW_EXPANDED_TEXT_MARKER));
    tableBuilder.getStorageBuilder()
            .setStorageFormat(VIEW_STORAGE_FORMAT)
            .setLocation("");
    Table table = tableBuilder.build();
    PrincipalPrivileges principalPrivileges = accessControlMetadata.isUsingSystemSecurity() ? NO_PRIVILEGES : buildInitialPrivilegeSet(session.getUser());
    Optional<Table> existing = metastore.getTable(viewName.getSchemaName(), viewName.getTableName());
    if (existing.isPresent()) {
        if (!replace || !isPrestoView(existing.get())) {
            throw new ViewAlreadyExistsException(viewName);
        }
        metastore.replaceTable(viewName.getSchemaName(), viewName.getTableName(), table, principalPrivileges);
        return;
    }
    try {
        metastore.createTable(session, table, principalPrivileges, Optional.empty(), Optional.empty(), false, new PartitionStatistics(createEmptyStatistics(), ImmutableMap.of()), false);
    } catch (TableAlreadyExistsException e) {
        throw new ViewAlreadyExistsException(e.getTableName());
    }
}
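The privilege set attached to the new view is either NO_PRIVILEGES (when security is system-managed) or an initial owner grant set from buildInitialPrivilegeSet(session.getUser()). As a rough sketch of what an owner grant set looks like when assembled by hand (the real helper lives in HiveMetadata and its exact grants may differ; the privilege list and grant option below are assumptions), one could build a PrincipalPrivileges like this:

import com.google.common.collect.ImmutableMultimap;
import io.trino.plugin.hive.metastore.HivePrincipal;
import io.trino.plugin.hive.metastore.HivePrivilegeInfo;
import io.trino.plugin.hive.metastore.PrincipalPrivileges;

import static io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege.DELETE;
import static io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege.INSERT;
import static io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege.SELECT;
import static io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege.UPDATE;
import static io.trino.spi.security.PrincipalType.USER;

final class OwnerPrivilegesSketch {
    private OwnerPrivilegesSketch() {}

    // Sketch only: grant the owner SELECT, INSERT, UPDATE and DELETE with grant option,
    // keyed by user name; role privileges stay empty. This mirrors the shape the metastore
    // API expects, not necessarily the exact grants HiveMetadata produces.
    static PrincipalPrivileges ownerPrivileges(String owner) {
        HivePrincipal ownerPrincipal = new HivePrincipal(USER, owner);
        ImmutableMultimap.Builder<String, HivePrivilegeInfo> userPrivileges = ImmutableMultimap.builder();
        for (HivePrivilegeInfo.HivePrivilege privilege : new HivePrivilegeInfo.HivePrivilege[] {SELECT, INSERT, UPDATE, DELETE}) {
            userPrivileges.put(owner, new HivePrivilegeInfo(privilege, true, ownerPrincipal, ownerPrincipal));
        }
        return new PrincipalPrivileges(userPrivileges.build(), ImmutableMultimap.of());
    }
}

The key point is that PrincipalPrivileges is essentially a pair of multimaps, user grants and role grants, keyed by principal name, which the metastore attaches to the table or view it creates.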
Use of io.trino.plugin.hive.metastore.PrincipalPrivileges in project trino by trinodb.
In class HiveMetadata, method finishInsert.
@Override
public Optional<ConnectorOutputMetadata> finishInsert(ConnectorSession session, ConnectorInsertTableHandle insertHandle, Collection<Slice> fragments, Collection<ComputedStatistics> computedStatistics) {
    HiveInsertTableHandle handle = (HiveInsertTableHandle) insertHandle;
    List<PartitionUpdate> partitionUpdates = fragments.stream()
            .map(Slice::getBytes)
            .map(partitionUpdateCodec::fromJson)
            .collect(toImmutableList());
    HiveStorageFormat tableStorageFormat = handle.getTableStorageFormat();
    partitionUpdates = PartitionUpdate.mergePartitionUpdates(partitionUpdates);
    Table table = metastore.getTable(handle.getSchemaName(), handle.getTableName()).orElseThrow(() -> new TableNotFoundException(handle.getSchemaTableName()));
    if (!table.getStorage().getStorageFormat().getInputFormat().equals(tableStorageFormat.getInputFormat()) && isRespectTableFormat(session)) {
        throw new TrinoException(HIVE_CONCURRENT_MODIFICATION_DETECTED, "Table format changed during insert");
    }
    if (handle.getBucketProperty().isPresent() && isCreateEmptyBucketFiles(session)) {
        List<PartitionUpdate> partitionUpdatesForMissingBuckets = computePartitionUpdatesForMissingBuckets(session, handle, table, false, partitionUpdates);
        // replace partitionUpdates before creating the empty files so that those files will be cleaned up if we end up rolling back
        partitionUpdates = PartitionUpdate.mergePartitionUpdates(concat(partitionUpdates, partitionUpdatesForMissingBuckets));
        for (PartitionUpdate partitionUpdate : partitionUpdatesForMissingBuckets) {
            Optional<Partition> partition = table.getPartitionColumns().isEmpty() ? Optional.empty() : Optional.of(buildPartitionObject(session, table, partitionUpdate));
            if (handle.isTransactional() && partition.isPresent()) {
                PartitionStatistics statistics = PartitionStatistics.builder().setBasicStatistics(partitionUpdate.getStatistics()).build();
                metastore.addPartition(session, handle.getSchemaName(), handle.getTableName(), partition.get(), partitionUpdate.getWritePath(), Optional.of(partitionUpdate.getFileNames()), statistics, handle.isRetriesEnabled());
            }
            createEmptyFiles(session, partitionUpdate.getWritePath(), table, partition, partitionUpdate.getFileNames());
        }
    }
    List<String> partitionedBy = table.getPartitionColumns().stream()
            .map(Column::getName)
            .collect(toImmutableList());
    Map<String, Type> columnTypes = handle.getInputColumns().stream()
            .collect(toImmutableMap(HiveColumnHandle::getName, column -> column.getHiveType().getType(typeManager)));
    Map<List<String>, ComputedStatistics> partitionComputedStatistics = createComputedStatisticsToPartitionMap(computedStatistics, partitionedBy, columnTypes);
    for (PartitionUpdate partitionUpdate : partitionUpdates) {
        if (partitionUpdate.getName().isEmpty()) {
            // insert into unpartitioned table
            if (!table.getStorage().getStorageFormat().getInputFormat().equals(handle.getPartitionStorageFormat().getInputFormat()) && isRespectTableFormat(session)) {
                throw new TrinoException(HIVE_CONCURRENT_MODIFICATION_DETECTED, "Table format changed during insert");
            }
            PartitionStatistics partitionStatistics = createPartitionStatistics(partitionUpdate.getStatistics(), columnTypes, getColumnStatistics(partitionComputedStatistics, ImmutableList.of()));
            if (partitionUpdate.getUpdateMode() == OVERWRITE) {
                // get privileges from existing table
                PrincipalPrivileges principalPrivileges = fromHivePrivilegeInfos(metastore.listTablePrivileges(handle.getSchemaName(), handle.getTableName(), Optional.empty()));
                // first drop it
                metastore.dropTable(session, handle.getSchemaName(), handle.getTableName());
                // create the table with the new location
                metastore.createTable(session, table, principalPrivileges, Optional.of(partitionUpdate.getWritePath()), Optional.of(partitionUpdate.getFileNames()), false, partitionStatistics, handle.isRetriesEnabled());
            } else if (partitionUpdate.getUpdateMode() == NEW || partitionUpdate.getUpdateMode() == APPEND) {
                // insert into unpartitioned table
                metastore.finishInsertIntoExistingTable(session, handle.getSchemaName(), handle.getTableName(), partitionUpdate.getWritePath(), partitionUpdate.getFileNames(), partitionStatistics, handle.isRetriesEnabled());
            } else {
                throw new IllegalArgumentException("Unsupported update mode: " + partitionUpdate.getUpdateMode());
            }
        } else if (partitionUpdate.getUpdateMode() == APPEND) {
            // insert into existing partition
            List<String> partitionValues = toPartitionValues(partitionUpdate.getName());
            PartitionStatistics partitionStatistics = createPartitionStatistics(partitionUpdate.getStatistics(), columnTypes, getColumnStatistics(partitionComputedStatistics, partitionValues));
            metastore.finishInsertIntoExistingPartition(session, handle.getSchemaName(), handle.getTableName(), partitionValues, partitionUpdate.getWritePath(), partitionUpdate.getFileNames(), partitionStatistics, handle.isRetriesEnabled());
        } else if (partitionUpdate.getUpdateMode() == NEW || partitionUpdate.getUpdateMode() == OVERWRITE) {
            // insert into new partition or overwrite existing partition
            Partition partition = buildPartitionObject(session, table, partitionUpdate);
            if (!partition.getStorage().getStorageFormat().getInputFormat().equals(handle.getPartitionStorageFormat().getInputFormat()) && isRespectTableFormat(session)) {
                throw new TrinoException(HIVE_CONCURRENT_MODIFICATION_DETECTED, "Partition format changed during insert");
            }
            PartitionStatistics partitionStatistics = createPartitionStatistics(partitionUpdate.getStatistics(), columnTypes, getColumnStatistics(partitionComputedStatistics, partition.getValues()));
            if (partitionUpdate.getUpdateMode() == OVERWRITE) {
                if (handle.getLocationHandle().getWriteMode() == DIRECT_TO_TARGET_EXISTING_DIRECTORY) {
                    removeNonCurrentQueryFiles(session, partitionUpdate.getTargetPath());
                    if (handle.isRetriesEnabled()) {
                        HdfsContext hdfsContext = new HdfsContext(session);
                        cleanExtraOutputFiles(hdfsEnvironment, hdfsContext, session.getQueryId(), partitionUpdate.getTargetPath(), ImmutableSet.copyOf(partitionUpdate.getFileNames()));
                    }
                } else {
                    metastore.dropPartition(session, handle.getSchemaName(), handle.getTableName(), partition.getValues(), true);
                    metastore.addPartition(session, handle.getSchemaName(), handle.getTableName(), partition, partitionUpdate.getWritePath(), Optional.of(partitionUpdate.getFileNames()), partitionStatistics, handle.isRetriesEnabled());
                }
            } else {
                metastore.addPartition(session, handle.getSchemaName(), handle.getTableName(), partition, partitionUpdate.getWritePath(), Optional.of(partitionUpdate.getFileNames()), partitionStatistics, handle.isRetriesEnabled());
            }
        } else {
            throw new IllegalArgumentException(format("Unsupported update mode: %s", partitionUpdate.getUpdateMode()));
        }
    }
    if (isFullAcidTable(table.getParameters())) {
        HdfsContext context = new HdfsContext(session);
        for (PartitionUpdate update : partitionUpdates) {
            long writeId = handle.getTransaction().getWriteId();
            Path deltaDirectory = new Path(format("%s/%s/%s", table.getStorage().getLocation(), update.getName(), deltaSubdir(writeId, writeId, 0)));
            createOrcAcidVersionFile(context, deltaDirectory);
        }
    }
    return Optional.of(new HiveWrittenPartitions(partitionUpdates.stream()
            .map(PartitionUpdate::getName)
            .collect(toImmutableList())));
}
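In the OVERWRITE branch for an unpartitioned table, the table is dropped and re-created at the new write location, so its existing grants are first captured with metastore.listTablePrivileges and converted back into a PrincipalPrivileges via fromHivePrivilegeInfos. A minimal sketch of such a conversion, under the assumption that it only needs to group the grants by grantee type (the real factory may do more):

import java.util.Set;

import com.google.common.collect.ImmutableMultimap;
import io.trino.plugin.hive.metastore.HivePrincipal;
import io.trino.plugin.hive.metastore.HivePrivilegeInfo;
import io.trino.plugin.hive.metastore.PrincipalPrivileges;
import io.trino.spi.security.PrincipalType;

final class PrivilegeConversionSketch {
    private PrivilegeConversionSketch() {}

    // Sketch only: split the grants returned by listTablePrivileges into user and role
    // multimaps keyed by grantee name, so they can be re-attached when the table is re-created.
    static PrincipalPrivileges fromPrivilegeInfos(Set<HivePrivilegeInfo> privileges) {
        ImmutableMultimap.Builder<String, HivePrivilegeInfo> userPrivileges = ImmutableMultimap.builder();
        ImmutableMultimap.Builder<String, HivePrivilegeInfo> rolePrivileges = ImmutableMultimap.builder();
        for (HivePrivilegeInfo privilege : privileges) {
            HivePrincipal grantee = privilege.getGrantee();
            if (grantee.getType() == PrincipalType.USER) {
                userPrivileges.put(grantee.getName(), privilege);
            } else {
                rolePrivileges.put(grantee.getName(), privilege);
            }
        }
        return new PrincipalPrivileges(userPrivileges.build(), rolePrivileges.build());
    }
}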
Use of io.trino.plugin.hive.metastore.PrincipalPrivileges in project trino by trinodb.
In class AbstractTestHive, method testIllegalStorageFormatDuringTableScan.
/**
 * During a table scan, an illegal storage format on a specific table should not fail the scan of the whole schema.
 */
@Test
public void testIllegalStorageFormatDuringTableScan() {
    SchemaTableName schemaTableName = temporaryTable("test_illegal_storage_format");
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        List<Column> columns = ImmutableList.of(new Column("pk", HIVE_STRING, Optional.empty()));
        String tableOwner = session.getUser();
        String schemaName = schemaTableName.getSchemaName();
        String tableName = schemaTableName.getTableName();
        LocationHandle locationHandle = locationService.forNewTable(transaction.getMetastore(), session, schemaName, tableName, Optional.empty());
        Path targetPath = locationService.getQueryWriteInfo(locationHandle).getTargetPath();
        // create table whose storage format is null
        Table.Builder tableBuilder = Table.builder()
                .setDatabaseName(schemaName)
                .setTableName(tableName)
                .setOwner(Optional.of(tableOwner))
                .setTableType(TableType.MANAGED_TABLE.name())
                .setParameters(ImmutableMap.of(PRESTO_VERSION_NAME, TEST_SERVER_VERSION, PRESTO_QUERY_ID_NAME, session.getQueryId()))
                .setDataColumns(columns)
                .withStorage(storage -> storage.setLocation(targetPath.toString()).setStorageFormat(StorageFormat.createNullable(null, null, null)).setSerdeParameters(ImmutableMap.of()));
        PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(tableOwner, session.getUser());
        transaction.getMetastore().createTable(session, tableBuilder.build(), principalPrivileges, Optional.empty(), Optional.empty(), true, EMPTY_TABLE_STATISTICS, false);
        transaction.commit();
    }
    // make sure the table can still be retrieved instead of throwing an exception
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        Map<SchemaTableName, List<ColumnMetadata>> allColumns = listTableColumns(metadata, newSession(), new SchemaTablePrefix(schemaTableName.getSchemaName()));
        assertTrue(allColumns.containsKey(schemaTableName));
    } finally {
        dropTable(schemaTableName);
    }
}
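The test obtains its privileges from the suite's testingPrincipalPrivilege(tableOwner, session.getUser()) helper. A plausible shape for that helper, shown purely as an assumption (the actual AbstractTestHive implementation may grant a different set), follows the same owner-grant pattern as the earlier sketch but records the session user as grantor; it reuses the imports from the first sketch:

// Assumed helper shape, not the verbatim AbstractTestHive implementation.
static PrincipalPrivileges testingPrincipalPrivilege(String tableOwner, String grantor) {
    HivePrincipal grantee = new HivePrincipal(USER, tableOwner);
    HivePrincipal grantedBy = new HivePrincipal(USER, grantor);
    return new PrincipalPrivileges(
            ImmutableMultimap.<String, HivePrivilegeInfo>builder()
                    .put(tableOwner, new HivePrivilegeInfo(SELECT, true, grantedBy, grantee))
                    .put(tableOwner, new HivePrivilegeInfo(INSERT, true, grantedBy, grantee))
                    .put(tableOwner, new HivePrivilegeInfo(UPDATE, true, grantedBy, grantee))
                    .put(tableOwner, new HivePrivilegeInfo(DELETE, true, grantedBy, grantee))
                    .build(),
            ImmutableMultimap.of());
}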
Use of io.trino.plugin.hive.metastore.PrincipalPrivileges in project trino by trinodb.
In class AbstractTestHiveLocal, method createExternalTable.
private void createExternalTable(SchemaTableName schemaTableName, HiveStorageFormat hiveStorageFormat, List<Column> columns, List<Column> partitionColumns, Optional<HiveBucketProperty> bucketProperty, Path externalLocation) {
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        String tableOwner = session.getUser();
        String schemaName = schemaTableName.getSchemaName();
        String tableName = schemaTableName.getTableName();
        Table.Builder tableBuilder = Table.builder()
                .setDatabaseName(schemaName)
                .setTableName(tableName)
                .setOwner(Optional.of(tableOwner))
                .setTableType(TableType.EXTERNAL_TABLE.name())
                .setParameters(ImmutableMap.of(PRESTO_VERSION_NAME, TEST_SERVER_VERSION, PRESTO_QUERY_ID_NAME, session.getQueryId()))
                .setDataColumns(columns)
                .setPartitionColumns(partitionColumns);
        tableBuilder.getStorageBuilder()
                .setLocation(externalLocation.toString())
                .setStorageFormat(StorageFormat.create(hiveStorageFormat.getSerde(), hiveStorageFormat.getInputFormat(), hiveStorageFormat.getOutputFormat()))
                .setBucketProperty(bucketProperty)
                .setSerdeParameters(ImmutableMap.of());
        PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(tableOwner, session.getUser());
        transaction.getMetastore().createTable(session, tableBuilder.build(), principalPrivileges, Optional.of(externalLocation), Optional.empty(), true, EMPTY_TABLE_STATISTICS, false);
        transaction.commit();
    }
}
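A hypothetical invocation of this helper; the table name, column, and external location below are invented for illustration and are not part of the test suite:

// Hypothetical usage: register an external TEXTFILE table over a pre-existing directory.
createExternalTable(
        temporaryTable("test_external"),
        HiveStorageFormat.TEXTFILE,
        ImmutableList.of(new Column("id", HIVE_STRING, Optional.empty())),
        ImmutableList.of(),
        Optional.empty(),
        new Path("file:///tmp/test_external"));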
Use of io.trino.plugin.hive.metastore.PrincipalPrivileges in project trino by trinodb.
In class AbstractTestHive, method createEmptyTable.
protected void createEmptyTable(SchemaTableName schemaTableName, HiveStorageFormat hiveStorageFormat, List<Column> columns, List<Column> partitionColumns, Optional<HiveBucketProperty> bucketProperty, boolean isTransactional) throws Exception {
    Path targetPath;
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        String tableOwner = session.getUser();
        String schemaName = schemaTableName.getSchemaName();
        String tableName = schemaTableName.getTableName();
        LocationService locationService = getLocationService();
        LocationHandle locationHandle = locationService.forNewTable(transaction.getMetastore(), session, schemaName, tableName, Optional.empty());
        targetPath = locationService.getQueryWriteInfo(locationHandle).getTargetPath();
        ImmutableMap.Builder<String, String> tableParamBuilder = ImmutableMap.<String, String>builder()
                .put(PRESTO_VERSION_NAME, TEST_SERVER_VERSION)
                .put(PRESTO_QUERY_ID_NAME, session.getQueryId());
        if (isTransactional) {
            tableParamBuilder.put(TRANSACTIONAL, "true");
        }
        Table.Builder tableBuilder = Table.builder()
                .setDatabaseName(schemaName)
                .setTableName(tableName)
                .setOwner(Optional.of(tableOwner))
                .setTableType(TableType.MANAGED_TABLE.name())
                .setParameters(tableParamBuilder.buildOrThrow())
                .setDataColumns(columns)
                .setPartitionColumns(partitionColumns);
        tableBuilder.getStorageBuilder()
                .setLocation(targetPath.toString())
                .setStorageFormat(StorageFormat.create(hiveStorageFormat.getSerde(), hiveStorageFormat.getInputFormat(), hiveStorageFormat.getOutputFormat()))
                .setBucketProperty(bucketProperty)
                .setSerdeParameters(ImmutableMap.of());
        PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(tableOwner, session.getUser());
        transaction.getMetastore().createTable(session, tableBuilder.build(), principalPrivileges, Optional.empty(), Optional.empty(), true, EMPTY_TABLE_STATISTICS, false);
        transaction.commit();
    }
    HdfsContext context = new HdfsContext(newSession());
    List<String> targetDirectoryList = listDirectory(context, targetPath);
    assertEquals(targetDirectoryList, ImmutableList.of());
}
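A hypothetical call to this helper for a transactional table; the names below are invented, and ORC is chosen because full ACID tables must be stored as ORC:

// Hypothetical usage: empty partitioned table with TRANSACTIONAL=true set via the helper.
createEmptyTable(
        temporaryTable("test_empty_acid"),
        HiveStorageFormat.ORC,
        ImmutableList.of(new Column("id", HIVE_STRING, Optional.empty())),
        ImmutableList.of(new Column("ds", HIVE_STRING, Optional.empty())),
        Optional.empty(),
        true);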