use of io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore in project trino by trinodb.
The class SyncPartitionMetadataProcedure, method doSyncPartitionMetadata.
private void doSyncPartitionMetadata(ConnectorSession session, ConnectorAccessControl accessControl, String schemaName, String tableName, String mode, boolean caseSensitive)
{
    SyncMode syncMode = toSyncMode(mode);
    HdfsContext hdfsContext = new HdfsContext(session);
    SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore();
    SchemaTableName schemaTableName = new SchemaTableName(schemaName, tableName);

    Table table = metastore.getTable(schemaName, tableName)
            .orElseThrow(() -> new TableNotFoundException(schemaTableName));
    if (table.getPartitionColumns().isEmpty()) {
        throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, "Table is not partitioned: " + schemaTableName);
    }

    if (syncMode == SyncMode.ADD || syncMode == SyncMode.FULL) {
        accessControl.checkCanInsertIntoTable(null, new SchemaTableName(schemaName, tableName));
    }
    if (syncMode == SyncMode.DROP || syncMode == SyncMode.FULL) {
        accessControl.checkCanDeleteFromTable(null, new SchemaTableName(schemaName, tableName));
    }

    Path tableLocation = new Path(table.getStorage().getLocation());

    Set<String> partitionsToAdd;
    Set<String> partitionsToDrop;
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(hdfsContext, tableLocation);
        List<String> partitionsInMetastore = metastore.getPartitionNames(schemaName, tableName)
                .orElseThrow(() -> new TableNotFoundException(schemaTableName));
        List<String> partitionsInFileSystem = listDirectory(fileSystem, fileSystem.getFileStatus(tableLocation), table.getPartitionColumns(), table.getPartitionColumns().size(), caseSensitive).stream()
                .map(fileStatus -> fileStatus.getPath().toUri())
                .map(uri -> tableLocation.toUri().relativize(uri).getPath())
                .collect(toImmutableList());

        // partitions in file system but not in metastore
        partitionsToAdd = difference(partitionsInFileSystem, partitionsInMetastore);
        // partitions in metastore but not in file system
        partitionsToDrop = difference(partitionsInMetastore, partitionsInFileSystem);
    }
    catch (IOException e) {
        throw new TrinoException(HIVE_FILESYSTEM_ERROR, e);
    }

    syncPartitions(partitionsToAdd, partitionsToDrop, syncMode, metastore, session, table);
}
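The helpers difference and toSyncMode are not part of the excerpt. Below is a minimal sketch of what they might look like, assuming Guava's Sets.difference and a SyncMode enum with ADD, DROP, and FULL values; it illustrates the shape of the helpers rather than the verbatim Trino source.

// Hedged sketch, not the actual Trino implementation.
private static Set<String> difference(Collection<String> first, Collection<String> second)
{
    // elements of `first` that are absent from `second`
    return Sets.difference(ImmutableSet.copyOf(first), ImmutableSet.copyOf(second)).immutableCopy();
}

private static SyncMode toSyncMode(String mode)
{
    try {
        return SyncMode.valueOf(mode.toUpperCase(Locale.ENGLISH));
    }
    catch (IllegalArgumentException e) {
        throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, "Invalid partition metadata sync mode: " + mode);
    }
}

This method backs the Hive connector's system.sync_partition_metadata procedure; a statement such as CALL system.sync_partition_metadata('web', 'clicks', 'FULL') (the schema and table names here are only examples) ends up on this code path.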
use of io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore in project trino by trinodb.
The class UnregisterPartitionProcedure, method doUnregisterPartition.
private void doUnregisterPartition(ConnectorSession session, ConnectorAccessControl accessControl, String schemaName, String tableName, List<String> partitionColumn, List<String> partitionValues)
{
    SchemaTableName schemaTableName = new SchemaTableName(schemaName, tableName);
    SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore();

    Table table = metastore.getTable(schemaName, tableName)
            .orElseThrow(() -> new TableNotFoundException(schemaTableName));

    accessControl.checkCanDeleteFromTable(null, schemaTableName);

    checkIsPartitionedTable(table);
    checkPartitionColumns(table, partitionColumn);

    String partitionName = FileUtils.makePartName(partitionColumn, partitionValues);

    Partition partition = metastore.unsafeGetRawHiveMetastoreClosure().getPartition(schemaName, tableName, partitionValues)
            .orElseThrow(() -> new TrinoException(NOT_FOUND, format("Partition '%s' does not exist", partitionName)));

    metastore.dropPartition(session, table.getDatabaseName(), table.getTableName(), partition.getValues(), false);

    metastore.commit();
}
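checkIsPartitionedTable and checkPartitionColumns are shared validations used by both the register and unregister procedures. The sketch below shows roughly what they have to verify; the bodies are assumptions written for readability, not the exact Trino implementation.

// Hedged sketch of the validation helpers referenced above.
private static void checkIsPartitionedTable(Table table)
{
    if (table.getPartitionColumns().isEmpty()) {
        throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, "Table is not partitioned: " + table.getTableName());
    }
}

private static void checkPartitionColumns(Table table, List<String> partitionColumns)
{
    List<String> actualPartitionColumns = table.getPartitionColumns().stream()
            .map(Column::getName)
            .collect(toImmutableList());
    if (!actualPartitionColumns.equals(partitionColumns)) {
        throw new TrinoException(INVALID_PROCEDURE_ARGUMENT,
                "Provided partition column names do not match actual partition column names: " + actualPartitionColumns);
    }
}

FileUtils.makePartName(partitionColumn, partitionValues) builds the Hive-style partition name (for example ds=2016-08-09/country=US); in this procedure it is used only for the error message.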
use of io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore in project trino by trinodb.
The class AbstractTestHive, method partitionTargetPath.
protected String partitionTargetPath(SchemaTableName schemaTableName, String partitionName)
{
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        SemiTransactionalHiveMetastore metastore = transaction.getMetastore();
        LocationService locationService = getLocationService();
        Table table = metastore.getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()).get();
        LocationHandle handle = locationService.forExistingTable(metastore, session, table);
        return locationService.getPartitionWriteInfo(handle, Optional.empty(), partitionName).getTargetPath().toString();
    }
}
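A hypothetical call site for this helper; the schema, table, and partition name below are invented for illustration.

// Hypothetical usage: the names below do not refer to a real test fixture.
String targetPath = partitionTargetPath(new SchemaTableName("tpch", "orders_partitioned"), "ds=2024-01-01");
// For an existing, non-staged table this typically resolves to <table location>/ds=2024-01-01.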
use of io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore in project trino by trinodb.
The class AbstractTestHive, method listAllDataPaths.
public static List<String> listAllDataPaths(SemiTransactionalHiveMetastore metastore, String schemaName, String tableName)
{
    ImmutableList.Builder<String> locations = ImmutableList.builder();
    Table table = metastore.getTable(schemaName, tableName).get();
    if (table.getStorage().getLocation() != null) {
        // For a partitioned table, there should be nothing directly under this directory,
        // but including this location makes the directory-content assertion more thorough,
        // which is desirable.
        locations.add(table.getStorage().getLocation());
    }

    Optional<List<String>> partitionNames = metastore.getPartitionNames(schemaName, tableName);
    if (partitionNames.isPresent()) {
        metastore.getPartitionsByNames(schemaName, tableName, partitionNames.get()).values().stream()
                .map(Optional::get)
                .map(partition -> partition.getStorage().getLocation())
                .filter(location -> !location.startsWith(table.getStorage().getLocation()))
                .forEach(locations::add);
    }

    return locations.build();
}
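A sketch of how a test could use listAllDataPaths, for instance to snapshot a table's data directories before dropping it; the table name and the assertion are illustrative assumptions.

// Hypothetical test fragment; the table below is not a real fixture.
List<String> dataPaths;
try (Transaction transaction = newTransaction()) {
    dataPaths = listAllDataPaths(transaction.getMetastore(), "tpch", "orders_partitioned");
}
// For a partitioned table this contains the table location plus every partition
// location that lives outside the table directory.
assertFalse(dataPaths.isEmpty());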
use of io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore in project trino by trinodb.
The class RegisterPartitionProcedure, method doRegisterPartition.
private void doRegisterPartition(ConnectorSession session, ConnectorAccessControl accessControl, String schemaName, String tableName, List<String> partitionColumn, List<String> partitionValues, String location)
{
    if (!allowRegisterPartition) {
        throw new TrinoException(PERMISSION_DENIED, "register_partition procedure is disabled");
    }

    SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore();
    HdfsContext hdfsContext = new HdfsContext(session);
    SchemaTableName schemaTableName = new SchemaTableName(schemaName, tableName);

    Table table = metastore.getTable(schemaName, tableName)
            .orElseThrow(() -> new TableNotFoundException(schemaTableName));

    accessControl.checkCanInsertIntoTable(null, schemaTableName);

    checkIsPartitionedTable(table);
    checkPartitionColumns(table, partitionColumn);

    Optional<Partition> partition = metastore.unsafeGetRawHiveMetastoreClosure().getPartition(schemaName, tableName, partitionValues);
    if (partition.isPresent()) {
        String partitionName = FileUtils.makePartName(partitionColumn, partitionValues);
        throw new TrinoException(ALREADY_EXISTS, format("Partition [%s] is already registered with location %s", partitionName, partition.get().getStorage().getLocation()));
    }

    Path partitionLocation;
    if (location == null) {
        partitionLocation = new Path(table.getStorage().getLocation(), FileUtils.makePartName(partitionColumn, partitionValues));
    }
    else {
        partitionLocation = new Path(location);
    }

    if (!HiveWriteUtils.pathExists(hdfsContext, hdfsEnvironment, partitionLocation)) {
        throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, "Partition location does not exist: " + partitionLocation);
    }

    metastore.addPartition(
            session,
            table.getDatabaseName(),
            table.getTableName(),
            buildPartitionObject(session, table, partitionValues, partitionLocation),
            partitionLocation,
            Optional.empty(), // no need for failed attempts cleanup
            PartitionStatistics.empty(),
            false);

    metastore.commit();
}
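buildPartitionObject is not shown in the excerpt. Below is a hedged sketch of how a Partition object could be assembled from the metastore model classes used above; the exact fields Trino sets (for example query-id parameters) are omitted, so treat this as an approximation rather than the real method.

// Approximate sketch: copy the table's data columns and storage format, point the
// partition at the supplied location, and attach the requested partition values.
private static Partition buildPartitionObject(ConnectorSession session, Table table, List<String> partitionValues, Path location)
{
    return Partition.builder()
            .setDatabaseName(table.getDatabaseName())
            .setTableName(table.getTableName())
            .setColumns(table.getDataColumns())
            .setValues(partitionValues)
            .withStorage(storage -> storage
                    .setStorageFormat(table.getStorage().getStorageFormat())
                    .setLocation(location.toString()))
            .build();
}

From SQL this is reached through CALL system.register_partition(schema_name, table_name, partition_columns, partition_values[, location]); the allowRegisterPartition flag checked at the top of the method controls whether the procedure is enabled at all.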