Use of io.trino.spi.TrinoException in project trino by trinodb.
From the class SemiTransactionalHiveMetastore, method declareIntentionToWrite.
public synchronized String declareIntentionToWrite(ConnectorSession session, WriteMode writeMode, Path stagingPathRoot, SchemaTableName schemaTableName) {
    setShared();
    if (writeMode == WriteMode.DIRECT_TO_TARGET_EXISTING_DIRECTORY) {
        Map<List<String>, Action<PartitionAndMore>> partitionActionsOfTable = partitionActions.get(schemaTableName);
        if (partitionActionsOfTable != null && !partitionActionsOfTable.isEmpty()) {
            throw new TrinoException(NOT_SUPPORTED, "Cannot insert into a table with a partition that has been modified in the same transaction when Trino is configured to skip temporary directories.");
        }
    }
    HdfsContext hdfsContext = new HdfsContext(session);
    String queryId = session.getQueryId();
    String declarationId = queryId + "_" + declaredIntentionsToWriteCounter;
    declaredIntentionsToWriteCounter++;
    declaredIntentionsToWrite.add(new DeclaredIntentionToWrite(declarationId, writeMode, hdfsContext, queryId, stagingPathRoot, schemaTableName));
    return declarationId;
}
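The check above follows a common guard-clause pattern: validate the requested write mode against pending transaction state and fail fast with a categorized TrinoException. A minimal, hypothetical sketch of that pattern (the class and flag names are illustrative, not part of Trino):

import io.trino.spi.TrinoException;

import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED;

public final class DirectWriteGuard {
    private DirectWriteGuard() {}

    // Rejects a direct-to-target write when partitions of the table were already modified
    // in the open transaction, mirroring the check in declareIntentionToWrite above.
    public static void checkDirectWriteAllowed(boolean tableHasPendingPartitionActions) {
        if (tableHasPendingPartitionActions) {
            throw new TrinoException(NOT_SUPPORTED, "Cannot insert into a table with a partition that has been modified in the same transaction when Trino is configured to skip temporary directories.");
        }
    }
}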
Use of io.trino.spi.TrinoException in project trino by trinodb.
From the class SemiTransactionalHiveMetastore, method dropPartition.
public synchronized void dropPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, boolean deleteData) {
    setShared();
    Map<List<String>, Action<PartitionAndMore>> partitionActionsOfTable = partitionActions.computeIfAbsent(new SchemaTableName(databaseName, tableName), k -> new HashMap<>());
    Action<PartitionAndMore> oldPartitionAction = partitionActionsOfTable.get(partitionValues);
    if (oldPartitionAction == null) {
        HdfsContext hdfsContext = new HdfsContext(session);
        if (deleteData) {
            partitionActionsOfTable.put(partitionValues, new Action<>(ActionType.DROP, null, hdfsContext, session.getQueryId()));
        } else {
            partitionActionsOfTable.put(partitionValues, new Action<>(ActionType.DROP_PRESERVE_DATA, null, hdfsContext, session.getQueryId()));
        }
        return;
    }
    switch (oldPartitionAction.getType()) {
        case DROP:
        case DROP_PRESERVE_DATA:
            throw new PartitionNotFoundException(new SchemaTableName(databaseName, tableName), partitionValues);
        case ADD:
        case ALTER:
        case INSERT_EXISTING:
        case DELETE_ROWS:
        case UPDATE:
            throw new TrinoException(NOT_SUPPORTED, format("dropping a partition added in the same transaction is not supported: %s %s %s", databaseName, tableName, partitionValues));
    }
    throw new IllegalStateException("Unknown action type");
}
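On the caller side, the NOT_SUPPORTED case can be told apart from other failures by its error code. A hedged sketch, assuming a SemiTransactionalHiveMetastore and ConnectorSession are already in scope and using made-up database, table, and partition values:

import com.google.common.collect.ImmutableList;
import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore;
import io.trino.spi.TrinoException;
import io.trino.spi.connector.ConnectorSession;

import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED;

final class DropPartitionExample {
    private DropPartitionExample() {}

    static void dropOrExplain(SemiTransactionalHiveMetastore metastore, ConnectorSession session) {
        try {
            metastore.dropPartition(session, "sales", "orders", ImmutableList.of("2024-01-01"), true);
        }
        catch (TrinoException e) {
            if (NOT_SUPPORTED.toErrorCode().equals(e.getErrorCode())) {
                // The partition was added or altered earlier in this transaction; it cannot be dropped here.
            }
            throw e;
        }
    }
}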
Use of io.trino.spi.TrinoException in project trino by trinodb.
From the class SemiTransactionalHiveMetastore, method truncateUnpartitionedTable.
public synchronized void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName) {
    checkReadable();
    Optional<Table> table = getTable(databaseName, tableName);
    SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName);
    if (table.isEmpty()) {
        throw new TableNotFoundException(schemaTableName);
    }
    if (!table.get().getTableType().equals(MANAGED_TABLE.toString())) {
        throw new TrinoException(NOT_SUPPORTED, "Cannot delete from non-managed Hive table");
    }
    if (!table.get().getPartitionColumns().isEmpty()) {
        throw new IllegalArgumentException("Table is partitioned");
    }
    Path path = new Path(table.get().getStorage().getLocation());
    HdfsContext context = new HdfsContext(session);
    setExclusive((delegate, hdfsEnvironment) -> {
        RecursiveDeleteResult recursiveDeleteResult = recursiveDeleteFiles(hdfsEnvironment, context, path, ImmutableSet.of(""), false);
        if (!recursiveDeleteResult.getNotDeletedEligibleItems().isEmpty()) {
            throw new TrinoException(HIVE_FILESYSTEM_ERROR, format("Error deleting from unpartitioned table %s. These items cannot be deleted: %s", schemaTableName, recursiveDeleteResult.getNotDeletedEligibleItems()));
        }
    });
}
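The commit-time lambda above reports partial deletes as a HIVE_FILESYSTEM_ERROR. A simplified sketch of that reporting step, assuming the list of undeleted paths has already been collected (the class and helper names are illustrative):

import io.trino.spi.TrinoException;
import io.trino.spi.connector.SchemaTableName;

import java.util.List;

import static io.trino.plugin.hive.HiveErrorCode.HIVE_FILESYSTEM_ERROR;
import static java.lang.String.format;

final class DeleteResultChecks {
    private DeleteResultChecks() {}

    // Turns a partial-delete outcome into a categorized failure, as truncateUnpartitionedTable does.
    static void failIfNotFullyDeleted(SchemaTableName table, List<String> notDeletedItems) {
        if (!notDeletedItems.isEmpty()) {
            throw new TrinoException(HIVE_FILESYSTEM_ERROR, format("Error deleting from unpartitioned table %s. These items cannot be deleted: %s", table, notDeletedItems));
        }
    }
}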
Use of io.trino.spi.TrinoException in project trino by trinodb.
From the class SemiTransactionalHiveMetastore, method createTable.
/**
 * {@code currentPath} needs to be supplied if a write path exists for the table.
 */
public synchronized void createTable(ConnectorSession session, Table table, PrincipalPrivileges principalPrivileges, Optional<Path> currentPath, Optional<List<String>> files, boolean ignoreExisting, PartitionStatistics statistics, boolean cleanExtraOutputFilesOnCommit) {
    setShared();
    // When creating a table, it should never have partition actions. This is just a sanity check.
    checkNoPartitionAction(table.getDatabaseName(), table.getTableName());
    Action<TableAndMore> oldTableAction = tableActions.get(table.getSchemaTableName());
    TableAndMore tableAndMore = new TableAndMore(table, Optional.of(principalPrivileges), currentPath, files, ignoreExisting, statistics, statistics, cleanExtraOutputFilesOnCommit);
    if (oldTableAction == null) {
        HdfsContext hdfsContext = new HdfsContext(session);
        tableActions.put(table.getSchemaTableName(), new Action<>(ActionType.ADD, tableAndMore, hdfsContext, session.getQueryId()));
        return;
    }
    switch (oldTableAction.getType()) {
        case DROP:
            if (!oldTableAction.getHdfsContext().getIdentity().getUser().equals(session.getUser())) {
                throw new TrinoException(TRANSACTION_CONFLICT, "Operation on the same table with different user in the same transaction is not supported");
            }
            HdfsContext hdfsContext = new HdfsContext(session);
            tableActions.put(table.getSchemaTableName(), new Action<>(ActionType.ALTER, tableAndMore, hdfsContext, session.getQueryId()));
            return;
        case ADD:
        case ALTER:
        case INSERT_EXISTING:
        case DELETE_ROWS:
        case UPDATE:
            throw new TableAlreadyExistsException(table.getSchemaTableName());
        case DROP_PRESERVE_DATA:
            // TODO
            break;
    }
    throw new IllegalStateException("Unknown action type");
}
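The DROP branch above refuses to mix identities within one transaction. A minimal sketch of that check in isolation, assuming plain user names are compared (the class and helper names are hypothetical):

import io.trino.spi.TrinoException;

import static io.trino.spi.StandardErrorCode.TRANSACTION_CONFLICT;

final class TransactionIdentityChecks {
    private TransactionIdentityChecks() {}

    // Fails when a buffered action on the table was created by a different user than the current session.
    static void checkSameUser(String userOfExistingAction, String currentUser) {
        if (!userOfExistingAction.equals(currentUser)) {
            throw new TrinoException(TRANSACTION_CONFLICT, "Operation on the same table with different user in the same transaction is not supported");
        }
    }
}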
Use of io.trino.spi.TrinoException in project trino by trinodb.
From the class AlluxioHiveMetastore, method getPartitionsByNames.
@Override
public Map<String, Optional<Partition>> getPartitionsByNames(Table table, List<String> partitionNames) {
    if (partitionNames.isEmpty()) {
        return Collections.emptyMap();
    }
    String databaseName = table.getDatabaseName();
    String tableName = table.getTableName();
    try {
        // Get all partitions
        List<PartitionInfo> partitionInfos = ProtoUtils.toPartitionInfoList(client.readTable(databaseName, tableName, Constraint.getDefaultInstance()));
        // Check that table name is correct
        // TODO also check for database name equality
        partitionInfos = partitionInfos.stream().filter(partition -> partition.getTableName().equals(tableName)).collect(Collectors.toList());
        Map<String, Optional<Partition>> result = partitionInfos.stream()
                .filter(partitionName -> partitionNames.stream().anyMatch(partitionName.getPartitionName()::equals))
                .collect(Collectors.toMap(PartitionInfo::getPartitionName, partitionInfo -> Optional.of(ProtoUtils.fromProto(partitionInfo))));
        return Collections.unmodifiableMap(result);
    } catch (AlluxioStatusException e) {
        throw new TrinoException(HIVE_METASTORE_ERROR, e);
    }
}
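The catch block at the end shows the usual translation step: a checked client exception is rethrown as a TrinoException carrying HIVE_METASTORE_ERROR so the engine can classify the failure. A hedged, generic sketch of that wrapping (the MetastoreCall interface and translate helper are assumptions, not Trino or Alluxio APIs):

import alluxio.exception.status.AlluxioStatusException;
import io.trino.spi.TrinoException;

import static io.trino.plugin.hive.HiveErrorCode.HIVE_METASTORE_ERROR;

final class AlluxioFailureTranslation {
    private AlluxioFailureTranslation() {}

    @FunctionalInterface
    interface MetastoreCall<T> {
        T invoke() throws AlluxioStatusException;
    }

    // Runs a metastore client call and maps Alluxio failures onto Trino's error-code model.
    static <T> T translate(MetastoreCall<T> call) {
        try {
            return call.invoke();
        }
        catch (AlluxioStatusException e) {
            throw new TrinoException(HIVE_METASTORE_ERROR, e);
        }
    }
}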