Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
The class HivePartitionManager, method getPartitions.
public HivePartitionResult getPartitions(ConnectorTableHandle tableHandle, List<List<String>> partitionValuesList)
{
    HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle;
    SchemaTableName tableName = hiveTableHandle.getSchemaTableName();
    List<HiveColumnHandle> partitionColumns = hiveTableHandle.getPartitionColumns();
    Optional<HiveBucketHandle> bucketHandle = hiveTableHandle.getBucketHandle();
    List<String> partitionColumnNames = partitionColumns.stream().map(HiveColumnHandle::getName).collect(toImmutableList());
    List<Type> partitionColumnTypes = partitionColumns.stream().map(HiveColumnHandle::getType).collect(toImmutableList());
    List<HivePartition> partitionList = partitionValuesList.stream()
            .map(partitionValues -> toPartitionName(partitionColumnNames, partitionValues))
            .map(partitionName -> parseValuesAndFilterPartition(tableName, partitionName, partitionColumns, partitionColumnTypes, TupleDomain.all(), value -> true))
            .map(partition -> partition.orElseThrow(() -> new VerifyException("partition must exist")))
            .collect(toImmutableList());
    return new HivePartitionResult(partitionColumns, Optional.empty(), partitionList, TupleDomain.all(), TupleDomain.all(), bucketHandle, Optional.empty());
}
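The partition names that getPartitions builds from partitionColumnNames and partitionValuesList follow the usual Hive key=value layout. The helper below is only an illustrative sketch of that shape, not Trino's toPartitionName, which also escapes characters that are unsafe in partition names and values.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

// Illustrative only: produces names like "ds=2024-01-01/country=US";
// the real helper additionally escapes unsafe characters.
static String toPartitionNameSketch(List<String> columnNames, List<String> partitionValues)
{
    if (columnNames.size() != partitionValues.size()) {
        throw new IllegalArgumentException("column and value counts differ");
    }
    return IntStream.range(0, columnNames.size())
            .mapToObj(i -> columnNames.get(i) + "=" + partitionValues.get(i))
            .collect(Collectors.joining("/"));
}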
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
The class SemiTransactionalHiveMetastore, method finishUpdate.
public synchronized void finishUpdate(ConnectorSession session, String databaseName, String tableName, Path currentLocation, List<PartitionAndStatementId> partitionAndStatementIds)
{
    if (partitionAndStatementIds.isEmpty()) {
        return;
    }
    setShared();
    SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName);
    Action<TableAndMore> oldTableAction = tableActions.get(schemaTableName);
    if (oldTableAction == null) {
        Table table = getExistingTable(schemaTableName.getSchemaName(), schemaTableName.getTableName());
        HdfsContext hdfsContext = new HdfsContext(session);
        PrincipalPrivileges principalPrivileges = buildInitialPrivilegeSet(table.getOwner().orElseThrow());
        tableActions.put(schemaTableName, new Action<>(ActionType.UPDATE, new TableAndAcidDirectories(table, Optional.of(principalPrivileges), Optional.of(currentLocation), partitionAndStatementIds), hdfsContext, session.getQueryId()));
        return;
    }
    switch (oldTableAction.getType()) {
        case DROP:
            throw new TableNotFoundException(schemaTableName);
        case ADD:
        case ALTER:
        case INSERT_EXISTING:
        case DELETE_ROWS:
        case UPDATE:
            throw new UnsupportedOperationException("Inserting, updating or deleting in a table that was added, altered, inserted into, updated or deleted from in the same transaction is not supported");
        case DROP_PRESERVE_DATA:
            // TODO
            break;
    }
    throw new IllegalStateException("Unknown action type");
}
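finishUpdate does not touch the metastore directly; it buffers an UPDATE action in tableActions, keyed by SchemaTableName, and rejects a second modification of the same table within one transaction. The sketch below shows only that buffering pattern, using a hypothetical SketchActionType enum and a plain map instead of Trino's internal Action and TableAndMore classes.

import io.trino.spi.connector.SchemaTableName;

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-in for the metastore's internal bookkeeping.
enum SketchActionType { ADD, ALTER, INSERT_EXISTING, DELETE_ROWS, UPDATE, DROP, DROP_PRESERVE_DATA }

class TableActionBufferSketch
{
    private final Map<SchemaTableName, SketchActionType> tableActions = new HashMap<>();

    // Buffer an UPDATE unless the table was already touched in this transaction.
    void recordUpdate(String databaseName, String tableName)
    {
        SchemaTableName key = new SchemaTableName(databaseName, tableName);
        SketchActionType previous = tableActions.putIfAbsent(key, SketchActionType.UPDATE);
        if (previous != null) {
            throw new UnsupportedOperationException("table already modified in this transaction: " + key);
        }
    }
}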
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
The class SemiTransactionalHiveMetastore, method getTable.
public synchronized Optional<Table> getTable(String databaseName, String tableName)
{
    checkReadable();
    Action<TableAndMore> tableAction = tableActions.get(new SchemaTableName(databaseName, tableName));
    if (tableAction == null) {
        return delegate.getTable(databaseName, tableName);
    }
    switch (tableAction.getType()) {
        case ADD:
        case ALTER:
        case INSERT_EXISTING:
        case DELETE_ROWS:
        case UPDATE:
            return Optional.of(tableAction.getData().getTable());
        case DROP:
            return Optional.empty();
        case DROP_PRESERVE_DATA:
            // TODO
            break;
    }
    throw new IllegalStateException("Unknown action type");
}
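The lookup order matters here: a buffered action for the SchemaTableName shadows whatever the delegate metastore would return. Below is a stripped-down sketch of that read-through behavior, with a String standing in for Trino's Table class and a BiFunction standing in for the delegate; it is an illustration of the pattern, not the actual implementation.

import io.trino.spi.connector.SchemaTableName;

import java.util.Map;
import java.util.Optional;
import java.util.function.BiFunction;

// Hypothetical simplification: buffered writes shadow the delegate metastore on read.
class OverlayReadSketch
{
    private final Map<SchemaTableName, Optional<String>> buffered; // ADD/ALTER/... map to a value, DROP to Optional.empty()
    private final BiFunction<String, String, Optional<String>> delegate;

    OverlayReadSketch(Map<SchemaTableName, Optional<String>> buffered, BiFunction<String, String, Optional<String>> delegate)
    {
        this.buffered = buffered;
        this.delegate = delegate;
    }

    Optional<String> getTable(String databaseName, String tableName)
    {
        Optional<String> local = buffered.get(new SchemaTableName(databaseName, tableName));
        return local != null ? local : delegate.apply(databaseName, tableName);
    }
}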
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
The class SemiTransactionalHiveMetastore, method dropPartition.
public synchronized void dropPartition(ConnectorSession session, String databaseName, String tableName, List<String> partitionValues, boolean deleteData)
{
    setShared();
    Map<List<String>, Action<PartitionAndMore>> partitionActionsOfTable = partitionActions.computeIfAbsent(new SchemaTableName(databaseName, tableName), k -> new HashMap<>());
    Action<PartitionAndMore> oldPartitionAction = partitionActionsOfTable.get(partitionValues);
    if (oldPartitionAction == null) {
        HdfsContext hdfsContext = new HdfsContext(session);
        if (deleteData) {
            partitionActionsOfTable.put(partitionValues, new Action<>(ActionType.DROP, null, hdfsContext, session.getQueryId()));
        }
        else {
            partitionActionsOfTable.put(partitionValues, new Action<>(ActionType.DROP_PRESERVE_DATA, null, hdfsContext, session.getQueryId()));
        }
        return;
    }
    switch (oldPartitionAction.getType()) {
        case DROP:
        case DROP_PRESERVE_DATA:
            throw new PartitionNotFoundException(new SchemaTableName(databaseName, tableName), partitionValues);
        case ADD:
        case ALTER:
        case INSERT_EXISTING:
        case DELETE_ROWS:
        case UPDATE:
            throw new TrinoException(NOT_SUPPORTED, format("dropping a partition added in the same transaction is not supported: %s %s %s", databaseName, tableName, partitionValues));
    }
    throw new IllegalStateException("Unknown action type");
}
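dropPartition keys its bookkeeping twice: the outer map by SchemaTableName, the inner map by the partition's value list, created lazily with computeIfAbsent. A minimal sketch of that nested structure follows, with a String in place of Trino's Action<PartitionAndMore>; it only illustrates the data layout, not the conflict handling above.

import io.trino.spi.connector.SchemaTableName;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical simplification of the nested partitionActions map.
class PartitionActionsSketch
{
    private final Map<SchemaTableName, Map<List<String>, String>> partitionActions = new HashMap<>();

    void recordDrop(String databaseName, String tableName, List<String> partitionValues, boolean deleteData)
    {
        Map<List<String>, String> ofTable = partitionActions.computeIfAbsent(new SchemaTableName(databaseName, tableName), k -> new HashMap<>());
        // List.equals/hashCode compare element by element, so a partition's value list works as a key.
        ofTable.put(partitionValues, deleteData ? "DROP" : "DROP_PRESERVE_DATA");
    }
}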
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
The class SemiTransactionalHiveMetastore, method truncateUnpartitionedTable.
public synchronized void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName)
{
    checkReadable();
    Optional<Table> table = getTable(databaseName, tableName);
    SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName);
    if (table.isEmpty()) {
        throw new TableNotFoundException(schemaTableName);
    }
    if (!table.get().getTableType().equals(MANAGED_TABLE.toString())) {
        throw new TrinoException(NOT_SUPPORTED, "Cannot delete from non-managed Hive table");
    }
    if (!table.get().getPartitionColumns().isEmpty()) {
        throw new IllegalArgumentException("Table is partitioned");
    }
    Path path = new Path(table.get().getStorage().getLocation());
    HdfsContext context = new HdfsContext(session);
    setExclusive((delegate, hdfsEnvironment) -> {
        RecursiveDeleteResult recursiveDeleteResult = recursiveDeleteFiles(hdfsEnvironment, context, path, ImmutableSet.of(""), false);
        if (!recursiveDeleteResult.getNotDeletedEligibleItems().isEmpty()) {
            throw new TrinoException(HIVE_FILESYSTEM_ERROR, format("Error deleting from unpartitioned table %s. These items cannot be deleted: %s", schemaTableName, recursiveDeleteResult.getNotDeletedEligibleItems()));
        }
    });
}
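Note that the recursive delete itself is not executed inside truncateUnpartitionedTable; it is handed to setExclusive as a lambda to run later in the transaction. The sketch below reduces that deferral to a single stored callback; the class, field, and commit hook are hypothetical simplifications, not Trino's actual API.

import java.util.function.BiConsumer;

// Hypothetical simplification: destructive work is captured as a callback
// and only runs when the stored operation is later invoked.
class ExclusiveOperationSketch
{
    private BiConsumer<Object, Object> exclusiveOperation; // (delegate metastore, HDFS environment)

    void setExclusive(BiConsumer<Object, Object> operation)
    {
        this.exclusiveOperation = operation;
    }

    void commit(Object delegateMetastore, Object hdfsEnvironment)
    {
        if (exclusiveOperation != null) {
            exclusiveOperation.accept(delegateMetastore, hdfsEnvironment);
        }
    }
}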