Use of io.trino.plugin.hive.metastore.Table in project trino by trinodb.
The class FileHiveMetastore, method replaceTable.
@Override
public synchronized void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges)
{
    Table table = getRequiredTable(databaseName, tableName);
    // The name check must be against newTable; the looked-up table necessarily
    // matches databaseName/tableName already, so comparing it would never fail.
    if (!newTable.getDatabaseName().equals(databaseName) || !newTable.getTableName().equals(tableName)) {
        throw new TrinoException(HIVE_METASTORE_ERROR, "Replacement table must have same name");
    }
    Path tableMetadataDirectory = getTableMetadataDirectory(table);
    writeSchemaFile(TABLE, tableMetadataDirectory, tableCodec, new TableMetadata(currentVersion, newTable), true);
    // replace existing permissions
    deleteTablePrivileges(table);
    for (Entry<String, Collection<HivePrivilegeInfo>> entry : principalPrivileges.getUserPrivileges().asMap().entrySet()) {
        setTablePrivileges(new HivePrincipal(USER, entry.getKey()), table.getDatabaseName(), table.getTableName(), entry.getValue());
    }
    for (Entry<String, Collection<HivePrivilegeInfo>> entry : principalPrivileges.getRolePrivileges().asMap().entrySet()) {
        setTablePrivileges(new HivePrincipal(ROLE, entry.getKey()), table.getDatabaseName(), table.getTableName(), entry.getValue());
    }
}
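The method overwrites the table's schema file and then rebuilds the table's privileges from scratch, one grant per principal. As a rough illustration of that replace-then-reset pattern, here is a minimal in-memory sketch; every name in it (MiniMetastore, the grant encoding) is hypothetical and not a Trino API:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical in-memory analogue of replaceTable: overwrite the stored schema,
// then drop and re-create all grants for the table.
class MiniMetastore
{
    private final Map<String, String> schemas = new HashMap<>();        // table name -> schema
    private final Map<String, Set<String>> grants = new HashMap<>();    // table name -> "principal:privilege"

    synchronized void replaceTable(String name, String newSchema, Map<String, Set<String>> newGrants)
    {
        if (!schemas.containsKey(name)) {
            throw new IllegalStateException("Table not found: " + name);
        }
        schemas.put(name, newSchema);       // overwrite, as writeSchemaFile(..., true) does
        grants.remove(name);                // replace existing permissions
        Set<String> rebuilt = new HashSet<>();
        for (Map.Entry<String, Set<String>> entry : newGrants.entrySet()) {
            for (String privilege : entry.getValue()) {
                rebuilt.add(entry.getKey() + ":" + privilege);
            }
        }
        grants.put(name, rebuilt);
    }
}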
Use of io.trino.plugin.hive.metastore.Table in project trino by trinodb.
The class FileHiveMetastore, method alterPartition.
@Override
public synchronized void alterPartition(String databaseName, String tableName, PartitionWithStatistics partitionWithStatistics)
{
    Table table = getRequiredTable(databaseName, tableName);
    Partition partition = partitionWithStatistics.getPartition();
    verifiedPartition(table, partition);
    Path partitionMetadataDirectory = getPartitionMetadataDirectory(table, partition.getValues());
    writeSchemaFile(PARTITION, partitionMetadataDirectory, partitionCodec, new PartitionMetadata(table, partitionWithStatistics), true);
}
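alterPartition simply rewrites the partition's schema file in place (the trailing true enables overwrite). The directory it targets hangs off the table directory following the usual Hive key=value convention. A simplified, self-contained sketch of that idea; the file name and layout here are assumptions, not the metastore's actual on-disk format:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;

// Sketch: resolve a Hive-style partition directory under the table directory
// and (over)write a metadata file there. ".schema" is a hypothetical name.
class PartitionFiles
{
    static Path partitionDirectory(Path tableDir, List<String> columns, List<String> values)
    {
        Path dir = tableDir;
        for (int i = 0; i < columns.size(); i++) {
            dir = dir.resolve(columns.get(i) + "=" + values.get(i));
        }
        return dir;
    }

    static void writeSchemaFile(Path partitionDir, String json, boolean overwrite) throws IOException
    {
        Files.createDirectories(partitionDir);
        Path schemaFile = partitionDir.resolve(".schema");
        if (!overwrite && Files.exists(schemaFile)) {
            throw new IOException("Schema file already exists: " + schemaFile);
        }
        Files.writeString(schemaFile, json);
    }
}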
Use of io.trino.plugin.hive.metastore.Table in project trino by trinodb.
The class FileHiveMetastore, method getAllPartitionNames.
private synchronized Optional<List<String>> getAllPartitionNames(String databaseName, String tableName)
{
    requireNonNull(databaseName, "databaseName is null");
    requireNonNull(tableName, "tableName is null");
    Optional<Table> tableReference = getTable(databaseName, tableName);
    if (tableReference.isEmpty()) {
        return Optional.empty();
    }
    Table table = tableReference.get();
    Path tableMetadataDirectory = getTableMetadataDirectory(table);
    List<ArrayDeque<String>> partitions = listPartitions(tableMetadataDirectory, table.getPartitionColumns());
    List<String> partitionNames = partitions.stream()
            .map(partitionValues -> makePartitionName(table.getPartitionColumns(), ImmutableList.copyOf(partitionValues)))
            .filter(partitionName -> isValidPartition(table, partitionName))
            .collect(toList());
    return Optional.of(ImmutableList.copyOf(partitionNames));
}
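The partition names returned here are Hive-style strings: key=value pairs joined with '/'. A minimal stand-in for makePartitionName to show the shape of the output; note the real Hive helper also percent-escapes special characters, which this sketch omits:

import java.util.List;
import java.util.stream.IntStream;
import static java.util.stream.Collectors.joining;

class PartitionNames
{
    // Stand-in for makePartitionName: columns [ds, country] with values
    // [2024-01-01, us] yield "ds=2024-01-01/country=us". No escaping here.
    static String makePartitionName(List<String> columns, List<String> values)
    {
        return IntStream.range(0, columns.size())
                .mapToObj(i -> columns.get(i) + "=" + values.get(i))
                .collect(joining("/"));
    }
}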
Use of io.trino.plugin.hive.metastore.Table in project trino by trinodb.
The class GlueToTrinoConverter, method convertTable.
public static Table convertTable(com.amazonaws.services.glue.model.Table glueTable, String dbName)
{
    Map<String, String> tableParameters = convertParameters(glueTable.getParameters());
    Table.Builder tableBuilder = Table.builder()
            .setDatabaseName(dbName)
            .setTableName(glueTable.getName())
            .setOwner(Optional.ofNullable(glueTable.getOwner()))
            .setTableType(firstNonNull(glueTable.getTableType(), EXTERNAL_TABLE.name()))
            .setParameters(tableParameters)
            .setViewOriginalText(Optional.ofNullable(glueTable.getViewOriginalText()))
            .setViewExpandedText(Optional.ofNullable(glueTable.getViewExpandedText()));
    StorageDescriptor sd = glueTable.getStorageDescriptor();
    if (sd == null) {
        if (isIcebergTable(tableParameters) || isDeltaLakeTable(tableParameters)) {
            // Iceberg and Delta Lake tables do not use the StorageDescriptor field, but we need to return a Table so the caller can check that
            // the table is an Iceberg/Delta table and decide whether to redirect or fail.
            tableBuilder.setDataColumns(ImmutableList.of(new Column("dummy", HIVE_INT, Optional.empty())));
            tableBuilder.getStorageBuilder().setStorageFormat(StorageFormat.fromHiveStorageFormat(HiveStorageFormat.PARQUET));
        }
        else {
            throw new TrinoException(HIVE_UNSUPPORTED_FORMAT, format("Table StorageDescriptor is null for table %s.%s (%s)", dbName, glueTable.getName(), glueTable));
        }
    }
    else {
        tableBuilder.setDataColumns(convertColumns(sd.getColumns(), sd.getSerdeInfo().getSerializationLibrary()));
        if (glueTable.getPartitionKeys() != null) {
            tableBuilder.setPartitionColumns(convertColumns(glueTable.getPartitionKeys(), sd.getSerdeInfo().getSerializationLibrary()));
        }
        else {
            tableBuilder.setPartitionColumns(ImmutableList.of());
        }
        // No benefit to memoizing here, just reusing the implementation
        new StorageConverter().setStorageBuilder(sd, tableBuilder.getStorageBuilder(), tableParameters);
    }
    return tableBuilder.build();
}
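The Iceberg/Delta detection above keys off the Glue table parameters rather than the StorageDescriptor. The following sketch shows what such checks look like; the parameter keys match the conventions Iceberg and Spark/Delta use when registering Glue tables, but treat these as approximations of the Trino helpers rather than their verbatim source:

import java.util.Map;

class TableTypeChecks
{
    // Iceberg writes table_type=ICEBERG into the Glue table parameters.
    static boolean isIcebergTable(Map<String, String> parameters)
    {
        return "iceberg".equalsIgnoreCase(parameters.get("table_type"));
    }

    // Spark registers Delta tables with spark.table.provider=delta.
    static boolean isDeltaLakeTable(Map<String, String> parameters)
    {
        return "delta".equalsIgnoreCase(parameters.get("spark.table.provider"));
    }
}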
Use of io.trino.plugin.hive.metastore.Table in project trino by trinodb.
The class DefaultGlueColumnStatisticsProvider, method getTableColumnStatistics.
@Override
public Map<String, HiveColumnStatistics> getTableColumnStatistics(Table table)
{
    try {
        List<String> columnNames = getAllColumns(table);
        List<List<String>> columnChunks = Lists.partition(columnNames, GLUE_COLUMN_READ_STAT_PAGE_SIZE);
        List<CompletableFuture<GetColumnStatisticsForTableResult>> getStatsFutures = columnChunks.stream()
                .map(partialColumns -> supplyAsync(() -> {
                    GetColumnStatisticsForTableRequest request = new GetColumnStatisticsForTableRequest()
                            .withCatalogId(catalogId)
                            .withDatabaseName(table.getDatabaseName())
                            .withTableName(table.getTableName())
                            .withColumnNames(partialColumns);
                    return stats.getGetColumnStatisticsForTable().call(() -> glueClient.getColumnStatisticsForTable(request));
                }, readExecutor))
                .collect(toImmutableList());
        HiveBasicStatistics tableStatistics = getHiveBasicStatistics(table.getParameters());
        ImmutableMap.Builder<String, HiveColumnStatistics> columnStatsMapBuilder = ImmutableMap.builder();
        for (CompletableFuture<GetColumnStatisticsForTableResult> future : getStatsFutures) {
            GetColumnStatisticsForTableResult tableColumnsStats = getFutureValue(future, TrinoException.class);
            for (ColumnStatistics columnStatistics : tableColumnsStats.getColumnStatisticsList()) {
                columnStatsMapBuilder.put(
                        columnStatistics.getColumnName(),
                        fromGlueColumnStatistics(columnStatistics.getStatisticsData(), tableStatistics.getRowCount()));
            }
        }
        return columnStatsMapBuilder.buildOrThrow();
    }
    catch (RuntimeException ex) {
        throw new TrinoException(HIVE_METASTORE_ERROR, ex);
    }
}
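Glue caps how many column names a single GetColumnStatisticsForTable request may carry, so the method pages through the columns and fetches the pages concurrently. The same chunk-and-fan-out pattern in plain JDK terms; the fetch function and the page size of 100 are stand-ins, not Glue API details:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.function.Function;
import static java.util.concurrent.CompletableFuture.supplyAsync;

class PagedFetch
{
    // Split `items` into pages of `pageSize`, fetch every page asynchronously on
    // `executor`, then join the futures in order. Mirrors the Lists.partition +
    // supplyAsync structure above, without any Glue types.
    static <T> List<T> fetchInPages(List<String> items, int pageSize, Function<List<String>, T> fetch, Executor executor)
    {
        List<CompletableFuture<T>> futures = new ArrayList<>();
        for (int i = 0; i < items.size(); i += pageSize) {
            List<String> page = items.subList(i, Math.min(i + pageSize, items.size()));
            futures.add(supplyAsync(() -> fetch.apply(page), executor));
        }
        List<T> results = new ArrayList<>();
        for (CompletableFuture<T> future : futures) {
            results.add(future.join());
        }
        return results;
    }
}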