
Example 16 with Partition

use of io.prestosql.plugin.hive.metastore.Partition in project hetu-core by openlookeng.

the class HiveSplitManager method getSplits.

@Override
public ConnectorSplitSource getSplits(
        ConnectorTransactionHandle transaction,
        ConnectorSession session,
        ConnectorTableHandle tableHandle,
        SplitSchedulingStrategy splitSchedulingStrategy,
        Supplier<List<Set<DynamicFilter>>> dynamicFilterSupplier,
        Optional<QueryType> queryType,
        Map<String, Object> queryInfo,
        Set<TupleDomain<ColumnMetadata>> userDefinedCachePredicates,
        boolean partOfReuse) {
    HiveTableHandle hiveTable = (HiveTableHandle) tableHandle;
    SchemaTableName tableName = hiveTable.getSchemaTableName();
    // get table metadata
    SemiTransactionalHiveMetastore metastore = metastoreProvider.apply((HiveTransactionHandle) transaction);
    Table table = metastore.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()).orElseThrow(() -> new TableNotFoundException(tableName));
    if (table.getStorage().getStorageFormat().getInputFormat().contains("carbon")) {
        throw new PrestoException(NOT_SUPPORTED, "Hive connector can't read carbondata tables");
    }
    // verify table is not marked as non-readable
    String tableNotReadable = table.getParameters().get(OBJECT_NOT_READABLE);
    if (!isNullOrEmpty(tableNotReadable)) {
        throw new HiveNotReadableException(tableName, Optional.empty(), tableNotReadable);
    }
    // get partitions
    List<HivePartition> partitions = partitionManager.getOrLoadPartitions(session, metastore, new HiveIdentity(session), hiveTable);
    // short circuit if we don't have any partitions
    if (partitions.isEmpty()) {
        return new FixedSplitSource(ImmutableList.of());
    }
    // get buckets from first partition (arbitrary)
    Optional<HiveBucketing.HiveBucketFilter> bucketFilter = hiveTable.getBucketFilter();
    // validate bucket bucketed execution
    Optional<HiveBucketHandle> bucketHandle = hiveTable.getBucketHandle();
    if ((splitSchedulingStrategy == GROUPED_SCHEDULING) && !bucketHandle.isPresent()) {
        throw new PrestoException(GENERIC_INTERNAL_ERROR, "SchedulingPolicy is bucketed, but BucketHandle is not present");
    }
    // sort partitions
    partitions = Ordering.natural().onResultOf(HivePartition::getPartitionId).reverse().sortedCopy(partitions);
    Iterable<HivePartitionMetadata> hivePartitions = getPartitionMetadata(session, metastore, table, tableName, partitions, bucketHandle.map(HiveBucketHandle::toTableBucketProperty));
    HiveSplitLoader hiveSplitLoader = new BackgroundHiveSplitLoader(
            table,
            hivePartitions,
            hiveTable.getCompactEffectivePredicate(),
            BackgroundHiveSplitLoader.BucketSplitInfo.createBucketSplitInfo(bucketHandle, bucketFilter),
            session,
            hdfsEnvironment,
            namenodeStats,
            directoryLister,
            executor,
            splitLoaderConcurrency,
            recursiveDfsWalkerEnabled,
            metastore.getValidWriteIds(session, hiveTable, queryType.map(t -> t == QueryType.VACUUM).orElse(false))
                    .map(validTxnWriteIdList -> validTxnWriteIdList.getTableValidWriteIdList(table.getDatabaseName() + "." + table.getTableName())),
            dynamicFilterSupplier,
            queryType,
            queryInfo,
            typeManager);
    HiveSplitSource splitSource;
    HiveStorageFormat hiveStorageFormat = HiveMetadata.extractHiveStorageFormat(table);
    switch(splitSchedulingStrategy) {
        case UNGROUPED_SCHEDULING:
            // For reuse, we should make sure to have the same split size at all times for a table.
            splitSource = HiveSplitSource.allAtOnce(session, table.getDatabaseName(), table.getTableName(), partOfReuse ? 0 : maxInitialSplits, maxOutstandingSplits, maxOutstandingSplitsSize, maxSplitsPerSecond, hiveSplitLoader, executor, new CounterStat(), dynamicFilterSupplier, userDefinedCachePredicates, typeManager, hiveConfig, hiveStorageFormat);
            break;
        case GROUPED_SCHEDULING:
            // For reuse, we should make sure to have the same split size at all times for a table.
            splitSource = HiveSplitSource.bucketed(session, table.getDatabaseName(), table.getTableName(), partOfReuse ? 0 : maxInitialSplits, maxOutstandingSplits, maxOutstandingSplitsSize, maxSplitsPerSecond, hiveSplitLoader, executor, new CounterStat(), dynamicFilterSupplier, userDefinedCachePredicates, typeManager, hiveConfig, hiveStorageFormat);
            break;
        default:
            throw new IllegalArgumentException("Unknown splitSchedulingStrategy: " + splitSchedulingStrategy);
    }
    hiveSplitLoader.start(splitSource);
    if (queryType.isPresent() && queryType.get() == QueryType.VACUUM) {
        HdfsContext hdfsContext = new HdfsContext(session, table.getDatabaseName(), table.getTableName());
        return new HiveVacuumSplitSource(splitSource, (HiveVacuumTableHandle) queryInfo.get("vacuumHandle"), hdfsEnvironment, hdfsContext, session);
    }
    return splitSource;
}
Also used : VersionEmbedder(io.prestosql.spi.VersionEmbedder) GROUPED_SCHEDULING(io.prestosql.spi.connector.ConnectorSplitManager.SplitSchedulingStrategy.GROUPED_SCHEDULING) Iterables.transform(com.google.common.collect.Iterables.transform) DynamicFilter(io.prestosql.spi.dynamicfilter.DynamicFilter) QueryType(io.prestosql.spi.resourcegroups.QueryType) HdfsContext(io.prestosql.plugin.hive.HdfsEnvironment.HdfsContext) ConnectorSplitManager(io.prestosql.spi.connector.ConnectorSplitManager) MetastoreUtil(io.prestosql.plugin.hive.metastore.MetastoreUtil) SERVER_SHUTTING_DOWN(io.prestosql.spi.StandardErrorCode.SERVER_SHUTTING_DOWN) Preconditions.checkArgument(com.google.common.base.Preconditions.checkArgument) ConnectorSession(io.prestosql.spi.connector.ConnectorSession) TableNotFoundException(io.prestosql.spi.connector.TableNotFoundException) BoundedExecutor(io.airlift.concurrent.BoundedExecutor) Iterables.concat(com.google.common.collect.Iterables.concat) Map(java.util.Map) PrestoException(io.prestosql.spi.PrestoException) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) Set(java.util.Set) ConnectorSplitSource(io.prestosql.spi.connector.ConnectorSplitSource) Math.min(java.lang.Math.min) String.format(java.lang.String.format) DataSize(io.airlift.units.DataSize) List(java.util.List) Table(io.prestosql.plugin.hive.metastore.Table) GENERIC_INTERNAL_ERROR(io.prestosql.spi.StandardErrorCode.GENERIC_INTERNAL_ERROR) ConnectorTransactionHandle(io.prestosql.spi.connector.ConnectorTransactionHandle) Optional(java.util.Optional) MoreObjects.firstNonNull(com.google.common.base.MoreObjects.firstNonNull) NOT_SUPPORTED(io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED) Nested(org.weakref.jmx.Nested) Partition(io.prestosql.plugin.hive.metastore.Partition) Strings.isNullOrEmpty(com.google.common.base.Strings.isNullOrEmpty) CounterStat(io.airlift.stats.CounterStat) Function(java.util.function.Function) Supplier(java.util.function.Supplier) Inject(javax.inject.Inject) SchemaTableName(io.prestosql.spi.connector.SchemaTableName) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) Lists(com.google.common.collect.Lists) ImmutableList(com.google.common.collect.ImmutableList) Managed(org.weakref.jmx.Managed) Objects.requireNonNull(java.util.Objects.requireNonNull) FixedSplitSource(io.prestosql.spi.connector.FixedSplitSource) SemiTransactionalHiveMetastore(io.prestosql.plugin.hive.metastore.SemiTransactionalHiveMetastore) Nullable(javax.annotation.Nullable) ExecutorService(java.util.concurrent.ExecutorService) HiveIdentity(io.prestosql.plugin.hive.authentication.HiveIdentity) Iterator(java.util.Iterator) Executor(java.util.concurrent.Executor) ColumnMetadata(io.prestosql.spi.connector.ColumnMetadata) ConnectorTableHandle(io.prestosql.spi.connector.ConnectorTableHandle) TupleDomain(io.prestosql.spi.predicate.TupleDomain) AbstractIterator(com.google.common.collect.AbstractIterator) TypeManager(io.prestosql.spi.type.TypeManager) Iterables.getOnlyElement(com.google.common.collect.Iterables.getOnlyElement) Ordering(com.google.common.collect.Ordering) Column(io.prestosql.plugin.hive.metastore.Column) SemiTransactionalHiveMetastore(io.prestosql.plugin.hive.metastore.SemiTransactionalHiveMetastore) CounterStat(io.airlift.stats.CounterStat) PrestoException(io.prestosql.spi.PrestoException) HiveIdentity(io.prestosql.plugin.hive.authentication.HiveIdentity) TableNotFoundException(io.prestosql.spi.connector.TableNotFoundException) 
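
The "sort partitions" step above is plain Guava: order partitions by a derived key (HivePartition::getPartitionId), reverse the natural ordering, and copy into a new list. A minimal, self-contained sketch of the same idiom; FakePartition is a hypothetical stand-in for HivePartition and is not part of the connector:

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Ordering;
import java.util.List;

public class PartitionOrderingSketch {
    // Hypothetical stand-in for HivePartition; only the partition id matters here.
    static class FakePartition {
        private final String partitionId;

        FakePartition(String partitionId) {
            this.partitionId = partitionId;
        }

        String getPartitionId() {
            return partitionId;
        }

        @Override
        public String toString() {
            return partitionId;
        }
    }

    public static void main(String[] args) {
        List<FakePartition> partitions = ImmutableList.of(
                new FakePartition("ds=2021-01-01"),
                new FakePartition("ds=2021-01-03"),
                new FakePartition("ds=2021-01-02"));
        // Same idiom as the "// sort partitions" line: natural ordering on a derived key,
        // reversed so the largest partition id comes first, copied into a new list.
        List<FakePartition> sorted = Ordering.natural()
                .onResultOf(FakePartition::getPartitionId)
                .reverse()
                .sortedCopy(partitions);
        System.out.println(sorted); // [ds=2021-01-03, ds=2021-01-02, ds=2021-01-01]
    }
}

Note that sortedCopy returns a new list and leaves its input untouched, which is why getSplits reassigns the partitions variable.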

Example 17 with Partition

use of io.prestosql.plugin.hive.metastore.Partition in project boostkit-bigdata by kunpengcompute.

the class GlueHiveMetastore method batchGetPartition.

private List<Partition> batchGetPartition(String databaseName, String tableName, List<String> partitionNames) {
    try {
        List<PartitionValueList> partitionValueLists = partitionNames.stream().map(partitionName -> new PartitionValueList().withValues(toPartitionValues(partitionName))).collect(toList());
        List<List<PartitionValueList>> batchedPartitionValueLists = Lists.partition(partitionValueLists, BATCH_GET_PARTITION_MAX_PAGE_SIZE);
        List<Future<BatchGetPartitionResult>> batchGetPartitionFutures = new ArrayList<>();
        List<Partition> result = new ArrayList<>();
        for (List<PartitionValueList> partitions : batchedPartitionValueLists) {
            batchGetPartitionFutures.add(glueClient.batchGetPartitionAsync(new BatchGetPartitionRequest().withCatalogId(catalogId).withDatabaseName(databaseName).withTableName(tableName).withPartitionsToGet(partitions)));
        }
        for (Future<BatchGetPartitionResult> future : batchGetPartitionFutures) {
            future.get().getPartitions().forEach(partition -> result.add(GlueToPrestoConverter.convertPartition(partition)));
        }
        return result;
    } catch (AmazonServiceException | InterruptedException | ExecutionException e) {
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }
        throw new PrestoException(HiveErrorCode.HIVE_METASTORE_ERROR, e);
    }
}
Also used : AWSStaticCredentialsProvider(com.amazonaws.auth.AWSStaticCredentialsProvider) UnaryOperator.identity(java.util.function.UnaryOperator.identity) DefaultAWSCredentialsProviderChain(com.amazonaws.auth.DefaultAWSCredentialsProviderChain) HiveWriteUtils(io.prestosql.plugin.hive.HiveWriteUtils) MetastoreUtil(io.prestosql.plugin.hive.metastore.MetastoreUtil) TableAlreadyExistsException(io.prestosql.spi.connector.TableAlreadyExistsException) RoleGrant(io.prestosql.spi.security.RoleGrant) DeleteTableRequest(com.amazonaws.services.glue.model.DeleteTableRequest) SchemaAlreadyExistsException(io.prestosql.spi.connector.SchemaAlreadyExistsException) Future(java.util.concurrent.Future) GetDatabasesResult(com.amazonaws.services.glue.model.GetDatabasesResult) TableNotFoundException(io.prestosql.spi.connector.TableNotFoundException) Map(java.util.Map) BatchCreatePartitionRequest(com.amazonaws.services.glue.model.BatchCreatePartitionRequest) GetTablesResult(com.amazonaws.services.glue.model.GetTablesResult) HiveErrorCode(io.prestosql.plugin.hive.HiveErrorCode) AmazonServiceException(com.amazonaws.AmazonServiceException) DeletePartitionRequest(com.amazonaws.services.glue.model.DeletePartitionRequest) Set(java.util.Set) DatabaseInput(com.amazonaws.services.glue.model.DatabaseInput) TableInput(com.amazonaws.services.glue.model.TableInput) UpdateTableRequest(com.amazonaws.services.glue.model.UpdateTableRequest) MANAGED_TABLE(org.apache.hadoop.hive.metastore.TableType.MANAGED_TABLE) ImmutableMap.toImmutableMap(com.google.common.collect.ImmutableMap.toImmutableMap) Table(io.prestosql.plugin.hive.metastore.Table) ThriftMetastoreUtil(io.prestosql.plugin.hive.metastore.thrift.ThriftMetastoreUtil) PartitionInput(com.amazonaws.services.glue.model.PartitionInput) GlueInputConverter(io.prestosql.plugin.hive.metastore.glue.converter.GlueInputConverter) EntityNotFoundException(com.amazonaws.services.glue.model.EntityNotFoundException) AWSGlueAsync(com.amazonaws.services.glue.AWSGlueAsync) GetPartitionsRequest(com.amazonaws.services.glue.model.GetPartitionsRequest) ALREADY_EXISTS(io.prestosql.spi.StandardErrorCode.ALREADY_EXISTS) Database(io.prestosql.plugin.hive.metastore.Database) GetPartitionResult(com.amazonaws.services.glue.model.GetPartitionResult) Partition(io.prestosql.plugin.hive.metastore.Partition) Strings.isNullOrEmpty(com.google.common.base.Strings.isNullOrEmpty) Regions(com.amazonaws.regions.Regions) ArrayList(java.util.ArrayList) HdfsEnvironment(io.prestosql.plugin.hive.HdfsEnvironment) Lists(com.google.common.collect.Lists) AlreadyExistsException(com.amazonaws.services.glue.model.AlreadyExistsException) GlueToPrestoConverter(io.prestosql.plugin.hive.metastore.glue.converter.GlueToPrestoConverter) AWSCredentialsProvider(com.amazonaws.auth.AWSCredentialsProvider) CreateTableRequest(com.amazonaws.services.glue.model.CreateTableRequest) CreateDatabaseRequest(com.amazonaws.services.glue.model.CreateDatabaseRequest) HiveMetastore(io.prestosql.plugin.hive.metastore.HiveMetastore) PartitionStatistics(io.prestosql.plugin.hive.PartitionStatistics) PartitionValueList(com.amazonaws.services.glue.model.PartitionValueList) GetTableResult(com.amazonaws.services.glue.model.GetTableResult) USER(io.prestosql.spi.security.PrincipalType.USER) PrincipalPrivileges(io.prestosql.plugin.hive.metastore.PrincipalPrivileges) ExecutionException(java.util.concurrent.ExecutionException) ClientConfiguration(com.amazonaws.ClientConfiguration) GetPartitionsResult(com.amazonaws.services.glue.model.GetPartitionsResult) 
PartitionNotFoundException(io.prestosql.plugin.hive.PartitionNotFoundException) HdfsContext(io.prestosql.plugin.hive.HdfsEnvironment.HdfsContext) GetDatabaseRequest(com.amazonaws.services.glue.model.GetDatabaseRequest) ColumnNotFoundException(io.prestosql.spi.connector.ColumnNotFoundException) GetDatabasesRequest(com.amazonaws.services.glue.model.GetDatabasesRequest) Collectors.toMap(java.util.stream.Collectors.toMap) Path(org.apache.hadoop.fs.Path) Type(io.prestosql.spi.type.Type) PrestoException(io.prestosql.spi.PrestoException) ImmutableSet(com.google.common.collect.ImmutableSet) AWSGlueAsyncClientBuilder(com.amazonaws.services.glue.AWSGlueAsyncClientBuilder) ImmutableMap(com.google.common.collect.ImmutableMap) STSAssumeRoleSessionCredentialsProvider(com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider) HiveType(io.prestosql.plugin.hive.HiveType) HIVE_PARTITION_DROPPED_DURING_QUERY(io.prestosql.plugin.hive.HiveErrorCode.HIVE_PARTITION_DROPPED_DURING_QUERY) List(java.util.List) GetTableRequest(com.amazonaws.services.glue.model.GetTableRequest) PartitionError(com.amazonaws.services.glue.model.PartitionError) Entry(java.util.Map.Entry) Optional(java.util.Optional) NOT_SUPPORTED(io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED) UpdateDatabaseRequest(com.amazonaws.services.glue.model.UpdateDatabaseRequest) Logger(io.airlift.log.Logger) HiveUtil.toPartitionValues(io.prestosql.plugin.hive.HiveUtil.toPartitionValues) HivePrincipal(io.prestosql.plugin.hive.metastore.HivePrincipal) Function(java.util.function.Function) ColumnStatisticType(io.prestosql.spi.statistics.ColumnStatisticType) MetastoreUtil.makePartitionName(io.prestosql.plugin.hive.metastore.MetastoreUtil.makePartitionName) Inject(javax.inject.Inject) GetPartitionRequest(com.amazonaws.services.glue.model.GetPartitionRequest) SchemaTableName(io.prestosql.spi.connector.SchemaTableName) ImmutableList(com.google.common.collect.ImmutableList) HivePrivilegeInfo(io.prestosql.plugin.hive.metastore.HivePrivilegeInfo) Objects.requireNonNull(java.util.Objects.requireNonNull) ThriftMetastoreUtil.getHiveBasicStatistics(io.prestosql.plugin.hive.metastore.thrift.ThriftMetastoreUtil.getHiveBasicStatistics) DeleteDatabaseRequest(com.amazonaws.services.glue.model.DeleteDatabaseRequest) VIRTUAL_VIEW(org.apache.hadoop.hive.metastore.TableType.VIRTUAL_VIEW) BatchGetPartitionRequest(com.amazonaws.services.glue.model.BatchGetPartitionRequest) BatchCreatePartitionResult(com.amazonaws.services.glue.model.BatchCreatePartitionResult) HiveIdentity(io.prestosql.plugin.hive.authentication.HiveIdentity) BasicAWSCredentials(com.amazonaws.auth.BasicAWSCredentials) BatchGetPartitionResult(com.amazonaws.services.glue.model.BatchGetPartitionResult) HiveUtil(io.prestosql.plugin.hive.HiveUtil) PartitionWithStatistics(io.prestosql.plugin.hive.metastore.PartitionWithStatistics) ErrorDetail(com.amazonaws.services.glue.model.ErrorDetail) SchemaNotFoundException(io.prestosql.spi.connector.SchemaNotFoundException) ConnectorIdentity(io.prestosql.spi.security.ConnectorIdentity) GlueExpressionUtil.buildGlueExpression(io.prestosql.plugin.hive.metastore.glue.GlueExpressionUtil.buildGlueExpression) Collectors.toList(java.util.stream.Collectors.toList) GetDatabaseResult(com.amazonaws.services.glue.model.GetDatabaseResult) Column(io.prestosql.plugin.hive.metastore.Column) GetTablesRequest(com.amazonaws.services.glue.model.GetTablesRequest) UpdatePartitionRequest(com.amazonaws.services.glue.model.UpdatePartitionRequest) Region(com.amazonaws.regions.Region) 
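
batchGetPartition splits the requested partition names into pages with Guava's Lists.partition, fires one asynchronous Glue call per page, and then drains the futures in order. A minimal sketch of that batching pattern; the page size, the executor, and fetchPage are made-up placeholders for the BATCH_GET_PARTITION_MAX_PAGE_SIZE constant and the asynchronous Glue client used above:

import com.google.common.collect.Lists;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class BatchingSketch {
    // Stand-in for the BATCH_GET_PARTITION_MAX_PAGE_SIZE constant referenced above.
    private static final int PAGE_SIZE = 3;

    public static void main(String[] args) throws Exception {
        List<String> partitionNames = List.of("a=1/b=1", "a=1/b=2", "a=2/b=1", "a=2/b=2", "a=3/b=1");
        ExecutorService executor = Executors.newFixedThreadPool(2);
        List<Future<List<String>>> futures = new ArrayList<>();
        // Split into pages and submit one task per page, just as the method above
        // submits one batchGetPartitionAsync request per page.
        for (List<String> page : Lists.partition(partitionNames, PAGE_SIZE)) {
            futures.add(executor.submit(() -> fetchPage(page)));
        }
        // Drain the futures and flatten the pages back into a single result list.
        List<String> result = new ArrayList<>();
        for (Future<List<String>> future : futures) {
            result.addAll(future.get());
        }
        executor.shutdown();
        System.out.println(result);
    }

    // Hypothetical page fetch standing in for glueClient.batchGetPartitionAsync(...).
    private static List<String> fetchPage(List<String> page) {
        return page;
    }
}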

Example 18 with Partition

use of io.prestosql.plugin.hive.metastore.Partition in project boostkit-bigdata by kunpengcompute.

the class GlueHiveMetastore method getPartitionsByNames.

/**
 * <pre>
 * Ex: Partition keys = ['a', 'b']
 *     Partition names = ['a=1/b=2', 'a=2/b=2']
 * </pre>
 *
 * @param partitionNames List of full partition names
 * @return Mapping of partition name to partition object
 */
@Override
public Map<String, Optional<Partition>> getPartitionsByNames(HiveIdentity identity, String databaseName, String tableName, List<String> partitionNames) {
    requireNonNull(partitionNames, "partitionNames is null");
    if (partitionNames.isEmpty()) {
        return ImmutableMap.of();
    }
    List<Partition> partitions = batchGetPartition(databaseName, tableName, partitionNames);
    Map<String, List<String>> partitionNameToPartitionValuesMap = partitionNames.stream().collect(toMap(identity(), HiveUtil::toPartitionValues));
    Map<List<String>, Partition> partitionValuesToPartitionMap = partitions.stream().collect(toMap(Partition::getValues, identity()));
    ImmutableMap.Builder<String, Optional<Partition>> resultBuilder = ImmutableMap.builder();
    for (Entry<String, List<String>> entry : partitionNameToPartitionValuesMap.entrySet()) {
        Partition partition = partitionValuesToPartitionMap.get(entry.getValue());
        resultBuilder.put(entry.getKey(), Optional.ofNullable(partition));
    }
    return resultBuilder.build();
}
Also used : Partition(io.prestosql.plugin.hive.metastore.Partition) Optional(java.util.Optional) ArrayList(java.util.ArrayList) PartitionValueList(com.amazonaws.services.glue.model.PartitionValueList) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) Collectors.toList(java.util.stream.Collectors.toList) ImmutableMap.toImmutableMap(com.google.common.collect.ImmutableMap.toImmutableMap) ImmutableMap(com.google.common.collect.ImmutableMap)
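
getPartitionsByNames performs a two-step join: each requested name is mapped to its partition values, the fetched partitions are keyed by their values, and names with no matching partition end up as Optional.empty(). A small sketch of the same join, using strings in place of Partition objects; toPartitionValues below is a simplified stand-in for HiveUtil.toPartitionValues:

import com.google.common.collect.ImmutableMap;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import static java.util.function.Function.identity;
import static java.util.stream.Collectors.toList;
import static java.util.stream.Collectors.toMap;

public class PartitionLookupSketch {
    public static void main(String[] args) {
        // Requested names; "a=2/b=2" has no match, to show the Optional.empty() case.
        List<String> partitionNames = List.of("a=1/b=2", "a=2/b=2");
        // Stand-in for the result of batchGetPartition, keyed by partition values.
        Map<List<String>, String> partitionValuesToPartition = Map.of(List.of("1", "2"), "partition(a=1,b=2)");

        // name -> values, then values -> partition, wrapping misses in Optional.empty().
        Map<String, List<String>> nameToValues = partitionNames.stream()
                .collect(toMap(identity(), PartitionLookupSketch::toPartitionValues));
        ImmutableMap.Builder<String, Optional<String>> result = ImmutableMap.builder();
        for (Map.Entry<String, List<String>> entry : nameToValues.entrySet()) {
            result.put(entry.getKey(), Optional.ofNullable(partitionValuesToPartition.get(entry.getValue())));
        }
        // "a=1/b=2" maps to the partition, "a=2/b=2" maps to Optional.empty.
        System.out.println(result.build());
    }

    // Simplified stand-in for HiveUtil.toPartitionValues: "k1=v1/k2=v2" -> [v1, v2].
    private static List<String> toPartitionValues(String partitionName) {
        return Arrays.stream(partitionName.split("/"))
                .map(piece -> piece.substring(piece.indexOf('=') + 1))
                .collect(toList());
    }
}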

Example 19 with Partition

use of io.prestosql.plugin.hive.metastore.Partition in project boostkit-bigdata by kunpengcompute.

the class GlueHiveMetastore method getPartitions.

private List<Partition> getPartitions(String databaseName, String tableName, String expression) {
    try {
        List<Partition> partitions = new ArrayList<>();
        String nextToken = null;
        do {
            GetPartitionsResult result = glueClient.getPartitions(new GetPartitionsRequest().withCatalogId(catalogId).withDatabaseName(databaseName).withTableName(tableName).withExpression(expression).withNextToken(nextToken));
            result.getPartitions().forEach(partition -> partitions.add(GlueToPrestoConverter.convertPartition(partition)));
            nextToken = result.getNextToken();
        } while (nextToken != null);
        return partitions;
    } catch (AmazonServiceException e) {
        throw new PrestoException(HiveErrorCode.HIVE_METASTORE_ERROR, e);
    }
}
Also used : Partition(io.prestosql.plugin.hive.metastore.Partition) GetPartitionsResult(com.amazonaws.services.glue.model.GetPartitionsResult) ArrayList(java.util.ArrayList) AmazonServiceException(com.amazonaws.AmazonServiceException) PrestoException(io.prestosql.spi.PrestoException) GetPartitionsRequest(com.amazonaws.services.glue.model.GetPartitionsRequest)
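
getPartitions is a standard continuation-token loop: request a page of partitions, convert and collect them, and repeat while Glue returns a nextToken. A minimal sketch of that pagination shape; Page and fetchPage are made-up placeholders for GetPartitionsResult and glueClient.getPartitions:

import java.util.ArrayList;
import java.util.List;

public class PaginationSketch {
    // Hypothetical page of results: some items plus an opaque continuation token
    // (null once the last page has been served).
    static class Page {
        final List<String> items;
        final String nextToken;

        Page(List<String> items, String nextToken) {
            this.items = items;
            this.nextToken = nextToken;
        }
    }

    public static void main(String[] args) {
        List<String> partitions = new ArrayList<>();
        String nextToken = null;
        // Same do/while shape as getPartitions above.
        do {
            Page page = fetchPage(nextToken);
            partitions.addAll(page.items);
            nextToken = page.nextToken;
        } while (nextToken != null);
        System.out.println(partitions); // [p1, p2, p3]
    }

    // Fake service call: two pages, then no further token.
    private static Page fetchPage(String token) {
        if (token == null) {
            return new Page(List.of("p1", "p2"), "page-2");
        }
        return new Page(List.of("p3"), null);
    }
}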

Example 20 with Partition

use of io.prestosql.plugin.hive.metastore.Partition in project boostkit-bigdata by kunpengcompute.

the class GlueHiveMetastore method dropPartition.

@Override
public void dropPartition(HiveIdentity identity, String databaseName, String tableName, List<String> parts, boolean deleteData) {
    Table table = getTableOrElseThrow(identity, databaseName, tableName);
    Partition partition = getPartition(identity, databaseName, tableName, parts).orElseThrow(() -> new PartitionNotFoundException(new SchemaTableName(databaseName, tableName), parts));
    try {
        glueClient.deletePartition(new DeletePartitionRequest().withCatalogId(catalogId).withDatabaseName(databaseName).withTableName(tableName).withPartitionValues(parts));
    } catch (AmazonServiceException e) {
        throw new PrestoException(HiveErrorCode.HIVE_METASTORE_ERROR, e);
    }
    String partLocation = partition.getStorage().getLocation();
    if (deleteData && isManagedTable(table) && !isNullOrEmpty(partLocation)) {
        deleteDir(hdfsContext, hdfsEnvironment, new Path(partLocation), true);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Partition(io.prestosql.plugin.hive.metastore.Partition) PartitionNotFoundException(io.prestosql.plugin.hive.PartitionNotFoundException) Table(io.prestosql.plugin.hive.metastore.Table) AmazonServiceException(com.amazonaws.AmazonServiceException) DeletePartitionRequest(com.amazonaws.services.glue.model.DeletePartitionRequest) PrestoException(io.prestosql.spi.PrestoException) SchemaTableName(io.prestosql.spi.connector.SchemaTableName)
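
A hedged usage sketch of the override above. It assumes dropPartition is declared on the imported io.prestosql.plugin.hive.metastore.HiveMetastore interface (the @Override suggests so); the metastore instance, identity, and the database, table, and partition values are placeholders for illustration:

import com.google.common.collect.ImmutableList;
import io.prestosql.plugin.hive.authentication.HiveIdentity;
import io.prestosql.plugin.hive.metastore.HiveMetastore;

public class DropPartitionSketch {
    // Assumes an already-configured metastore (e.g. a GlueHiveMetastore) and identity;
    // wiring them up is outside the scope of this sketch.
    static void dropDailyPartition(HiveMetastore metastore, HiveIdentity identity) {
        // Partition values are positional and must line up with the table's partition
        // keys, e.g. a table partitioned by (ds) and the value "2021-01-01".
        metastore.dropPartition(
                identity,
                "sales_db",
                "orders",
                ImmutableList.of("2021-01-01"),
                // deleteData = true also removes the partition directory, but the Glue
                // implementation above only does so for managed tables with a non-empty
                // storage location.
                true);
    }
}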

Aggregations

Partition (io.prestosql.plugin.hive.metastore.Partition)62 PrestoException (io.prestosql.spi.PrestoException)42 Table (io.prestosql.plugin.hive.metastore.Table)41 Optional (java.util.Optional)37 ImmutableMap (com.google.common.collect.ImmutableMap)33 List (java.util.List)33 Map (java.util.Map)33 Path (org.apache.hadoop.fs.Path)33 ImmutableList (com.google.common.collect.ImmutableList)31 HiveIdentity (io.prestosql.plugin.hive.authentication.HiveIdentity)29 Column (io.prestosql.plugin.hive.metastore.Column)29 ConnectorSession (io.prestosql.spi.connector.ConnectorSession)29 SchemaTableName (io.prestosql.spi.connector.SchemaTableName)29 HdfsContext (io.prestosql.plugin.hive.HdfsEnvironment.HdfsContext)28 Objects.requireNonNull (java.util.Objects.requireNonNull)27 Set (java.util.Set)27 ImmutableMap.toImmutableMap (com.google.common.collect.ImmutableMap.toImmutableMap)26 ImmutableSet (com.google.common.collect.ImmutableSet)25 NOT_SUPPORTED (io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED)25 TableNotFoundException (io.prestosql.spi.connector.TableNotFoundException)25