
Example 1 with PartitionInfo

Use of com.netflix.metacat.common.server.connectors.model.PartitionInfo in the metacat project by Netflix.

From the class S3ConnectorPartitionService, method _getConnectorPartitions:

@SuppressWarnings("checkstyle:methodname")
private List<PartitionInfo> _getConnectorPartitions(final QualifiedName tableName, final String filterExpression, final List<String> partitionIds, final Sort sort, final Pageable pageable, final boolean includePartitionDetails) {
    // Check whether the filter expression references the batch id field (FIELD_BATCHID)
    final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
    // Check whether the filter expression references the dateCreated field
    final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
    String dateCreatedSqlCriteria = null;
    if (hasDateCreated) {
        dateCreatedSqlCriteria = getDateCreatedSqlCriteria(filterExpression);
    }
    // Load the table; its id drives the partition lookup
    final Table table = getTable(tableName);
    final Collection<String> singlePartitionExprs = getSinglePartitionExprs(filterExpression);
    final List<Partition> partitions = partitionDao.getPartitions(table.getId(), partitionIds, singlePartitionExprs, dateCreatedSqlCriteria, sort, Strings.isNullOrEmpty(filterExpression) ? pageable : null);
    final FilterPartition filter = new FilterPartition();
    List<PartitionInfo> result = partitions.stream().filter(partition -> {
        Map<String, String> values = null;
        if (hasDateCreated) {
            values = Maps.newHashMap();
            values.put(FIELD_DATE_CREATED, (partition.getCreatedDate().getTime() / 1000) + "");
        }
        return Strings.isNullOrEmpty(filterExpression) || filter.evaluatePartitionExpression(filterExpression, partition.getName(), partition.getUri(), isBatched, values);
    }).map(partition -> infoConverter.toPartitionInfo(tableName, table, partition)).collect(Collectors.toList());
    // Page the filtered results in memory; SQL-side paging was skipped when a filter expression is present
    if (pageable != null && pageable.isPageable() && !Strings.isNullOrEmpty(filterExpression)) {
        int limit = pageable.getOffset() + pageable.getLimit();
        if (result.size() < limit) {
            limit = result.size();
        }
        if (pageable.getOffset() > limit) {
            result = Lists.newArrayList();
        } else {
            result = result.subList(pageable.getOffset(), limit);
        }
    }
    return result;
}
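The final block pages the filtered list in memory, because the SQL query above skips paging whenever a filter expression has to be evaluated per partition (note the pageable passed to partitionDao is null in that case). Below is a minimal standalone sketch of the same clamping arithmetic, using plain Java collections instead of Metacat's Pageable:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

final class PagingSketch {

    /**
     * Returns the slice [offset, offset + limit) of result, clamped to the list size.
     * Mirrors the subList arithmetic in _getConnectorPartitions above.
     */
    static <T> List<T> page(final List<T> result, final int offset, final int limit) {
        int end = offset + limit;
        if (result.size() < end) {
            end = result.size();
        }
        // An offset past the end of the list yields an empty page rather than an exception
        return offset > end ? Collections.<T>emptyList() : result.subList(offset, end);
    }

    public static void main(final String[] args) {
        final List<Integer> items = Arrays.asList(1, 2, 3, 4, 5);
        System.out.println(page(items, 1, 2));  // [2, 3]
        System.out.println(page(items, 4, 10)); // [5]
        System.out.println(page(items, 9, 2));  // []
    }
}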
Also used: TableDao(com.netflix.metacat.connector.s3.dao.TableDao) PartitionKeyParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval) PartitionParamParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval) Transactional(com.google.inject.persist.Transactional) Partition(com.netflix.metacat.connector.s3.model.Partition) Inject(javax.inject.Inject) Strings(com.google.common.base.Strings) ConnectorPartitionService(com.netflix.metacat.common.server.connectors.ConnectorPartitionService) Lists(com.google.common.collect.Lists) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) PartitionParser(com.netflix.metacat.common.server.partition.parser.PartitionParser) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) Named(javax.inject.Named) PartitionUtil(com.netflix.metacat.common.server.partition.util.PartitionUtil) Nonnull(javax.annotation.Nonnull) PartitionDao(com.netflix.metacat.connector.s3.dao.PartitionDao) Nullable(javax.annotation.Nullable) Collection(java.util.Collection) Pageable(com.netflix.metacat.common.dto.Pageable) QualifiedName(com.netflix.metacat.common.QualifiedName) BaseInfo(com.netflix.metacat.common.server.connectors.model.BaseInfo) TableNotFoundException(com.netflix.metacat.common.server.connectors.exception.TableNotFoundException) Maps(com.google.common.collect.Maps) Collectors(java.util.stream.Collectors) FilterPartition(com.netflix.metacat.common.server.partition.util.FilterPartition) PartitionsSaveResponse(com.netflix.metacat.common.server.connectors.model.PartitionsSaveResponse) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) PartitionAlreadyExistsException(com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException) StringReader(java.io.StringReader) PartitionsSaveRequest(com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest) Table(com.netflix.metacat.connector.s3.model.Table) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest) PartitionNotFoundException(com.netflix.metacat.common.server.connectors.exception.PartitionNotFoundException) Sort(com.netflix.metacat.common.dto.Sort)

Example 2 with PartitionInfo

Use of com.netflix.metacat.common.server.connectors.model.PartitionInfo in the metacat project by Netflix.

From the class HiveConnectorFastPartitionService, method getpartitions:

private List<PartitionInfo> getpartitions(@Nonnull @NonNull final String databaseName, @Nonnull @NonNull final String tableName, @Nullable final List<String> partitionIds, final String filterExpression, final Sort sort, final Pageable pageable, final boolean includePartitionDetails) {
    final FilterPartition filter = new FilterPartition();
    // Check whether the filter expression references the batch id field (FIELD_BATCHID)
    final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
    final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
    // Handler for reading the result set
    final ResultSetHandler<List<PartitionDetail>> handler = rs -> {
        final List<PartitionDetail> result = Lists.newArrayList();
        while (rs.next()) {
            final String name = rs.getString("name");
            final String uri = rs.getString("uri");
            final long createdDate = rs.getLong(FIELD_DATE_CREATED);
            Map<String, String> values = null;
            if (hasDateCreated) {
                values = Maps.newHashMap();
                values.put(FIELD_DATE_CREATED, createdDate + "");
            }
            if (Strings.isNullOrEmpty(filterExpression) || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                final Long id = rs.getLong("id");
                final Long sdId = rs.getLong("sd_id");
                final Long serdeId = rs.getLong("serde_id");
                final String inputFormat = rs.getString("input_format");
                final String outputFormat = rs.getString("output_format");
                final String serializationLib = rs.getString("slib");
                final StorageInfo storageInfo = new StorageInfo();
                storageInfo.setUri(uri);
                storageInfo.setInputFormat(inputFormat);
                storageInfo.setOutputFormat(outputFormat);
                storageInfo.setSerializationLib(serializationLib);
                final AuditInfo auditInfo = new AuditInfo();
                auditInfo.setCreatedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                auditInfo.setLastModifiedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                result.add(new PartitionDetail(id, sdId, serdeId, PartitionInfo.builder().name(QualifiedName.ofPartition(catalogName, databaseName, tableName, name)).auditInfo(auditInfo).serde(storageInfo).build()));
            }
        }
        return result;
    };
    final List<PartitionInfo> partitionInfos = new ArrayList<>();
    final List<PartitionDetail> partitions = getHandlerResults(databaseName, tableName, filterExpression, partitionIds, SQL_GET_PARTITIONS, handler, sort, pageable);
    if (includePartitionDetails && !partitions.isEmpty()) {
        final List<Long> partIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> sdIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> serdeIds = Lists.newArrayListWithCapacity(partitions.size());
        for (PartitionDetail partitionDetail : partitions) {
            partIds.add(partitionDetail.getId());
            sdIds.add(partitionDetail.getSdId());
            serdeIds.add(partitionDetail.getSerdeId());
        }
        final List<ListenableFuture<Void>> futures = Lists.newArrayList();
        final Map<Long, Map<String, String>> partitionParams = Maps.newHashMap();
        futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(partIds, SQL_GET_PARTITION_PARAMS, "part_id", partitionParams)));
        final Map<Long, Map<String, String>> sdParams = Maps.newHashMap();
        if (!sdIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(sdIds, SQL_GET_SD_PARAMS, "sd_id", sdParams)));
        }
        final Map<Long, Map<String, String>> serdeParams = Maps.newHashMap();
        if (!serdeIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(serdeIds, SQL_GET_SERDE_PARAMS, "serde_id", serdeParams)));
        }
        try {
            // Wait up to an hour for all parameter look-ups; successfulAsList tolerates individual failures
            Futures.transform(Futures.successfulAsList(futures), Functions.constant(null)).get(1, TimeUnit.HOURS);
        } catch (Exception e) {
            Throwables.propagate(e);
        }
        for (PartitionDetail partitionDetail : partitions) {
            partitionDetail.getPartitionInfo().setMetadata(partitionParams.get(partitionDetail.getId()));
            partitionDetail.getPartitionInfo().getSerde().setParameters(sdParams.get(partitionDetail.getSdId()));
            partitionDetail.getPartitionInfo().getSerde().setSerdeInfoParameters(serdeParams.get(partitionDetail.getSerdeId()));
        }
    }
    for (PartitionDetail partitionDetail : partitions) {
        partitionInfos.add(partitionDetail.getPartitionInfo());
    }
    return partitionInfos;
}
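For readers who only need the row-to-model step, here is the handler's PartitionInfo assembly in isolation. It is a sketch that assumes only the Metacat model classes imported above; the method parameters stand in for what the result-set columns would supply:

import java.time.Instant;
import java.util.Date;

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.PartitionInfo;
import com.netflix.metacat.common.server.connectors.model.StorageInfo;

final class RowToPartitionInfoSketch {

    /** Builds a PartitionInfo from raw column values, as the ResultSetHandler above does per row. */
    static PartitionInfo fromRow(final String catalogName, final String databaseName, final String tableName,
                                 final String partitionName, final String uri, final long createdEpochSeconds) {
        final StorageInfo storageInfo = new StorageInfo();
        storageInfo.setUri(uri);
        final AuditInfo auditInfo = new AuditInfo();
        auditInfo.setCreatedDate(Date.from(Instant.ofEpochSecond(createdEpochSeconds)));
        auditInfo.setLastModifiedDate(Date.from(Instant.ofEpochSecond(createdEpochSeconds)));
        return PartitionInfo.builder()
            .name(QualifiedName.ofPartition(catalogName, databaseName, tableName, partitionName))
            .auditInfo(auditInfo)
            .serde(storageInfo)
            .build();
    }
}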
Also used: Connection(java.sql.Connection) PartitionKeyParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval) Date(java.util.Date) PartitionFilterGenerator(com.netflix.metacat.connector.hive.util.PartitionFilterGenerator) PartitionParamParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) QueryRunner(org.apache.commons.dbutils.QueryRunner) NonNull(lombok.NonNull) Collection(java.util.Collection) Pageable(com.netflix.metacat.common.dto.Pageable) QualifiedName(com.netflix.metacat.common.QualifiedName) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) HiveMetrics(com.netflix.metacat.connector.hive.monitoring.HiveMetrics) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) ResultSetHandler(org.apache.commons.dbutils.ResultSetHandler) Joiner(com.google.common.base.Joiner) Sort(com.netflix.metacat.common.dto.Sort) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) HashMap(java.util.HashMap) Id(com.netflix.spectator.api.Id) ArrayList(java.util.ArrayList) Inject(javax.inject.Inject) Strings(com.google.common.base.Strings) SQLException(java.sql.SQLException) Lists(com.google.common.collect.Lists) ThreadServiceManager(com.netflix.metacat.common.server.util.ThreadServiceManager) DataSource(javax.sql.DataSource) PartitionParser(com.netflix.metacat.common.server.partition.parser.PartitionParser) Named(javax.inject.Named) HiveConnectorInfoConverter(com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) PartitionDetail(com.netflix.metacat.connector.hive.util.PartitionDetail) Functions(com.google.common.base.Functions) DataSourceManager(com.netflix.metacat.common.server.util.DataSourceManager) Throwables(com.google.common.base.Throwables) Maps(com.google.common.collect.Maps) FilterPartition(com.netflix.metacat.common.server.partition.util.FilterPartition) TimeUnit(java.util.concurrent.TimeUnit) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Futures(com.google.common.util.concurrent.Futures) StringReader(java.io.StringReader) Registry(com.netflix.spectator.api.Registry) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest)

Example 3 with PartitionInfo

Use of com.netflix.metacat.common.server.connectors.model.PartitionInfo in the metacat project by Netflix.

From the class HiveConnectorInfoConverter, method fromPartitionInfo:

/**
     * Converts a Metacat PartitionInfo to a Hive metastore partition.
     *
     * @param tableInfo Metacat table info
     * @param partition Metacat partition info
     * @return Hive partition
     */
@Override
public Partition fromPartitionInfo(@Nonnull @NonNull final TableInfo tableInfo, @Nonnull @NonNull final PartitionInfo partition) {
    final QualifiedName name = partition.getName();
    final List<String> values = Lists.newArrayListWithCapacity(16);
    Map<String, String> metadata = partition.getMetadata();
    if (metadata == null) {
        // Can't use Collections.emptyMap() here: it is immutable, and the embedded
        // metastore may later add entries to the partition parameters.
        metadata = new HashMap<>();
    }
    final List<FieldInfo> fields = tableInfo.getFields();
    List<FieldSchema> fieldSchemas = Collections.emptyList();
    if (notNull(fields)) {
        fieldSchemas = fields.stream().filter(field -> !field.isPartitionKey()).map(this::metacatToHiveField).collect(Collectors.toList());
    }
    final StorageDescriptor sd = fromStorageInfo(partition.getSerde(), fieldSchemas);
    // Fall back to the table-level serialization lib when the partition does not set one
    if (notNull(sd.getSerdeInfo()) && notNull(tableInfo.getSerde()) && Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
        sd.getSerdeInfo().setSerializationLib(tableInfo.getSerde().getSerializationLib());
    }
    final AuditInfo auditInfo = partition.getAudit();
    final int createTime = (notNull(auditInfo) && notNull(auditInfo.getCreatedDate())) ? dateToEpochSeconds(auditInfo.getCreatedDate()) : 0;
    final int lastAccessTime = (notNull(auditInfo) && notNull(auditInfo.getLastModifiedDate())) ? dateToEpochSeconds(auditInfo.getLastModifiedDate()) : 0;
    if (null == name) {
        return new Partition(values, "", "", createTime, lastAccessTime, sd, metadata);
    }
    if (notNull(name.getPartitionName())) {
        for (String partialPartName : SLASH_SPLITTER.split(partition.getName().getPartitionName())) {
            final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(partialPartName));
            Preconditions.checkState(nameValues.size() == 2, "Unrecognized partition name: " + partition.getName());
            values.add(nameValues.get(1));
        }
    }
    final String databaseName = notNull(name.getDatabaseName()) ? name.getDatabaseName() : "";
    final String tableName = notNull(name.getTableName()) ? name.getTableName() : "";
    return new Partition(values, databaseName, tableName, createTime, lastAccessTime, sd, metadata);
}
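The name-parsing loop depends on two Guava splitters (SLASH_SPLITTER and EQUAL_SPLITTER) defined elsewhere in the converter. Assuming the conventional definitions Splitter.on('/') and Splitter.on('='), which the Hive partition-name format "key=value/key=value" implies, the value extraction can be sketched standalone:

import java.util.List;

import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;

final class PartitionNameSketch {
    private static final Splitter SLASH_SPLITTER = Splitter.on('/'); // assumed definition
    private static final Splitter EQUAL_SPLITTER = Splitter.on('='); // assumed definition

    /** Extracts the values from a Hive-style partition name such as "dateint=20170101/hour=1". */
    static List<String> partitionValues(final String partitionName) {
        final List<String> values = Lists.newArrayList();
        for (final String part : SLASH_SPLITTER.split(partitionName)) {
            final List<String> kv = ImmutableList.copyOf(EQUAL_SPLITTER.split(part));
            Preconditions.checkState(kv.size() == 2, "Unrecognized partition name: " + partitionName);
            values.add(kv.get(1));
        }
        return values;
    }

    public static void main(final String[] args) {
        System.out.println(partitionValues("dateint=20170101/hour=1")); // [20170101, 1]
    }
}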
Also used: Date(java.util.Date) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) HashMap(java.util.HashMap) SerDeInfo(org.apache.hadoop.hive.metastore.api.SerDeInfo) Partition(org.apache.hadoop.hive.metastore.api.Partition) Strings(com.google.common.base.Strings) DatabaseInfo(com.netflix.metacat.common.server.connectors.model.DatabaseInfo) FieldInfo(com.netflix.metacat.common.server.connectors.model.FieldInfo) Lists(com.google.common.collect.Lists) ImmutableList(com.google.common.collect.ImmutableList) ConnectorInfoConverter(com.netflix.metacat.common.server.connectors.ConnectorInfoConverter) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) LinkedList(java.util.LinkedList) Splitter(com.google.common.base.Splitter) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) Nonnull(javax.annotation.Nonnull) NonNull(lombok.NonNull) QualifiedName(com.netflix.metacat.common.QualifiedName) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) Table(org.apache.hadoop.hive.metastore.api.Table) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) TableInfo(com.netflix.metacat.common.server.connectors.model.TableInfo) HiveTableUtil(com.netflix.metacat.connector.hive.util.HiveTableUtil) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Database(org.apache.hadoop.hive.metastore.api.Database) Collections(java.util.Collections) StructField(org.apache.hadoop.hive.serde2.objectinspector.StructField)

Example 4 with PartitionInfo

Use of com.netflix.metacat.common.server.connectors.model.PartitionInfo in the metacat project by Netflix.

From the class PartitionServiceImpl, method save:

@Override
public PartitionsSaveResponseDto save(@Nonnull final QualifiedName name, final PartitionsSaveRequestDto dto) {
    PartitionsSaveResponseDto result = new PartitionsSaveResponseDto();
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
    final ConnectorPartitionService service = connectorManager.getPartitionService(name.getCatalogName());
    final List<PartitionDto> partitionDtos = dto.getPartitions();
    // If no partitions are passed, then return
    if (partitionDtos == null || partitionDtos.isEmpty()) {
        return result;
    }
    final List<String> partitionIdsForDeletes = dto.getPartitionIdsForDeletes();
    registry.gauge(this.partitionAddedCountId.withTags(new HashMap<>(name.parts())), partitionDtos.size());
    if (!tableService.exists(name)) {
        throw new TableNotFoundException(name);
    }
    List<HasMetadata> deletePartitions = Lists.newArrayList();
    if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
        eventBus.postSync(new MetacatDeleteTablePartitionPreEvent(name, metacatRequestContext, this, dto));
        registry.gauge(this.partitionDeletedCountId.withTags(new HashMap<>(name.parts())), partitionIdsForDeletes.size());
        final GetPartitionsRequestDto requestDto = new GetPartitionsRequestDto();
        requestDto.setIncludePartitionDetails(false);
        requestDto.setPartitionNames(partitionIdsForDeletes);
        final List<PartitionInfo> deletePartitionInfos = service.getPartitions(connectorContext, name, converterUtil.toPartitionListRequest(requestDto, null, null));
        if (deletePartitionInfos != null) {
            deletePartitions = deletePartitionInfos.stream().map(converterUtil::toPartitionDto).collect(Collectors.toList());
        }
    }
    //
    // Save all the new and updated partitions
    //
    eventBus.postSync(new MetacatSaveTablePartitionPreEvent(name, metacatRequestContext, this, dto));
    log.info("Saving partitions({}) for {}", partitionDtos.size(), name);
    result = converterUtil.toPartitionsSaveResponseDto(service.savePartitions(connectorContext, name, converterUtil.toPartitionsSaveRequest(dto)));
    // Save metadata
    log.info("Saving user metadata for partitions for {}", name);
    // delete metadata
    if (!deletePartitions.isEmpty()) {
        log.info("Deleting user metadata for partitions with names {} for {}", partitionIdsForDeletes, name);
        deleteMetadatas(metacatRequestContext.getUserName(), deletePartitions);
    }
    userMetadataService.saveMetadatas(metacatRequestContext.getUserName(), partitionDtos, true);
    eventBus.postAsync(new MetacatSaveTablePartitionPostEvent(name, metacatRequestContext, this, partitionDtos, result));
    if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
        eventBus.postAsync(new MetacatDeleteTablePartitionPostEvent(name, metacatRequestContext, this, partitionIdsForDeletes));
    }
    return result;
}
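A hypothetical caller of this method might look like the sketch below. The setters on PartitionsSaveRequestDto are assumptions inferred from the getters used above, and the package of the PartitionService interface is also assumed, so treat this as an illustration rather than the documented client API:

import java.util.Collections;

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.common.dto.PartitionsSaveRequestDto;
import com.netflix.metacat.common.dto.PartitionsSaveResponseDto;

final class SavePartitionsCallerSketch {

    // PartitionService is the interface implemented by PartitionServiceImpl; its package is assumed here
    static PartitionsSaveResponseDto saveOne(final com.netflix.metacat.main.services.PartitionService service,
                                             final PartitionDto partition) {
        final QualifiedName table = QualifiedName.ofTable("prodhive", "mydb", "mytable");
        final PartitionsSaveRequestDto request = new PartitionsSaveRequestDto();
        request.setPartitions(Collections.singletonList(partition));        // assumed setter
        request.setPartitionIdsForDeletes(Collections.<String>emptyList()); // assumed setter
        // An empty partitions list makes save() return an empty response with no side effects
        return service.save(table, request);
    }
}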
Also used: MetacatRequestContext(com.netflix.metacat.common.MetacatRequestContext) HasMetadata(com.netflix.metacat.common.dto.HasMetadata) HashMap(java.util.HashMap) MetacatSaveTablePartitionPreEvent(com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPreEvent) GetPartitionsRequestDto(com.netflix.metacat.common.dto.GetPartitionsRequestDto) MetacatDeleteTablePartitionPreEvent(com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPreEvent) MetacatDeleteTablePartitionPostEvent(com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent) TableNotFoundException(com.netflix.metacat.common.server.connectors.exception.TableNotFoundException) MetacatSaveTablePartitionPostEvent(com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) PartitionDto(com.netflix.metacat.common.dto.PartitionDto) ConnectorPartitionService(com.netflix.metacat.common.server.connectors.ConnectorPartitionService) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) PartitionsSaveResponseDto(com.netflix.metacat.common.dto.PartitionsSaveResponseDto)

Example 5 with PartitionInfo

Use of com.netflix.metacat.common.server.connectors.model.PartitionInfo in the metacat project by Netflix.

From the class HiveConnectorPartitionService, method getPartitions:

/**
     * {@inheritDoc}.
     */
@Override
public List<PartitionInfo> getPartitions(@Nonnull @NonNull final ConnectorContext requestContext, @Nonnull @NonNull final QualifiedName tableName, @Nonnull @NonNull final PartitionListRequest partitionsRequest) {
    try {
        final List<Partition> partitions = getPartitions(tableName, partitionsRequest.getFilter(), partitionsRequest.getPartitionNames(), partitionsRequest.getSort(), partitionsRequest.getPageable());
        final Table table = metacatHiveClient.getTableByName(tableName.getDatabaseName(), tableName.getTableName());
        final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableName, table);
        final List<PartitionInfo> partitionInfos = new ArrayList<>();
        for (Partition partition : partitions) {
            partitionInfos.add(hiveMetacatConverters.toPartitionInfo(tableInfo, partition));
        }
        return partitionInfos;
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException | InvalidObjectException e) {
        throw new InvalidMetaException("Invalid metadata for " + tableName, e);
    } catch (TException e) {
        throw new ConnectorException(String.format("Failed to get partitions for hive table %s", tableName), e);
    }
}
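The catch chain is the connector's standard translation from Hive metastore and Thrift exceptions to Metacat connector exceptions. A generic wrapper for the same mapping could be sketched as follows; the MetastoreCall functional interface is hypothetical, while the exception types and constructors are the ones used above:

import org.apache.thrift.TException;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.ConnectorException;
import com.netflix.metacat.common.server.connectors.exception.InvalidMetaException;
import com.netflix.metacat.common.server.connectors.exception.TableNotFoundException;

final class HiveExceptionMappingSketch {

    /** Hypothetical callable type for metastore operations that throw TException. */
    @FunctionalInterface
    interface MetastoreCall<T> {
        T call() throws TException;
    }

    /** Runs a metastore operation and rethrows failures as Metacat connector exceptions. */
    static <T> T translate(final QualifiedName tableName, final MetastoreCall<T> body) {
        try {
            return body.call();
        } catch (NoSuchObjectException e) {
            throw new TableNotFoundException(tableName, e);
        } catch (MetaException | InvalidObjectException e) {
            throw new InvalidMetaException("Invalid metadata for " + tableName, e);
        } catch (TException e) {
            throw new ConnectorException(String.format("Failed to get partitions for hive table %s", tableName), e);
        }
    }
}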
Also used: TException(org.apache.thrift.TException) Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) ArrayList(java.util.ArrayList) InvalidMetaException(com.netflix.metacat.common.server.connectors.exception.InvalidMetaException) TableNotFoundException(com.netflix.metacat.common.server.connectors.exception.TableNotFoundException) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) TableInfo(com.netflix.metacat.common.server.connectors.model.TableInfo) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) InvalidObjectException(org.apache.hadoop.hive.metastore.api.InvalidObjectException) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) MetaException(org.apache.hadoop.hive.metastore.api.MetaException)

Aggregations

PartitionInfo (com.netflix.metacat.common.server.connectors.model.PartitionInfo): 9
ConnectorContext (com.netflix.metacat.common.server.connectors.ConnectorContext): 7
TableNotFoundException (com.netflix.metacat.common.server.connectors.exception.TableNotFoundException): 7
QualifiedName (com.netflix.metacat.common.QualifiedName): 6
ConnectorPartitionService (com.netflix.metacat.common.server.connectors.ConnectorPartitionService): 6
HashMap (java.util.HashMap): 6
Map (java.util.Map): 6
Strings (com.google.common.base.Strings): 5
Lists (com.google.common.collect.Lists): 5
List (java.util.List): 5
Collectors (java.util.stream.Collectors): 5
Nonnull (javax.annotation.Nonnull): 5
Pageable (com.netflix.metacat.common.dto.Pageable): 4
Sort (com.netflix.metacat.common.dto.Sort): 4
PartitionListRequest (com.netflix.metacat.common.server.connectors.model.PartitionListRequest): 4
Nullable (javax.annotation.Nullable): 4
Inject (javax.inject.Inject): 4
Named (javax.inject.Named): 4
Maps (com.google.common.collect.Maps): 3
Slf4j (lombok.extern.slf4j.Slf4j): 3