Example 1 with PartitionListRequest

Use of com.netflix.metacat.common.server.connectors.model.PartitionListRequest in project metacat by Netflix.

From class DirectSqlGetPartition, method getPartitionKeys.

/**
 * Gets the partition names/keys based on a filter expression for the specified table.
 *
 * @param requestContext    The Metacat request context
 * @param tableName         table handle to get partitions for
 * @param partitionsRequest The metadata for what kind of partitions to get from the table
 * @return filtered list of partition names
 */
@Transactional(readOnly = true)
public List<String> getPartitionKeys(final ConnectorRequestContext requestContext,
                                     final QualifiedName tableName,
                                     final PartitionListRequest partitionsRequest) {
    final long start = registry.clock().wallTime();
    final List<String> result;
    final List<String> partitionNames = partitionsRequest.getPartitionNames();
    final Sort sort = partitionsRequest.getSort();
    final Pageable pageable = partitionsRequest.getPageable();
    final String filterExpression = partitionsRequest.getFilter();
    if (filterExpression != null) {
        return filterPartitionsColumn(tableName.getDatabaseName(), tableName.getTableName(), partitionNames, PARTITION_NAME, filterExpression, sort, pageable, partitionsRequest.getIncludeAuditOnly());
    } else {
        final ResultSetExtractor<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                names.add(rs.getString("name"));
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(), null, partitionNames, SQL.SQL_GET_PARTITIONS_WITH_KEY, handler, sort, pageable, partitionsRequest.getIncludeAuditOnly());
    }
    this.fastServiceMetric.recordTimer(HiveMetrics.TagGetPartitionKeys.getMetricName(), registry.clock().wallTime() - start);
    return result;
}
Also used : HiveConfigConstants(com.netflix.metacat.connector.hive.util.HiveConfigConstants) HiveConnectorFastServiceMetric(com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric) PartitionKeyParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval) Date(java.util.Date) TimeoutException(java.util.concurrent.TimeoutException) PartitionFilterGenerator(com.netflix.metacat.connector.hive.util.PartitionFilterGenerator) PartitionParamParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval) Matcher(java.util.regex.Matcher) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) ConnectorRequestContext(com.netflix.metacat.common.server.connectors.ConnectorRequestContext) Collection(java.util.Collection) Pageable(com.netflix.metacat.common.dto.Pageable) QualifiedName(com.netflix.metacat.common.QualifiedName) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) HiveMetrics(com.netflix.metacat.connector.hive.monitoring.HiveMetrics) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) Joiner(com.google.common.base.Joiner) Sort(com.netflix.metacat.common.dto.Sort) Types(java.sql.Types) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) JdbcTemplate(org.springframework.jdbc.core.JdbcTemplate) Strings(com.google.common.base.Strings) Lists(com.google.common.collect.Lists) ThreadServiceManager(com.netflix.metacat.common.server.util.ThreadServiceManager) ImmutableList(com.google.common.collect.ImmutableList) Qualifier(org.springframework.beans.factory.annotation.Qualifier) PartitionParser(com.netflix.metacat.common.server.partition.parser.PartitionParser) Config(com.netflix.metacat.common.server.properties.Config) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) Throwables(com.google.common.base.Throwables) Maps(com.google.common.collect.Maps) Table(org.apache.hadoop.hive.metastore.api.Table) SqlParameterValue(org.springframework.jdbc.core.SqlParameterValue) FilterPartition(com.netflix.metacat.common.server.partition.util.FilterPartition) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Futures(com.google.common.util.concurrent.Futures) StringReader(java.io.StringReader) Registry(com.netflix.spectator.api.Registry) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest) HiveFilterPartition(com.netflix.metacat.connector.hive.util.HiveFilterPartition) VisibleForTesting(com.google.common.annotations.VisibleForTesting) HivePartitionKeyParserEval(com.netflix.metacat.connector.hive.util.HivePartitionKeyParserEval) ResultSetExtractor(org.springframework.jdbc.core.ResultSetExtractor) Transactional(org.springframework.transaction.annotation.Transactional) Pageable(com.netflix.metacat.common.dto.Pageable) Sort(com.netflix.metacat.common.dto.Sort) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) Transactional(org.springframework.transaction.annotation.Transactional)
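A minimal caller sketch for the method above (not from the metacat sources): the six-argument PartitionListRequest constructor is the one shown in Example 4, the filter expression and the wiring of DirectSqlGetPartition and ConnectorRequestContext are assumptions made for illustration.

// Hypothetical usage sketch, not part of the metacat code base.
import java.util.List;

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.connector.hive.sql.DirectSqlGetPartition; // package assumed

final class GetPartitionKeysSketch {
    private GetPartitionKeysSketch() {
    }

    /**
     * Lists partition names matching a filter expression (illustrative only).
     * The argument order (filter, partitionNames, includePartitionDetails,
     * pageable, sort, includeAuditOnly) is assumed to mirror the constructor
     * call seen in Example 4.
     */
    static List<String> listKeys(final DirectSqlGetPartition directSql,
                                 final ConnectorRequestContext context,
                                 final QualifiedName table) {
        // "dateint>=20230101" is a made-up filter expression for illustration.
        final PartitionListRequest request =
            new PartitionListRequest("dateint>=20230101", null, false, null, null, false);
        return directSql.getPartitionKeys(context, table, request);
    }
}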

Example 2 with PartitionListRequest

Use of com.netflix.metacat.common.server.connectors.model.PartitionListRequest in project metacat by Netflix.

From class DirectSqlGetPartition, method getPartitionUris.

/**
 * Gets the partition uris based on a filter expression for the specified table.
 *
 * @param requestContext    The Metacat request context
 * @param tableName         table handle to get partitions for
 * @param partitionsRequest The metadata for what kind of partitions to get from the table
 * @return filtered list of partition uris
 */
@Transactional(readOnly = true)
public List<String> getPartitionUris(final ConnectorRequestContext requestContext,
                                     final QualifiedName tableName,
                                     final PartitionListRequest partitionsRequest) {
    final long start = registry.clock().wallTime();
    final List<String> result;
    final List<String> partitionNames = partitionsRequest.getPartitionNames();
    final Sort sort = partitionsRequest.getSort();
    final Pageable pageable = partitionsRequest.getPageable();
    final String filterExpression = partitionsRequest.getFilter();
    if (filterExpression != null) {
        return filterPartitionsColumn(tableName.getDatabaseName(), tableName.getTableName(), partitionNames, PARTITION_URI, filterExpression, sort, pageable, partitionsRequest.getIncludeAuditOnly());
    } else {
        final ResultSetExtractor<List<String>> handler = rs -> {
            final List<String> uris = Lists.newArrayList();
            while (rs.next()) {
                uris.add(rs.getString(PARTITION_URI));
            }
            return uris;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(), null, partitionNames, SQL.SQL_GET_PARTITIONS_URI, handler, sort, pageable, partitionsRequest.getIncludeAuditOnly());
    }
    this.fastServiceMetric.recordTimer(HiveMetrics.TagGetPartitionKeys.getMetricName(), registry.clock().wallTime() - start);
    return result;
}
Also used : HiveConfigConstants(com.netflix.metacat.connector.hive.util.HiveConfigConstants) HiveConnectorFastServiceMetric(com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric) PartitionKeyParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval) Date(java.util.Date) TimeoutException(java.util.concurrent.TimeoutException) PartitionFilterGenerator(com.netflix.metacat.connector.hive.util.PartitionFilterGenerator) PartitionParamParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval) Matcher(java.util.regex.Matcher) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) ConnectorRequestContext(com.netflix.metacat.common.server.connectors.ConnectorRequestContext) Collection(java.util.Collection) Pageable(com.netflix.metacat.common.dto.Pageable) QualifiedName(com.netflix.metacat.common.QualifiedName) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) HiveMetrics(com.netflix.metacat.connector.hive.monitoring.HiveMetrics) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) Joiner(com.google.common.base.Joiner) Sort(com.netflix.metacat.common.dto.Sort) Types(java.sql.Types) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) JdbcTemplate(org.springframework.jdbc.core.JdbcTemplate) Strings(com.google.common.base.Strings) Lists(com.google.common.collect.Lists) ThreadServiceManager(com.netflix.metacat.common.server.util.ThreadServiceManager) ImmutableList(com.google.common.collect.ImmutableList) Qualifier(org.springframework.beans.factory.annotation.Qualifier) PartitionParser(com.netflix.metacat.common.server.partition.parser.PartitionParser) Config(com.netflix.metacat.common.server.properties.Config) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) Throwables(com.google.common.base.Throwables) Maps(com.google.common.collect.Maps) Table(org.apache.hadoop.hive.metastore.api.Table) SqlParameterValue(org.springframework.jdbc.core.SqlParameterValue) FilterPartition(com.netflix.metacat.common.server.partition.util.FilterPartition) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Futures(com.google.common.util.concurrent.Futures) StringReader(java.io.StringReader) Registry(com.netflix.spectator.api.Registry) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest) HiveFilterPartition(com.netflix.metacat.connector.hive.util.HiveFilterPartition) VisibleForTesting(com.google.common.annotations.VisibleForTesting) HivePartitionKeyParserEval(com.netflix.metacat.connector.hive.util.HivePartitionKeyParserEval) ResultSetExtractor(org.springframework.jdbc.core.ResultSetExtractor) Transactional(org.springframework.transaction.annotation.Transactional) Pageable(com.netflix.metacat.common.dto.Pageable) Sort(com.netflix.metacat.common.dto.Sort) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) Transactional(org.springframework.transaction.annotation.Transactional)
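As with getPartitionKeys, a hedged sketch of calling getPartitionUris, here without a filter but with a page limit; the Pageable(limit, offset) constructor and the surrounding wiring are assumptions for illustration.

// Hypothetical usage sketch, not part of the metacat code base.
import java.util.List;

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.connector.hive.sql.DirectSqlGetPartition; // package assumed

final class GetPartitionUrisSketch {
    private GetPartitionUrisSketch() {
    }

    /** Fetches the first 100 partition uris of a table, unsorted and unfiltered (illustrative only). */
    static List<String> firstHundredUris(final DirectSqlGetPartition directSql,
                                         final ConnectorRequestContext context,
                                         final QualifiedName table) {
        final Pageable firstPage = new Pageable(100, 0); // limit, offset; constructor assumed
        final PartitionListRequest request =
            new PartitionListRequest(null, null, false, firstPage, null, false);
        return directSql.getPartitionUris(context, table, request);
    }
}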

Example 3 with PartitionListRequest

Use of com.netflix.metacat.common.server.connectors.model.PartitionListRequest in project metacat by Netflix.

From class HiveConnectorFastPartitionService, method getPartitionKeys.

/**
     * {@inheritDoc}.
     */
@Override
public List<String> getPartitionKeys(@Nonnull @NonNull final ConnectorContext requestContext,
                                     @Nonnull @NonNull final QualifiedName tableName,
                                     @Nonnull @NonNull final PartitionListRequest partitionsRequest) {
    final long start = registry.clock().monotonicTime();
    final Map<String, String> tags = new HashMap<String, String>();
    tags.put("request", "getPartitionKeys");
    final List<String> result;
    final List<String> partitionNames = partitionsRequest.getPartitionNames();
    final Sort sort = partitionsRequest.getSort();
    final Pageable pageable = partitionsRequest.getPageable();
    final String filterExpression = partitionsRequest.getFilter();
    if (filterExpression != null) {
        final FilterPartition filter = new FilterPartition();
        // batch exists
        final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
        final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
        // Handler for reading the result set
        final ResultSetHandler<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                final String name = rs.getString("name");
                final String uri = rs.getString("uri");
                final long createdDate = rs.getLong(FIELD_DATE_CREATED);
                Map<String, String> values = null;
                if (hasDateCreated) {
                    values = Maps.newHashMap();
                    values.put(FIELD_DATE_CREATED, createdDate + "");
                }
                if (Strings.isNullOrEmpty(filterExpression) || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                    names.add(name);
                }
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(), filterExpression, partitionNames, SQL_GET_PARTITIONS_WITH_KEY_URI, handler, sort, pageable);
    } else {
        // Handler for reading the result set
        final ResultSetHandler<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                names.add(rs.getString("name"));
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(), null, partitionNames, SQL_GET_PARTITIONS_WITH_KEY, handler, sort, pageable);
    }
    final long duration = registry.clock().monotonicTime() - start;
    log.debug("### Time taken to complete getPartitionKeys is {} ms", duration);
    this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    return result;
}
Also used : Connection(java.sql.Connection) PartitionKeyParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval) Date(java.util.Date) PartitionFilterGenerator(com.netflix.metacat.connector.hive.util.PartitionFilterGenerator) PartitionParamParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) QueryRunner(org.apache.commons.dbutils.QueryRunner) NonNull(lombok.NonNull) Collection(java.util.Collection) Pageable(com.netflix.metacat.common.dto.Pageable) QualifiedName(com.netflix.metacat.common.QualifiedName) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) HiveMetrics(com.netflix.metacat.connector.hive.monitoring.HiveMetrics) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) ResultSetHandler(org.apache.commons.dbutils.ResultSetHandler) Joiner(com.google.common.base.Joiner) Sort(com.netflix.metacat.common.dto.Sort) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) HashMap(java.util.HashMap) Id(com.netflix.spectator.api.Id) ArrayList(java.util.ArrayList) Inject(javax.inject.Inject) Strings(com.google.common.base.Strings) SQLException(java.sql.SQLException) Lists(com.google.common.collect.Lists) ThreadServiceManager(com.netflix.metacat.common.server.util.ThreadServiceManager) DataSource(javax.sql.DataSource) PartitionParser(com.netflix.metacat.common.server.partition.parser.PartitionParser) Named(javax.inject.Named) HiveConnectorInfoConverter(com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) PartitionDetail(com.netflix.metacat.connector.hive.util.PartitionDetail) Functions(com.google.common.base.Functions) DataSourceManager(com.netflix.metacat.common.server.util.DataSourceManager) Throwables(com.google.common.base.Throwables) Maps(com.google.common.collect.Maps) FilterPartition(com.netflix.metacat.common.server.partition.util.FilterPartition) TimeUnit(java.util.concurrent.TimeUnit) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Futures(com.google.common.util.concurrent.Futures) StringReader(java.io.StringReader) Registry(com.netflix.spectator.api.Registry) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest) FilterPartition(com.netflix.metacat.common.server.partition.util.FilterPartition) HashMap(java.util.HashMap) Pageable(com.netflix.metacat.common.dto.Pageable) Sort(com.netflix.metacat.common.dto.Sort) List(java.util.List) ArrayList(java.util.ArrayList) Map(java.util.Map) HashMap(java.util.HashMap)
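The filtered branch above evaluates the filter expression in memory against each row via FilterPartition. Below is a standalone sketch of that evaluation, using the same five-argument call as the handler; the expression, partition name, and uri are invented for illustration.

// Illustrative only; the expression and partition coordinates are made up.
import com.netflix.metacat.common.server.partition.util.FilterPartition;

final class FilterEvaluationSketch {
    private FilterEvaluationSketch() {
    }

    static boolean matches() {
        final FilterPartition filter = new FilterPartition();
        final String expression = "dateint==20230101";                  // hypothetical filter
        final String partitionName = "dateint=20230101";
        final String partitionUri = "file:/tmp/warehouse/mytable/dateint=20230101";
        // Same argument order as in the handler above:
        // (expression, partition name, uri, isBatched, extra values such as dateCreated).
        return filter.evaluatePartitionExpression(expression, partitionName, partitionUri, false, null);
    }
}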

Example 4 with PartitionListRequest

Use of com.netflix.metacat.common.server.connectors.model.PartitionListRequest in project metacat by Netflix.

From class ConverterUtil, method toPartitionListRequest.

/**
 * Creates the partition list connector request.
 *
 * @param partitionsRequestDto request containing the filter and other properties used for listing
 * @param pageable             pageable info
 * @param sort                 sort info
 * @return connector request
 */
public PartitionListRequest toPartitionListRequest(final GetPartitionsRequestDto partitionsRequestDto,
                                                   final Pageable pageable,
                                                   final Sort sort) {
    if (partitionsRequestDto != null) {
        if (partitionsRequestDto.getIncludePartitionDetails() == null) {
            partitionsRequestDto.setIncludePartitionDetails(false);
        }
        if (partitionsRequestDto.getIncludeAuditOnly() == null) {
            partitionsRequestDto.setIncludeAuditOnly(false);
        }
        final PartitionListRequest result = mapper.map(partitionsRequestDto, PartitionListRequest.class);
        result.setPageable(pageable);
        result.setSort(sort);
        return result;
    } else {
        return new PartitionListRequest(null, null, false, pageable, sort, false);
    }
}
Also used : PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest)
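A hedged sketch of driving the converter: the GetPartitionsRequestDto no-arg constructor, its setFilter setter, and the Pageable(limit, offset) constructor are assumptions here (only the includePartitionDetails/includeAuditOnly setters appear above), and the ConverterUtil instance is taken as given.

// Hypothetical usage sketch, not part of the metacat code base.
import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.server.connectors.model.PartitionListRequest;
import com.netflix.metacat.common.server.converter.ConverterUtil;

final class ToPartitionListRequestSketch {
    private ToPartitionListRequestSketch() {
    }

    static PartitionListRequest firstPageWithFilter(final ConverterUtil converterUtil) {
        final GetPartitionsRequestDto dto = new GetPartitionsRequestDto(); // no-arg constructor assumed
        dto.setFilter("dateint>=20230101");                                // setter assumed, filter invented
        return converterUtil.toPartitionListRequest(dto, new Pageable(100, 0), null);
    }
}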

Example 5 with PartitionListRequest

Use of com.netflix.metacat.common.server.connectors.model.PartitionListRequest in project metacat by Netflix.

From class PartitionServiceImpl, method list.

/**
 * {@inheritDoc}
 */
@Override
public List<PartitionDto> list(final QualifiedName name,
                               @Nullable final Sort sort,
                               @Nullable final Pageable pageable,
                               final boolean includeUserDefinitionMetadata,
                               final boolean includeUserDataMetadata,
                               @Nullable final GetPartitionsRequestDto getPartitionsRequestDto) {
    // table info is added below via getTableInfo(name)
    // the conversion handles the case where getPartitionsRequestDto is null
    final PartitionListRequest partitionListRequest = converterUtil.toPartitionListRequest(getPartitionsRequestDto, pageable, sort);
    final String filterExpression = partitionListRequest.getFilter();
    final List<String> partitionNames = partitionListRequest.getPartitionNames();
    if (Strings.isNullOrEmpty(filterExpression) && (pageable == null || !pageable.isPageable()) && (partitionNames == null || partitionNames.isEmpty()) && config.getNamesToThrowErrorOnListPartitionsWithNoFilter().contains(name)) {
        throw new IllegalArgumentException(String.format("No filter or limit specified for table %s", name));
    }
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    final ConnectorPartitionService service = connectorManager.getPartitionService(name);
    final ConnectorRequestContext connectorRequestContext = converterUtil.toConnectorContext(metacatRequestContext);
    final List<PartitionInfo> resultInfo = service.getPartitions(connectorRequestContext, name, partitionListRequest, this.getTableInfo(name));
    List<PartitionDto> result = Lists.newArrayList();
    if (resultInfo != null && !resultInfo.isEmpty()) {
        result = resultInfo.stream().map(converterUtil::toPartitionDto).collect(Collectors.toList());
        final List<QualifiedName> names = Lists.newArrayList();
        final List<String> uris = Lists.newArrayList();
        final Map<String, ObjectNode> prePopulatedMap = new HashMap<>();
        resultInfo.stream().filter(partitionInfo -> partitionInfo.getDataMetrics() != null).forEach(partitionInfo -> prePopulatedMap.put(partitionInfo.getName().toString(), partitionInfo.getDataMetrics()));
        result.forEach(partitionDto -> {
            names.add(partitionDto.getName());
            if (partitionDto.isDataExternal()) {
                uris.add(partitionDto.getDataUri());
            }
        });
        registry.distributionSummary(this.partitionGetDistSummary.withTags(name.parts())).record(result.size());
        log.info("Got {} partitions for {} using filter: {} and partition names: {}", result.size(), name, filterExpression, partitionNames);
        if (includeUserDefinitionMetadata || includeUserDataMetadata) {
            final List<ListenableFuture<Map<String, ObjectNode>>> futures = Lists.newArrayList();
            futures.add(threadServiceManager.getExecutor().submit(() -> includeUserDefinitionMetadata ? userMetadataService.getDefinitionMetadataMap(names) : Maps.newHashMap()));
            futures.add(threadServiceManager.getExecutor().submit(() -> includeUserDataMetadata ? userMetadataService.getDataMetadataMap(uris) : Maps.newHashMap()));
            try {
                final List<Map<String, ObjectNode>> metadataResults = Futures.successfulAsList(futures).get(1, TimeUnit.HOURS);
                final Map<String, ObjectNode> definitionMetadataMap = metadataResults.get(0);
                final Map<String, ObjectNode> dataMetadataMap = metadataResults.get(1);
                result.forEach(partitionDto -> userMetadataService.populateMetadata(
                    partitionDto,
                    definitionMetadataMap.get(partitionDto.getName().toString()),
                    // use the pre-populated data metrics when available
                    prePopulatedMap.containsKey(partitionDto.getName().toString())
                        ? prePopulatedMap.get(partitionDto.getName().toString())
                        : dataMetadataMap.get(partitionDto.getDataUri())));
            } catch (Exception e) {
                Throwables.propagate(e);
            }
        }
    }
    return result;
}
Also used : UserMetadataService(com.netflix.metacat.common.server.usermetadata.UserMetadataService) CatalogService(com.netflix.metacat.main.services.CatalogService) MetacatEventBus(com.netflix.metacat.common.server.events.MetacatEventBus) ConverterUtil(com.netflix.metacat.common.server.converter.ConverterUtil) MetacatContextManager(com.netflix.metacat.common.server.util.MetacatContextManager) PartitionService(com.netflix.metacat.main.services.PartitionService) PartitionsSaveResponseDto(com.netflix.metacat.common.dto.PartitionsSaveResponseDto) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) PartitionsSaveRequestDto(com.netflix.metacat.common.dto.PartitionsSaveRequestDto) ConnectorRequestContext(com.netflix.metacat.common.server.connectors.ConnectorRequestContext) Collection(java.util.Collection) Pageable(com.netflix.metacat.common.dto.Pageable) QualifiedName(com.netflix.metacat.common.QualifiedName) Collectors(java.util.stream.Collectors) PartitionsSaveResponse(com.netflix.metacat.common.server.connectors.model.PartitionsSaveResponse) HasMetadata(com.netflix.metacat.common.dto.HasMetadata) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) MetacatSaveTablePartitionMetadataOnlyPostEvent(com.netflix.metacat.common.server.events.MetacatSaveTablePartitionMetadataOnlyPostEvent) PartitionDto(com.netflix.metacat.common.dto.PartitionDto) ConnectorManager(com.netflix.metacat.main.manager.ConnectorManager) GetTableServiceParameters(com.netflix.metacat.main.services.GetTableServiceParameters) Sort(com.netflix.metacat.common.dto.Sort) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) HashMap(java.util.HashMap) Id(com.netflix.spectator.api.Id) ObjectNode(com.fasterxml.jackson.databind.node.ObjectNode) ArrayList(java.util.ArrayList) Strings(com.google.common.base.Strings) ConnectorPartitionService(com.netflix.metacat.common.server.connectors.ConnectorPartitionService) Lists(com.google.common.collect.Lists) ThreadServiceManager(com.netflix.metacat.common.server.util.ThreadServiceManager) TableService(com.netflix.metacat.main.services.TableService) MetacatRequestContext(com.netflix.metacat.common.MetacatRequestContext) Config(com.netflix.metacat.common.server.properties.Config) Nullable(javax.annotation.Nullable) Metrics(com.netflix.metacat.common.server.monitoring.Metrics) MetacatDeleteTablePartitionPostEvent(com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPostEvent) GetPartitionsRequestDto(com.netflix.metacat.common.dto.GetPartitionsRequestDto) Throwables(com.google.common.base.Throwables) TableNotFoundException(com.netflix.metacat.common.server.connectors.exception.TableNotFoundException) Maps(com.google.common.collect.Maps) TimeUnit(java.util.concurrent.TimeUnit) Futures(com.google.common.util.concurrent.Futures) TableInfo(com.netflix.metacat.common.server.connectors.model.TableInfo) Registry(com.netflix.spectator.api.Registry) MetacatSaveTablePartitionPostEvent(com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPostEvent) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest) MetacatDeleteTablePartitionPreEvent(com.netflix.metacat.common.server.events.MetacatDeleteTablePartitionPreEvent) MetacatSaveTablePartitionMetadataOnlyPreEvent(com.netflix.metacat.common.server.events.MetacatSaveTablePartitionMetadataOnlyPreEvent) MetacatSaveTablePartitionPreEvent(com.netflix.metacat.common.server.events.MetacatSaveTablePartitionPreEvent) MetacatRequestContext(com.netflix.metacat.common.MetacatRequestContext) ObjectNode(com.fasterxml.jackson.databind.node.ObjectNode) HashMap(java.util.HashMap) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest) QualifiedName(com.netflix.metacat.common.QualifiedName) ConnectorRequestContext(com.netflix.metacat.common.server.connectors.ConnectorRequestContext) TableNotFoundException(com.netflix.metacat.common.server.connectors.exception.TableNotFoundException) PartitionDto(com.netflix.metacat.common.dto.PartitionDto) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ConnectorPartitionService(com.netflix.metacat.common.server.connectors.ConnectorPartitionService) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) HashMap(java.util.HashMap)
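To round out the example, a hedged sketch of a caller of PartitionServiceImpl#list, matching the signature above; the table coordinates and filter are placeholders, and the QualifiedName.ofTable factory, the DTO setter, and the Pageable constructor are assumptions for illustration.

// Hypothetical caller, for illustration only.
import java.util.List;

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.GetPartitionsRequestDto;
import com.netflix.metacat.common.dto.Pageable;
import com.netflix.metacat.common.dto.PartitionDto;
import com.netflix.metacat.main.services.PartitionService;

final class ListPartitionsSketch {
    private ListPartitionsSketch() {
    }

    static List<PartitionDto> listWithDefinitionMetadata(final PartitionService partitionService) {
        final QualifiedName table =
            QualifiedName.ofTable("prodhive", "mydb", "mytable");  // factory method assumed
        final GetPartitionsRequestDto dto = new GetPartitionsRequestDto();
        dto.setFilter("dateint>=20230101");                         // setter assumed, filter invented
        // sort = null, page the first 100 partitions, include definition metadata but not data metadata
        return partitionService.list(table, null, new Pageable(100, 0), true, false, dto);
    }
}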

Aggregations

PartitionListRequest (com.netflix.metacat.common.server.connectors.model.PartitionListRequest)6 QualifiedName (com.netflix.metacat.common.QualifiedName)5 Pageable (com.netflix.metacat.common.dto.Pageable)5 Sort (com.netflix.metacat.common.dto.Sort)5 PartitionInfo (com.netflix.metacat.common.server.connectors.model.PartitionInfo)5 Registry (com.netflix.spectator.api.Registry)5 List (java.util.List)5 Map (java.util.Map)5 Strings (com.google.common.base.Strings)4 Throwables (com.google.common.base.Throwables)4 Lists (com.google.common.collect.Lists)4 Maps (com.google.common.collect.Maps)4 Futures (com.google.common.util.concurrent.Futures)4 ListenableFuture (com.google.common.util.concurrent.ListenableFuture)4 ConnectorContext (com.netflix.metacat.common.server.connectors.ConnectorContext)4 ConnectorRequestContext (com.netflix.metacat.common.server.connectors.ConnectorRequestContext)4 AuditInfo (com.netflix.metacat.common.server.connectors.model.AuditInfo)4 StorageInfo (com.netflix.metacat.common.server.connectors.model.StorageInfo)4 ThreadServiceManager (com.netflix.metacat.common.server.util.ThreadServiceManager)4 HiveMetrics (com.netflix.metacat.connector.hive.monitoring.HiveMetrics)4