
Example 11 with ConnectorContext

use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.

the class TableServiceImpl method rename.

@Override
public void rename(@Nonnull final QualifiedName oldName, @Nonnull final QualifiedName newName, final boolean isMView) {
    validate(oldName);
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    final ConnectorTableService service = connectorManager.getTableService(oldName.getCatalogName());
    // orElseThrow guarantees a non-null table here, so no extra null check is needed
    final TableDto oldTable = get(oldName, true).orElseThrow(() -> new TableNotFoundException(oldName));
    eventBus.postSync(new MetacatRenameTablePreEvent(oldName, metacatRequestContext, this, newName));
    try {
        log.info("Renaming {} {} to {}", isMView ? "view" : "table", oldName, newName);
        final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
        service.rename(connectorContext, oldName, newName);
    } catch (UnsupportedOperationException ignored) {
        // Ignore if the connector does not support rename, so that we can at least
        // go ahead and rename the user metadata and tags below
    }
    userMetadataService.renameDefinitionMetadataKey(oldName, newName);
    tagService.rename(oldName, newName.getTableName());
    final TableDto dto = get(newName, true).orElseThrow(() -> new IllegalStateException("should exist"));
    eventBus.postAsync(new MetacatRenameTablePostEvent(oldName, metacatRequestContext, this, oldTable, dto));
}
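
A minimal caller sketch for context. It is hedged: tableService stands in for an injected TableServiceImpl, the catalog/database/table names are made up, and QualifiedName.ofTable is assumed to exist alongside the ofDatabase and ofPartition factories used elsewhere on this page.

// Hypothetical caller of TableServiceImpl.rename (all names illustrative).
final QualifiedName oldName = QualifiedName.ofTable("prodhive", "sales", "orders");
final QualifiedName newName = QualifiedName.ofTable("prodhive", "sales", "orders_v2");
// false: renaming a plain table, not a materialized view
tableService.rename(oldName, newName, false);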

Example 12 with ConnectorContext

use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.

the class HiveConnectorPartitionService method savePartitions.

/**
 * {@inheritDoc}.
 */
@Override
public PartitionsSaveResponse savePartitions(@Nonnull @NonNull final ConnectorContext requestContext,
                                             @Nonnull @NonNull final QualifiedName tableName,
                                             @Nonnull @NonNull final PartitionsSaveRequest partitionsSaveRequest) {
    final String databasename = tableName.getDatabaseName();
    final String tablename = tableName.getTableName();
    // New partitions
    final List<Partition> hivePartitions = Lists.newArrayList();
    try {
        final Table table = metacatHiveClient.getTableByName(databasename, tablename);
        final List<PartitionInfo> partitionInfos = partitionsSaveRequest.getPartitions();
        // New partition ids
        final List<String> addedPartitionIds = Lists.newArrayList();
        // Updated partition ids
        final List<String> existingPartitionIds = Lists.newArrayList();
        // Existing partitions
        final List<Partition> existingHivePartitions = Lists.newArrayList();
        // Existing partition map
        Map<String, Partition> existingPartitionMap = Collections.emptyMap();
        if (partitionsSaveRequest.getCheckIfExists()) {
            final List<String> partitionNames = partitionInfos.stream().map(partition -> {
                final String partitionName = partition.getName().getPartitionName();
                PartitionUtil.validatePartitionName(partitionName, getPartitionKeys(table.getPartitionKeys()));
                return partitionName;
            }).collect(Collectors.toList());
            existingPartitionMap = getPartitionsByNames(table, partitionNames);
        }
        final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableName, table);
        for (PartitionInfo partitionInfo : partitionInfos) {
            final String partitionName = partitionInfo.getName().getPartitionName();
            final Partition hivePartition = existingPartitionMap.get(partitionName);
            if (hivePartition == null) {
                addedPartitionIds.add(partitionName);
                hivePartitions.add(hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo));
            } else {
                // Alter the existing partition in place only when alterIfExists is set
                if (partitionsSaveRequest.getAlterIfExists()) {
                    final Partition existingPartition = hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo);
                    existingPartitionIds.add(partitionName);
                    existingPartition.setParameters(hivePartition.getParameters());
                    existingPartition.setCreateTime(hivePartition.getCreateTime());
                    existingPartition.setLastAccessTime(hivePartition.getLastAccessTime());
                    existingHivePartitions.add(existingPartition);
                }
            }
        }
        final Set<String> deletePartitionIds = Sets.newHashSet();
        if (!partitionsSaveRequest.getAlterIfExists()) {
            deletePartitionIds.addAll(existingPartitionIds);
        }
        if (partitionsSaveRequest.getPartitionIdsForDeletes() != null) {
            deletePartitionIds.addAll(partitionsSaveRequest.getPartitionIdsForDeletes());
        }
        if (partitionsSaveRequest.getAlterIfExists() && !existingHivePartitions.isEmpty()) {
            copyTableSdToPartitionSd(existingHivePartitions, table);
            metacatHiveClient.alterPartitions(databasename, tablename, existingHivePartitions);
        }
        copyTableSdToPartitionSd(hivePartitions, table);
        metacatHiveClient.addDropPartitions(databasename, tablename, hivePartitions, Lists.newArrayList(deletePartitionIds));
        final PartitionsSaveResponse result = new PartitionsSaveResponse();
        result.setAdded(addedPartitionIds);
        result.setUpdated(existingPartitionIds);
        return result;
    } catch (NoSuchObjectException exception) {
        if (exception.getMessage() != null && exception.getMessage().startsWith("Partition doesn't exist")) {
            throw new PartitionNotFoundException(tableName, "", exception);
        } else {
            throw new TableNotFoundException(tableName, exception);
        }
    } catch (MetaException | InvalidObjectException exception) {
        throw new InvalidMetaException("One or more partitions are invalid.", exception);
    } catch (AlreadyExistsException e) {
        final List<String> ids = getFakePartitionName(hivePartitions);
        throw new PartitionAlreadyExistsException(tableName, ids, e);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed savePartitions hive table %s", tableName), exception);
    }
}
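
A hedged usage sketch for savePartitions. The PartitionsSaveRequest setters and the PartitionInfo builder are assumptions inferred from the getters read above and from the DatabaseInfo.builder() call in Example 13; partitionService and connectorContext stand in for the wired-up service and context.

// Save one new partition and drop an old one in a single call (illustrative names).
final PartitionsSaveRequest request = new PartitionsSaveRequest();
// Assumed lombok-style setters mirroring the getters used in savePartitions above
request.setPartitions(Lists.newArrayList(
    PartitionInfo.builder()
        .name(QualifiedName.ofPartition("prodhive", "sales", "orders", "dateint=20170102"))
        .build()));
request.setCheckIfExists(true);
request.setAlterIfExists(false);
request.setPartitionIdsForDeletes(Lists.newArrayList("dateint=20161231"));
final PartitionsSaveResponse response = partitionService.savePartitions(
    connectorContext, QualifiedName.ofTable("prodhive", "sales", "orders"), request);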

Example 13 with ConnectorContext

use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.

the class HiveConnectorDatabaseService method list.

/**
 * {@inheritDoc}.
 */
@Override
public List<DatabaseInfo> list(@Nonnull @NonNull final ConnectorContext requestContext,
                               @Nonnull @NonNull final QualifiedName name,
                               @Nullable final QualifiedName prefix,
                               @Nullable final Sort sort,
                               @Nullable final Pageable pageable) {
    try {
        final List<DatabaseInfo> databaseInfos = Lists.newArrayList();
        for (String databaseName : metacatHiveClient.getAllDatabases()) {
            final QualifiedName qualifiedName = QualifiedName.ofDatabase(name.getCatalogName(), databaseName);
            // prefix is @Nullable, so guard against it before filtering
            if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
                continue;
            }
            databaseInfos.add(DatabaseInfo.builder().name(qualifiedName).build());
        }
        //supporting sort by name only
        if (sort != null) {
            ConnectorUtils.sort(databaseInfos, sort, Comparator.comparing(p -> p.getName().getDatabaseName()));
        }
        return ConnectorUtils.paginate(databaseInfos, pageable);
    } catch (MetaException exception) {
        throw new InvalidMetaException(name, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed list hive database %s", name), exception);
    }
}
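
A minimal usage sketch: sort and pageable are @Nullable, so a caller can pass null to skip sorting and pagination. databaseService and requestContext are assumed to come from the connector wiring, and QualifiedName.ofCatalog is assumed alongside the ofDatabase factory used above.

// List databases in a catalog whose qualified name starts with "prodhive/sa".
final List<DatabaseInfo> databases = databaseService.list(
    requestContext,
    QualifiedName.ofCatalog("prodhive"),
    QualifiedName.ofDatabase("prodhive", "sa"), // prefix filter, illustrative
    null, // no sort
    null); // no pagination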

Example 14 with ConnectorContext

use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.

the class HiveConnectorFastPartitionService method getPartitionKeys.

/**
 * {@inheritDoc}.
 */
@Override
public List<String> getPartitionKeys(@Nonnull @NonNull final ConnectorContext requestContext,
                                     @Nonnull @NonNull final QualifiedName tableName,
                                     @Nonnull @NonNull final PartitionListRequest partitionsRequest) {
    final long start = registry.clock().monotonicTime();
    final Map<String, String> tags = new HashMap<String, String>();
    tags.put("request", "getPartitionKeys");
    final List<String> result;
    final List<String> partitionNames = partitionsRequest.getPartitionNames();
    final Sort sort = partitionsRequest.getSort();
    final Pageable pageable = partitionsRequest.getPageable();
    final String filterExpression = partitionsRequest.getFilter();
    if (filterExpression != null) {
        final FilterPartition filter = new FilterPartition();
        // Whether the filter expression references the batch id field
        final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
        final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
        // Handler for reading the result set
        final ResultSetHandler<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                final String name = rs.getString("name");
                final String uri = rs.getString("uri");
                final long createdDate = rs.getLong(FIELD_DATE_CREATED);
                Map<String, String> values = null;
                if (hasDateCreated) {
                    values = Maps.newHashMap();
                    values.put(FIELD_DATE_CREATED, createdDate + "");
                }
                if (Strings.isNullOrEmpty(filterExpression) || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                    names.add(name);
                }
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(), filterExpression, partitionNames, SQL_GET_PARTITIONS_WITH_KEY_URI, handler, sort, pageable);
    } else {
        // Handler for reading the result set
        final ResultSetHandler<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                names.add(rs.getString("name"));
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(), null, partitionNames, SQL_GET_PARTITIONS_WITH_KEY, handler, sort, pageable);
    }
    final long duration = registry.clock().monotonicTime() - start;
    log.debug("### Time taken to complete getPartitionKeys is {} ms", duration);
    this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    return result;
}
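
A hedged caller sketch: the PartitionListRequest setter name mirrors the getFilter call above and is an assumption, and the filter expression syntax is illustrative only.

// Fetch partition names matching a filter, exercising the filtered SQL path above.
final PartitionListRequest listRequest = new PartitionListRequest();
listRequest.setFilter("dateint>=20170101"); // assumed setter, illustrative expression
final List<String> keys = fastPartitionService.getPartitionKeys(
    requestContext,
    QualifiedName.ofTable("prodhive", "sales", "orders"),
    listRequest);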

Example 15 with ConnectorContext

use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.

the class HiveConnectorFastPartitionService method getPartitionNames.

/**
 * getPartitionNames.
 *
 * @param context      connector context
 * @param uris         uris
 * @param prefixSearch prefixSearch
 * @return partition names
 */
@Override
public Map<String, List<QualifiedName>> getPartitionNames(@Nonnull final ConnectorContext context,
                                                          @Nonnull final List<String> uris,
                                                          final boolean prefixSearch) {
    final long start = registry.clock().monotonicTime();
    final Map<String, String> tags = new HashMap<String, String>();
    tags.put("request", HiveMetrics.getPartitionNames.name());
    final Map<String, List<QualifiedName>> result = Maps.newHashMap();
    // Get data source
    final DataSource dataSource = DataSourceManager.get().get(catalogName);
    // Create the sql
    final StringBuilder queryBuilder = new StringBuilder(SQL_GET_PARTITION_NAMES_BY_URI);
    final List<String> params = Lists.newArrayList();
    if (prefixSearch) {
        queryBuilder.append(" 1=2");
        uris.forEach(uri -> {
            queryBuilder.append(" or location like ?");
            params.add(uri + "%");
        });
    } else {
        queryBuilder.append(" location in (");
        Joiner.on(',').appendTo(queryBuilder, uris.stream().map(uri -> "?").collect(Collectors.toList()));
        queryBuilder.append(")");
        params.addAll(uris);
    }
    // Handler for reading the result set
    final ResultSetHandler<Map<String, List<QualifiedName>>> handler = rs -> {
        while (rs.next()) {
            final String schemaName = rs.getString("schema_name");
            final String tableName = rs.getString("table_name");
            final String partitionName = rs.getString("partition_name");
            final String uri = rs.getString("location");
            final List<QualifiedName> partitionNames = result.get(uri);
            final QualifiedName qualifiedName = QualifiedName.ofPartition(catalogName, schemaName, tableName, partitionName);
            if (partitionNames == null) {
                result.put(uri, Lists.newArrayList(qualifiedName));
            } else {
                partitionNames.add(qualifiedName);
            }
        }
        return result;
    };
    try (Connection conn = dataSource.getConnection()) {
        new QueryRunner().query(conn, queryBuilder.toString(), handler, params.toArray());
    } catch (SQLException e) {
        // Guava idiom: throw the result so the compiler knows control exits here
        throw Throwables.propagate(e);
    } finally {
        final long duration = registry.clock().monotonicTime() - start;
        log.debug("### Time taken to complete getPartitionNames is {} ms", duration);
        this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    }
    return result;
}
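
A short usage sketch: with prefixSearch set to true, the SQL above becomes a chain of location like 'uri%' clauses, so a caller can reverse-map a storage prefix to the partitions stored under it. The URI and the fastPartitionService variable are illustrative.

// Reverse-lookup partitions by storage location prefix.
final Map<String, List<QualifiedName>> partitionsByUri = fastPartitionService.getPartitionNames(
    requestContext,
    Lists.newArrayList("s3://warehouse/prodhive/sales/orders/dateint=20170101"),
    true); // prefix search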

Aggregations

ConnectorContext (com.netflix.metacat.common.server.connectors.ConnectorContext): 29
MetacatRequestContext (com.netflix.metacat.common.MetacatRequestContext): 21
QualifiedName (com.netflix.metacat.common.QualifiedName): 15
List (java.util.List): 13
ConnectorTableService (com.netflix.metacat.common.server.connectors.ConnectorTableService): 11
TableNotFoundException (com.netflix.metacat.common.server.connectors.exception.TableNotFoundException): 11
Map (java.util.Map): 11
Nonnull (javax.annotation.Nonnull): 11
Lists (com.google.common.collect.Lists): 10
Pageable (com.netflix.metacat.common.dto.Pageable): 9
Sort (com.netflix.metacat.common.dto.Sort): 9
ConnectorPartitionService (com.netflix.metacat.common.server.connectors.ConnectorPartitionService): 9
PartitionInfo (com.netflix.metacat.common.server.connectors.model.PartitionInfo): 9
HashMap (java.util.HashMap): 9
Collectors (java.util.stream.Collectors): 8
Inject (javax.inject.Inject): 8
Named (javax.inject.Named): 8
Strings (com.google.common.base.Strings): 7
Maps (com.google.common.collect.Maps): 7
HiveConnectorInfoConverter (com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter): 7