Example 1 with ResultSetHandler

Use of org.apache.commons.dbutils.ResultSetHandler in project metacat by Netflix.

From the class HiveConnectorFastPartitionService, method getPartitionCount:

/**
 * Number of partitions for the given table.
 *
 * @param requestContext the connector request context
 * @param tableName      the qualified table name
 * @return the number of partitions
 */
@Override
public int getPartitionCount(@Nonnull @NonNull final ConnectorContext requestContext, @Nonnull @NonNull final QualifiedName tableName) {
    final long start = registry.clock().monotonicTime();
    final Map<String, String> tags = new HashMap<>();
    tags.put("request", HiveMetrics.getPartitionCount.name());
    final Integer result;
    final DataSource dataSource = DataSourceManager.get().get(catalogName);
    try (Connection conn = dataSource.getConnection()) {
        // Handler for reading the result set
        final ResultSetHandler<Integer> handler = rs -> {
            int count = 0;
            while (rs.next()) {
                count = rs.getInt("count");
            }
            return count;
        };
        result = new QueryRunner().query(conn, SQL_GET_PARTITION_COUNT, handler, tableName.getDatabaseName(), tableName.getTableName());
    } catch (SQLException e) {
        throw new ConnectorException("getPartitionCount", e);
    } finally {
        final long duration = registry.clock().monotonicTime() - start;
        log.debug("### Time taken to complete getPartitionCount is {} ms", duration);
        this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    }
    return result;
}
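DbUtils also ships a built-in handler for this scalar-count pattern, so the hand-written lambda above can be replaced with ScalarHandler. The sketch below is illustrative only: the class name and SQL text are stand-ins, since the real SQL_GET_PARTITION_COUNT constant is defined elsewhere in the service.

import java.sql.Connection;
import java.sql.SQLException;
import javax.sql.DataSource;
import org.apache.commons.dbutils.QueryRunner;
import org.apache.commons.dbutils.handlers.ScalarHandler;

public final class PartitionCountSketch {
    // Hypothetical stand-in for SQL_GET_PARTITION_COUNT.
    private static final String SQL =
        "SELECT count(*) AS count FROM PARTITIONS p"
        + " JOIN TBLS t ON p.TBL_ID = t.TBL_ID"
        + " JOIN DBS d ON t.DB_ID = d.DB_ID"
        + " WHERE d.NAME = ? AND t.TBL_NAME = ?";

    public static long countPartitions(final DataSource dataSource, final String databaseName,
                                       final String tableName) throws SQLException {
        try (Connection conn = dataSource.getConnection()) {
            // ScalarHandler reads one column of the first row, which is all a count query returns.
            final Number count = new QueryRunner()
                    .query(conn, SQL, new ScalarHandler<Number>("count"), databaseName, tableName);
            return count == null ? 0L : count.longValue();
        }
    }
}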

Example 2 with ResultSetHandler

Use of org.apache.commons.dbutils.ResultSetHandler in project metacat by Netflix.

From the class HiveConnectorFastPartitionService, method getpartitions:

private List<PartitionInfo> getpartitions(@Nonnull @NonNull final String databaseName, @Nonnull @NonNull final String tableName, @Nullable final List<String> partitionIds, final String filterExpression, final Sort sort, final Pageable pageable, final boolean includePartitionDetails) {
    final FilterPartition filter = new FilterPartition();
    // does the filter expression reference the batch id field?
    final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
    final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
    // Handler for reading the result set
    final ResultSetHandler<List<PartitionDetail>> handler = rs -> {
        final List<PartitionDetail> result = Lists.newArrayList();
        while (rs.next()) {
            final String name = rs.getString("name");
            final String uri = rs.getString("uri");
            final long createdDate = rs.getLong(FIELD_DATE_CREATED);
            Map<String, String> values = null;
            if (hasDateCreated) {
                values = Maps.newHashMap();
                values.put(FIELD_DATE_CREATED, createdDate + "");
            }
            if (Strings.isNullOrEmpty(filterExpression) || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                final Long id = rs.getLong("id");
                final Long sdId = rs.getLong("sd_id");
                final Long serdeId = rs.getLong("serde_id");
                final String inputFormat = rs.getString("input_format");
                final String outputFormat = rs.getString("output_format");
                final String serializationLib = rs.getString("slib");
                final StorageInfo storageInfo = new StorageInfo();
                storageInfo.setUri(uri);
                storageInfo.setInputFormat(inputFormat);
                storageInfo.setOutputFormat(outputFormat);
                storageInfo.setSerializationLib(serializationLib);
                final AuditInfo auditInfo = new AuditInfo();
                auditInfo.setCreatedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                auditInfo.setLastModifiedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                result.add(new PartitionDetail(id, sdId, serdeId, PartitionInfo.builder().name(QualifiedName.ofPartition(catalogName, databaseName, tableName, name)).auditInfo(auditInfo).serde(storageInfo).build()));
            }
        }
        return result;
    };
    final List<PartitionInfo> partitionInfos = new ArrayList<>();
    final List<PartitionDetail> partitions = getHandlerResults(databaseName, tableName, filterExpression, partitionIds, SQL_GET_PARTITIONS, handler, sort, pageable);
    if (includePartitionDetails && !partitions.isEmpty()) {
        final List<Long> partIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> sdIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> serdeIds = Lists.newArrayListWithCapacity(partitions.size());
        for (PartitionDetail partitionDetail : partitions) {
            partIds.add(partitionDetail.getId());
            sdIds.add(partitionDetail.getSdId());
            serdeIds.add(partitionDetail.getSerdeId());
        }
        final List<ListenableFuture<Void>> futures = Lists.newArrayList();
        final Map<Long, Map<String, String>> partitionParams = Maps.newHashMap();
        futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(partIds, SQL_GET_PARTITION_PARAMS, "part_id", partitionParams)));
        final Map<Long, Map<String, String>> sdParams = Maps.newHashMap();
        if (!sdIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(sdIds, SQL_GET_SD_PARAMS, "sd_id", sdParams)));
        }
        final Map<Long, Map<String, String>> serdeParams = Maps.newHashMap();
        if (!serdeIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(serdeIds, SQL_GET_SERDE_PARAMS, "serde_id", serdeParams)));
        }
        try {
            Futures.transform(Futures.successfulAsList(futures), Functions.constant(null)).get(1, TimeUnit.HOURS);
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
        for (PartitionDetail partitionDetail : partitions) {
            partitionDetail.getPartitionInfo().setMetadata(partitionParams.get(partitionDetail.getId()));
            partitionDetail.getPartitionInfo().getSerde().setParameters(sdParams.get(partitionDetail.getSdId()));
            partitionDetail.getPartitionInfo().getSerde().setSerdeInfoParameters(serdeParams.get(partitionDetail.getSerdeId()));
        }
    }
    for (PartitionDetail partitionDetail : partitions) {
        partitionInfos.add(partitionDetail.getPartitionInfo());
    }
    return partitionInfos;
}
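Before writing a typed handler like the PartitionDetail one above, it can be handy to inspect the raw rows with DbUtils' generic MapListHandler, which maps every row to a column-name/value map. A minimal sketch; the class name and SQL projection are assumptions, since the real SQL_GET_PARTITIONS is defined elsewhere in the service.

import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import java.util.Map;
import javax.sql.DataSource;
import org.apache.commons.dbutils.QueryRunner;
import org.apache.commons.dbutils.handlers.MapListHandler;

public final class PartitionRowsSketch {
    // Hypothetical projection standing in for SQL_GET_PARTITIONS.
    private static final String SQL =
        "SELECT p.PART_ID AS id, p.PART_NAME AS name, s.LOCATION AS uri"
        + " FROM PARTITIONS p JOIN SDS s ON p.SD_ID = s.SD_ID"
        + " WHERE p.TBL_ID = ?";

    public static List<Map<String, Object>> fetchRows(final DataSource dataSource, final long tableId)
            throws SQLException {
        try (Connection conn = dataSource.getConnection()) {
            // Each list element is one row, keyed by column label (id, name, uri).
            return new QueryRunner().query(conn, SQL, new MapListHandler(), tableId);
        }
    }
}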

Example 3 with ResultSetHandler

Use of org.apache.commons.dbutils.ResultSetHandler in project jSqlBox by drinkjava2.

From the class ImprovedQueryRunner, method batchFlush:

// === Batch execute methods ===
/**
 * Force-flush all cached SQL statements.
 */
public void batchFlush() throws SQLException {
    List<Object[]> sqlCacheList = sqlBatchCache.get();
    if (sqlCacheList.isEmpty())
        return;
    // first row
    Object[] f = sqlCacheList.get(0);
    if (f.length != 6)
        throw new DbProRuntimeException("Unexpected batch cached SQL format");
    // Determine the per-row parameter count from the operation type tag in the first cell
    int paramLength = 0;
    if ("i1".equals(f[0]) || "i3".equals(f[0]) || "u1".equals(f[0]) || "u4".equals(f[0]))
        paramLength = 0;
    else if ("u2".equals(f[0]) || "u5".equals(f[0]))
        paramLength = 1;
    else
        paramLength = ((Object[]) sqlCacheList.get(0)[5]).length;
    Object[][] allParams = new Object[sqlCacheList.size()][paramLength];
    int i = 0;
    for (Object[] c : sqlCacheList) {
        // cached parameters
        Object param = c[2];
        Object[] params = (Object[]) c[5];
        if ("i1".equals(f[0]) || "i3".equals(f[0]) || "u1".equals(f[0]) || "u4".equals(f[0]))
            allParams[i] = new Object[0];
        if ("u2".equals(f[0]) || "u5".equals(f[0]))
            allParams[i] = new Object[] { param };
        else
            allParams[i] = params;
        i++;
    }
    String sql = (String) f[3];
    Connection conn = (Connection) f[4];
    ResultSetHandler rsh = (ResultSetHandler) f[1];
    if (this.getAllowShowSQL()) {
        logger.info("Batch execute " + sqlCacheList.size() + " SQLs");
        logger.info(formatSqlForLoggerOutput(sql));
        logger.info("First row " + formatParametersForLoggerOutput(allParams[0]));
        logger.info("Last row " + formatParametersForLoggerOutput(allParams[allParams.length - 1]));
    }
    if ("e1".equals(f[0]) || "i1".equals(f[0]) || "u1".equals(f[0]) || "u2".equals(f[0]) || "u3".equals(f[0]))
        super.batch(conn, sql, allParams);
    else if ("e3".equals(f[0]) || "i3".equals(f[0]) || "u4".equals(f[0]) || "u5".equals(f[0]) || "u6".equals(f[0]))
        super.batch(sql, allParams);
    else if ("e2".equals(f[0]) || "i2".equals(f[0]))
        super.insertBatch(conn, sql, rsh, allParams);
    else if ("e4".equals(f[0]) || "i4".equals(f[0]))
        super.insertBatch(sql, rsh, allParams);
    else
        throw new DbProRuntimeException("unknow batch sql operation type +'" + f[0] + "'");
    sqlBatchCache.get().clear();
}
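The allParams matrix assembled above is exactly the shape QueryRunner.batch expects: one inner array of bind parameters per cached statement. A self-contained sketch with a hypothetical table and SQL:

import java.sql.Connection;
import java.sql.SQLException;
import javax.sql.DataSource;
import org.apache.commons.dbutils.QueryRunner;

public final class BatchSketch {
    // Hypothetical statement; only the Object[][] layout matters here.
    private static final String SQL = "INSERT INTO users (name, age) VALUES (?, ?)";

    public static int[] insertUsers(final DataSource dataSource) throws SQLException {
        // One inner array per buffered call, holding that call's bind parameters,
        // just like the allParams matrix built in batchFlush.
        final Object[][] allParams = { { "alice", 30 }, { "bob", 41 } };
        try (Connection conn = dataSource.getConnection()) {
            return new QueryRunner().batch(conn, SQL, allParams);
        }
    }
}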

Example 4 with ResultSetHandler

Use of org.apache.commons.dbutils.ResultSetHandler in project metacat by Netflix.

From the class HiveConnectorFastPartitionService, method getPartitionKeys:

/**
 * {@inheritDoc}
 */
@Override
public List<String> getPartitionKeys(@Nonnull @NonNull final ConnectorContext requestContext, @Nonnull @NonNull final QualifiedName tableName, @Nonnull @NonNull final PartitionListRequest partitionsRequest) {
    final long start = registry.clock().monotonicTime();
    final Map<String, String> tags = new HashMap<>();
    tags.put("request", "getPartitionKeys");
    final List<String> result;
    final List<String> partitionNames = partitionsRequest.getPartitionNames();
    final Sort sort = partitionsRequest.getSort();
    final Pageable pageable = partitionsRequest.getPageable();
    final String filterExpression = partitionsRequest.getFilter();
    if (filterExpression != null) {
        final FilterPartition filter = new FilterPartition();
        // does the filter expression reference the batch id field?
        final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
        final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
        // Handler for reading the result set
        final ResultSetHandler<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                final String name = rs.getString("name");
                final String uri = rs.getString("uri");
                final long createdDate = rs.getLong(FIELD_DATE_CREATED);
                Map<String, String> values = null;
                if (hasDateCreated) {
                    values = Maps.newHashMap();
                    values.put(FIELD_DATE_CREATED, createdDate + "");
                }
                if (Strings.isNullOrEmpty(filterExpression) || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                    names.add(name);
                }
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(), filterExpression, partitionNames, SQL_GET_PARTITIONS_WITH_KEY_URI, handler, sort, pageable);
    } else {
        // Handler for reading the result set
        final ResultSetHandler<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                names.add(rs.getString("name"));
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(), null, partitionNames, SQL_GET_PARTITIONS_WITH_KEY, handler, sort, pageable);
    }
    final long duration = registry.clock().monotonicTime() - start;
    log.debug("### Time taken to complete getPartitionKeys is {} ms", duration);
    this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    return result;
}
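The unfiltered branch only collects a single string column, which DbUtils' ColumnListHandler covers out of the box. A sketch under assumed names; the real SQL_GET_PARTITIONS_WITH_KEY is not shown on this page.

import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import javax.sql.DataSource;
import org.apache.commons.dbutils.QueryRunner;
import org.apache.commons.dbutils.handlers.ColumnListHandler;

public final class PartitionNamesSketch {
    // Hypothetical stand-in for SQL_GET_PARTITIONS_WITH_KEY.
    private static final String SQL =
        "SELECT p.PART_NAME AS name FROM PARTITIONS p"
        + " JOIN TBLS t ON p.TBL_ID = t.TBL_ID WHERE t.TBL_NAME = ?";

    public static List<String> partitionNames(final DataSource dataSource, final String tableName)
            throws SQLException {
        try (Connection conn = dataSource.getConnection()) {
            // ColumnListHandler gathers the "name" column across all rows.
            return new QueryRunner().query(conn, SQL, new ColumnListHandler<String>("name"), tableName);
        }
    }
}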

Example 5 with ResultSetHandler

Use of org.apache.commons.dbutils.ResultSetHandler in project metacat by Netflix.

From the class HiveConnectorFastPartitionService, method getparameters:

private Map<Long, Map<String, String>> getparameters(final List<Long> ids, final String sql, final String idName) {
    // Get data source
    final DataSource dataSource = DataSourceManager.get().get(catalogName);
    // Create the sql
    final StringBuilder queryBuilder = new StringBuilder(sql);
    if (!ids.isEmpty()) {
        queryBuilder.append(" and ").append(idName).append(" in ('").append(Joiner.on("','").skipNulls().join(ids)).append("')");
    }
    final ResultSetHandler<Map<Long, Map<String, String>>> handler = rs -> {
        final Map<Long, Map<String, String>> result = Maps.newHashMap();
        while (rs.next()) {
            final Long id = rs.getLong(idName);
            final String key = rs.getString("param_key");
            final String value = rs.getString("param_value");
            Map<String, String> parameters = result.get(id);
            if (parameters == null) {
                parameters = Maps.newHashMap();
                result.put(id, parameters);
            }
            parameters.put(key, value);
        }
        return result;
    };
    try (Connection conn = dataSource.getConnection()) {
        return new QueryRunner().query(conn, queryBuilder.toString(), handler);
    } catch (SQLException e) {
        throw Throwables.propagate(e);
    }
}
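Note that getparameters splices the ids directly into the SQL text. Because the values are longs coming from the metastore this is workable, but the same lookup can bind them instead by generating one placeholder per id. A sketch with illustrative names:

import java.sql.Connection;
import java.sql.SQLException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import javax.sql.DataSource;
import org.apache.commons.dbutils.QueryRunner;
import org.apache.commons.dbutils.ResultSetHandler;

public final class ParamsLookupSketch {
    public static Map<Long, Map<String, String>> lookup(
            final DataSource dataSource,
            final String baseSql, // e.g. the SQL_GET_PARTITION_PARAMS prefix
            final String idName,
            final List<Long> ids,
            final ResultSetHandler<Map<Long, Map<String, String>>> handler) throws SQLException {
        if (ids.isEmpty()) {
            return Collections.emptyMap();
        }
        // One "?" per id so the driver binds the values instead of
        // concatenating them into the statement text.
        final StringBuilder sql = new StringBuilder(baseSql).append(" and ").append(idName).append(" in (");
        for (int i = 0; i < ids.size(); i++) {
            sql.append(i == 0 ? "?" : ",?");
        }
        sql.append(')');
        try (Connection conn = dataSource.getConnection()) {
            return new QueryRunner().query(conn, sql.toString(), handler, ids.toArray());
        }
    }
}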

Aggregations

ResultSetHandler (org.apache.commons.dbutils.ResultSetHandler): 14 usages
Connection (java.sql.Connection): 12
SQLException (java.sql.SQLException): 11
Lists (com.google.common.collect.Lists): 10
Maps (com.google.common.collect.Maps): 10
QualifiedName (com.netflix.metacat.common.QualifiedName): 10
DataSourceManager (com.netflix.metacat.common.server.util.DataSourceManager): 10
List (java.util.List): 10
Map (java.util.Map): 10
Joiner (com.google.common.base.Joiner): 9
Strings (com.google.common.base.Strings): 9
Date (java.util.Date): 9
DataSource (javax.sql.DataSource): 9
QueryRunner (org.apache.commons.dbutils.QueryRunner): 9
Nonnull (javax.annotation.Nonnull): 8
Slf4j (lombok.extern.slf4j.Slf4j): 8
Throwables (com.google.common.base.Throwables): 7
ConnectorContext (com.netflix.metacat.common.server.connectors.ConnectorContext): 7
ThreadServiceManager (com.netflix.metacat.common.server.util.ThreadServiceManager): 7
HiveConnectorInfoConverter (com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter): 7