
Example 1 with ResultSetExtractor

use of org.springframework.jdbc.core.ResultSetExtractor in project opennms by OpenNMS.

the class AlarmdIT method changeFields.

@Test
public void changeFields() throws InterruptedException, SQLException {
    assertEmptyAlarmTable();
    String reductionKey = "testUpdateField";
    MockNode node1 = m_mockNetwork.getNode(1);
    // Verify we have the default alarm
    sendNodeDownEvent(reductionKey, node1);
    int severity = m_jdbcTemplate.queryForObject("select severity from alarms a where a.reductionKey = ?", new Object[] { reductionKey }, Integer.class).intValue();
    assertEquals(OnmsSeverity.MAJOR, OnmsSeverity.get(severity));
    // Store the original logmsg from the original alarm (we are about to test changing it with subsequent alarm reduction)
    String defaultLogMsg = m_jdbcTemplate.query("select logmsg from alarms", new ResultSetExtractor<String>() {

        @Override
        public String extractData(ResultSet results) throws SQLException, DataAccessException {
            results.next();
            int row = results.getRow();
            boolean isLast = results.isLast();
            boolean isFirst = results.isFirst();
            // Exactly one row is expected: the cursor must sit on row 1, which is both first and last.
            if (row != 1 || !isLast || !isFirst) {
                throw new SQLException("Row count is not = 1.  There should only be one row returned from the query: \n" + results.getStatement());
            }
            return results.getString(1);
        }
    });
    assertTrue("The logmsg column should not be null", defaultLogMsg != null);
    // Duplicate the alarm but change the severity and verify the change
    sendNodeDownEventWithUpdateFieldSeverity(reductionKey, node1, OnmsSeverity.CRITICAL);
    severity = m_jdbcTemplate.queryForObject("select severity from alarms", Integer.class).intValue();
    assertEquals("Severity should now be Critical", OnmsSeverity.CRITICAL, OnmsSeverity.get(severity));
    // Duplicate the alarm but don't force the change of severity
    sendNodeDownEvent(reductionKey, node1);
    severity = m_jdbcTemplate.queryForObject("select severity from alarms", Integer.class).intValue();
    assertEquals("Severity should still be Critical", OnmsSeverity.CRITICAL, OnmsSeverity.get(severity));
    // Duplicate the alarm and change the logmsg
    sendNodeDownEventChangeLogMsg(reductionKey, node1, "new logMsg");
    String newLogMsg = m_jdbcTemplate.query("select logmsg from alarms", new ResultSetExtractor<String>() {

        @Override
        public String extractData(ResultSet results) throws SQLException, DataAccessException {
            results.next();
            return results.getString(1);
        }
    });
    assertEquals("new logMsg", newLogMsg);
    assertTrue(!newLogMsg.equals(defaultLogMsg));
    // Duplicate the alarm but force logmsg to not change (the logmsg field is updated by default)
    sendNodeDownEventDontChangeLogMsg(reductionKey, node1, "newer logMsg");
    newLogMsg = m_jdbcTemplate.query("select logmsg from alarms", new ResultSetExtractor<String>() {

        @Override
        public String extractData(ResultSet results) throws SQLException, DataAccessException {
            results.next();
            return results.getString(1);
        }
    });
    assertTrue("The logMsg should not have changed.", !"newer logMsg".equals(newLogMsg));
    assertEquals("The logMsg should still be equal to the previous update.", "new logMsg", newLogMsg);
    // Duplicate the alarm with the default configuration and verify the logmsg has changed (as is the default behavior for this field)
    sendNodeDownEvent(reductionKey, node1);
    newLogMsg = m_jdbcTemplate.query("select logmsg from alarms", new ResultSetExtractor<String>() {

        @Override
        public String extractData(ResultSet results) throws SQLException, DataAccessException {
            results.next();
            return results.getString(1);
        }
    });
    assertTrue("The logMsg should have changed.", !"new logMsg".equals(newLogMsg));
    assertEquals("The logMsg should new be the default logMsg.", newLogMsg, defaultLogMsg);
}
Also used : SQLException(java.sql.SQLException) ResultSet(java.sql.ResultSet) ResultSetExtractor(org.springframework.jdbc.core.ResultSetExtractor) MockNode(org.opennms.netmgt.mock.MockNode) DataAccessException(org.springframework.dao.DataAccessException) Test(org.junit.Test)
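
A note on the extractor pattern above: each anonymous ResultSetExtractor pulls a single String out of the first row and checks the row count by hand. For a one-row, one-column query, JdbcTemplate.queryForObject does the same with the size check built in. A minimal sketch, assuming a JdbcTemplate wired like the test's m_jdbcTemplate; the helper class is illustrative, not part of OpenNMS:

import org.springframework.jdbc.core.JdbcTemplate;

class AlarmQueries {

    // Sketch: single-row, single-column read without a custom ResultSetExtractor.
    // queryForObject throws IncorrectResultSizeDataAccessException unless exactly
    // one row comes back, replacing the manual getRow()/isFirst()/isLast() check.
    static String findLogMsg(JdbcTemplate jdbcTemplate, String reductionKey) {
        return jdbcTemplate.queryForObject(
                "select logmsg from alarms where reductionKey = ?",
                String.class,
                reductionKey);
    }
}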

Example 2 with ResultSetExtractor

use of org.springframework.jdbc.core.ResultSetExtractor in project ANNIS by korpling.

the class AdministrationDao method listCorpusAlias.

/**
 * Provides a list where the keys are the aliases and the values are the
 * corpus names.
 *
 * @param databaseProperties a file with connection properties for the database to
 *        query, or {@code null} to query the currently configured data source
 * @return a multimap from alias to corpus name; empty if the query fails
 */
public Multimap<String, String> listCorpusAlias(File databaseProperties) {
    Multimap<String, String> result = TreeMultimap.create();
    DataSource origDataSource = getDataSource().getInnerDataSource();
    try {
        if (databaseProperties != null) {
            getDataSource().setInnerDataSource(createDataSource(databaseProperties));
        }
        result = getJdbcTemplate().query("SELECT a.alias AS alias, c.name AS corpus\n" + "FROM corpus_alias AS a, corpus AS c\n" + "WHERE\n" + " a.corpus_ref = c.id", new ResultSetExtractor<Multimap<String, String>>() {

            @Override
            public Multimap<String, String> extractData(ResultSet rs) throws SQLException, DataAccessException {
                Multimap<String, String> data = TreeMultimap.create();
                while (rs.next()) {
                    // alias -> corpus name
                    data.put(rs.getString(1), rs.getString(2));
                }
                return data;
            }
        });
    } catch (IOException | URISyntaxException | DataAccessException ex) {
        if (databaseProperties == null) {
            log.error("Could not query corpus list", ex);
        } else {
            log.error("Could not query corpus list for the file " + databaseProperties.getAbsolutePath(), ex);
        }
    } finally {
        getDataSource().setInnerDataSource(origDataSource);
    }
    return result;
}
Also used : ResultSetExtractor(org.springframework.jdbc.core.ResultSetExtractor) ResultSet(java.sql.ResultSet) IOException(java.io.IOException) URISyntaxException(java.net.URISyntaxException) DataAccessException(org.springframework.dao.DataAccessException) BasicDataSource(org.apache.commons.dbcp2.BasicDataSource) DataSource(javax.sql.DataSource)
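
A hypothetical caller of listCorpusAlias, iterating the alias-to-corpus pairs; the adminDao reference and the properties path are assumptions for illustration, not taken from ANNIS:

import java.io.File;
import java.util.Map;
import com.google.common.collect.Multimap;

class CorpusAliasReport {

    // Illustrative usage only: print each alias together with its corpus name.
    // AdministrationDao is the class shown above; its import is omitted here.
    static void print(AdministrationDao adminDao) {
        Multimap<String, String> aliases =
                adminDao.listCorpusAlias(new File("conf/database.properties"));
        for (Map.Entry<String, String> e : aliases.entries()) {
            System.out.println(e.getKey() + " -> " + e.getValue());
        }
    }
}

Because the method logs query errors and returns the empty multimap, callers cannot distinguish "no aliases" from "query failed" without checking the log.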

Example 3 with ResultSetExtractor

use of org.springframework.jdbc.core.ResultSetExtractor in project metacat by Netflix.

the class DirectSqlGetPartition method getPartitionKeys.

/**
 * Gets the partition names/keys based on a filter expression for the specified table.
 *
 * @param requestContext    The Metacat request context
 * @param tableName         table handle to get partitions for
 * @param partitionsRequest The metadata for what kind of partitions to get from the table
 * @return filtered list of partition names
 */
@Transactional(readOnly = true)
public List<String> getPartitionKeys(final ConnectorRequestContext requestContext, final QualifiedName tableName, final PartitionListRequest partitionsRequest) {
    final long start = registry.clock().wallTime();
    final List<String> result;
    final List<String> partitionNames = partitionsRequest.getPartitionNames();
    final Sort sort = partitionsRequest.getSort();
    final Pageable pageable = partitionsRequest.getPageable();
    final String filterExpression = partitionsRequest.getFilter();
    if (filterExpression != null) {
        return filterPartitionsColumn(tableName.getDatabaseName(), tableName.getTableName(), partitionNames, PARTITION_NAME, filterExpression, sort, pageable, partitionsRequest.getIncludeAuditOnly());
    } else {
        final ResultSetExtractor<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                names.add(rs.getString("name"));
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(), null, partitionNames, SQL.SQL_GET_PARTITIONS_WITH_KEY, handler, sort, pageable, partitionsRequest.getIncludeAuditOnly());
    }
    this.fastServiceMetric.recordTimer(HiveMetrics.TagGetPartitionKeys.getMetricName(), registry.clock().wallTime() - start);
    return result;
}
Also used : HiveConfigConstants(com.netflix.metacat.connector.hive.util.HiveConfigConstants) HiveConnectorFastServiceMetric(com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric) PartitionKeyParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval) Date(java.util.Date) TimeoutException(java.util.concurrent.TimeoutException) PartitionFilterGenerator(com.netflix.metacat.connector.hive.util.PartitionFilterGenerator) PartitionParamParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval) Matcher(java.util.regex.Matcher) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) ConnectorRequestContext(com.netflix.metacat.common.server.connectors.ConnectorRequestContext) Collection(java.util.Collection) Pageable(com.netflix.metacat.common.dto.Pageable) QualifiedName(com.netflix.metacat.common.QualifiedName) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) HiveMetrics(com.netflix.metacat.connector.hive.monitoring.HiveMetrics) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) Joiner(com.google.common.base.Joiner) Sort(com.netflix.metacat.common.dto.Sort) Types(java.sql.Types) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) JdbcTemplate(org.springframework.jdbc.core.JdbcTemplate) Strings(com.google.common.base.Strings) Lists(com.google.common.collect.Lists) ThreadServiceManager(com.netflix.metacat.common.server.util.ThreadServiceManager) ImmutableList(com.google.common.collect.ImmutableList) Qualifier(org.springframework.beans.factory.annotation.Qualifier) PartitionParser(com.netflix.metacat.common.server.partition.parser.PartitionParser) Config(com.netflix.metacat.common.server.properties.Config) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) Throwables(com.google.common.base.Throwables) Maps(com.google.common.collect.Maps) Table(org.apache.hadoop.hive.metastore.api.Table) SqlParameterValue(org.springframework.jdbc.core.SqlParameterValue) FilterPartition(com.netflix.metacat.common.server.partition.util.FilterPartition) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Futures(com.google.common.util.concurrent.Futures) StringReader(java.io.StringReader) Registry(com.netflix.spectator.api.Registry) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest) HiveFilterPartition(com.netflix.metacat.connector.hive.util.HiveFilterPartition) VisibleForTesting(com.google.common.annotations.VisibleForTesting) HivePartitionKeyParserEval(com.netflix.metacat.connector.hive.util.HivePartitionKeyParserEval) ResultSetExtractor(org.springframework.jdbc.core.ResultSetExtractor) Transactional(org.springframework.transaction.annotation.Transactional) Pageable(com.netflix.metacat.common.dto.Pageable) Sort(com.netflix.metacat.common.dto.Sort) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) Transactional(org.springframework.transaction.annotation.Transactional)
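
The lambda extractor above only collects a single column into a list. Spring JDBC covers that shape directly: queryForList with an element type uses a single-column row mapper internally, so a hand-written extractor is mainly worth it when rows must be filtered or combined while streaming, as the next example does. A minimal sketch under that assumption; the SQL and schema names are illustrative, not metacat's:

import java.util.List;
import org.springframework.jdbc.core.JdbcTemplate;

class PartitionNameQueries {

    // Sketch: collect one string column without a custom ResultSetExtractor.
    static List<String> partitionNames(JdbcTemplate jdbcTemplate, String tableName) {
        return jdbcTemplate.queryForList(
                "select p.name from partitions p where p.table_name = ?",
                String.class,
                tableName);
    }
}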

Example 4 with ResultSetExtractor

use of org.springframework.jdbc.core.ResultSetExtractor in project metacat by Netflix.

the class DirectSqlGetPartition method filterPartitionsColumn.

/**
 * Query partitions using filters on the name or uri column.
 */
private List<String> filterPartitionsColumn(final String databaseName, final String tableName, final List<String> partitionNames, final String columnName, final String filterExpression, final Sort sort, final Pageable pageable, final boolean forceDisableAudit) {
    final FilterPartition filter = config.escapePartitionNameOnFilter() ? new HiveFilterPartition() : new FilterPartition();
    // true when the filter expression references the batch id field (FIELD_BATCHID)
    final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
    final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
    ResultSetExtractor<List<String>> handler = rs -> {
        final List<String> columns = Lists.newArrayList();
        while (rs.next()) {
            final String name = rs.getString(PARTITION_NAME);
            final String uri = rs.getString(PARTITION_URI);
            final long createdDate = rs.getLong(FIELD_DATE_CREATED);
            Map<String, String> values = null;
            if (hasDateCreated) {
                values = Maps.newHashMap();
                values.put(FIELD_DATE_CREATED, createdDate + "");
            }
            if (Strings.isNullOrEmpty(filterExpression) || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                columns.add(rs.getString(columnName));
            }
        }
        return columns;
    };
    return getHandlerResults(databaseName, tableName, filterExpression, partitionNames, SQL.SQL_GET_PARTITIONS_WITH_KEY_URI, handler, sort, pageable, forceDisableAudit);
}
Also used : HiveConfigConstants(com.netflix.metacat.connector.hive.util.HiveConfigConstants) HiveConnectorFastServiceMetric(com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric) PartitionKeyParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval) Date(java.util.Date) TimeoutException(java.util.concurrent.TimeoutException) PartitionFilterGenerator(com.netflix.metacat.connector.hive.util.PartitionFilterGenerator) PartitionParamParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval) Matcher(java.util.regex.Matcher) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) ConnectorRequestContext(com.netflix.metacat.common.server.connectors.ConnectorRequestContext) Collection(java.util.Collection) Pageable(com.netflix.metacat.common.dto.Pageable) QualifiedName(com.netflix.metacat.common.QualifiedName) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) HiveMetrics(com.netflix.metacat.connector.hive.monitoring.HiveMetrics) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) Joiner(com.google.common.base.Joiner) Sort(com.netflix.metacat.common.dto.Sort) Types(java.sql.Types) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) JdbcTemplate(org.springframework.jdbc.core.JdbcTemplate) Strings(com.google.common.base.Strings) Lists(com.google.common.collect.Lists) ThreadServiceManager(com.netflix.metacat.common.server.util.ThreadServiceManager) ImmutableList(com.google.common.collect.ImmutableList) Qualifier(org.springframework.beans.factory.annotation.Qualifier) PartitionParser(com.netflix.metacat.common.server.partition.parser.PartitionParser) Config(com.netflix.metacat.common.server.properties.Config) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) Throwables(com.google.common.base.Throwables) Maps(com.google.common.collect.Maps) Table(org.apache.hadoop.hive.metastore.api.Table) SqlParameterValue(org.springframework.jdbc.core.SqlParameterValue) FilterPartition(com.netflix.metacat.common.server.partition.util.FilterPartition) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Futures(com.google.common.util.concurrent.Futures) StringReader(java.io.StringReader) Registry(com.netflix.spectator.api.Registry) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest) HiveFilterPartition(com.netflix.metacat.connector.hive.util.HiveFilterPartition) VisibleForTesting(com.google.common.annotations.VisibleForTesting) HivePartitionKeyParserEval(com.netflix.metacat.connector.hive.util.HivePartitionKeyParserEval) ResultSetExtractor(org.springframework.jdbc.core.ResultSetExtractor) Transactional(org.springframework.transaction.annotation.Transactional) HiveFilterPartition(com.netflix.metacat.connector.hive.util.HiveFilterPartition) FilterPartition(com.netflix.metacat.common.server.partition.util.FilterPartition) HiveFilterPartition(com.netflix.metacat.connector.hive.util.HiveFilterPartition) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) Map(java.util.Map)

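Examples 3 and 4 share a skeleton: walk the ResultSet, optionally filter each row, and collect one string column. A generic extractor can factor that out; this helper is a sketch and does not exist in metacat:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;
import org.springframework.jdbc.core.ResultSetExtractor;

final class ColumnExtractors {

    // Sketch: a reusable extractor keeping the values of one string column for
    // which the predicate holds. Filtering on the value alone is a simplification;
    // filterPartitionsColumn above evaluates the filter expression against the
    // name and uri columns instead.
    static ResultSetExtractor<List<String>> column(String columnName, Predicate<String> keep) {
        return rs -> {
            final List<String> values = new ArrayList<>();
            while (rs.next()) {
                final String value = rs.getString(columnName);
                if (keep.test(value)) {
                    values.add(value);
                }
            }
            return values;
        };
    }
}

Usage would then read jdbcTemplate.query(sql, ColumnExtractors.column("name", n -> true)).
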
Example 5 with ResultSetExtractor

use of org.springframework.jdbc.core.ResultSetExtractor in project metacat by Netflix.

the class DirectSqlGetPartition method getPartitions.

private List<PartitionHolder> getPartitions(final String databaseName, final String tableName, @Nullable final List<String> partitionIds, @Nullable final String filterExpression, @Nullable final Sort sort, @Nullable final Pageable pageable, final boolean includePartitionDetails, final boolean forceDisableAudit) {
    final FilterPartition filter = config.escapePartitionNameOnFilter() ? new HiveFilterPartition() : new FilterPartition();
    // true when the filter expression references the batch id field (FIELD_BATCHID)
    final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
    final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
    // Handler for reading the result set
    final ResultSetExtractor<List<PartitionHolder>> handler = rs -> {
        final List<PartitionHolder> result = Lists.newArrayList();
        final QualifiedName tableQName = QualifiedName.ofTable(catalogName, databaseName, tableName);
        int noOfRows = 0;
        while (rs.next()) {
            noOfRows++;
            final String name = rs.getString("name");
            final String uri = rs.getString("uri");
            final long createdDate = rs.getLong(FIELD_DATE_CREATED);
            Map<String, String> values = null;
            if (hasDateCreated) {
                values = Maps.newHashMap();
                values.put(FIELD_DATE_CREATED, createdDate + "");
            }
            if (Strings.isNullOrEmpty(filterExpression) || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                final Long id = rs.getLong("id");
                final Long sdId = rs.getLong("sd_id");
                final Long serdeId = rs.getLong("serde_id");
                final String inputFormat = rs.getString("input_format");
                final String outputFormat = rs.getString("output_format");
                final String serializationLib = rs.getString("slib");
                final StorageInfo storageInfo = new StorageInfo();
                storageInfo.setUri(uri);
                storageInfo.setInputFormat(inputFormat);
                storageInfo.setOutputFormat(outputFormat);
                storageInfo.setSerializationLib(serializationLib);
                final AuditInfo auditInfo = new AuditInfo();
                auditInfo.setCreatedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                auditInfo.setLastModifiedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                result.add(new PartitionHolder(id, sdId, serdeId, PartitionInfo.builder().name(QualifiedName.ofPartition(catalogName, databaseName, tableName, name)).auditInfo(auditInfo).serde(storageInfo).build()));
            }
            // Fail if the number of partitions exceeds the threshold limit.
            if (result.size() > config.getMaxPartitionsThreshold()) {
                registry.counter(registry.createId(HiveMetrics.CounterHiveGetPartitionsExceedThresholdFailure.getMetricName()).withTags(tableQName.parts())).increment();
                final String message = String.format("Number of partitions queried for table %s exceeded the threshold %d", tableQName, config.getMaxPartitionsThreshold());
                log.warn(message);
                throw new IllegalArgumentException(message);
            }
        }
        registry.gauge(registry.createId(HiveMetrics.GaugePreExpressionFilterGetPartitionsCount.getMetricName()).withTags(tableQName.parts())).set(noOfRows);
        return result;
    };
    final List<PartitionHolder> partitions = this.getHandlerResults(databaseName, tableName, filterExpression, partitionIds, SQL.SQL_GET_PARTITIONS, handler, sort, pageable, forceDisableAudit);
    if (includePartitionDetails && !partitions.isEmpty()) {
        final List<Long> partIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> sdIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> serdeIds = Lists.newArrayListWithCapacity(partitions.size());
        for (PartitionHolder partitionHolder : partitions) {
            partIds.add(partitionHolder.getId());
            sdIds.add(partitionHolder.getSdId());
            serdeIds.add(partitionHolder.getSerdeId());
        }
        final List<ListenableFuture<Void>> futures = Lists.newArrayList();
        final Map<Long, Map<String, String>> partitionParams = Maps.newHashMap();
        futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(partIds, SQL.SQL_GET_PARTITION_PARAMS, "part_id", partitionParams)));
        final Map<Long, Map<String, String>> sdParams = Maps.newHashMap();
        if (!sdIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(sdIds, SQL.SQL_GET_SD_PARAMS, "sd_id", sdParams)));
        }
        final Map<Long, Map<String, String>> serdeParams = Maps.newHashMap();
        if (!serdeIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(serdeIds, SQL.SQL_GET_SERDE_PARAMS, "serde_id", serdeParams)));
        }
        ListenableFuture<List<Void>> future = null;
        try {
            future = Futures.allAsList(futures);
            final int getPartitionsDetailsTimeout = Integer.parseInt(configuration.getOrDefault(HiveConfigConstants.GET_PARTITION_DETAILS_TIMEOUT, "120"));
            future.get(getPartitionsDetailsTimeout, TimeUnit.SECONDS);
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
            try {
                if (future != null) {
                    future.cancel(true);
                }
            } catch (Exception ignored) {
                log.warn("Failed cancelling the task that gets the partition details.");
            }
            Throwables.propagate(e);
        }
        for (PartitionHolder partitionHolder : partitions) {
            partitionHolder.getPartitionInfo().setMetadata(partitionParams.get(partitionHolder.getId()));
            partitionHolder.getPartitionInfo().getSerde().setParameters(sdParams.get(partitionHolder.getSdId()));
            partitionHolder.getPartitionInfo().getSerde().setSerdeInfoParameters(serdeParams.get(partitionHolder.getSerdeId()));
        }
    }
    return partitions;
}
Also used : HiveConfigConstants(com.netflix.metacat.connector.hive.util.HiveConfigConstants) HiveConnectorFastServiceMetric(com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric) PartitionKeyParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionKeyParserEval) Date(java.util.Date) TimeoutException(java.util.concurrent.TimeoutException) PartitionFilterGenerator(com.netflix.metacat.connector.hive.util.PartitionFilterGenerator) PartitionParamParserEval(com.netflix.metacat.common.server.partition.visitor.PartitionParamParserEval) Matcher(java.util.regex.Matcher) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) ConnectorRequestContext(com.netflix.metacat.common.server.connectors.ConnectorRequestContext) Collection(java.util.Collection) Pageable(com.netflix.metacat.common.dto.Pageable) QualifiedName(com.netflix.metacat.common.QualifiedName) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) HiveMetrics(com.netflix.metacat.connector.hive.monitoring.HiveMetrics) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) Optional(java.util.Optional) Pattern(java.util.regex.Pattern) Joiner(com.google.common.base.Joiner) Sort(com.netflix.metacat.common.dto.Sort) Types(java.sql.Types) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) JdbcTemplate(org.springframework.jdbc.core.JdbcTemplate) Strings(com.google.common.base.Strings) Lists(com.google.common.collect.Lists) ThreadServiceManager(com.netflix.metacat.common.server.util.ThreadServiceManager) ImmutableList(com.google.common.collect.ImmutableList) Qualifier(org.springframework.beans.factory.annotation.Qualifier) PartitionParser(com.netflix.metacat.common.server.partition.parser.PartitionParser) Config(com.netflix.metacat.common.server.properties.Config) Nonnull(javax.annotation.Nonnull) Nullable(javax.annotation.Nullable) Throwables(com.google.common.base.Throwables) Maps(com.google.common.collect.Maps) Table(org.apache.hadoop.hive.metastore.api.Table) SqlParameterValue(org.springframework.jdbc.core.SqlParameterValue) FilterPartition(com.netflix.metacat.common.server.partition.util.FilterPartition) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Futures(com.google.common.util.concurrent.Futures) StringReader(java.io.StringReader) Registry(com.netflix.spectator.api.Registry) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest) HiveFilterPartition(com.netflix.metacat.connector.hive.util.HiveFilterPartition) VisibleForTesting(com.google.common.annotations.VisibleForTesting) HivePartitionKeyParserEval(com.netflix.metacat.connector.hive.util.HivePartitionKeyParserEval) ResultSetExtractor(org.springframework.jdbc.core.ResultSetExtractor) Transactional(org.springframework.transaction.annotation.Transactional) HiveFilterPartition(com.netflix.metacat.connector.hive.util.HiveFilterPartition) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) ExecutionException(java.util.concurrent.ExecutionException) TimeoutException(java.util.concurrent.TimeoutException) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) FilterPartition(com.netflix.metacat.common.server.partition.util.FilterPartition) HiveFilterPartition(com.netflix.metacat.connector.hive.util.HiveFilterPartition) QualifiedName(com.netflix.metacat.common.QualifiedName) TimeoutException(java.util.concurrent.TimeoutException) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) ExecutionException(java.util.concurrent.ExecutionException) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) Map(java.util.Map)

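The detail-loading branch above fans out three parameter lookups on an executor and joins them with a bounded wait, cancelling the batch when the wait fails. The same pattern in isolation, as a sketch: Guava's Futures.allAsList combines the futures, the empty task bodies stand in for populateParameters(...), and the 120-second timeout mirrors the GET_PARTITION_DETAILS_TIMEOUT default:

import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

final class BoundedFanOut {

    // Sketch of the fan-out/join in getPartitions: submit independent lookups,
    // wait with a timeout, and cancel everything if the wait fails.
    static void loadDetails() throws Exception {
        ListeningExecutorService executor =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(3));
        ListenableFuture<List<Object>> all = Futures.allAsList(
                executor.submit(() -> { /* partition params */ }, null),
                executor.submit(() -> { /* storage descriptor params */ }, null),
                executor.submit(() -> { /* serde params */ }, null));
        try {
            all.get(120, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            // Cancelling the combined future also cancels the component futures.
            all.cancel(true);
            throw e;
        } finally {
            executor.shutdown();
        }
    }
}
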
Aggregations

ResultSetExtractor (org.springframework.jdbc.core.ResultSetExtractor)12 Strings (com.google.common.base.Strings)10 Lists (com.google.common.collect.Lists)10 Maps (com.google.common.collect.Maps)10 QualifiedName (com.netflix.metacat.common.QualifiedName)10 Config (com.netflix.metacat.common.server.properties.Config)10 Types (java.sql.Types)10 List (java.util.List)10 Map (java.util.Map)10 Collectors (java.util.stream.Collectors)10 Slf4j (lombok.extern.slf4j.Slf4j)10 JdbcTemplate (org.springframework.jdbc.core.JdbcTemplate)10 SqlParameterValue (org.springframework.jdbc.core.SqlParameterValue)10 Transactional (org.springframework.transaction.annotation.Transactional)10 Joiner (com.google.common.base.Joiner)9 VisibleForTesting (com.google.common.annotations.VisibleForTesting)7 ConnectorContext (com.netflix.metacat.common.server.connectors.ConnectorContext)5 ConnectorException (com.netflix.metacat.common.server.connectors.exception.ConnectorException)5 HiveMetrics (com.netflix.metacat.connector.hive.monitoring.HiveMetrics)5 HiveConnectorFastServiceMetric (com.netflix.metacat.connector.hive.util.HiveConnectorFastServiceMetric)5