Search in sources :

Example 1 with PartitionFilterGenerator

Use of com.netflix.metacat.connector.hive.util.PartitionFilterGenerator in the metacat project by Netflix.

From the class HiveConnectorFastPartitionService, method getHandlerResults:

/**
 * Fetches partition results for the given table, attempting to translate the Metacat
 * {@code filterExpression} into a SQL fragment via {@link PartitionFilterGenerator}.
 * If parsing/translation fails for any reason, falls back to the non-optimized path
 * using {@code prepareFilterSql} and records a failure metric.
 *
 * @param databaseName     database of the table
 * @param tableName        table whose partitions are fetched
 * @param filterExpression partition filter expression; may be null or empty
 * @param partitionIds     optional list of partition names to restrict to
 * @param sql              base SQL to execute
 * @param resultSetHandler handler that maps the result set to the result list
 * @param sort             optional sort specification
 * @param pageable         optional pagination specification
 * @return the list of handler-mapped partition results
 */
private <T> List<T> getHandlerResults(final String databaseName, final String tableName, final String filterExpression, final List<String> partitionIds, final String sql, final ResultSetHandler<List<T>> resultSetHandler, final Sort sort, final Pageable pageable) {
    List<T> partitions;
    try {
        if (!Strings.isNullOrEmpty(filterExpression)) {
            final PartitionFilterGenerator generator = new PartitionFilterGenerator(getPartitionKeys(databaseName, tableName));
            String filterSql = (String) new PartitionParser(new StringReader(filterExpression)).filter().jjtAccept(generator, null);
            // The generator may produce a more efficient equivalent query; prefer it when available.
            if (generator.isOptimized()) {
                filterSql = generator.getOptimizedSql();
            }
            if (filterSql != null && !filterSql.isEmpty()) {
                filterSql = " and (" + filterSql + ")";
            }
            // FIX: original called lowercase 'gethandlerresults', which does not resolve in
            // case-sensitive Java; the intended target is the getHandlerResults overload.
            partitions = getHandlerResults(databaseName, tableName, filterExpression, partitionIds, sql, resultSetHandler, generator.joinSql(), filterSql, generator.getParams(), sort, pageable);
        } else {
            partitions = getHandlerResults(databaseName, tableName, null, partitionIds, sql, resultSetHandler, null, null, null, sort, pageable);
        }
    } catch (Exception e) {
        // FIX: removed duplicated word "for for" in the log message.
        log.warn("Experiment: Get partitions for table {} filter {} failed with error {}", tableName, filterExpression, e.getMessage());
        registry.counter(HiveMetrics.CounterHiveExperimentGetTablePartitionsFailure.name()).increment();
        // Fallback: non-optimized filter SQL with no bind parameters.
        partitions = getHandlerResults(databaseName, tableName, filterExpression, partitionIds, sql, resultSetHandler, null, prepareFilterSql(filterExpression), Lists.newArrayList(), sort, pageable);
    }
    return partitions;
}
Also used : PartitionParser(com.netflix.metacat.common.server.partition.parser.PartitionParser) StringReader(java.io.StringReader) PartitionFilterGenerator(com.netflix.metacat.connector.hive.util.PartitionFilterGenerator) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) SQLException(java.sql.SQLException)

Example 2 with PartitionFilterGenerator

Use of com.netflix.metacat.connector.hive.util.PartitionFilterGenerator in the metacat project by Netflix.

From the class DirectSqlGetPartition, method getHandlerResults:

/**
 * Fetches partition results for the given table, attempting to translate the Metacat
 * {@code filterExpression} into a SQL fragment via {@link PartitionFilterGenerator}
 * (honoring the configured partition-name escaping). If parsing/translation fails,
 * falls back to the non-optimized path using {@code prepareFilterSql} and records a
 * failure metric tagged with the table's qualified name.
 *
 * @param databaseName      database of the table
 * @param tableName         table whose partitions are fetched
 * @param filterExpression  partition filter expression; may be null or empty
 * @param partitionIds      optional list of partition names to restrict to
 * @param sql               base SQL to execute
 * @param resultSetExtractor extractor that maps the result set to the result list
 * @param sort              optional sort specification
 * @param pageable          optional pagination specification
 * @param forceDisableAudit whether to bypass the audit table when resolving partition keys
 * @return the list of extractor-mapped partition results
 */
private <T> List<T> getHandlerResults(final String databaseName, final String tableName, @Nullable final String filterExpression, @Nullable final List<String> partitionIds, final String sql, final ResultSetExtractor<List<T>> resultSetExtractor, @Nullable final Sort sort, @Nullable final Pageable pageable, final boolean forceDisableAudit) {
    List<T> partitions;
    final QualifiedName tableQName = QualifiedName.ofTable(catalogName, databaseName, tableName);
    try {
        if (!Strings.isNullOrEmpty(filterExpression)) {
            final PartitionFilterGenerator generator = new PartitionFilterGenerator(getPartitionKeys(databaseName, tableName, forceDisableAudit), config.escapePartitionNameOnFilter());
            String filterSql = (String) new PartitionParser(new StringReader(filterExpression)).filter().jjtAccept(generator, null);
            // The generator may produce a more efficient equivalent query; prefer it when available.
            if (generator.isOptimized()) {
                filterSql = generator.getOptimizedSql();
            }
            if (filterSql != null && !filterSql.isEmpty()) {
                filterSql = " and (" + filterSql + ")";
            }
            partitions = getHandlerResults(databaseName, tableName, filterExpression, partitionIds, sql, resultSetExtractor, generator.joinSql(), filterSql, generator.getParams(), sort, pageable, forceDisableAudit);
        } else {
            partitions = getHandlerResults(databaseName, tableName, null, partitionIds, sql, resultSetExtractor, null, null, null, sort, pageable, forceDisableAudit);
        }
    } catch (Exception e) {
        // FIX: removed duplicated word "for for"; SLF4J formats the argument itself,
        // so the redundant tableQName.toString() call is dropped.
        log.warn("Experiment: Get partitions for table {} filter {} failed with error {}", tableQName, filterExpression, e.getMessage());
        registry.counter(registry.createId(HiveMetrics.CounterHiveExperimentGetTablePartitionsFailure.getMetricName()).withTags(tableQName.parts())).increment();
        // Fallback: non-optimized filter SQL with no bind parameters.
        partitions = getHandlerResults(databaseName, tableName, filterExpression, partitionIds, sql, resultSetExtractor, null, prepareFilterSql(filterExpression), Lists.newArrayList(), sort, pageable, forceDisableAudit);
    }
    return partitions;
}
Also used : PartitionParser(com.netflix.metacat.common.server.partition.parser.PartitionParser) QualifiedName(com.netflix.metacat.common.QualifiedName) StringReader(java.io.StringReader) PartitionFilterGenerator(com.netflix.metacat.connector.hive.util.PartitionFilterGenerator) TimeoutException(java.util.concurrent.TimeoutException) ConnectorException(com.netflix.metacat.common.server.connectors.exception.ConnectorException) ExecutionException(java.util.concurrent.ExecutionException)

Aggregations

ConnectorException (com.netflix.metacat.common.server.connectors.exception.ConnectorException)2 PartitionParser (com.netflix.metacat.common.server.partition.parser.PartitionParser)2 PartitionFilterGenerator (com.netflix.metacat.connector.hive.util.PartitionFilterGenerator)2 StringReader (java.io.StringReader)2 QualifiedName (com.netflix.metacat.common.QualifiedName)1 SQLException (java.sql.SQLException)1 ExecutionException (java.util.concurrent.ExecutionException)1 TimeoutException (java.util.concurrent.TimeoutException)1