Use of com.netflix.metacat.connector.hive.util.PartitionFilterGenerator in the Netflix Metacat project:
the getHandlerResults method of the HiveConnectorFastPartitionService class.
/**
 * Fetches partition query results for the given table, optionally narrowed by a Metacat
 * partition filter expression.
 * <p>
 * When a filter expression is present, it is parsed and translated by
 * {@link PartitionFilterGenerator} into a SQL fragment (plus join SQL and bind parameters)
 * that is appended to the base query. If the translation or query fails for any reason,
 * the failure is counted and the method falls back to the legacy
 * {@code prepareFilterSql}-based filtering path.
 *
 * @param databaseName     database name
 * @param tableName        table name
 * @param filterExpression Metacat partition filter expression; may be null or empty
 * @param partitionIds     partition names to restrict the result to; may be null
 * @param sql              base SQL query to execute
 * @param resultSetHandler handler converting the result set into the result list
 * @param sort             sort specification; may be null
 * @param pageable         paging specification; may be null
 * @return list of handler results, never null
 */
private <T> List<T> getHandlerResults(final String databaseName, final String tableName,
    final String filterExpression, final List<String> partitionIds, final String sql,
    final ResultSetHandler<List<T>> resultSetHandler, final Sort sort, final Pageable pageable) {
    List<T> partitions;
    try {
        if (!Strings.isNullOrEmpty(filterExpression)) {
            // Translate the Metacat filter expression into a SQL where-clause fragment.
            final PartitionFilterGenerator generator =
                new PartitionFilterGenerator(getPartitionKeys(databaseName, tableName));
            String filterSql = (String) new PartitionParser(new StringReader(filterExpression))
                .filter().jjtAccept(generator, null);
            if (generator.isOptimized()) {
                // Prefer the generator's optimized SQL form when one was produced.
                filterSql = generator.getOptimizedSql();
            }
            if (filterSql != null && !filterSql.isEmpty()) {
                filterSql = " and (" + filterSql + ")";
            }
            partitions = getHandlerResults(databaseName, tableName, filterExpression, partitionIds,
                sql, resultSetHandler, generator.joinSql(), filterSql, generator.getParams(),
                sort, pageable);
        } else {
            partitions = getHandlerResults(databaseName, tableName, null, partitionIds,
                sql, resultSetHandler, null, null, null, sort, pageable);
        }
    } catch (Exception e) {
        // Filter translation/execution failed: count it and fall back to the legacy
        // prepareFilterSql path. Pass the throwable so the stack trace is preserved.
        log.warn("Experiment: Get partitions for table {} filter {} failed with error {}",
            tableName, filterExpression, e.getMessage(), e);
        registry.counter(HiveMetrics.CounterHiveExperimentGetTablePartitionsFailure.name())
            .increment();
        partitions = getHandlerResults(databaseName, tableName, filterExpression, partitionIds,
            sql, resultSetHandler, null, prepareFilterSql(filterExpression), Lists.newArrayList(),
            sort, pageable);
    }
    return partitions;
}
Use of com.netflix.metacat.connector.hive.util.PartitionFilterGenerator in the Netflix Metacat project:
the getHandlerResults method of the DirectSqlGetPartition class.
/**
 * Fetches partition query results for the given table, optionally narrowed by a Metacat
 * partition filter expression.
 * <p>
 * When a filter expression is present, it is parsed and translated by
 * {@link PartitionFilterGenerator} (honoring {@code config.escapePartitionNameOnFilter()})
 * into a SQL fragment, join SQL, and bind parameters appended to the base query. If that
 * path fails for any reason, the failure is counted (tagged with the table's qualified
 * name) and the method falls back to the legacy {@code prepareFilterSql}-based filtering.
 *
 * @param databaseName       database name
 * @param tableName          table name
 * @param filterExpression   Metacat partition filter expression; may be null or empty
 * @param partitionIds       partition names to restrict the result to; may be null
 * @param sql                base SQL query to execute
 * @param resultSetExtractor extractor converting the result set into the result list
 * @param sort               sort specification; may be null
 * @param pageable           paging specification; may be null
 * @param forceDisableAudit  whether to bypass the audit table when reading partition keys
 * @return list of extracted results, never null
 */
private <T> List<T> getHandlerResults(final String databaseName, final String tableName,
    @Nullable final String filterExpression, @Nullable final List<String> partitionIds,
    final String sql, final ResultSetExtractor<List<T>> resultSetExtractor,
    @Nullable final Sort sort, @Nullable final Pageable pageable,
    final boolean forceDisableAudit) {
    List<T> partitions;
    final QualifiedName tableQName = QualifiedName.ofTable(catalogName, databaseName, tableName);
    try {
        if (!Strings.isNullOrEmpty(filterExpression)) {
            // Translate the Metacat filter expression into a SQL where-clause fragment.
            final PartitionFilterGenerator generator = new PartitionFilterGenerator(
                getPartitionKeys(databaseName, tableName, forceDisableAudit),
                config.escapePartitionNameOnFilter());
            String filterSql = (String) new PartitionParser(new StringReader(filterExpression))
                .filter().jjtAccept(generator, null);
            if (generator.isOptimized()) {
                // Prefer the generator's optimized SQL form when one was produced.
                filterSql = generator.getOptimizedSql();
            }
            if (filterSql != null && !filterSql.isEmpty()) {
                filterSql = " and (" + filterSql + ")";
            }
            partitions = getHandlerResults(databaseName, tableName, filterExpression, partitionIds,
                sql, resultSetExtractor, generator.joinSql(), filterSql, generator.getParams(),
                sort, pageable, forceDisableAudit);
        } else {
            partitions = getHandlerResults(databaseName, tableName, null, partitionIds,
                sql, resultSetExtractor, null, null, null, sort, pageable, forceDisableAudit);
        }
    } catch (Exception e) {
        // Filter translation/execution failed: count it (tagged per-table) and fall back to
        // the legacy prepareFilterSql path. Pass the throwable to preserve the stack trace.
        log.warn("Experiment: Get partitions for table {} filter {} failed with error {}",
            tableQName.toString(), filterExpression, e.getMessage(), e);
        registry.counter(registry
            .createId(HiveMetrics.CounterHiveExperimentGetTablePartitionsFailure.getMetricName())
            .withTags(tableQName.parts())).increment();
        partitions = getHandlerResults(databaseName, tableName, filterExpression, partitionIds,
            sql, resultSetExtractor, null, prepareFilterSql(filterExpression), Lists.newArrayList(),
            sort, pageable, forceDisableAudit);
    }
    return partitions;
}
Aggregations