Use of org.apache.hadoop.hive.ql.index.IndexSearchCondition in project hive by apache.
In class SampleHBaseKeyFactory2, method setupFilter:
private HBaseScanRange setupFilter(String keyColName, List<IndexSearchCondition> conditions)
    throws IOException {
  // Group the search conditions by the struct field of the key they refer to.
  Map<String, List<IndexSearchCondition>> fieldConds =
      new HashMap<String, List<IndexSearchCondition>>();
  for (IndexSearchCondition condition : conditions) {
    assert keyColName.equals(condition.getColumnDesc().getColumn());
    String fieldName = condition.getFields()[0];
    List<IndexSearchCondition> fieldCond = fieldConds.get(fieldName);
    if (fieldCond == null) {
      fieldConds.put(fieldName, fieldCond = new ArrayList<IndexSearchCondition>());
    }
    fieldCond.add(condition);
  }
  HBaseScanRange range = new HBaseScanRange();
  ByteArrayOutputStream startRow = new ByteArrayOutputStream();
  ByteArrayOutputStream stopRow = new ByteArrayOutputStream();
  // Walk the key's struct fields in declaration order, appending one
  // fixed-length binary element per field to the start and stop rows.
  StructTypeInfo type = (StructTypeInfo) keyMapping.columnType;
  for (String name : type.getAllStructFieldNames()) {
    List<IndexSearchCondition> fieldCond = fieldConds.get(name);
    if (fieldCond == null || fieldCond.size() > 2) {
      continue;
    }
    byte[] startElement = null;
    byte[] stopElement = null;
    for (IndexSearchCondition condition : fieldCond) {
      if (condition.getConstantDesc().getValue() == null) {
        continue;
      }
      String comparisonOp = condition.getComparisonOp();
      String constantVal = String.valueOf(condition.getConstantDesc().getValue());
      if (comparisonOp.endsWith("UDFOPEqual")) {
        startElement = toBinary(constantVal, FIXED_LENGTH, false, false);
        stopElement = toBinary(constantVal, FIXED_LENGTH, true, true);
      } else if (comparisonOp.endsWith("UDFOPEqualOrGreaterThan")) {
        startElement = toBinary(constantVal, FIXED_LENGTH, false, false);
      } else if (comparisonOp.endsWith("UDFOPGreaterThan")) {
        startElement = toBinary(constantVal, FIXED_LENGTH, false, true);
      } else if (comparisonOp.endsWith("UDFOPEqualOrLessThan")) {
        stopElement = toBinary(constantVal, FIXED_LENGTH, true, false);
      } else if (comparisonOp.endsWith("UDFOPLessThan")) {
        stopElement = toBinary(constantVal, FIXED_LENGTH, true, true);
      } else {
        throw new IOException(comparisonOp + " is not a supported comparison operator");
      }
    }
    // Once a field contributes no bound, the corresponding row prefix can grow
    // no further: flush what has been accumulated and stop extending it.
    if (startRow != null) {
      if (startElement != null) {
        startRow.write(startElement);
      } else {
        if (startRow.size() > 0) {
          range.setStartRow(startRow.toByteArray());
        }
        startRow = null;
      }
    }
    if (stopRow != null) {
      if (stopElement != null) {
        stopRow.write(stopElement);
      } else {
        if (stopRow.size() > 0) {
          range.setStopRow(stopRow.toByteArray());
        }
        stopRow = null;
      }
    }
    if (startElement == null && stopElement == null) {
      break;
    }
  }
  if (startRow != null && startRow.size() > 0) {
    range.setStartRow(startRow.toByteArray());
  }
  if (stopRow != null && stopRow.size() > 0) {
    range.setStopRow(stopRow.toByteArray());
  }
  return range;
}
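Neither toBinary nor FIXED_LENGTH appears in this excerpt. A minimal sketch of what such a helper might look like, assuming each key element is zero-padded to a fixed width for lower bounds, 0xFF-padded for upper bounds, and given a trailing byte when the next-sorting value is wanted (all assumptions for illustration, not the verified Hive code):

// Hypothetical sketch of the padding helper used above; not the verified
// Hive implementation. `end` selects upper-bound (0xFF) padding, and `next`
// appends a byte so the result sorts just after the padded value.
private byte[] toBinary(String value, int max, boolean end, boolean next) {
  byte[] in = value.getBytes(java.nio.charset.StandardCharsets.UTF_8);
  byte[] out = new byte[max + 1];                    // extra byte reserved for `next`
  System.arraycopy(in, 0, out, 0, Math.min(in.length, max));
  if (end) {
    java.util.Arrays.fill(out, Math.min(in.length, max), max, (byte) 0xff);
  }
  if (next) {
    out[max] = 0x01;
  }
  return out;
}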
Use of org.apache.hadoop.hive.ql.index.IndexSearchCondition in project hive by apache.
In class KuduPredicateHandler, method decompose:
/**
 * Analyzes the predicate and returns its decomposed form: the portion
 * that can be pushed down to Kudu and the residual portion that Kudu
 * cannot evaluate during table access.
 *
 * @param predicateExpr the predicate to be decomposed
 * @param schema the schema of the Kudu table
 * @return the decomposed form of the predicate, or null if no pushdown is possible at all
 */
public static DecomposedPredicate decompose(ExprNodeDesc predicateExpr, Schema schema) {
  IndexPredicateAnalyzer analyzer = newAnalyzer(schema);
  List<IndexSearchCondition> sConditions = new ArrayList<>();
  ExprNodeDesc residualPredicate = analyzer.analyzePredicate(predicateExpr, sConditions);
  // Nothing to decompose.
  if (sConditions.isEmpty()) {
    return null;
  }
  DecomposedPredicate decomposedPredicate = new DecomposedPredicate();
  decomposedPredicate.pushedPredicate = analyzer.translateSearchConditions(sConditions);
  decomposedPredicate.residualPredicate = (ExprNodeGenericFuncDesc) residualPredicate;
  return decomposedPredicate;
}
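The newAnalyzer helper is not shown in the snippet. A plausible sketch, assuming it simply whitelists the comparison operators Kudu can evaluate and the columns present in the Kudu schema (the operator list here is illustrative, not confirmed from the source):

// Hypothetical sketch of newAnalyzer; the operator list is illustrative.
private static IndexPredicateAnalyzer newAnalyzer(Schema schema) {
  IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
  // Only these operators become IndexSearchConditions; everything else stays residual.
  analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual");
  analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan");
  analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan");
  analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan");
  analyzer.addComparisonOp("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan");
  // Only columns that exist in the Kudu schema are eligible for pushdown.
  for (org.apache.kudu.ColumnSchema column : schema.getColumns()) {
    analyzer.allowColumnName(column.getName());
  }
  return analyzer;
}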
Use of org.apache.hadoop.hive.ql.index.IndexSearchCondition in project hive by apache.
In class HiveHBaseTableInputFormat, method setupKeyRange:
private void setupKeyRange(Scan scan, List<IndexSearchCondition> conditions, boolean isBinary)
    throws IOException {
  // Convert the search conditions into a key-range restriction on the HBase scan.
  byte[] startRow = HConstants.EMPTY_START_ROW, stopRow = HConstants.EMPTY_END_ROW;
  for (IndexSearchCondition sc : conditions) {
    ExprNodeConstantEvaluator eval = new ExprNodeConstantEvaluator(sc.getConstantDesc());
    PrimitiveObjectInspector objInspector;
    Object writable;
    try {
      objInspector = (PrimitiveObjectInspector) eval.initialize(null);
      writable = eval.evaluate(null);
    } catch (ClassCastException cce) {
      throw new IOException("Currently only primitive types are supported. Found: "
          + sc.getConstantDesc().getTypeString());
    } catch (HiveException e) {
      throw new IOException(e);
    }
    byte[] constantVal = getConstantVal(writable, objInspector, isBinary);
    String comparisonOp = sc.getComparisonOp();
    // Start rows are inclusive and stop rows are exclusive, so strict bounds
    // are shifted to the next possible byte array via getNextBA.
    if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual".equals(comparisonOp)) {
      startRow = constantVal;
      stopRow = getNextBA(constantVal);
    } else if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan".equals(comparisonOp)) {
      stopRow = constantVal;
    } else if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan".equals(comparisonOp)) {
      startRow = constantVal;
    } else if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan".equals(comparisonOp)) {
      startRow = getNextBA(constantVal);
    } else if ("org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan".equals(comparisonOp)) {
      stopRow = getNextBA(constantVal);
    } else {
      throw new IOException(comparisonOp + " is not a supported comparison operator");
    }
  }
  scan.setStartRow(startRow);
  scan.setStopRow(stopRow);
  if (LOG.isDebugEnabled()) {
    LOG.debug(Bytes.toStringBinary(startRow) + " ~ " + Bytes.toStringBinary(stopRow));
  }
}
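getNextBA is not included in the excerpt. Because HBase treats the start row as inclusive and the stop row as exclusive, the helper only has to produce the byte array that sorts immediately after its argument; a minimal sketch under that assumption:

// Hypothetical sketch: in unsigned lexicographic order, the byte array that
// sorts immediately after `current` is `current` with a trailing 0x00 appended.
private byte[] getNextBA(byte[] current) {
  byte[] next = new byte[current.length + 1];
  System.arraycopy(current, 0, next, 0, current.length);
  return next;
}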
Use of org.apache.hadoop.hive.ql.index.IndexSearchCondition in project hive by apache.
In class BitmapIndexHandler, method decomposePredicate:
/**
 * Splits the predicate into the piece each index can deal with (pushed)
 * and the piece none of them can (residual).
 * @param predicate the predicate to decompose
 * @param indexes the bitmap indexes available to the query
 * @param queryContext receives the overall residual predicate
 * @return a map from each index to the predicate pushed to it, or null if nothing can be pushed
 */
private Map<Index, ExprNodeDesc> decomposePredicate(ExprNodeDesc predicate, List<Index> indexes,
    HiveIndexQueryContext queryContext) {
  Map<Index, ExprNodeDesc> indexPredicates = new HashMap<Index, ExprNodeDesc>();
  // Compute the overall residual against all indexes at once.
  IndexPredicateAnalyzer analyzer =
      getIndexPredicateAnalyzer(indexes, queryContext.getQueryPartitions());
  List<IndexSearchCondition> searchConditions = new ArrayList<IndexSearchCondition>();
  ExprNodeDesc residualPredicate = analyzer.analyzePredicate(predicate, searchConditions);
  // Pass the residual predicate back out for further processing.
  queryContext.setResidualPredicate(residualPredicate);
  if (searchConditions.isEmpty()) {
    return null;
  }
  for (Index index : indexes) {
    ArrayList<Index> in = new ArrayList<Index>(1);
    in.add(index);
    analyzer = getIndexPredicateAnalyzer(in, queryContext.getQueryPartitions());
    searchConditions = new ArrayList<IndexSearchCondition>();
    // Split the predicate into pushed (what this index can handle) and residual
    // (what it can't). The pushed predicate from translateSearchConditions is
    // stored per index, so every index sees all the predicates applicable to it.
    analyzer.analyzePredicate(predicate, searchConditions);
    if (searchConditions.isEmpty()) {
      indexPredicates.put(index, null);
    } else {
      indexPredicates.put(index, analyzer.translateSearchConditions(searchConditions));
    }
  }
  return indexPredicates;
}
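getIndexPredicateAnalyzer is not shown here. A hedged sketch of how such a factory might look, assuming it whitelists the columns covered by the given indexes so that conditions on any other column fall into the residual (the operator choice and metastore accessors are assumptions for illustration, and the handling of queryPartitions is omitted):

// Hypothetical sketch: only columns covered by the given indexes may be pushed.
private IndexPredicateAnalyzer getIndexPredicateAnalyzer(List<Index> indexes,
    Set<Partition> queryPartitions) {
  IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer();
  analyzer.addComparisonOp(GenericUDFOPEqual.class.getName());
  for (Index index : indexes) {
    for (FieldSchema column : index.getSd().getCols()) {
      analyzer.allowColumnName(column.getName());
    }
  }
  // (handling of queryPartitions omitted in this sketch)
  return analyzer;
}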
Use of org.apache.hadoop.hive.ql.index.IndexSearchCondition in project hive by apache.
In class CompactIndexHandler, method decomposePredicate:
/**
 * Splits the predicate into the piece we can deal with (pushed) and the one we can't (residual).
 * @param predicate the predicate to decompose
 * @param index the compact index to push conditions to
 * @param queryPartitions the partitions touched by the query
 * @return the decomposed predicate, or null if nothing can be pushed
 */
private DecomposedPredicate decomposePredicate(ExprNodeDesc predicate, Index index,
    Set<Partition> queryPartitions) {
  IndexPredicateAnalyzer analyzer = getIndexPredicateAnalyzer(index, queryPartitions);
  List<IndexSearchCondition> searchConditions = new ArrayList<IndexSearchCondition>();
  // Split the predicate into pushed (what we can handle) and residual (what we can't).
  ExprNodeGenericFuncDesc residualPredicate =
      (ExprNodeGenericFuncDesc) analyzer.analyzePredicate(predicate, searchConditions);
  if (searchConditions.isEmpty()) {
    return null;
  }
  // Count conditions on actual index columns; conditions on partition columns don't count.
  int numIndexCols = 0;
  for (IndexSearchCondition searchCondition : searchConditions) {
    if (!partitionCols.contains(searchCondition.getColumnDesc().getColumn())) {
      numIndexCols++;
    }
  }
  // For now, the sorted-scan optimization only works if the predicate has a
  // single condition on an index column.
  useSorted = (numIndexCols == 1);
  DecomposedPredicate decomposedPredicate = new DecomposedPredicate();
  decomposedPredicate.pushedPredicate = analyzer.translateSearchConditions(searchConditions);
  decomposedPredicate.residualPredicate = residualPredicate;
  return decomposedPredicate;
}
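A hedged caller-side sketch of how the result of such a decomposition is typically consumed (the surrounding variables are invented for the example):

// Hypothetical usage: apply the pushed/residual split produced above.
DecomposedPredicate dp = decomposePredicate(predicate, index, queryPartitions);
if (dp == null) {
  // No pushdown possible: Hive evaluates the entire predicate itself.
} else {
  // dp.pushedPredicate   -> evaluated via the compact index scan
  // dp.residualPredicate -> re-checked by Hive on the rows the scan returns
}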