Use of org.apache.hadoop.hbase.filter.CompareFilter.CompareOp in project phoenix by apache.
From the class ExpressionCompiler, method visitLeave:
@Override
public Expression visitLeave(ComparisonParseNode node, List<Expression> children) throws SQLException {
    ParseNode lhsNode = node.getChildren().get(0);
    ParseNode rhsNode = node.getChildren().get(1);
    Expression lhsExpr = children.get(0);
    Expression rhsExpr = children.get(1);
    CompareOp op = node.getFilterOp();
    if (lhsNode instanceof RowValueConstructorParseNode && rhsNode instanceof RowValueConstructorParseNode) {
        int i = 0;
        for (; i < Math.min(lhsExpr.getChildren().size(), rhsExpr.getChildren().size()); i++) {
            addBindParamMetaData(lhsNode.getChildren().get(i), rhsNode.getChildren().get(i), lhsExpr.getChildren().get(i), rhsExpr.getChildren().get(i));
        }
        for (; i < lhsExpr.getChildren().size(); i++) {
            addBindParamMetaData(lhsNode.getChildren().get(i), null, lhsExpr.getChildren().get(i), null);
        }
        for (; i < rhsExpr.getChildren().size(); i++) {
            addBindParamMetaData(null, rhsNode.getChildren().get(i), null, rhsExpr.getChildren().get(i));
        }
    } else if (lhsExpr instanceof RowValueConstructorExpression) {
        addBindParamMetaData(lhsNode.getChildren().get(0), rhsNode, lhsExpr.getChildren().get(0), rhsExpr);
        for (int i = 1; i < lhsExpr.getChildren().size(); i++) {
            addBindParamMetaData(lhsNode.getChildren().get(i), null, lhsExpr.getChildren().get(i), null);
        }
    } else if (rhsExpr instanceof RowValueConstructorExpression) {
        addBindParamMetaData(lhsNode, rhsNode.getChildren().get(0), lhsExpr, rhsExpr.getChildren().get(0));
        for (int i = 1; i < rhsExpr.getChildren().size(); i++) {
            addBindParamMetaData(null, rhsNode.getChildren().get(i), null, rhsExpr.getChildren().get(i));
        }
    } else {
        addBindParamMetaData(lhsNode, rhsNode, lhsExpr, rhsExpr);
    }
    return wrapGroupByExpression(ComparisonExpression.create(op, children, context.getTempPtr(), context.getCurrentTable().getTable().rowKeyOrderOptimizable()));
}
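This method resolves a SQL comparison (including row value constructor comparisons such as WHERE (pk1, pk2) >= (?, ?), where bind-parameter metadata is paired child by child) into a ComparisonExpression keyed by a CompareOp, the comparison enum from HBase's CompareFilter (deprecated in HBase 2.0 in favor of CompareOperator). As rough context for how SQL comparison tokens line up with CompareOp values, a hypothetical helper, not part of Phoenix, might look like:

// Hypothetical helper (illustration only): maps SQL comparison tokens to the
// CompareOp values that ComparisonParseNode.getFilterOp() resolves to.
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;

class CompareOpMapping {
    static CompareOp toCompareOp(String sqlOp) {
        switch (sqlOp) {
            case "=":  return CompareOp.EQUAL;
            case "!=":
            case "<>": return CompareOp.NOT_EQUAL;
            case "<":  return CompareOp.LESS;
            case "<=": return CompareOp.LESS_OR_EQUAL;
            case ">":  return CompareOp.GREATER;
            case ">=": return CompareOp.GREATER_OR_EQUAL;
            default:   throw new IllegalArgumentException(sqlOp);
        }
    }
}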
Use of org.apache.hadoop.hbase.filter.CompareFilter.CompareOp in project phoenix by apache.
From the class PrefixFunction, method newKeyPart:
@Override
public KeyPart newKeyPart(final KeyPart childPart) {
    return new KeyPart() {
        private final List<Expression> extractNodes = extractNode() ? Collections.<Expression>singletonList(PrefixFunction.this) : Collections.<Expression>emptyList();

        @Override
        public PColumn getColumn() {
            return childPart.getColumn();
        }

        @Override
        public List<Expression> getExtractNodes() {
            return extractNodes;
        }

        @Override
        public KeyRange getKeyRange(CompareOp op, Expression rhs) {
            byte[] lowerRange = KeyRange.UNBOUND;
            byte[] upperRange = KeyRange.UNBOUND;
            boolean lowerInclusive = true;
            PDataType type = getColumn().getDataType();
            switch (op) {
                case EQUAL:
                    lowerRange = evaluateExpression(rhs);
                    upperRange = ByteUtil.nextKey(lowerRange);
                    break;
                case GREATER:
                    lowerRange = ByteUtil.nextKey(evaluateExpression(rhs));
                    break;
                case LESS_OR_EQUAL:
                    upperRange = ByteUtil.nextKey(evaluateExpression(rhs));
                    lowerInclusive = false;
                    break;
                default:
                    return childPart.getKeyRange(op, rhs);
            }
            PColumn column = getColumn();
            Integer length = column.getMaxLength();
            if (type.isFixedWidth()) {
                if (length != null) {
                    // Pad with the ASC fill byte rather than the column's sort order, as otherwise
                    // rows would be skipped in the descending case, since rows with more padding sort
                    // *after* rows with no padding.
                    if (lowerRange != KeyRange.UNBOUND) {
                        lowerRange = type.pad(lowerRange, length, SortOrder.ASC);
                    }
                    if (upperRange != KeyRange.UNBOUND) {
                        upperRange = type.pad(upperRange, length, SortOrder.ASC);
                    }
                }
            } else if (column.getSortOrder() == SortOrder.DESC && getTable().rowKeyOrderOptimizable()) {
                // For DESC columns an inverted (\xFF) separator gets appended to the lower range,
                // so append a zero byte first to avoid skipping matching rows: for rows 'ab' and 'a',
                // a lowerRange of 'a\xFF' would skip 'ab', while 'a\x00\xFF' would not.
                if (lowerRange != KeyRange.UNBOUND) {
                    lowerRange = Arrays.copyOf(lowerRange, lowerRange.length + 1);
                    lowerRange[lowerRange.length - 1] = QueryConstants.SEPARATOR_BYTE;
                }
            }
            return KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, false);
        }

        @Override
        public PTable getTable() {
            return childPart.getTable();
        }
    };
}
Use of org.apache.hadoop.hbase.filter.CompareFilter.CompareOp in project phoenix by apache.
From the class RTrimFunction, method newKeyPart:
@Override
public KeyPart newKeyPart(final KeyPart childPart) {
    return new KeyPart() {
        @Override
        public KeyRange getKeyRange(CompareOp op, Expression rhs) {
            byte[] lowerRange = KeyRange.UNBOUND;
            byte[] upperRange = KeyRange.UNBOUND;
            boolean lowerInclusive = true;
            boolean upperInclusive = false;
            PDataType type = getColumn().getDataType();
            SortOrder sortOrder = getColumn().getSortOrder();
            switch (op) {
                case LESS_OR_EQUAL:
                    lowerInclusive = false;
                    // fall through
                case EQUAL:
                    upperRange = evaluateExpression(rhs);
                    if (op == CompareOp.EQUAL) {
                        lowerRange = upperRange;
                    }
                    if (sortOrder == SortOrder.ASC || !getTable().rowKeyOrderOptimizable()) {
                        upperRange = Arrays.copyOf(upperRange, upperRange.length + 1);
                        upperRange[upperRange.length - 1] = StringUtil.SPACE_UTF8;
                        ByteUtil.nextKey(upperRange, upperRange.length);
                    } else {
                        upperInclusive = true;
                        if (op == CompareOp.LESS_OR_EQUAL) {
                            // Nothing more to do here: for DESC the upper bound of the range
                            // will be the RHS value.
                            break;
                        }
                        /*
                         * Somewhat tricky to get the range correct for the DESC equality case.
                         * The lower range is the RHS value followed by any number of inverted spaces.
                         * We need to add a zero byte as the lower range will have an \xFF byte
                         * appended to it and otherwise we'd skip past any rows where there is more
                         * than one space following the RHS.
                         * The upper range should span up to and including the RHS value. We need
                         * to add our own \xFF as otherwise this will look like a degenerate query
                         * since the lower would be bigger than the upper range.
                         */
                        lowerRange = Arrays.copyOf(lowerRange, lowerRange.length + 2);
                        lowerRange[lowerRange.length - 2] = StringUtil.INVERTED_SPACE_UTF8;
                        lowerRange[lowerRange.length - 1] = QueryConstants.SEPARATOR_BYTE;
                        upperRange = Arrays.copyOf(upperRange, upperRange.length + 1);
                        upperRange[upperRange.length - 1] = QueryConstants.DESC_SEPARATOR_BYTE;
                    }
                    break;
                default:
                    // TODO: Is this ok for DESC?
                    return childPart.getKeyRange(op, rhs);
            }
            Integer length = getColumn().getMaxLength();
            if (type.isFixedWidth() && length != null) {
                // Pad with the ASC fill byte rather than the column's sort order, as otherwise
                // rows would be skipped in the descending case, since rows with more padding sort
                // *after* rows with no padding.
                if (lowerRange != KeyRange.UNBOUND) {
                    lowerRange = type.pad(lowerRange, length, SortOrder.ASC);
                }
                if (upperRange != KeyRange.UNBOUND) {
                    upperRange = type.pad(upperRange, length, SortOrder.ASC);
                }
            }
            return KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive);
        }

        @Override
        public List<Expression> getExtractNodes() {
            // Cannot extract this node: the range may include rows with trailing
            // non blank characters such as 'foo bar' where the RHS constant is 'foo',
            // so the RTRIM filter must still be applied.
            return Collections.<Expression>emptyList();
        }

        @Override
        public PColumn getColumn() {
            return childPart.getColumn();
        }

        @Override
        public PTable getTable() {
            return childPart.getTable();
        }
    };
}
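To make the ASC branch concrete: for RTRIM(col) = 'foo' on a variable-width (e.g. VARCHAR) ascending column, the code builds the range ['foo', 'foo!'), since the appended space byte 0x20 is bumped to 0x21 ('!') by ByteUtil.nextKey. A small illustration using only the HBase byte comparator, not Phoenix code:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.util.Bytes;

public class RTrimRangeDemo {
    public static void main(String[] args) {
        // Range built above for RTRIM(col) = 'foo' on an ASC column:
        // lower = "foo" (inclusive), upper = "foo!" (exclusive).
        byte[] lower = "foo".getBytes(StandardCharsets.UTF_8);
        byte[] upper = "foo!".getBytes(StandardCharsets.UTF_8);
        for (String candidate : new String[] { "foo", "foo ", "foo   ", "foo bar", "foobar" }) {
            byte[] key = candidate.getBytes(StandardCharsets.UTF_8);
            boolean inRange = Bytes.compareTo(key, lower) >= 0 && Bytes.compareTo(key, upper) < 0;
            System.out.println("'" + candidate + "' in range: " + inRange);
        }
        // 'foo', 'foo ', 'foo   ', and 'foo bar' fall in the range; 'foobar' does not.
        // 'foo bar' is a false positive for RTRIM(col) = 'foo', which is why
        // getExtractNodes() above returns an empty list and the filter still runs.
    }
}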
Use of org.apache.hadoop.hbase.filter.CompareFilter.CompareOp in project cxf by apache.
From the class HBaseQueryVisitor, method buildSimpleQuery:
private Filter buildSimpleQuery(ConditionType ct, String name, Object value) {
    name = super.getRealPropertyName(name);
    validatePropertyValue(name, value);
    Class<?> clazz = getPrimitiveFieldClass(name, value.getClass());
    CompareOp compareOp = null;
    boolean regexCompRequired = false;
    switch (ct) {
        case EQUALS:
            compareOp = CompareOp.EQUAL;
            regexCompRequired = String.class == clazz && value.toString().endsWith("*");
            break;
        case NOT_EQUALS:
            compareOp = CompareOp.NOT_EQUAL;
            regexCompRequired = String.class == clazz && value.toString().endsWith("*");
            break;
        case GREATER_THAN:
            compareOp = CompareOp.GREATER;
            break;
        case GREATER_OR_EQUALS:
            compareOp = CompareOp.GREATER_OR_EQUAL;
            break;
        case LESS_THAN:
            compareOp = CompareOp.LESS;
            break;
        case LESS_OR_EQUALS:
            compareOp = CompareOp.LESS_OR_EQUAL;
            break;
        default:
            break;
    }
    String qualifier = name;
    String theFamily = family != null ? family : familyMap.get(qualifier);
    ByteArrayComparable byteArrayComparable = regexCompRequired
            ? new RegexStringComparator(value.toString().replace("*", "."))
            : new BinaryComparator(value.toString().getBytes(StandardCharsets.UTF_8));
    return new SingleColumnValueFilter(theFamily.getBytes(StandardCharsets.UTF_8), qualifier.getBytes(StandardCharsets.UTF_8), compareOp, byteArrayComparable);
}
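For context, the Filter returned here is what an HBase client attaches to a Get or Scan. A minimal, self-contained sketch with hypothetical table and column names, using the HBase 1.x client API in which SingleColumnValueFilter still accepts a CompareOp:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;

public class ScanWithValueFilter {
    // Hypothetical example: keep only rows whose "info:status" column equals "active".
    static Scan buildScan() {
        SingleColumnValueFilter filter = new SingleColumnValueFilter(
                "info".getBytes(StandardCharsets.UTF_8),
                "status".getBytes(StandardCharsets.UTF_8),
                CompareOp.EQUAL,
                new BinaryComparator("active".getBytes(StandardCharsets.UTF_8)));
        // Rows missing the column would otherwise pass the filter.
        filter.setFilterIfMissing(true);
        Scan scan = new Scan();
        scan.setFilter(filter);
        return scan;
    }
}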
Use of org.apache.hadoop.hbase.filter.CompareFilter.CompareOp in project drill by axbaretto.
From the class HBaseFilterBuilder, method createHBaseScanSpec:
private HBaseScanSpec createHBaseScanSpec(FunctionCall call, CompareFunctionsProcessor processor) {
    String functionName = processor.getFunctionName();
    SchemaPath field = processor.getPath();
    byte[] fieldValue = processor.getValue();
    boolean sortOrderAscending = processor.isSortOrderAscending();
    boolean isRowKey = field.getRootSegmentPath().equals(ROW_KEY);
    if (!(isRowKey || (!field.getRootSegment().isLastPath() && field.getRootSegment().getChild().isLastPath() && field.getRootSegment().getChild().isNamed()))) {
        /*
         * if the field in this function is neither the row_key nor a qualified HBase column, return.
         */
        return null;
    }
    if (processor.isRowKeyPrefixComparison()) {
        return createRowKeyPrefixScanSpec(call, processor);
    }
    CompareOp compareOp = null;
    boolean isNullTest = false;
    ByteArrayComparable comparator = new BinaryComparator(fieldValue);
    byte[] startRow = HConstants.EMPTY_START_ROW;
    byte[] stopRow = HConstants.EMPTY_END_ROW;
    switch (functionName) {
        case "equal":
            compareOp = CompareOp.EQUAL;
            if (isRowKey) {
                startRow = fieldValue;
                /* stopRow should be just greater than 'value' */
                stopRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
                compareOp = CompareOp.EQUAL;
            }
            break;
        case "not_equal":
            compareOp = CompareOp.NOT_EQUAL;
            break;
        case "greater_than_or_equal_to":
            if (sortOrderAscending) {
                compareOp = CompareOp.GREATER_OR_EQUAL;
                if (isRowKey) {
                    startRow = fieldValue;
                }
            } else {
                compareOp = CompareOp.LESS_OR_EQUAL;
                if (isRowKey) {
                    // stopRow should be just greater than 'value'
                    stopRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
                }
            }
            break;
        case "greater_than":
            if (sortOrderAscending) {
                compareOp = CompareOp.GREATER;
                if (isRowKey) {
                    // startRow should be just greater than 'value'
                    startRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
                }
            } else {
                compareOp = CompareOp.LESS;
                if (isRowKey) {
                    stopRow = fieldValue;
                }
            }
            break;
        case "less_than_or_equal_to":
            if (sortOrderAscending) {
                compareOp = CompareOp.LESS_OR_EQUAL;
                if (isRowKey) {
                    // stopRow should be just greater than 'value'
                    stopRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
                }
            } else {
                compareOp = CompareOp.GREATER_OR_EQUAL;
                if (isRowKey) {
                    startRow = fieldValue;
                }
            }
            break;
        case "less_than":
            if (sortOrderAscending) {
                compareOp = CompareOp.LESS;
                if (isRowKey) {
                    stopRow = fieldValue;
                }
            } else {
                compareOp = CompareOp.GREATER;
                if (isRowKey) {
                    // startRow should be just greater than 'value'
                    startRow = Arrays.copyOf(fieldValue, fieldValue.length + 1);
                }
            }
            break;
        case "isnull":
        case "isNull":
        case "is null":
            if (isRowKey) {
                return null;
            }
            isNullTest = true;
            compareOp = CompareOp.EQUAL;
            comparator = new NullComparator();
            break;
        case "isnotnull":
        case "isNotNull":
        case "is not null":
            if (isRowKey) {
                return null;
            }
            compareOp = CompareOp.NOT_EQUAL;
            comparator = new NullComparator();
            break;
        case "like":
            /*
             * Convert the LIKE operand to Regular Expression pattern so that we can
             * apply RegexStringComparator()
             */
            HBaseRegexParser parser = new HBaseRegexParser(call).parse();
            compareOp = CompareOp.EQUAL;
            comparator = new RegexStringComparator(parser.getRegexString());
            /*
             * We can possibly do better if the LIKE operator is on the row_key
             */
            if (isRowKey) {
                String prefix = parser.getPrefixString();
                if (prefix != null) {
                    /*
                     * If there is a literal prefix, it can help us prune the scan to a sub range
                     */
                    if (prefix.equals(parser.getLikeString())) {
                        /* The operand value is literal. This turns the LIKE operator to EQUAL operator */
                        startRow = stopRow = fieldValue;
                        compareOp = null;
                    } else {
                        startRow = prefix.getBytes(Charsets.UTF_8);
                        stopRow = startRow.clone();
                        boolean isMaxVal = true;
                        for (int i = stopRow.length - 1; i >= 0; --i) {
                            int nextByteValue = (0xff & stopRow[i]) + 1;
                            if (nextByteValue < 0xff) {
                                stopRow[i] = (byte) nextByteValue;
                                isMaxVal = false;
                                break;
                            } else {
                                stopRow[i] = 0;
                            }
                        }
                        if (isMaxVal) {
                            stopRow = HConstants.EMPTY_END_ROW;
                        }
                    }
                }
            }
            break;
    }
    if (compareOp != null || startRow != HConstants.EMPTY_START_ROW || stopRow != HConstants.EMPTY_END_ROW) {
        Filter filter = null;
        if (isRowKey) {
            if (compareOp != null) {
                filter = new RowFilter(compareOp, comparator);
            }
        } else {
            byte[] family = HBaseUtils.getBytes(field.getRootSegment().getPath());
            byte[] qualifier = HBaseUtils.getBytes(field.getRootSegment().getChild().getNameSegment().getPath());
            filter = new SingleColumnValueFilter(family, qualifier, compareOp, comparator);
            ((SingleColumnValueFilter) filter).setLatestVersionOnly(true);
            if (!isNullTest) {
                ((SingleColumnValueFilter) filter).setFilterIfMissing(true);
            }
        }
        return new HBaseScanSpec(groupScan.getTableName(), startRow, stopRow, filter);
    }
    // else
    return null;
}
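Downstream, an HBaseScanSpec like this typically becomes an HBase Scan bounded by the computed row-key range with the filter attached. A minimal sketch of that final step, with hypothetical values and the HBase 1.x client API (not Drill's actual scan construction code):

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.RowFilter;

public class RowKeyRangeScan {
    // Hypothetical equivalent of "row_key >= 'row100'" on an ascending key:
    // bound the scan at startRow and add a RowFilter with GREATER_OR_EQUAL,
    // mirroring the "greater_than_or_equal_to" case above.
    static Scan buildScan() {
        byte[] startRow = "row100".getBytes(StandardCharsets.UTF_8);
        Scan scan = new Scan();
        scan.setStartRow(startRow); // deprecated in HBase 2.x in favor of withStartRow()
        scan.setFilter(new RowFilter(CompareOp.GREATER_OR_EQUAL, new BinaryComparator(startRow)));
        return scan;
    }
}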