Use of org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc in project hive by apache.
The class TestVectorUDFAdaptor, method testMultiArgumentUDF.
@Test
public void testMultiArgumentUDF() {

  // create a syntax tree for a function call "testudf(col0, col1, col2)"
  ExprNodeGenericFuncDesc funcDesc;
  TypeInfo typeInfoStr = TypeInfoFactory.stringTypeInfo;
  TypeInfo typeInfoLong = TypeInfoFactory.longTypeInfo;
  TypeInfo typeInfoDbl = TypeInfoFactory.doubleTypeInfo;
  GenericUDFBridge genericUDFBridge =
      new GenericUDFBridge("testudf", false, ConcatTextLongDoubleUDF.class.getName());
  List<ExprNodeDesc> children = new ArrayList<ExprNodeDesc>();
  children.add(new ExprNodeColumnDesc(typeInfoStr, "col0", "tablename", false));
  children.add(new ExprNodeColumnDesc(typeInfoLong, "col1", "tablename", false));
  children.add(new ExprNodeColumnDesc(typeInfoDbl, "col2", "tablename", false));

  VectorUDFArgDesc[] argDescs = new VectorUDFArgDesc[3];
  for (int i = 0; i < 3; i++) {
    argDescs[i] = new VectorUDFArgDesc();
    argDescs[i].setVariable(i);
  }
  funcDesc = new ExprNodeGenericFuncDesc(typeInfoStr, genericUDFBridge,
      genericUDFBridge.getUdfName(), children);

  // create the adaptor for this function call to work in vector mode
  VectorUDFAdaptor vudf = null;
  try {
    vudf = new VectorUDFAdaptor(funcDesc, 3, "String", argDescs);
  } catch (HiveException e) {
    // We should never get here.
    assertTrue(false);
    throw new RuntimeException(e);
  }

  // with no nulls
  VectorizedRowBatch b = getBatchStrDblLongWithStrOut();
  vudf.evaluate(b);
  byte[] result = null;
  byte[] result2 = null;
  try {
    result = "red:1:1.0".getBytes("UTF-8");
    result2 = "blue:0:0.0".getBytes("UTF-8");
  } catch (Exception e) {
    // UTF-8 is always supported, so this cannot happen.
  }
  BytesColumnVector out = (BytesColumnVector) b.cols[3];
  int cmp = StringExpr.compare(result, 0, result.length,
      out.vector[1], out.start[1], out.length[1]);
  assertEquals(0, cmp);
  assertTrue(out.noNulls);

  // with nulls
  b = getBatchStrDblLongWithStrOut();
  b.cols[1].noNulls = false;
  vudf.evaluate(b);
  out = (BytesColumnVector) b.cols[3];
  assertFalse(out.noNulls);
  assertTrue(out.isNull[1]);

  // with all input columns repeating
  b = getBatchStrDblLongWithStrOut();
  b.cols[0].isRepeating = true;
  b.cols[1].isRepeating = true;
  b.cols[2].isRepeating = true;
  vudf.evaluate(b);
  out = (BytesColumnVector) b.cols[3];
  assertTrue(out.isRepeating);
  cmp = StringExpr.compare(result2, 0, result2.length,
      out.vector[0], out.start[0], out.length[0]);
  assertEquals(0, cmp);
  assertTrue(out.noNulls);
}
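The batch-builder helper getBatchStrDblLongWithStrOut() is not part of this excerpt. The sketch below is a hypothetical reconstruction inferred from the assertions above: row 0 holds ("blue", 0, 0.0), row 1 holds ("red", 1, 1.0), column 3 is the string output column, and row 1 of the long column is pre-marked null so that the "with nulls" case takes effect once noNulls is flipped to false. It assumes the same imports as the test class plus java.nio.charset.StandardCharsets.

// Hypothetical reconstruction; not the actual Hive helper.
private VectorizedRowBatch getBatchStrDblLongWithStrOut() {
  VectorizedRowBatch b = new VectorizedRowBatch(4);
  BytesColumnVector strCol = new BytesColumnVector();
  LongColumnVector longCol = new LongColumnVector();
  DoubleColumnVector dblCol = new DoubleColumnVector();
  BytesColumnVector outCol = new BytesColumnVector();
  b.cols[0] = strCol;
  b.cols[1] = longCol;
  b.cols[2] = dblCol;
  b.cols[3] = outCol;
  b.size = 2;
  strCol.initBuffer();
  outCol.initBuffer();
  byte[] blue = "blue".getBytes(StandardCharsets.UTF_8);
  byte[] red = "red".getBytes(StandardCharsets.UTF_8);
  strCol.setVal(0, blue, 0, blue.length);
  strCol.setVal(1, red, 0, red.length);
  longCol.vector[0] = 0;
  longCol.vector[1] = 1;
  dblCol.vector[0] = 0.0;
  dblCol.vector[1] = 1.0;
  // Pre-mark row 1 of the long column as null; this is inert until the
  // caller sets noNulls = false, as the "with nulls" case above does.
  longCol.isNull[1] = true;
  return b;
}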
Use of org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc in project hive by apache.
The class TestVectorUDFAdaptor, method testLongUDF.
@Test
public void testLongUDF() {

  // create a syntax tree for a simple function call "longudf(col0)"
  ExprNodeGenericFuncDesc funcDesc;
  TypeInfo typeInfo = TypeInfoFactory.longTypeInfo;
  GenericUDFBridge genericUDFBridge =
      new GenericUDFBridge("longudf", false, LongUDF.class.getName());
  List<ExprNodeDesc> children = new ArrayList<ExprNodeDesc>();
  ExprNodeColumnDesc colDesc = new ExprNodeColumnDesc(typeInfo, "col0", "tablename", false);
  children.add(colDesc);
  VectorUDFArgDesc[] argDescs = new VectorUDFArgDesc[1];
  argDescs[0] = new VectorUDFArgDesc();
  argDescs[0].setVariable(0);
  funcDesc = new ExprNodeGenericFuncDesc(typeInfo, genericUDFBridge,
      genericUDFBridge.getUdfName(), children);

  // create the adaptor for this function call to work in vector mode
  VectorUDFAdaptor vudf = null;
  try {
    vudf = new VectorUDFAdaptor(funcDesc, 1, "Long", argDescs);
  } catch (HiveException e) {
    // We should never get here.
    assertTrue(false);
  }

  VectorizedRowBatch b = getBatchLongInLongOut();
  vudf.evaluate(b);

  // verify output
  LongColumnVector out = (LongColumnVector) b.cols[1];
  assertEquals(1000, out.vector[0]);
  assertEquals(1001, out.vector[1]);
  assertEquals(1002, out.vector[2]);
  assertTrue(out.noNulls);
  assertFalse(out.isRepeating);

  // with nulls
  b = getBatchLongInLongOut();
  out = (LongColumnVector) b.cols[1];
  b.cols[0].noNulls = false;
  vudf.evaluate(b);
  assertFalse(out.noNulls);
  assertEquals(1000, out.vector[0]);
  assertEquals(1001, out.vector[1]);
  assertTrue(out.isNull[2]);
  assertFalse(out.isRepeating);

  // with repeating
  b = getBatchLongInLongOut();
  out = (LongColumnVector) b.cols[1];
  b.cols[0].isRepeating = true;
  vudf.evaluate(b);
  // The implementation may or may not set isRepeating on the output column;
  // that is implementation-defined.
  assertTrue(b.cols[1].isRepeating && out.vector[0] == 1000
      || !b.cols[1].isRepeating && out.vector[2] == 1000);
  assertEquals(3, b.size);
}
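Neither LongUDF nor getBatchLongInLongOut() appears in this excerpt. The sketches below are hedged reconstructions based on the assertions above: the UDF is assumed to add 1000 to its argument, and the input column is assumed to hold 0, 1, 2 with row 2 pre-marked null (effective only once noNulls is set to false). They assume org.apache.hadoop.hive.ql.exec.UDF and org.apache.hadoop.io.LongWritable are imported, and LongUDF is shown as a nested class purely for compactness.

// Hypothetical reconstructions; the real classes may differ.
public static class LongUDF extends UDF {
  public LongWritable evaluate(LongWritable a) {
    if (a == null) {
      return null;
    }
    return new LongWritable(a.get() + 1000);
  }
}

private VectorizedRowBatch getBatchLongInLongOut() {
  VectorizedRowBatch b = new VectorizedRowBatch(2);
  LongColumnVector in = new LongColumnVector();
  LongColumnVector out = new LongColumnVector();
  b.cols[0] = in;
  b.cols[1] = out;
  b.size = 3;
  in.vector[0] = 0;
  in.vector[1] = 1;
  in.vector[2] = 2;
  // Inert until the "with nulls" case above sets noNulls = false.
  in.isNull[2] = true;
  return b;
}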
Use of org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc in project hive by apache.
The class QueryConditionBuilder, method walkTreeAndTranslateColumnNames.
/*
 * Translate column names by walking the AST
 */
private void walkTreeAndTranslateColumnNames(ExprNodeDesc node, Map<String, String> columnMap) {
  if (node == null) {
    return;
  }
  if (node instanceof ExprNodeColumnDesc) {
    ExprNodeColumnDesc column = (ExprNodeColumnDesc) node;
    String hiveColumnName = column.getColumn().toLowerCase();
    if (columnMap.containsKey(hiveColumnName)) {
      String dbColumnName = columnMap.get(hiveColumnName);
      String finalName = formatColumnName(dbColumnName);
      column.setColumn(finalName);
    }
  } else {
    if (node.getChildren() != null) {
      for (ExprNodeDesc childNode : node.getChildren()) {
        walkTreeAndTranslateColumnNames(childNode, columnMap);
      }
    }
  }
}
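A minimal usage sketch from inside QueryConditionBuilder (the column map, names, and type are hypothetical; formatColumnName is the class's own private helper): a single column reference is translated in place before the condition string is generated.

// Illustrative only: the map, names, and type are hypothetical.
Map<String, String> columnMap = new HashMap<String, String>();
columnMap.put("emp_id", "EMPLOYEE_ID");
ExprNodeColumnDesc col =
    new ExprNodeColumnDesc(TypeInfoFactory.longTypeInfo, "emp_id", "tablename", false);
walkTreeAndTranslateColumnNames(col, columnMap);
// col.getColumn() now returns formatColumnName("EMPLOYEE_ID").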
Use of org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc in project phoenix by apache.
The class IndexPredicateAnalyzer, method processingInOperator.
private void processingInOperator(ExprNodeGenericFuncDesc expr,
    List<IndexSearchCondition> searchConditions, boolean isNot, Object... nodeOutputs) {
  ExprNodeColumnDesc columnDesc = null;
  String[] fields = null;
  if (LOG.isTraceEnabled()) {
    LOG.trace("Processing In Operator. nodeOutputs : " + Lists.newArrayList(nodeOutputs));
  }
  if (nodeOutputs[0] instanceof ExprNodeFieldDesc) {
    // rowKey field
    ExprNodeFieldDesc fieldDesc = (ExprNodeFieldDesc) nodeOutputs[0];
    fields = ExprNodeDescUtils.extractFields(fieldDesc);
    ExprNodeDesc[] extracted = ExprNodeDescUtils.extractComparePair(
        (ExprNodeDesc) nodeOutputs[0], (ExprNodeDesc) nodeOutputs[1]);
    if (extracted == null) {
      // adding for tez
      return;
    }
    if (LOG.isTraceEnabled()) {
      LOG.trace("nodeOutputs[0] : " + nodeOutputs[0] + ", nodeOutputs[1] : " + nodeOutputs[1]
          + " => " + Lists.newArrayList(extracted));
    }
    columnDesc = (ExprNodeColumnDesc) extracted[0];
  } else if (nodeOutputs[0] instanceof ExprNodeGenericFuncDesc) {
    columnDesc = (ExprNodeColumnDesc) ((ExprNodeGenericFuncDesc) nodeOutputs[0]).getChildren().get(0);
  } else {
    columnDesc = (ExprNodeColumnDesc) nodeOutputs[0];
  }
  String udfName = expr.getGenericUDF().getUdfName();
  ExprNodeConstantDesc[] inConstantDescs = new ExprNodeConstantDesc[nodeOutputs.length - 1];
  for (int i = 0, limit = inConstantDescs.length; i < limit; i++) {
    if (!(nodeOutputs[i + 1] instanceof ExprNodeConstantDesc)) {
      // adding for tez
      return;
    }
    inConstantDescs[i] = (ExprNodeConstantDesc) nodeOutputs[i + 1];
  }
  searchConditions.add(new IndexSearchCondition(columnDesc, udfName, inConstantDescs, expr, fields, isNot));
}
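For context, a hedged illustration of how nodeOutputs is expected to be laid out for a predicate such as col0 IN (1, 2): the first element is the column (or row-key field) expression and every following element must already be a constant, otherwise the method returns without adding a search condition. The column name, table name, and values below are hypothetical.

// Hypothetical shape of nodeOutputs for "col0 IN (1, 2)".
Object[] nodeOutputs = new Object[] {
    new ExprNodeColumnDesc(TypeInfoFactory.longTypeInfo, "col0", "tablename", false),
    new ExprNodeConstantDesc(TypeInfoFactory.longTypeInfo, 1L),
    new ExprNodeConstantDesc(TypeInfoFactory.longTypeInfo, 2L)
};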
Use of org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc in project phoenix by apache.
The class IndexPredicateAnalyzer, method getColumnExpr.
// Check if ExprNodeColumnDesc is wrapped in expr.
// If so, peel off. Otherwise return itself.
private ExprNodeDesc getColumnExpr(ExprNodeDesc expr) {
  if (expr instanceof ExprNodeColumnDesc) {
    return expr;
  }
  ExprNodeGenericFuncDesc funcDesc = null;
  if (expr instanceof ExprNodeGenericFuncDesc) {
    funcDesc = (ExprNodeGenericFuncDesc) expr;
  }
  if (null == funcDesc) {
    return expr;
  }
  GenericUDF udf = funcDesc.getGenericUDF();
  // check if it's a simple cast expression.
  if ((udf instanceof GenericUDFBridge || udf instanceof GenericUDFToBinary
      || udf instanceof GenericUDFToChar || udf instanceof GenericUDFToVarchar
      || udf instanceof GenericUDFToDecimal || udf instanceof GenericUDFToDate
      || udf instanceof GenericUDFToUnixTimeStamp || udf instanceof GenericUDFToUtcTimestamp)
      && funcDesc.getChildren().size() == 1
      && funcDesc.getChildren().get(0) instanceof ExprNodeColumnDesc) {
    return expr.getChildren().get(0);
  }
  return expr;
}
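A short hedged example of the peel-off behaviour, building a cast expression by hand. GenericUDFToDecimal is one of the cast UDFs listed above; the column name, table name, and types are illustrative only.

// Hypothetical: CAST(col0 AS DECIMAL) wrapped around a plain column reference.
ExprNodeColumnDesc col =
    new ExprNodeColumnDesc(TypeInfoFactory.longTypeInfo, "col0", "tablename", false);
ExprNodeGenericFuncDesc cast = new ExprNodeGenericFuncDesc(
    TypeInfoFactory.decimalTypeInfo, new GenericUDFToDecimal(),
    Arrays.asList((ExprNodeDesc) col));
ExprNodeDesc unwrapped = getColumnExpr(cast);
// unwrapped is the inner column; any other wrapping function is returned unchanged.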