Use of org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc in project hive by apache.
In class TestExpressionEvaluator, method testExprNodeColumnEvaluator:
public void testExprNodeColumnEvaluator() throws Throwable {
  try {
    // get an evaluator for a simple field expression
    ExprNodeDesc exprDesc = new ExprNodeColumnDesc(colaType, "cola", "", false);
    ExprNodeEvaluator eval = ExprNodeEvaluatorFactory.get(exprDesc);
    // evaluate on row
    ObjectInspector resultOI = eval.initialize(r.oi);
    Object resultO = eval.evaluate(r.o);
    Object standardResult =
        ObjectInspectorUtils.copyToStandardObject(resultO, resultOI, ObjectInspectorCopyOption.WRITABLE);
    assertEquals(cola, standardResult);
    System.out.println("ExprNodeColumnEvaluator ok");
  } catch (Throwable e) {
    e.printStackTrace();
    throw e;
  }
}
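The same desc/evaluator flow can be shown self-contained, outside the test fixture. The following is a minimal sketch; the class name, row shape, and column name ("name") are assumptions for illustration, not part of the test above:

import java.util.Arrays;
import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class ColumnEvalSketch {
  public static void main(String[] args) throws Exception {
    // Row type struct<name:string>, backed by a plain Java List.
    StructObjectInspector rowOI = ObjectInspectorFactory.getStandardStructObjectInspector(
        Arrays.asList("name"),
        Arrays.<ObjectInspector>asList(PrimitiveObjectInspectorFactory.javaStringObjectInspector));
    Object row = Arrays.<Object>asList("hello");
    // Column reference: string column "name", empty table alias, not a partition/virtual column.
    ExprNodeColumnDesc col = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "name", "", false);
    ExprNodeEvaluator eval = ExprNodeEvaluatorFactory.get(col);
    ObjectInspector resultOI = eval.initialize(rowOI); // bind the evaluator to the row shape
    System.out.println(eval.evaluate(row)); // prints: hello
  }
}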
Use of org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc in project hive by apache.
In class TestVectorExpressionWriters, method getWriter:
private VectorExpressionWriter getWriter(TypeInfo colTypeInfo) throws HiveException {
  // A bare column desc carrying only the type: the writer factory keys off the TypeInfo.
  ExprNodeDesc columnDesc = new ExprNodeColumnDesc();
  columnDesc.setTypeInfo(colTypeInfo);
  VectorExpressionWriter vew = VectorExpressionWriterFactory.genVectorExpressionWritable(columnDesc);
  return vew;
}
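For illustration, a hypothetical call site (assuming the helper above and Hive's TypeInfoFactory are in scope, with HiveException handled by the caller) might look like:

// Obtain a writer for a bigint column and convert a primitive long
// into its writable form.
VectorExpressionWriter writer = getWriter(TypeInfoFactory.longTypeInfo);
Object writable = writer.writeValue(42L);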
Use of org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc in project hive by apache.
In class TestParquetRowGroupFilter, method testRowGroupFilterTakeEffect:
@Test
public void testRowGroupFilterTakeEffect() throws Exception {
  // define schema
  columnNames = "intCol";
  columnTypes = "int";
  StructObjectInspector inspector = getObjectInspector(columnNames, columnTypes);
  MessageType fileSchema = MessageTypeParser.parseMessageType(
      "message hive_schema {\n" + " optional int32 intCol;\n" + "}\n");
  conf.set(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, "intCol");
  conf.set("columns", "intCol");
  conf.set("columns.types", "int");
  // create a Parquet file with 100 rows, intCol = 0..99
  Path testPath = writeDirect("RowGroupFilterTakeEffect", fileSchema, new DirectWriter() {
    @Override
    public void write(RecordConsumer consumer) {
      for (int i = 0; i < 100; i++) {
        consumer.startMessage();
        consumer.startField("int", 0);
        consumer.addInteger(i);
        consumer.endField("int", 0);
        consumer.endMessage();
      }
    }
  });
  // intCol > 50: some rows match, so the single row group survives the filter
  GenericUDF udf = new GenericUDFOPGreaterThan();
  List<ExprNodeDesc> children = Lists.newArrayList();
  ExprNodeColumnDesc columnDesc = new ExprNodeColumnDesc(Integer.class, "intCol", "T", false);
  ExprNodeConstantDesc constantDesc = new ExprNodeConstantDesc(50);
  children.add(columnDesc);
  children.add(constantDesc);
  ExprNodeGenericFuncDesc genericFuncDesc = new ExprNodeGenericFuncDesc(inspector, udf, children);
  String searchArgumentStr = SerializationUtilities.serializeExpression(genericFuncDesc);
  conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, searchArgumentStr);
  ParquetRecordReaderWrapper recordReader = (ParquetRecordReaderWrapper)
      new MapredParquetInputFormat().getRecordReader(
          new FileSplit(testPath, 0, fileLength(testPath), (String[]) null), conf, null);
  Assert.assertEquals("row group is not filtered correctly", 1, recordReader.getFiltedBlocks().size());
  // intCol > 100: no rows can match, so the row group is pruned entirely
  constantDesc = new ExprNodeConstantDesc(100);
  children.set(1, constantDesc);
  genericFuncDesc = new ExprNodeGenericFuncDesc(inspector, udf, children);
  searchArgumentStr = SerializationUtilities.serializeExpression(genericFuncDesc);
  conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, searchArgumentStr);
  recordReader = (ParquetRecordReaderWrapper)
      new MapredParquetInputFormat().getRecordReader(
          new FileSplit(testPath, 0, fileLength(testPath), (String[]) null), conf, null);
  Assert.assertEquals("row group is not filtered correctly", 0, recordReader.getFiltedBlocks().size());
}
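The predicate reaches the record reader as a serialized string in the job conf; a quick round trip (a sketch reusing the names from the test above) shows the expression survives serialization intact:

// Serialize and immediately deserialize the predicate; both helpers live in
// org.apache.hadoop.hive.ql.exec.SerializationUtilities.
String serialized = SerializationUtilities.serializeExpression(genericFuncDesc);
ExprNodeGenericFuncDesc roundTripped = SerializationUtilities.deserializeExpression(serialized);
System.out.println(roundTripped.getExprString()); // e.g. (intCol > 100)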
Use of org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc in project hive by apache.
In class TestColumnPrunerProcCtx, method testGetSelectNestedColPathsFromChildren6:
// Test: select abs(root.col1.b) from table test(root struct<col1:struct<a:boolean,b:double>,
// col2:double>);
@Test
public void testGetSelectNestedColPathsFromChildren6() {
  ColumnPrunerProcCtx ctx = new ColumnPrunerProcCtx(null);
  ExprNodeDesc colDesc = new ExprNodeColumnDesc(col3Type, "root", "test", false);
  ExprNodeDesc col1 = new ExprNodeFieldDesc(col1Type, colDesc, "col1", false);
  ExprNodeDesc fieldDesc = new ExprNodeFieldDesc(TypeInfoFactory.doubleTypeInfo, col1, "b", false);
  final List<FieldNode> paths = Arrays.asList(new FieldNode("_col0"));
  GenericUDF udf = mock(GenericUDFBridge.class);
  List<ExprNodeDesc> list = new ArrayList<>();
  list.add(fieldDesc);
  ExprNodeDesc funcDesc = new ExprNodeGenericFuncDesc(TypeInfoFactory.binaryTypeInfo, udf, "abs", list);
  SelectOperator selectOperator = buildSelectOperator(Arrays.asList(funcDesc), paths);
  List<FieldNode> groups = ctx.getSelectColsFromChildren(selectOperator, paths);
  compareTestResults(groups, "root.col1.b");
}
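Because the test mocks GenericUDFBridge, the function's return type has to be supplied by hand in the ExprNodeGenericFuncDesc constructor. With a real UDF, the static factory ExprNodeGenericFuncDesc.newInstance can derive the type instead; a sketch (using GenericUDFAbs and java.util.Collections as assumed stand-ins, not what the test does):

// newInstance infers the return type by initializing the UDF against its
// children, so no explicit TypeInfo is needed; it throws UDFArgumentException
// if the child types don't fit the UDF.
ExprNodeGenericFuncDesc absCall = ExprNodeGenericFuncDesc.newInstance(
    new GenericUDFAbs(), Collections.<ExprNodeDesc>singletonList(fieldDesc));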
Use of org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc in project hive by apache.
In class TestColumnPrunerProcCtx, method testGetSelectNestedColPathsFromChildren2:
// Test: select root.col1 from root:struct<col1:struct<a:boolean,b:double>,col2:double>
@Test
public void testGetSelectNestedColPathsFromChildren2() {
  ColumnPrunerProcCtx ctx = new ColumnPrunerProcCtx(null);
  ExprNodeDesc colDesc = new ExprNodeColumnDesc(col3Type, "root", "test", false);
  ExprNodeDesc fieldDesc = new ExprNodeFieldDesc(col1Type, colDesc, "col1", false);
  final List<FieldNode> paths = Arrays.asList(new FieldNode("_col0"));
  SelectOperator selectOperator = buildSelectOperator(Arrays.asList(fieldDesc), paths);
  List<FieldNode> groups = ctx.getSelectColsFromChildren(selectOperator, paths);
  compareTestResults(groups, "root.col1");
}
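The fixtures col1Type and col3Type are defined elsewhere in the test class; judging from the test comments, equivalent TypeInfos could be built from type strings like this (a sketch, the names mirror the fixture):

// struct<a:boolean,b:double> and the enclosing root struct, built via
// org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.
TypeInfo col1Type = TypeInfoUtils.getTypeInfoFromTypeString("struct<a:boolean,b:double>");
TypeInfo col3Type = TypeInfoUtils.getTypeInfoFromTypeString(
    "struct<col1:struct<a:boolean,b:double>,col2:double>");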