Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
The class TestVectorBetweenIn, method doVectorBetweenInTest.
private boolean doVectorBetweenInTest(
    TypeInfo typeInfo,
    BetweenInVariation betweenInVariation,
    List<Object> compareList,
    List<String> columns,
    String[] columnNames,
    TypeInfo[] typeInfos,
    DataTypePhysicalVariation[] dataTypePhysicalVariations,
    List<ExprNodeDesc> children,
    GenericUDF udf,
    ExprNodeGenericFuncDesc exprDesc,
    BetweenInTestMode betweenInTestMode,
    VectorRandomBatchSource batchSource,
    ObjectInspector objectInspector,
    TypeInfo outputTypeInfo,
    Object[] resultObjects) throws Exception {

  HiveConf hiveConf = new HiveConf();
  if (betweenInTestMode == BetweenInTestMode.ADAPTOR) {
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_VECTOR_ADAPTOR_OVERRIDE, true);
  }
  final boolean isFilter = betweenInVariation.isFilter;

  VectorizationContext vectorizationContext =
      new VectorizationContext(
          "name",
          columns,
          Arrays.asList(typeInfos),
          Arrays.asList(dataTypePhysicalVariations),
          hiveConf);
  VectorExpression vectorExpression =
      vectorizationContext.getVectorExpression(
          exprDesc,
          (isFilter ?
              VectorExpressionDescriptor.Mode.FILTER :
              VectorExpressionDescriptor.Mode.PROJECTION));
  vectorExpression.transientInit(hiveConf);

  if (betweenInTestMode == BetweenInTestMode.VECTOR_EXPRESSION) {
    String vecExprString = vectorExpression.toString();
    if (vectorExpression instanceof VectorUDFAdaptor) {
      System.out.println(
          "*NO NATIVE VECTOR EXPRESSION* typeInfo " + typeInfo.toString() +
          " betweenInTestMode " + betweenInTestMode +
          " betweenInVariation " + betweenInVariation +
          " vectorExpression " + vecExprString);
    } else if (dataTypePhysicalVariations[0] == DataTypePhysicalVariation.DECIMAL_64) {
      final String nameToCheck = vectorExpression.getClass().getSimpleName();
      if (!nameToCheck.contains("Decimal64")) {
        System.out.println(
            "*EXPECTED DECIMAL_64 VECTOR EXPRESSION* typeInfo " + typeInfo.toString() +
            " betweenInTestMode " + betweenInTestMode +
            " betweenInVariation " + betweenInVariation +
            " vectorExpression " + vecExprString);
      }
    }
  }

  // System.out.println("*VECTOR EXPRESSION* " + vectorExpression.getClass().getSimpleName());
  /*
  System.out.println(
      "*DEBUG* typeInfo " + typeInfo.toString() +
      " betweenInTestMode " + betweenInTestMode +
      " betweenInVariation " + betweenInVariation +
      " vectorExpression " + vectorExpression.toString());
  */

  VectorRandomRowSource rowSource = batchSource.getRowSource();
  VectorizedRowBatchCtx batchContext =
      new VectorizedRowBatchCtx(
          columnNames,
          rowSource.typeInfos(),
          rowSource.dataTypePhysicalVariations(),
          /* dataColumnNums */ null,
          /* partitionColumnCount */ 0,
          /* virtualColumnCount */ 0,
          /* neededVirtualColumns */ null,
          vectorizationContext.getScratchColumnTypeNames(),
          vectorizationContext.getScratchDataTypePhysicalVariations());
  VectorizedRowBatch batch = batchContext.createVectorizedRowBatch();

  VectorExtractRow resultVectorExtractRow = null;
  Object[] scratchRow = null;
  if (!isFilter) {
    resultVectorExtractRow = new VectorExtractRow();
    final int outputColumnNum = vectorExpression.getOutputColumnNum();
    resultVectorExtractRow.init(
        new TypeInfo[] { outputTypeInfo }, new int[] { outputColumnNum });
    scratchRow = new Object[1];
  }

  // A filter rewrites batch.selected and batch.size in place, so save the
  // pre-evaluation selection in order to reconstruct per-row boolean results.
  boolean copySelectedInUse = false;
  int[] copySelected = new int[VectorizedRowBatch.DEFAULT_SIZE];

  batchSource.resetBatchIteration();
  int rowIndex = 0;
  while (true) {
    if (!batchSource.fillNextBatch(batch)) {
      break;
    }
    final int originalBatchSize = batch.size;
    if (isFilter) {
      copySelectedInUse = batch.selectedInUse;
      if (batch.selectedInUse) {
        System.arraycopy(batch.selected, 0, copySelected, 0, originalBatchSize);
      }
    }

    // In filter mode, the batch size can be made smaller.
    vectorExpression.evaluate(batch);

    if (!isFilter) {
      extractResultObjects(
          batch, rowIndex, resultVectorExtractRow, scratchRow,
          objectInspector, resultObjects);
    } else {
      final int currentBatchSize = batch.size;
      if (copySelectedInUse && batch.selectedInUse) {
        // The batch used a selection vector both before and after evaluation:
        // a row survived if its original index is still in batch.selected.
        int selectIndex = 0;
        for (int i = 0; i < originalBatchSize; i++) {
          final int originalBatchIndex = copySelected[i];
          final boolean booleanResult;
          if (selectIndex < currentBatchSize &&
              batch.selected[selectIndex] == originalBatchIndex) {
            booleanResult = true;
            selectIndex++;
          } else {
            booleanResult = false;
          }
          resultObjects[rowIndex + i] = new BooleanWritable(booleanResult);
        }
      } else if (batch.selectedInUse) {
        // The filter introduced a selection vector over a previously dense batch.
        int selectIndex = 0;
        for (int i = 0; i < originalBatchSize; i++) {
          final boolean booleanResult;
          if (selectIndex < currentBatchSize && batch.selected[selectIndex] == i) {
            booleanResult = true;
            selectIndex++;
          } else {
            booleanResult = false;
          }
          resultObjects[rowIndex + i] = new BooleanWritable(booleanResult);
        }
      } else if (currentBatchSize == 0) {
        // Whole batch got zapped.
        for (int i = 0; i < originalBatchSize; i++) {
          resultObjects[rowIndex + i] = new BooleanWritable(false);
        }
      } else {
        // Every row kept.
        for (int i = 0; i < originalBatchSize; i++) {
          resultObjects[rowIndex + i] = new BooleanWritable(true);
        }
      }
    }
    rowIndex += originalBatchSize;
  }
  return true;
}
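The filter branch above leans on VectorizedRowBatch's selection-vector convention: a filter never moves column data, it only rewrites batch.selected and batch.size. A minimal sketch of that convention, independent of the test harness (the even-value filter and the class name here are illustrative, not part of the Hive test):

// Illustrative sketch (not from the Hive test above): how a vectorized
// filter communicates surviving rows through batch.selected.
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;

public class SelectionVectorSketch {
  public static void main(String[] args) {
    VectorizedRowBatch batch = new VectorizedRowBatch(1, 8);
    LongColumnVector col = new LongColumnVector(8);
    batch.cols[0] = col;
    batch.size = 8;
    for (int i = 0; i < 8; i++) {
      col.vector[i] = i;
    }
    // Hand-rolled "col % 2 == 0" filter: compact the surviving row indices
    // into batch.selected and shrink batch.size; the column data itself is
    // untouched. This is exactly the state doVectorBetweenInTest decodes by
    // comparing batch.selected against its saved copySelected array.
    int newSize = 0;
    for (int i = 0; i < batch.size; i++) {
      if (col.vector[i] % 2 == 0) {
        batch.selected[newSize++] = i;
      }
    }
    batch.size = newSize;        // 4 rows survive
    batch.selectedInUse = true;  // selected[0..3] == {0, 2, 4, 6}
  }
}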
Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
The class TestMurmurHashExpression, method testMurmurHashIntColIntCol.
@Test
public void testMurmurHashIntColIntCol() throws HiveException {
  LongColumnVector cvInt1 = (LongColumnVector) ColumnVectorGenUtil.generateColumnVector(
      TypeInfoFactory.getPrimitiveTypeInfo("int"), false, false, SIZE, rand);
  LongColumnVector cvInt2 = (LongColumnVector) ColumnVectorGenUtil.generateColumnVector(
      TypeInfoFactory.getPrimitiveTypeInfo("int"), false, false, SIZE, rand);

  VectorizedRowBatch vrb = new VectorizedRowBatch(3, SIZE);
  vrb.cols[0] = cvInt1;
  vrb.cols[1] = cvInt2;
  vrb.cols[2] = new LongColumnVector(SIZE);

  new MurmurHashIntColIntCol(0, 1, 2).evaluate(vrb);

  // Neither input was repeating, so the output is not repeating.
  Assert.assertEquals(false, vrb.cols[2].isRepeating);
  for (int i = 0; i < SIZE; i++) {
    Assert.assertEquals(
        ObjectInspectorUtils.getBucketHashCode(
            new Object[] {
                new LongWritable(cvInt1.vector[i]), new LongWritable(cvInt2.vector[i]) },
            new ObjectInspector[] {
                PrimitiveObjectInspectorFactory.writableLongObjectInspector,
                PrimitiveObjectInspectorFactory.writableLongObjectInspector }),
        ((LongColumnVector) vrb.cols[2]).vector[i]);
  }
}
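For readers without ColumnVectorGenUtil on hand, the same wiring can be reproduced with hand-built vectors. A sketch under that assumption (the sizes, values, and method name are illustrative, not part of the Hive test class):

// Illustrative sketch: hand-built inputs instead of ColumnVectorGenUtil.
@Test
public void murmurHashSketch() throws HiveException {
  LongColumnVector a = new LongColumnVector(4);
  LongColumnVector b = new LongColumnVector(4);
  for (int i = 0; i < 4; i++) {
    a.vector[i] = i;
    b.vector[i] = 10 * i;
  }
  VectorizedRowBatch vrb = new VectorizedRowBatch(3, 4);
  vrb.size = 4;  // logical row count, made explicit here
  vrb.cols[0] = a;
  vrb.cols[1] = b;
  vrb.cols[2] = new LongColumnVector(4);
  new MurmurHashIntColIntCol(0, 1, 2).evaluate(vrb);
  // vrb.cols[2] now holds, per row, the same bucket hash that
  // ObjectInspectorUtils.getBucketHashCode computes row by row.
}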
Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
The class TestMurmurHashExpression, method testMurmurHashRepeating.
@Test
public void testMurmurHashRepeating() throws HiveException {
  BytesColumnVector cvString1 = (BytesColumnVector) ColumnVectorGenUtil.generateColumnVector(
      TypeInfoFactory.getPrimitiveTypeInfo("string"), false, true, SIZE, rand);
  BytesColumnVector cvString2 = (BytesColumnVector) ColumnVectorGenUtil.generateColumnVector(
      TypeInfoFactory.getPrimitiveTypeInfo("string"), false, true, SIZE, rand);

  VectorizedRowBatch vrb = new VectorizedRowBatch(3, SIZE);
  vrb.cols[0] = cvString1;
  vrb.cols[1] = cvString2;
  vrb.cols[2] = new LongColumnVector(SIZE);

  new MurmurHashStringColStringCol(0, 1, 2).evaluate(vrb);

  // Both of the inputs were repeating, so the output is repeating too.
  Assert.assertEquals(true, vrb.cols[2].isRepeating);

  Text t1 = new Text();
  t1.set(cvString1.vector[0], cvString1.start[0], cvString1.length[0]);
  Text t2 = new Text();
  t2.set(cvString2.vector[0], cvString2.start[0], cvString2.length[0]);

  // The output's first element is the hash of the first input elements.
  Assert.assertEquals(
      ObjectInspectorUtils.getBucketHashCode(
          new Object[] { t1, t2 },
          new ObjectInspector[] {
              PrimitiveObjectInspectorFactory.writableStringObjectInspector,
              PrimitiveObjectInspectorFactory.writableStringObjectInspector }),
      ((LongColumnVector) vrb.cols[2]).vector[0]);
}
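The isRepeating flag is VectorizedRowBatch's run-length shortcut: when it is set, element 0 stands for every row and the remaining array slots are not meaningful. Building a repeating string vector by hand looks roughly like this (a sketch; the literal value is arbitrary):

// Illustrative sketch: a repeating BytesColumnVector where element 0
// represents all rows of the batch.
BytesColumnVector cv = new BytesColumnVector(4);
cv.initBuffer();
byte[] bytes = "hive".getBytes(java.nio.charset.StandardCharsets.UTF_8);
cv.setVal(0, bytes, 0, bytes.length);
cv.isRepeating = true;  // consumers must read only element 0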
Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
The class TestMurmurHashExpression, method testMurmurHashRepeatingBothNulls.
@Test
public void testMurmurHashRepeatingBothNulls() throws HiveException {
  BytesColumnVector cvString1 = (BytesColumnVector) ColumnVectorGenUtil.generateColumnVector(
      TypeInfoFactory.getPrimitiveTypeInfo("string"), false, true, SIZE, rand);
  BytesColumnVector cvString2 = (BytesColumnVector) ColumnVectorGenUtil.generateColumnVector(
      TypeInfoFactory.getPrimitiveTypeInfo("string"), false, true, SIZE, rand);
  cvString1.isNull[0] = true;
  cvString2.isNull[0] = true;

  VectorizedRowBatch vrb = new VectorizedRowBatch(3, SIZE);
  vrb.cols[0] = cvString1;
  vrb.cols[1] = cvString2;
  vrb.cols[2] = new LongColumnVector(SIZE);

  // Fake output value to test short-circuiting.
  ((LongColumnVector) vrb.cols[2]).vector[1] = 1234;

  new MurmurHashStringColStringCol(0, 1, 2).evaluate(vrb);

  // Both of the inputs were repeating, so the output is repeating too.
  Assert.assertEquals(true, vrb.cols[2].isRepeating);

  // The output's first element is 0, which is the hash of null elements.
  Assert.assertEquals(0, ((LongColumnVector) vrb.cols[2]).vector[0]);

  // When isRepeating is set, the vectorized logic is not supposed to touch
  // any element other than the 0th.
  Assert.assertEquals(1234, ((LongColumnVector) vrb.cols[2]).vector[1]);
}
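One caveat when marking nulls by hand: ColumnVector also carries a noNulls flag, and vectorized expressions generally consult it before reading the isNull array. Code that sets individual null entries conventionally clears it as well (a convention sketch, not part of the test above):

// Illustrative sketch of the null-marking convention on a ColumnVector.
BytesColumnVector cv = new BytesColumnVector(4);
cv.isNull[0] = true;
cv.noNulls = false;     // signal that the isNull array is meaningful
cv.isRepeating = true;  // with isRepeating, a null element 0 nulls every row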
Use of org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch in project hive by apache.
The class TestMurmurHashExpression, method testMurmurHashStringColIntCol.
@Test
public void testMurmurHashStringColIntCol() throws HiveException {
  BytesColumnVector cvString = (BytesColumnVector) ColumnVectorGenUtil.generateColumnVector(
      TypeInfoFactory.getPrimitiveTypeInfo("string"), false, false, SIZE, rand);
  LongColumnVector cvInt = (LongColumnVector) ColumnVectorGenUtil.generateColumnVector(
      TypeInfoFactory.getPrimitiveTypeInfo("int"), false, false, SIZE, rand);

  VectorizedRowBatch vrb = new VectorizedRowBatch(3, SIZE);
  vrb.cols[0] = cvString;
  vrb.cols[1] = cvInt;
  vrb.cols[2] = new LongColumnVector(SIZE);

  new MurmurHashStringColIntCol(0, 1, 2).evaluate(vrb);

  for (int i = 0; i < SIZE; i++) {
    Text t = new Text();
    t.set(cvString.vector[i], cvString.start[i], cvString.length[i]);
    Assert.assertEquals(
        ObjectInspectorUtils.getBucketHashCode(
            new Object[] { t, new LongWritable(cvInt.vector[i]) },
            new ObjectInspector[] {
                PrimitiveObjectInspectorFactory.writableStringObjectInspector,
                PrimitiveObjectInspectorFactory.writableLongObjectInspector }),
        ((LongColumnVector) vrb.cols[2]).vector[i]);
  }
}
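All of these tests use the same oracle: ObjectInspectorUtils.getBucketHashCode, which computes the Murmur-based bucket hash for a single row given parallel arrays of values and their ObjectInspectors. Used standalone it looks like this (the values are illustrative):

// Illustrative standalone use of the oracle from the tests above.
int hash = ObjectInspectorUtils.getBucketHashCode(
    new Object[] { new Text("key"), new LongWritable(7L) },
    new ObjectInspector[] {
        PrimitiveObjectInspectorFactory.writableStringObjectInspector,
        PrimitiveObjectInspectorFactory.writableLongObjectInspector });
// 'hash' matches what MurmurHashStringColIntCol writes for a row
// containing the string "key" and the int 7.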