Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory.stringTypeInfo in project hive by apache: class TestVectorGenericDateExpressions, method testDateAddScalarCol.
@Test
public void testDateAddScalarCol() throws HiveException {
  for (PrimitiveCategory scalarType1 : dateTimestampStringTypes) {
    testDateAddScalarCol(scalarType1, true);
  }

  // An unparsable string scalar ("error") must yield a null output.
  VectorExpression udf = new VectorUDFDateAddScalarCol("error".getBytes(utf8), 0, 1);
  udf.setInputTypeInfos(new TypeInfo[] { TypeInfoFactory.stringTypeInfo, TypeInfoFactory.timestampTypeInfo });
  udf.transientInit(hiveConf);
  VectorizedRowBatch batch = new VectorizedRowBatch(2, 1);
  batch.cols[0] = new LongColumnVector(1);
  batch.cols[1] = new LongColumnVector(1);
  udf.evaluate(batch);
  Assert.assertTrue(batch.cols[1].isNull[0]);
}
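For orientation, here is a minimal sketch (not part of the Hive test suite; the class name is hypothetical) of what TypeInfoFactory.stringTypeInfo itself is: a cached PrimitiveTypeInfo for Hive's string type, so a lookup by type name hands back the very same instance.

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class StringTypeInfoSketch {
  public static void main(String[] args) {
    TypeInfo t = TypeInfoFactory.stringTypeInfo;
    // getTypeName() returns the Hive SQL name of the type.
    System.out.println(t.getTypeName()); // prints: string
    // The factory caches instances, so a lookup by name should yield the same object.
    System.out.println(t == TypeInfoFactory.getPrimitiveTypeInfo("string")); // expected: true
  }
}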
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory.stringTypeInfo in project hive by apache: class TestVectorGenericDateExpressions, method testDateDiffScalarCol.
@Test
public void testDateDiffScalarCol() throws HiveException {
  for (PrimitiveCategory scalarType1 : dateTimestampStringTypes) {
    for (PrimitiveCategory colType2 : dateTimestampStringTypes) {
      LongColumnVector date2 = newRandomLongColumnVector(10000, size);
      LongColumnVector output = new LongColumnVector(size);
      ColumnVector col2 = castTo(date2, colType2);
      VectorizedRowBatch batch = new VectorizedRowBatch(2, size);
      batch.cols[0] = col2;
      batch.cols[1] = output;
      long scalar1 = newRandom(1000);
      validateDateDiff(batch, scalar1, scalarType1, colType2, date2);
      TestVectorizedRowBatch.addRandomNulls(date2);
      batch.cols[0] = castTo(date2, colType2);
      validateDateDiff(batch, scalar1, scalarType1, colType2, date2);
    }
  }
  VectorExpression udf;
  byte[] bytes = "error".getBytes(utf8);
  VectorizedRowBatch batch = new VectorizedRowBatch(2, 1);

  // An unparsable string column ("error") must yield a null output.
  udf = new VectorUDFDateDiffScalarCol(new Timestamp(0), 0, 1);
  udf.setInputTypeInfos(new TypeInfo[] { TypeInfoFactory.timestampTypeInfo, TypeInfoFactory.stringTypeInfo });
  udf.transientInit(hiveConf);
  batch.cols[0] = new BytesColumnVector(1);
  batch.cols[1] = new LongColumnVector(1);
  BytesColumnVector bcv = (BytesColumnVector) batch.cols[0];
  bcv.vector[0] = bytes;
  bcv.start[0] = 0;
  bcv.length[0] = bytes.length;
  udf.evaluate(batch);
  Assert.assertTrue(batch.cols[1].isNull[0]);

  // An unparsable string scalar must likewise yield a null output.
  udf = new VectorUDFDateDiffScalarCol(bytes, 0, 1);
  udf.setInputTypeInfos(new TypeInfo[] { TypeInfoFactory.stringTypeInfo, TypeInfoFactory.timestampTypeInfo });
  udf.transientInit(hiveConf);
  batch.cols[0] = new LongColumnVector(1);
  batch.cols[1] = new LongColumnVector(1);
  udf.evaluate(batch);
  Assert.assertTrue(batch.cols[1].isNull[0]);
}
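The bcv.vector/start/length assignments above point a BytesColumnVector entry at a caller-owned buffer by hand. Below is a small sketch of the same population done through the vector's own API, where setVal copies the bytes into an internal buffer and setRef would share the caller's array instead; the class name is hypothetical.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;

public class BytesColumnVectorSketch {
  public static void main(String[] args) {
    byte[] bytes = "error".getBytes(StandardCharsets.UTF_8);
    BytesColumnVector bcv = new BytesColumnVector(1);
    bcv.initBuffer();                      // allocate the shared byte buffer before copying
    bcv.setVal(0, bytes, 0, bytes.length); // copies the bytes; setRef(...) would share the array
  }
}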
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory.stringTypeInfo in project hive by apache: class TestVectorGenericDateExpressions, method testDateDiffColScalar.
@Test
public void testDateDiffColScalar() throws HiveException {
  for (PrimitiveCategory colType1 : dateTimestampStringTypes) {
    for (PrimitiveCategory scalarType2 : dateTimestampStringTypes) {
      LongColumnVector date1 = newRandomLongColumnVector(10000, size);
      LongColumnVector output = new LongColumnVector(size);
      VectorizedRowBatch batch = new VectorizedRowBatch(2, size);
      batch.cols[0] = castTo(date1, colType1);
      batch.cols[1] = output;
      long scalar2 = newRandom(1000);
      validateDateDiff(batch, date1, scalar2, colType1, scalarType2);
      TestVectorizedRowBatch.addRandomNulls(date1);
      batch.cols[0] = castTo(date1, colType1);
      validateDateDiff(batch, date1, scalar2, colType1, scalarType2);
    }
  }
  VectorExpression udf;
  byte[] bytes = "error".getBytes(utf8);
  VectorizedRowBatch batch = new VectorizedRowBatch(2, 1);

  // An unparsable string column ("error") must yield a null output.
  udf = new VectorUDFDateDiffColScalar(0, 0L, 1);
  udf.setInputTypeInfos(new TypeInfo[] { TypeInfoFactory.timestampTypeInfo, TypeInfoFactory.stringTypeInfo });
  udf.transientInit(hiveConf); // initialize transient state before evaluate(), as in the other cases
  batch.cols[0] = new BytesColumnVector(1);
  batch.cols[1] = new LongColumnVector(1);
  BytesColumnVector bcv = (BytesColumnVector) batch.cols[0];
  bcv.vector[0] = bytes;
  bcv.start[0] = 0;
  bcv.length[0] = bytes.length;
  udf.evaluate(batch);
  Assert.assertTrue(batch.cols[1].isNull[0]);

  // An unparsable string scalar must likewise yield a null output.
  udf = new VectorUDFDateDiffColScalar(0, bytes, 1);
  udf.setInputTypeInfos(new TypeInfo[] { TypeInfoFactory.timestampTypeInfo, TypeInfoFactory.stringTypeInfo });
  udf.transientInit(hiveConf);
  batch.cols[0] = new LongColumnVector(1);
  batch.cols[1] = new LongColumnVector(1);
  udf.evaluate(batch);
  Assert.assertTrue(batch.cols[1].isNull[0]);
}
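These diff tests shuttle date values around as longs because vectorized Hive represents DATE columns as days since the Unix epoch inside a LongColumnVector. A minimal sketch of that representation (the class name is hypothetical):

import java.time.LocalDate;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

public class DateVectorSketch {
  public static void main(String[] args) {
    LongColumnVector dates = new LongColumnVector(1);
    // DATE is stored as epoch days.
    dates.vector[0] = LocalDate.of(2020, 1, 1).toEpochDay();
    System.out.println(dates.vector[0]); // prints: 18262
  }
}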
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory.stringTypeInfo in project hive by apache: class TestKeyWrapperFactory, method setup.
@Before
public void setup() throws Exception {
  SessionState ss = new SessionState(new HiveConf());
  SessionState.setCurrentSessionState(ss);
  ArrayList<Text> col1 = new ArrayList<Text>();
  col1.add(new Text("0"));
  col1.add(new Text("1"));
  col1.add(new Text("2"));
  col1.add(new Text("3"));
  TypeInfo col1Type = TypeInfoFactory.getListTypeInfo(TypeInfoFactory.stringTypeInfo);
  ArrayList<Text> cola = new ArrayList<Text>();
  cola.add(new Text("a"));
  cola.add(new Text("b"));
  cola.add(new Text("c"));
  TypeInfo colaType = TypeInfoFactory.getListTypeInfo(TypeInfoFactory.stringTypeInfo);
  try {
    ArrayList<Object> data = new ArrayList<Object>();
    data.add(col1);
    data.add(cola);
    ArrayList<String> names = new ArrayList<String>();
    names.add("col1");
    names.add("cola");
    ArrayList<TypeInfo> typeInfos = new ArrayList<TypeInfo>();
    typeInfos.add(col1Type);
    typeInfos.add(colaType);
    TypeInfo dataType = TypeInfoFactory.getStructTypeInfo(names, typeInfos);
    InspectableObject r = new InspectableObject();
    ObjectInspector[] oi = new ObjectInspector[1];
    r.o = data;
    oi[0] = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(dataType);
    try {
      // get an evaluator for a simple field expression
      ExprNodeDesc exprDesc = new ExprNodeColumnDesc(colaType, "cola", "", false);
      ExprNodeEvaluator eval = ExprNodeEvaluatorFactory.get(exprDesc);
      ExprNodeEvaluator[] evals = new ExprNodeEvaluator[1];
      evals[0] = eval;
      ObjectInspector resultOI = eval.initialize(oi[0]);
      ObjectInspector[] resultOIs = new ObjectInspector[1];
      resultOIs[0] = resultOI;
      factory = new KeyWrapperFactory(evals, oi, resultOIs);
    } catch (Throwable e) {
      e.printStackTrace();
      throw e;
    }
  } catch (Throwable e) {
    e.printStackTrace();
    throw new RuntimeException(e);
  }
}
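As an aside, the nested TypeInfo values built in this setup correspond one-to-one to Hive type strings. A short sketch of that correspondence, assuming only the serde2 typeinfo classes (the class name is hypothetical):

import java.util.Arrays;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class TypeInfoCompositionSketch {
  public static void main(String[] args) {
    TypeInfo list = TypeInfoFactory.getListTypeInfo(TypeInfoFactory.stringTypeInfo);
    System.out.println(list.getTypeName()); // prints: array<string>
    TypeInfo struct = TypeInfoFactory.getStructTypeInfo(
        Arrays.asList("col1", "cola"), Arrays.asList(list, list));
    System.out.println(struct.getTypeName()); // prints: struct<col1:array<string>,cola:array<string>>
  }
}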
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory.stringTypeInfo in project hive by apache: class TestMapJoinOperator, method doTestMultiKey1.
public boolean doTestMultiKey1(long seed, int hiveConfVariation, VectorMapJoinVariation vectorMapJoinVariation, MapJoinPlanVariation mapJoinPlanVariation) throws Exception {
  int rowCount = 10;
  HiveConf hiveConf = new HiveConf();
  if (!addNonLongHiveConfVariation(hiveConfVariation, hiveConf)) {
    return true;
  }
  TypeInfo[] bigTableTypeInfos = null;
  int[] bigTableKeyColumnNums = null;
  TypeInfo[] smallTableValueTypeInfos = null;
  int[] smallTableRetainKeyColumnNums = null;
  SmallTableGenerationParameters smallTableGenerationParameters = new SmallTableGenerationParameters();
  MapJoinTestDescription testDesc = null;
  MapJoinTestData testData = null;
  // Three key columns.
  bigTableTypeInfos = new TypeInfo[] { TypeInfoFactory.timestampTypeInfo, TypeInfoFactory.shortTypeInfo, TypeInfoFactory.stringTypeInfo };
  bigTableKeyColumnNums = new int[] { 0, 1, 2 };
  smallTableRetainKeyColumnNums = new int[] { 0, 1, 2 };
  smallTableValueTypeInfos = new TypeInfo[] { new DecimalTypeInfo(38, 18) };
  // ----------------------------------------------------------------------------------------------
  testDesc = new MapJoinTestDescription(hiveConf, vectorMapJoinVariation, bigTableTypeInfos, bigTableKeyColumnNums, smallTableValueTypeInfos, smallTableRetainKeyColumnNums, smallTableGenerationParameters, mapJoinPlanVariation);
  if (!goodTestVariation(testDesc)) {
    return false;
  }
  // Prepare data. Good for ANY implementation variation.
  testData = new MapJoinTestData(rowCount, testDesc, seed);
  executeTest(testDesc, testData, "testMultiKey1");
  return false;
}
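The TypeInfo values used throughout these tests can also be obtained by parsing Hive type strings rather than composing factory calls, which is convenient for parameterized types like decimal(38,18). A sketch, assuming TypeInfoUtils from the same serde2 package (the class name is hypothetical):

import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class TypeStringSketch {
  public static void main(String[] args) {
    TypeInfo s = TypeInfoUtils.getTypeInfoFromTypeString("string");
    // Given the factory's caching, this should be the same cached instance.
    System.out.println(s == TypeInfoFactory.stringTypeInfo); // expected: true
    TypeInfo dec = TypeInfoUtils.getTypeInfoFromTypeString("decimal(38,18)");
    System.out.println(dec.getTypeName()); // prints: decimal(38,18)
  }
}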