Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo in the Apache Hive project.
Class TestVectorSubStr, method extractResultObjects:
private void extractResultObjects(VectorizedRowBatch batch, int rowIndex,
    VectorExtractRow resultVectorExtractRow, Object[] scratchRow,
    TypeInfo targetTypeInfo, Object[] resultObjects) {
  ObjectInspector objectInspector =
      TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(targetTypeInfo);
  boolean selectedInUse = batch.selectedInUse;
  int[] selected = batch.selected;
  for (int logicalIndex = 0; logicalIndex < batch.size; logicalIndex++) {
    final int batchIndex = (selectedInUse ? selected[logicalIndex] : logicalIndex);
    try {
      resultVectorExtractRow.extractRow(batch, batchIndex, scratchRow);
    } catch (Exception e) {
      Assert.fail(e.toString());
    }
    Object copyResult = ObjectInspectorUtils.copyToStandardObject(
        scratchRow[0], objectInspector, ObjectInspectorCopyOption.WRITABLE);
    resultObjects[rowIndex++] = copyResult;
  }
}
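The WRITABLE copy option matters here: VectorExtractRow reuses the objects in the scratch row on every iteration, so each extracted value must be deep-copied before it is stored. A minimal standalone sketch of that behavior (not part of the Hive tests; class and variable names are invented):

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.io.Text;

public class WritableCopyDemo {
  public static void main(String[] args) {
    // Writable-backed inspector for the Hive "string" type.
    ObjectInspector oi = TypeInfoUtils
        .getStandardWritableObjectInspectorFromTypeInfo(TypeInfoFactory.stringTypeInfo);
    Text original = new Text("hello");
    // copyToStandardObject with WRITABLE returns a fresh Text, detached
    // from the (reused) source object.
    Object copy = ObjectInspectorUtils.copyToStandardObject(
        original, oi, ObjectInspectorCopyOption.WRITABLE);
    original.set("overwritten");
    System.out.println(copy); // still prints "hello"
  }
}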
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo in the Apache Hive project.
Class TestVectorSubStr, method doRowIfTest:
private void doRowIfTest(TypeInfo typeInfo, TypeInfo targetTypeInfo,
    List<String> columns, List<ExprNodeDesc> children, Object[][] randomRows,
    ObjectInspector rowInspector, GenericUDF genericUdf, Object[] resultObjects)
    throws Exception {
  ExprNodeGenericFuncDesc exprDesc =
      new ExprNodeGenericFuncDesc(typeInfo, genericUdf, children);
  HiveConf hiveConf = new HiveConf();
  ExprNodeEvaluator evaluator = ExprNodeEvaluatorFactory.get(exprDesc, hiveConf);
  evaluator.initialize(rowInspector);
  ObjectInspector objectInspector =
      TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(targetTypeInfo);
  final int rowCount = randomRows.length;
  for (int i = 0; i < rowCount; i++) {
    Object[] row = randomRows[i];
    Object result = evaluator.evaluate(row);
    Object copyResult = ObjectInspectorUtils.copyToStandardObject(
        result, objectInspector, ObjectInspectorCopyOption.WRITABLE);
    resultObjects[i] = copyResult;
  }
}
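The same row-mode pattern can be exercised on its own, outside the test harness. A minimal self-contained sketch (not from the Hive tests; GenericUDFUpper stands in for the UDF under test, and the column name "col0" is invented):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.io.Text;

public class RowModeEvalDemo {
  public static void main(String[] args) throws Exception {
    // upper(col0) over a single string column.
    List<ExprNodeDesc> children = new ArrayList<>();
    children.add(new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "col0", "t", false));
    ExprNodeGenericFuncDesc exprDesc = new ExprNodeGenericFuncDesc(
        TypeInfoFactory.stringTypeInfo, new GenericUDFUpper(), children);

    // Row inspector: a struct with one writable string field.
    ObjectInspector rowInspector = ObjectInspectorFactory.getStandardStructObjectInspector(
        Arrays.asList("col0"),
        Arrays.asList(TypeInfoUtils
            .getStandardWritableObjectInspectorFromTypeInfo(TypeInfoFactory.stringTypeInfo)));

    ExprNodeEvaluator evaluator = ExprNodeEvaluatorFactory.get(exprDesc, new HiveConf());
    evaluator.initialize(rowInspector);
    System.out.println(evaluator.evaluate(new Object[] { new Text("hive") })); // HIVE
  }
}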
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo in the Apache Hive project.
Class TestVectorTimestampExtract, method extractResultObjects:
private void extractResultObjects(VectorizedRowBatch batch, int rowIndex,
    VectorExtractRow resultVectorExtractRow, Object[] scratchRow,
    TypeInfo targetTypeInfo, Object[] resultObjects) {
  ObjectInspector objectInspector =
      TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(targetTypeInfo);
  boolean selectedInUse = batch.selectedInUse;
  int[] selected = batch.selected;
  for (int logicalIndex = 0; logicalIndex < batch.size; logicalIndex++) {
    final int batchIndex = (selectedInUse ? selected[logicalIndex] : logicalIndex);
    resultVectorExtractRow.extractRow(batch, batchIndex, scratchRow);
    Object copyResult = ObjectInspectorUtils.copyToStandardObject(
        scratchRow[0], objectInspector, ObjectInspectorCopyOption.WRITABLE);
    resultObjects[rowIndex++] = copyResult;
  }
}
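The selectedInUse/selected pair in the loop above is how VectorizedRowBatch represents filtered batches: when selectedInUse is true, only the first batch.size entries of selected name live physical rows; otherwise rows 0 through batch.size - 1 are live in order. A small standalone sketch of that mapping (invented values, not from the tests):

import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;

public class SelectedVectorDemo {
  public static void main(String[] args) {
    VectorizedRowBatch batch = new VectorizedRowBatch(1);
    // The batch allocates the cols array but not the vectors themselves.
    LongColumnVector col = new LongColumnVector();
    batch.cols[0] = col;
    for (int i = 0; i < 5; i++) {
      col.vector[i] = i * 10;
    }
    // Simulate a filter that kept physical rows 1 and 3.
    batch.selectedInUse = true;
    batch.selected[0] = 1;
    batch.selected[1] = 3;
    batch.size = 2;
    for (int logicalIndex = 0; logicalIndex < batch.size; logicalIndex++) {
      int batchIndex = batch.selectedInUse ? batch.selected[logicalIndex] : logicalIndex;
      System.out.println(col.vector[batchIndex]); // prints 10, then 30
    }
  }
}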
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo in the Apache Hive project.
Class GenericUDFBaseNwayCompare, method initialize:
@Override
public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
  if (arguments.length < 2) {
    throw new UDFArgumentLengthException(getFuncName()
        + " requires at least 2 arguments, got " + arguments.length);
  }
  if (arguments[0].getCategory() != ObjectInspector.Category.PRIMITIVE) {
    throw new UDFArgumentException(getFuncName()
        + " only takes primitive types, got " + arguments[0].getTypeName());
  }
  argumentOIs = arguments;
  converters = new Converter[arguments.length];
  // Find the common type all arguments can be compared under.
  TypeInfo commonInfo = TypeInfoUtils.getTypeInfoFromObjectInspector(arguments[0]);
  for (int i = 1; i < arguments.length; i++) {
    PrimitiveTypeInfo currInfo =
        (PrimitiveTypeInfo) TypeInfoUtils.getTypeInfoFromObjectInspector(arguments[i]);
    commonInfo = FunctionRegistry.getCommonClassForComparison(commonInfo, currInfo);
  }
  // Fall back to double when no common comparison type exists.
  resultOI = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(
      (commonInfo == null) ? TypeInfoFactory.doubleTypeInfo : commonInfo);
  for (int i = 0; i < arguments.length; i++) {
    converters[i] = ObjectInspectorConverters.getConverter(arguments[i], resultOI);
  }
  return resultOI;
}
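The converters built in the final loop normalize every argument to the common comparison type, so the UDF body can compare values of a single type. A minimal standalone sketch of one such conversion (not from the UDF; it assumes the common type came out as double):

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.io.IntWritable;

public class ConverterDemo {
  public static void main(String[] args) {
    ObjectInspector intOI = TypeInfoUtils
        .getStandardWritableObjectInspectorFromTypeInfo(TypeInfoFactory.intTypeInfo);
    ObjectInspector doubleOI = TypeInfoUtils
        .getStandardWritableObjectInspectorFromTypeInfo(TypeInfoFactory.doubleTypeInfo);
    // Converter from writable int to writable double, as the UDF would
    // build for an int argument when the common type is double.
    Converter toDouble = ObjectInspectorConverters.getConverter(intOI, doubleOI);
    System.out.println(toDouble.convert(new IntWritable(7))); // 7.0
  }
}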
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo in the Apache Hive project.
Class TypedBytesSerDe, method initialize:
@Override
public void initialize(Configuration configuration, Properties tableProperties,
    Properties partitionProperties) throws SerDeException {
  super.initialize(configuration, tableProperties, partitionProperties);
  // We can get the table definition from tbl.
  serializeBytesWritable = new BytesWritable();
  barrStr = new NonSyncDataOutputBuffer();
  tbOut = new TypedBytesWritableOutput(barrStr);
  inBarrStr = new NonSyncDataInputBuffer();
  tbIn = new TypedBytesWritableInput(inBarrStr);
  // Read the configuration parameters.
  numColumns = getColumnNames().size();
  // All columns have to be primitive.
  for (int c = 0; c < numColumns; c++) {
    if (getColumnTypes().get(c).getCategory() != Category.PRIMITIVE) {
      throw new SerDeException(getClass().getName()
          + " only accepts primitive columns, but column[" + c + "] named "
          + getColumnNames().get(c) + " has category "
          + getColumnTypes().get(c).getCategory());
    }
  }
  // Construct the row ObjectInspector: one standard writable inspector per column.
  List<ObjectInspector> columnOIs = new ArrayList<>(getColumnNames().size());
  for (TypeInfo colType : getColumnTypes()) {
    columnOIs.add(TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(colType));
  }
  // StandardStruct uses ArrayList to store the row.
  rowOI = ObjectInspectorFactory.getStandardStructObjectInspector(getColumnNames(), columnOIs);
  // Construct the row object, which will be reused for all rows.
  row = new ArrayList<>(Collections.nCopies(numColumns, null));
}
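The per-column pattern above, mapping each TypeInfo to its standard writable inspector and wrapping the list in a standard struct, is the usual way a SerDe builds its row inspector. A minimal standalone sketch with a hypothetical two-column schema (names and types invented):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class RowInspectorDemo {
  public static void main(String[] args) {
    // In the SerDe these come from the table properties.
    List<String> names = Arrays.asList("id", "name");
    List<TypeInfo> types = TypeInfoUtils.getTypeInfosFromTypeString("int,string");
    List<ObjectInspector> columnOIs = new ArrayList<>(types.size());
    for (TypeInfo colType : types) {
      columnOIs.add(TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(colType));
    }
    StructObjectInspector rowOI =
        ObjectInspectorFactory.getStandardStructObjectInspector(names, columnOIs);
    System.out.println(rowOI.getTypeName()); // struct<id:int,name:string>
  }
}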