Use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category in project hive by apache.
The class Vectorizer, method canSpecializeMapJoin.
private boolean canSpecializeMapJoin(Operator<? extends OperatorDesc> op, MapJoinDesc desc, boolean isTezOrSpark, VectorizationContext vContext, VectorMapJoinInfo vectorMapJoinInfo) throws HiveException {
Preconditions.checkState(op instanceof MapJoinOperator);
// Allocate a VectorMapJoinDesc initially with implementation type NONE so EXPLAIN
// can report this operator was vectorized, but not native, and record the conditions checked.
VectorMapJoinDesc vectorDesc = new VectorMapJoinDesc();
desc.setVectorDesc(vectorDesc);
boolean isVectorizationMapJoinNativeEnabled = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED);
String engine = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE);
boolean oneMapJoinCondition = (desc.getConds().length == 1);
boolean hasNullSafes = onExpressionHasNullSafes(desc);
byte posBigTable = (byte) desc.getPosBigTable();
// Since we want to display all the met and unmet conditions in EXPLAIN, we determine all
// the information first...
List<ExprNodeDesc> keyDesc = desc.getKeys().get(posBigTable);
VectorExpression[] allBigTableKeyExpressions = vContext.getVectorExpressions(keyDesc);
final int allBigTableKeyExpressionsLength = allBigTableKeyExpressions.length;
// Assume supported until a key type fails the check below.
boolean supportsKeyTypes = true;
HashSet<String> notSupportedKeyTypes = new HashSet<String>();
// Since a key expression can be a calculation and the key will go into a scratch column,
// we need the mapping and type information.
int[] bigTableKeyColumnMap = new int[allBigTableKeyExpressionsLength];
String[] bigTableKeyColumnNames = new String[allBigTableKeyExpressionsLength];
TypeInfo[] bigTableKeyTypeInfos = new TypeInfo[allBigTableKeyExpressionsLength];
ArrayList<VectorExpression> bigTableKeyExpressionsList = new ArrayList<VectorExpression>();
VectorExpression[] bigTableKeyExpressions;
for (int i = 0; i < allBigTableKeyExpressionsLength; i++) {
VectorExpression ve = allBigTableKeyExpressions[i];
if (!IdentityExpression.isColumnOnly(ve)) {
bigTableKeyExpressionsList.add(ve);
}
bigTableKeyColumnMap[i] = ve.getOutputColumn();
ExprNodeDesc exprNode = keyDesc.get(i);
bigTableKeyColumnNames[i] = exprNode.toString();
TypeInfo typeInfo = exprNode.getTypeInfo();
// same check used in HashTableLoader.
if (!MapJoinKey.isSupportedField(typeInfo)) {
supportsKeyTypes = false;
Category category = typeInfo.getCategory();
notSupportedKeyTypes.add((category != Category.PRIMITIVE ? category.toString() : ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory().toString()));
}
bigTableKeyTypeInfos[i] = typeInfo;
}
if (bigTableKeyExpressionsList.size() == 0) {
bigTableKeyExpressions = null;
} else {
bigTableKeyExpressions = bigTableKeyExpressionsList.toArray(new VectorExpression[0]);
}
List<ExprNodeDesc> bigTableExprs = desc.getExprs().get(posBigTable);
VectorExpression[] allBigTableValueExpressions = vContext.getVectorExpressions(bigTableExprs);
boolean isFastHashTableEnabled = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED);
// Capture this now, especially since LLAP is prone to turn it off in the MapJoinDesc in later
// physical optimizer stages...
boolean isHybridHashJoin = desc.isHybridHashJoin();
/*
* Populate vectorMapJoinInfo.
*/
/*
* Similarly, we need a mapping since a value expression can be a calculation and the value
* will go into a scratch column.
*/
int[] bigTableValueColumnMap = new int[allBigTableValueExpressions.length];
String[] bigTableValueColumnNames = new String[allBigTableValueExpressions.length];
TypeInfo[] bigTableValueTypeInfos = new TypeInfo[allBigTableValueExpressions.length];
ArrayList<VectorExpression> bigTableValueExpressionsList = new ArrayList<VectorExpression>();
VectorExpression[] bigTableValueExpressions;
for (int i = 0; i < bigTableValueColumnMap.length; i++) {
VectorExpression ve = allBigTableValueExpressions[i];
if (!IdentityExpression.isColumnOnly(ve)) {
bigTableValueExpressionsList.add(ve);
}
bigTableValueColumnMap[i] = ve.getOutputColumn();
ExprNodeDesc exprNode = bigTableExprs.get(i);
bigTableValueColumnNames[i] = exprNode.toString();
bigTableValueTypeInfos[i] = exprNode.getTypeInfo();
}
if (bigTableValueExpressionsList.size() == 0) {
bigTableValueExpressions = null;
} else {
bigTableValueExpressions = bigTableValueExpressionsList.toArray(new VectorExpression[0]);
}
vectorMapJoinInfo.setBigTableKeyColumnMap(bigTableKeyColumnMap);
vectorMapJoinInfo.setBigTableKeyColumnNames(bigTableKeyColumnNames);
vectorMapJoinInfo.setBigTableKeyTypeInfos(bigTableKeyTypeInfos);
vectorMapJoinInfo.setBigTableKeyExpressions(bigTableKeyExpressions);
vectorMapJoinInfo.setBigTableValueColumnMap(bigTableValueColumnMap);
vectorMapJoinInfo.setBigTableValueColumnNames(bigTableValueColumnNames);
vectorMapJoinInfo.setBigTableValueTypeInfos(bigTableValueTypeInfos);
vectorMapJoinInfo.setBigTableValueExpressions(bigTableValueExpressions);
/*
* Small table information.
*/
VectorColumnOutputMapping bigTableRetainedMapping = new VectorColumnOutputMapping("Big Table Retained Mapping");
VectorColumnOutputMapping bigTableOuterKeyMapping = new VectorColumnOutputMapping("Big Table Outer Key Mapping");
// The order of the fields in the LazyBinary small table value must be used, so
// we use the source ordering flavor for the mapping.
VectorColumnSourceMapping smallTableMapping = new VectorColumnSourceMapping("Small Table Mapping");
Byte[] order = desc.getTagOrder();
Byte posSingleVectorMapJoinSmallTable = (order[0] == posBigTable ? order[1] : order[0]);
boolean isOuterJoin = !desc.getNoOuterJoin();
/*
* Gather up big and small table output result information from the MapJoinDesc.
*/
List<Integer> bigTableRetainList = desc.getRetainList().get(posBigTable);
int bigTableRetainSize = bigTableRetainList.size();
int[] smallTableIndices;
int smallTableIndicesSize;
List<ExprNodeDesc> smallTableExprs = desc.getExprs().get(posSingleVectorMapJoinSmallTable);
if (desc.getValueIndices() != null && desc.getValueIndices().get(posSingleVectorMapJoinSmallTable) != null) {
smallTableIndices = desc.getValueIndices().get(posSingleVectorMapJoinSmallTable);
smallTableIndicesSize = smallTableIndices.length;
} else {
smallTableIndices = null;
smallTableIndicesSize = 0;
}
List<Integer> smallTableRetainList = desc.getRetainList().get(posSingleVectorMapJoinSmallTable);
int smallTableRetainSize = smallTableRetainList.size();
int smallTableResultSize = 0;
if (smallTableIndicesSize > 0) {
smallTableResultSize = smallTableIndicesSize;
} else if (smallTableRetainSize > 0) {
smallTableResultSize = smallTableRetainSize;
}
/*
* Determine the big table retained mapping first so we can optimize out (with
* projection) copying inner join big table keys in the subsequent small table results section.
*/
// We use a mapping object here so we can build the projection in any order and
// still get the output columns ordered 0 to n-1 at the end.
//
// Also, to avoid copying a big table key into the small table result area for inner joins,
// we reference it with the projection so there can be duplicate output columns
// in the projection.
VectorColumnSourceMapping projectionMapping = new VectorColumnSourceMapping("Projection Mapping");
int nextOutputColumn = (order[0] == posBigTable ? 0 : smallTableResultSize);
for (int i = 0; i < bigTableRetainSize; i++) {
// Since bigTableValueExpressions may do a calculation and produce a scratch column, we
// need to map to the right batch column.
int retainColumn = bigTableRetainList.get(i);
int batchColumnIndex = bigTableValueColumnMap[retainColumn];
TypeInfo typeInfo = bigTableValueTypeInfos[i];
// With this map we project the big table batch to make it look like an output batch.
projectionMapping.add(nextOutputColumn, batchColumnIndex, typeInfo);
// Collect columns we copy from the big table batch to the overflow batch.
if (!bigTableRetainedMapping.containsOutputColumn(batchColumnIndex)) {
// Tolerate repeated use of a big table column.
bigTableRetainedMapping.add(batchColumnIndex, batchColumnIndex, typeInfo);
}
nextOutputColumn++;
}
/*
* Now determine the small table results.
*/
boolean smallTableExprVectorizes = true;
int firstSmallTableOutputColumn;
firstSmallTableOutputColumn = (order[0] == posBigTable ? bigTableRetainSize : 0);
int smallTableOutputCount = 0;
nextOutputColumn = firstSmallTableOutputColumn;
// The small table indices list has more information (i.e. keys) than the retain list, so use it if it exists...
String[] bigTableRetainedNames;
if (smallTableIndicesSize > 0) {
smallTableOutputCount = smallTableIndicesSize;
bigTableRetainedNames = new String[smallTableOutputCount];
for (int i = 0; i < smallTableIndicesSize; i++) {
if (smallTableIndices[i] >= 0) {
// Non-negative numbers indicate a big table key is needed for the
// small table result "area".
int keyIndex = smallTableIndices[i];
// Since bigTableKeyExpressions may do a calculation and produce a scratch column, we
// need to map to the right batch column.
int batchKeyColumn = bigTableKeyColumnMap[keyIndex];
bigTableRetainedNames[i] = bigTableKeyColumnNames[keyIndex];
TypeInfo typeInfo = bigTableKeyTypeInfos[keyIndex];
if (!isOuterJoin) {
// Optimize inner join keys of small table results.
// Project the big table key into the small table result "area".
projectionMapping.add(nextOutputColumn, batchKeyColumn, typeInfo);
if (!bigTableRetainedMapping.containsOutputColumn(batchKeyColumn)) {
// If necessary, copy the big table key into the overflow batch's small table
// result "area".
bigTableRetainedMapping.add(batchKeyColumn, batchKeyColumn, typeInfo);
}
} else {
// For outer joins, since the small table key can be null when there is no match,
// we must have a physical (scratch) column for those keys. We cannot use the
// projection optimization used by inner joins above.
int scratchColumn = vContext.allocateScratchColumn(typeInfo);
projectionMapping.add(nextOutputColumn, scratchColumn, typeInfo);
bigTableRetainedMapping.add(batchKeyColumn, scratchColumn, typeInfo);
bigTableOuterKeyMapping.add(batchKeyColumn, scratchColumn, typeInfo);
}
} else {
// Negative numbers indicate a column to be read (deserialized) from the small table's
// LazyBinary value row.
int smallTableValueIndex = -smallTableIndices[i] - 1;
ExprNodeDesc smallTableExprNode = smallTableExprs.get(i);
if (!validateExprNodeDesc(smallTableExprNode, "Small Table")) {
clearNotVectorizedReason();
smallTableExprVectorizes = false;
}
bigTableRetainedNames[i] = smallTableExprNode.toString();
TypeInfo typeInfo = smallTableExprNode.getTypeInfo();
// Make a new big table scratch column for the small table value.
int scratchColumn = vContext.allocateScratchColumn(typeInfo);
projectionMapping.add(nextOutputColumn, scratchColumn, typeInfo);
smallTableMapping.add(smallTableValueIndex, scratchColumn, typeInfo);
}
nextOutputColumn++;
}
} else if (smallTableRetainSize > 0) {
smallTableOutputCount = smallTableRetainSize;
bigTableRetainedNames = new String[smallTableOutputCount];
for (int i = 0; i < smallTableRetainSize; i++) {
int smallTableValueIndex = smallTableRetainList.get(i);
ExprNodeDesc smallTableExprNode = smallTableExprs.get(i);
if (!validateExprNodeDesc(smallTableExprNode, "Small Table")) {
clearNotVectorizedReason();
smallTableExprVectorizes = false;
}
bigTableRetainedNames[i] = smallTableExprNode.toString();
// Make a new big table scratch column for the small table value.
TypeInfo typeInfo = smallTableExprNode.getTypeInfo();
int scratchColumn = vContext.allocateScratchColumn(typeInfo);
projectionMapping.add(nextOutputColumn, scratchColumn, typeInfo);
smallTableMapping.add(smallTableValueIndex, scratchColumn, typeInfo);
nextOutputColumn++;
}
} else {
bigTableRetainedNames = new String[0];
}
boolean useOptimizedTable = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE);
// Remember the condition variables for EXPLAIN regardless of whether we specialize or not.
vectorDesc.setUseOptimizedTable(useOptimizedTable);
vectorDesc.setIsVectorizationMapJoinNativeEnabled(isVectorizationMapJoinNativeEnabled);
vectorDesc.setEngine(engine);
vectorDesc.setOneMapJoinCondition(oneMapJoinCondition);
vectorDesc.setHasNullSafes(hasNullSafes);
vectorDesc.setSmallTableExprVectorizes(smallTableExprVectorizes);
vectorDesc.setIsFastHashTableEnabled(isFastHashTableEnabled);
vectorDesc.setIsHybridHashJoin(isHybridHashJoin);
vectorDesc.setSupportsKeyTypes(supportsKeyTypes);
if (!supportsKeyTypes) {
vectorDesc.setNotSupportedKeyTypes(new ArrayList<String>(notSupportedKeyTypes));
}
// Check common conditions for both Optimized and Fast Hash Tables.
// Assume the conditions are met until one fails.
boolean result = true;
if (!useOptimizedTable || !isVectorizationMapJoinNativeEnabled || !isTezOrSpark || !oneMapJoinCondition || hasNullSafes || !smallTableExprVectorizes) {
result = false;
}
if (!isFastHashTableEnabled) {
// Check optimized-only hash table restrictions.
if (!supportsKeyTypes) {
result = false;
}
} else {
if (isHybridHashJoin) {
result = false;
}
}
// Convert dynamic arrays and maps to simple arrays.
bigTableRetainedMapping.finalize();
bigTableOuterKeyMapping.finalize();
smallTableMapping.finalize();
vectorMapJoinInfo.setBigTableRetainedMapping(bigTableRetainedMapping);
vectorMapJoinInfo.setBigTableOuterKeyMapping(bigTableOuterKeyMapping);
vectorMapJoinInfo.setSmallTableMapping(smallTableMapping);
projectionMapping.finalize();
// Verify we added an entry for each output.
assert projectionMapping.isSourceSequenceGood();
vectorMapJoinInfo.setProjectionMapping(projectionMapping);
return result;
}
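For reference, the key-type reporting above can be exercised on its own. The following is a hypothetical sketch (it is not part of Vectorizer; the class and method names are made up for illustration) that mirrors the expression used to fill notSupportedKeyTypes: a primitive TypeInfo reports its PrimitiveCategory, while any other TypeInfo reports its top-level Category.

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class KeyTypeNameSketch {
    // Mirrors the notSupportedKeyTypes expression in canSpecializeMapJoin above.
    static String describeKeyType(TypeInfo typeInfo) {
        Category category = typeInfo.getCategory();
        return category != Category.PRIMITIVE
            ? category.toString()
            : ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory().toString();
    }

    public static void main(String[] args) {
        System.out.println(describeKeyType(TypeInfoFactory.intTypeInfo)); // INT
        System.out.println(describeKeyType(
            TypeInfoFactory.getListTypeInfo(TypeInfoFactory.stringTypeInfo))); // LIST
    }
}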
Use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category in project hive by apache.
The class GenericUDFDecode, method initialize.
@Override
public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
if (arguments.length != 2) {
throw new UDFArgumentLengthException("Decode() requires exactly two arguments");
}
if (arguments[0].getCategory() != Category.PRIMITIVE) {
throw new UDFArgumentTypeException(0, "The first argument to Decode() must be primitive");
}
PrimitiveCategory category = ((PrimitiveObjectInspector) arguments[0]).getPrimitiveCategory();
if (category == PrimitiveCategory.BINARY) {
bytesOI = (BinaryObjectInspector) arguments[0];
} else if (category == PrimitiveCategory.VOID) {
bytesOI = (VoidObjectInspector) arguments[0];
} else {
throw new UDFArgumentTypeException(0, "The first argument to Decode() must be binary");
}
if (arguments[1].getCategory() != Category.PRIMITIVE) {
throw new UDFArgumentTypeException(1, "The second argument to Decode() must be primitive");
}
charsetOI = (PrimitiveObjectInspector) arguments[1];
if (PrimitiveGrouping.STRING_GROUP != PrimitiveObjectInspectorUtils.getPrimitiveGrouping(charsetOI.getPrimitiveCategory())) {
throw new UDFArgumentTypeException(1, "The second argument to Decode() must be from string group");
}
// If the character set for decoding is constant, we can optimize that
if (arguments[1] instanceof ConstantObjectInspector) {
String charSetName = ((ConstantObjectInspector) arguments[1]).getWritableConstantValue().toString();
decoder = Charset.forName(charSetName).newDecoder().onMalformedInput(CodingErrorAction.REPORT).onUnmappableCharacter(CodingErrorAction.REPORT);
}
return PrimitiveObjectInspectorFactory.javaStringObjectInspector;
}
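A minimal usage sketch for the initialize() above, assuming a test-style setup (the class name and argument values are illustrative, not from the Hive source): the first argument is a binary ObjectInspector and the second is a constant string ObjectInspector carrying the charset, which lets initialize() build the decoder once up front.

import org.apache.hadoop.hive.ql.udf.generic.GenericUDFDecode;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.io.Text;

public class DecodeInitializeSketch {
    public static void main(String[] args) throws Exception {
        GenericUDFDecode udf = new GenericUDFDecode();
        ObjectInspector[] ois = new ObjectInspector[] {
            PrimitiveObjectInspectorFactory.writableBinaryObjectInspector,
            // A constant "UTF-8" charset, so the decoder is created during initialize().
            PrimitiveObjectInspectorFactory.getPrimitiveWritableConstantObjectInspector(
                TypeInfoFactory.stringTypeInfo, new Text("UTF-8"))
        };
        ObjectInspector resultOI = udf.initialize(ois);
        System.out.println(resultOI.getTypeName()); // string
    }
}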
Use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category in project hive by apache.
The class GenericUDFBaseNumeric, method initialize.
@Override
public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
if (arguments.length != 2) {
throw new UDFArgumentException(opName + " requires two arguments.");
}
for (int i = 0; i < 2; i++) {
Category category = arguments[i].getCategory();
if (category != Category.PRIMITIVE) {
throw new UDFArgumentTypeException(i, "The " + GenericUDFUtils.getOrdinal(i + 1) + " argument of " + opName + " is expected to be a " + Category.PRIMITIVE.toString().toLowerCase() + " type, but " + category.toString().toLowerCase() + " is found");
}
}
// During map/reduce tasks there may not be a valid HiveConf from SessionState, so look up
// and save the needed conf values at query compile time; plan serialization ensures
// we have access to these values in the map/reduce tasks.
if (confLookupNeeded) {
CompatLevel compatLevel = HiveCompat.getCompatLevel(SessionState.get().getConf());
ansiSqlArithmetic = compatLevel.ordinal() > CompatLevel.HIVE_0_12.ordinal();
confLookupNeeded = false;
}
leftOI = (PrimitiveObjectInspector) arguments[0];
rightOI = (PrimitiveObjectInspector) arguments[1];
resultOI = PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(deriveResultTypeInfo());
converterLeft = ObjectInspectorConverters.getConverter(leftOI, resultOI);
converterRight = ObjectInspectorConverters.getConverter(rightOI, resultOI);
return resultOI;
}
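The converter pattern at the end of initialize() can be seen in isolation with this standalone sketch (hedged: the class name and the int-to-double pairing are assumptions for illustration); as with converterLeft and converterRight, an operand is converted to the result ObjectInspector's type before the arithmetic is applied.

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class NumericConverterSketch {
    public static void main(String[] args) {
        ObjectInspector intOI = PrimitiveObjectInspectorFactory.javaIntObjectInspector;
        ObjectInspector doubleOI = PrimitiveObjectInspectorFactory.writableDoubleObjectInspector;
        // Analogous to converterLeft/converterRight: convert an operand to the result type.
        Converter toDouble = ObjectInspectorConverters.getConverter(intOI, doubleOI);
        System.out.println(toDouble.convert(3)); // 3.0 (a DoubleWritable)
    }
}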
Use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category in project hive by apache.
The class AccumuloRowSerializer, method writeWithLevel.
/**
* Recursively serialize an Object using its {@link ObjectInspector}, respecting the
* separators defined by the {@link LazySerDeParameters}.
* @param oi ObjectInspector for the current object
* @param value The current object
* @param output The buffer that output is written to
* @param mapping The mapping for this Hive column
* @param level The current level/offset for the SerDe separator
* @throws IOException
*/
protected void writeWithLevel(ObjectInspector oi, Object value, ByteStream.Output output, ColumnMapping mapping, int level) throws IOException {
switch(oi.getCategory()) {
case PRIMITIVE:
if (mapping.getEncoding() == ColumnEncoding.BINARY) {
this.writeBinary(output, value, (PrimitiveObjectInspector) oi);
} else {
this.writeString(output, value, (PrimitiveObjectInspector) oi);
}
return;
case LIST:
char separator = (char) serDeParams.getSeparators()[level];
ListObjectInspector loi = (ListObjectInspector) oi;
List<?> list = loi.getList(value);
ObjectInspector eoi = loi.getListElementObjectInspector();
if (list == null) {
log.debug("No objects found when serializing list");
return;
} else {
for (int i = 0; i < list.size(); i++) {
if (i > 0) {
output.write(separator);
}
writeWithLevel(eoi, list.get(i), output, mapping, level + 1);
}
}
return;
case MAP:
char sep = (char) serDeParams.getSeparators()[level];
char keyValueSeparator = (char) serDeParams.getSeparators()[level + 1];
MapObjectInspector moi = (MapObjectInspector) oi;
ObjectInspector koi = moi.getMapKeyObjectInspector();
ObjectInspector voi = moi.getMapValueObjectInspector();
Map<?, ?> map = moi.getMap(value);
if (map == null) {
log.debug("No object found when serializing map");
return;
} else {
boolean first = true;
for (Map.Entry<?, ?> entry : map.entrySet()) {
if (first) {
first = false;
} else {
output.write(sep);
}
writeWithLevel(koi, entry.getKey(), output, mapping, level + 2);
output.write(keyValueSeparator);
writeWithLevel(voi, entry.getValue(), output, mapping, level + 2);
}
}
return;
case STRUCT:
sep = (char) serDeParams.getSeparators()[level];
StructObjectInspector soi = (StructObjectInspector) oi;
List<? extends StructField> fields = soi.getAllStructFieldRefs();
list = soi.getStructFieldsDataAsList(value);
if (list == null) {
log.debug("No object found when serializing struct");
return;
} else {
for (int i = 0; i < list.size(); i++) {
if (i > 0) {
output.write(sep);
}
writeWithLevel(fields.get(i).getFieldObjectInspector(), list.get(i), output, mapping, level + 1);
}
}
return;
default:
throw new RuntimeException("Unknown category type: " + oi.getCategory());
}
}
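To make the level-based separator scheme concrete, here is a self-contained sketch (a hypothetical helper, not part of AccumuloRowSerializer, using a fixed separator table instead of LazySerDeParameters): each nesting level consumes the next separator, and a map consumes two levels, one between entries and one between a key and its value.

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class LevelSeparatorSketch {
    static final char[] SEPARATORS = {',', ':', ';'};

    static String write(Object value, int level) {
        if (value instanceof List) {
            // Lists separate elements with the current level's separator.
            StringBuilder sb = new StringBuilder();
            List<?> list = (List<?>) value;
            for (int i = 0; i < list.size(); i++) {
                if (i > 0) sb.append(SEPARATORS[level]);
                sb.append(write(list.get(i), level + 1));
            }
            return sb.toString();
        } else if (value instanceof Map) {
            // Maps use level for the entry separator and level + 1 between key and value,
            // so keys and values themselves recurse at level + 2.
            StringBuilder sb = new StringBuilder();
            boolean first = true;
            for (Map.Entry<?, ?> e : ((Map<?, ?>) value).entrySet()) {
                if (!first) sb.append(SEPARATORS[level]);
                first = false;
                sb.append(write(e.getKey(), level + 2))
                  .append(SEPARATORS[level + 1])
                  .append(write(e.getValue(), level + 2));
            }
            return sb.toString();
        }
        return String.valueOf(value); // primitive case
    }

    public static void main(String[] args) {
        Map<String, List<String>> m = new LinkedHashMap<>();
        m.put("k1", Arrays.asList("a", "b"));
        m.put("k2", Arrays.asList("c"));
        System.out.println(write(m, 0)); // k1:a;b,k2:c
    }
}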
Use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category in project hive by apache.
The class HCatRecordSerDe, method serializeList.
private static List<?> serializeList(Object f, ListObjectInspector loi) throws SerDeException {
List l = loi.getList(f);
if (l == null) {
return null;
}
ObjectInspector eloi = loi.getListElementObjectInspector();
if (eloi.getCategory() == Category.PRIMITIVE) {
List<Object> list = new ArrayList<Object>(l.size());
for (int i = 0; i < l.size(); i++) {
list.add(((PrimitiveObjectInspector) eloi).getPrimitiveJavaObject(l.get(i)));
}
return list;
} else if (eloi.getCategory() == Category.STRUCT) {
List<List<?>> list = new ArrayList<List<?>>(l.size());
for (int i = 0; i < l.size(); i++) {
list.add(serializeStruct(l.get(i), (StructObjectInspector) eloi));
}
return list;
} else if (eloi.getCategory() == Category.LIST) {
List<List<?>> list = new ArrayList<List<?>>(l.size());
for (int i = 0; i < l.size(); i++) {
list.add(serializeList(l.get(i), (ListObjectInspector) eloi));
}
return list;
} else if (eloi.getCategory() == Category.MAP) {
List<Map<?, ?>> list = new ArrayList<Map<?, ?>>(l.size());
for (int i = 0; i < l.size(); i++) {
list.add(serializeMap(l.get(i), (MapObjectInspector) eloi));
}
return list;
} else {
throw new SerDeException(HCatRecordSerDe.class.toString() + " does not know what to do with fields of unknown category: " + eloi.getCategory() + " , type: " + eloi.getTypeName());
}
}
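serializeList() is private to HCatRecordSerDe, but its primitive branch can be reproduced with a standalone sketch (hedged: the class name and data are illustrative): the list element ObjectInspector's Category is checked before each element is converted with getPrimitiveJavaObject().

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class ListElementCategorySketch {
    public static void main(String[] args) {
        ListObjectInspector loi = ObjectInspectorFactory.getStandardListObjectInspector(
            PrimitiveObjectInspectorFactory.javaStringObjectInspector);
        ObjectInspector eloi = loi.getListElementObjectInspector();
        List<String> data = Arrays.asList("a", "b", "c");
        if (eloi.getCategory() == Category.PRIMITIVE) {
            // Same per-element conversion serializeList() applies for primitive lists.
            for (Object o : loi.getList(data)) {
                System.out.println(((PrimitiveObjectInspector) eloi).getPrimitiveJavaObject(o));
            }
        }
    }
}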