Use of org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo in project hive by apache.
The class Vectorizer, method canSpecializeMapJoin.
private boolean canSpecializeMapJoin(Operator<? extends OperatorDesc> op, MapJoinDesc desc, boolean isTezOrSpark, VectorizationContext vContext, VectorMapJoinInfo vectorMapJoinInfo) throws HiveException {
Preconditions.checkState(op instanceof MapJoinOperator);
// Allocate a VectorMapJoinDesc initially with implementation type NONE so EXPLAIN
// can report this operator was vectorized, but not native, along with the conditions that were checked.
VectorMapJoinDesc vectorDesc = new VectorMapJoinDesc();
desc.setVectorDesc(vectorDesc);
boolean isVectorizationMapJoinNativeEnabled = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED);
String engine = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE);
boolean oneMapJoinCondition = (desc.getConds().length == 1);
boolean hasNullSafes = onExpressionHasNullSafes(desc);
byte posBigTable = (byte) desc.getPosBigTable();
// Since we want to display all the met and not met conditions in EXPLAIN, we determine all
// information first....
List<ExprNodeDesc> keyDesc = desc.getKeys().get(posBigTable);
VectorExpression[] allBigTableKeyExpressions = vContext.getVectorExpressions(keyDesc);
final int allBigTableKeyExpressionsLength = allBigTableKeyExpressions.length;
// Assume the key types are supported until we find one that is not.
boolean supportsKeyTypes = true;
HashSet<String> notSupportedKeyTypes = new HashSet<String>();
// Since a key expression can be a calculation and the key will go into a scratch column,
// we need the mapping and type information.
int[] bigTableKeyColumnMap = new int[allBigTableKeyExpressionsLength];
String[] bigTableKeyColumnNames = new String[allBigTableKeyExpressionsLength];
TypeInfo[] bigTableKeyTypeInfos = new TypeInfo[allBigTableKeyExpressionsLength];
ArrayList<VectorExpression> bigTableKeyExpressionsList = new ArrayList<VectorExpression>();
VectorExpression[] bigTableKeyExpressions;
for (int i = 0; i < allBigTableKeyExpressionsLength; i++) {
VectorExpression ve = allBigTableKeyExpressions[i];
if (!IdentityExpression.isColumnOnly(ve)) {
bigTableKeyExpressionsList.add(ve);
}
bigTableKeyColumnMap[i] = ve.getOutputColumn();
ExprNodeDesc exprNode = keyDesc.get(i);
bigTableKeyColumnNames[i] = exprNode.toString();
TypeInfo typeInfo = exprNode.getTypeInfo();
// same check used in HashTableLoader.
if (!MapJoinKey.isSupportedField(typeInfo)) {
supportsKeyTypes = false;
Category category = typeInfo.getCategory();
notSupportedKeyTypes.add((category != Category.PRIMITIVE ? category.toString() : ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory().toString()));
}
bigTableKeyTypeInfos[i] = typeInfo;
}
if (bigTableKeyExpressionsList.size() == 0) {
bigTableKeyExpressions = null;
} else {
bigTableKeyExpressions = bigTableKeyExpressionsList.toArray(new VectorExpression[0]);
}
List<ExprNodeDesc> bigTableExprs = desc.getExprs().get(posBigTable);
VectorExpression[] allBigTableValueExpressions = vContext.getVectorExpressions(bigTableExprs);
boolean isFastHashTableEnabled = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED);
// Especially since LLAP is prone to turn it off in the MapJoinDesc in later
// physical optimizer stages...
boolean isHybridHashJoin = desc.isHybridHashJoin();
/*
* Populate vectorMapJoinInfo.
*/
/*
* Similarly, we need a mapping since a value expression can be a calculation and the value
* will go into a scratch column.
*/
int[] bigTableValueColumnMap = new int[allBigTableValueExpressions.length];
String[] bigTableValueColumnNames = new String[allBigTableValueExpressions.length];
TypeInfo[] bigTableValueTypeInfos = new TypeInfo[allBigTableValueExpressions.length];
ArrayList<VectorExpression> bigTableValueExpressionsList = new ArrayList<VectorExpression>();
VectorExpression[] bigTableValueExpressions;
for (int i = 0; i < bigTableValueColumnMap.length; i++) {
VectorExpression ve = allBigTableValueExpressions[i];
if (!IdentityExpression.isColumnOnly(ve)) {
bigTableValueExpressionsList.add(ve);
}
bigTableValueColumnMap[i] = ve.getOutputColumn();
ExprNodeDesc exprNode = bigTableExprs.get(i);
bigTableValueColumnNames[i] = exprNode.toString();
bigTableValueTypeInfos[i] = exprNode.getTypeInfo();
}
if (bigTableValueExpressionsList.size() == 0) {
bigTableValueExpressions = null;
} else {
bigTableValueExpressions = bigTableValueExpressionsList.toArray(new VectorExpression[0]);
}
vectorMapJoinInfo.setBigTableKeyColumnMap(bigTableKeyColumnMap);
vectorMapJoinInfo.setBigTableKeyColumnNames(bigTableKeyColumnNames);
vectorMapJoinInfo.setBigTableKeyTypeInfos(bigTableKeyTypeInfos);
vectorMapJoinInfo.setBigTableKeyExpressions(bigTableKeyExpressions);
vectorMapJoinInfo.setBigTableValueColumnMap(bigTableValueColumnMap);
vectorMapJoinInfo.setBigTableValueColumnNames(bigTableValueColumnNames);
vectorMapJoinInfo.setBigTableValueTypeInfos(bigTableValueTypeInfos);
vectorMapJoinInfo.setBigTableValueExpressions(bigTableValueExpressions);
/*
* Small table information.
*/
VectorColumnOutputMapping bigTableRetainedMapping = new VectorColumnOutputMapping("Big Table Retained Mapping");
VectorColumnOutputMapping bigTableOuterKeyMapping = new VectorColumnOutputMapping("Big Table Outer Key Mapping");
// The order of the fields in the LazyBinary small table value must be used, so
// we use the source ordering flavor for the mapping.
VectorColumnSourceMapping smallTableMapping = new VectorColumnSourceMapping("Small Table Mapping");
Byte[] order = desc.getTagOrder();
Byte posSingleVectorMapJoinSmallTable = (order[0] == posBigTable ? order[1] : order[0]);
boolean isOuterJoin = !desc.getNoOuterJoin();
/*
* Gather up big and small table output result information from the MapJoinDesc.
*/
List<Integer> bigTableRetainList = desc.getRetainList().get(posBigTable);
int bigTableRetainSize = bigTableRetainList.size();
int[] smallTableIndices;
int smallTableIndicesSize;
List<ExprNodeDesc> smallTableExprs = desc.getExprs().get(posSingleVectorMapJoinSmallTable);
if (desc.getValueIndices() != null && desc.getValueIndices().get(posSingleVectorMapJoinSmallTable) != null) {
smallTableIndices = desc.getValueIndices().get(posSingleVectorMapJoinSmallTable);
smallTableIndicesSize = smallTableIndices.length;
} else {
smallTableIndices = null;
smallTableIndicesSize = 0;
}
List<Integer> smallTableRetainList = desc.getRetainList().get(posSingleVectorMapJoinSmallTable);
int smallTableRetainSize = smallTableRetainList.size();
int smallTableResultSize = 0;
if (smallTableIndicesSize > 0) {
smallTableResultSize = smallTableIndicesSize;
} else if (smallTableRetainSize > 0) {
smallTableResultSize = smallTableRetainSize;
}
/*
* Determine the big table retained mapping first so we can optimize out (with
* projection) copying inner join big table keys in the subsequent small table results section.
*/
// We use a mapping object here so we can build the projection in any order and
// get the ordered by 0 to n-1 output columns at the end.
//
// Also, to avoid copying a big table key into the small table result area for inner joins,
// we reference it with the projection so there can be duplicate output columns
// in the projection.
VectorColumnSourceMapping projectionMapping = new VectorColumnSourceMapping("Projection Mapping");
int nextOutputColumn = (order[0] == posBigTable ? 0 : smallTableResultSize);
for (int i = 0; i < bigTableRetainSize; i++) {
// Since bigTableValueExpressions may do a calculation and produce a scratch column, we
// need to map to the right batch column.
int retainColumn = bigTableRetainList.get(i);
int batchColumnIndex = bigTableValueColumnMap[retainColumn];
TypeInfo typeInfo = bigTableValueTypeInfos[i];
// With this map we project the big table batch to make it look like an output batch.
projectionMapping.add(nextOutputColumn, batchColumnIndex, typeInfo);
// Collect columns we copy from the big table batch to the overflow batch.
if (!bigTableRetainedMapping.containsOutputColumn(batchColumnIndex)) {
// Tolerate repeated use of a big table column.
bigTableRetainedMapping.add(batchColumnIndex, batchColumnIndex, typeInfo);
}
nextOutputColumn++;
}
/*
* Now determine the small table results.
*/
boolean smallTableExprVectorizes = true;
int firstSmallTableOutputColumn;
firstSmallTableOutputColumn = (order[0] == posBigTable ? bigTableRetainSize : 0);
int smallTableOutputCount = 0;
nextOutputColumn = firstSmallTableOutputColumn;
// The small table indices list carries more information (i.e. keys) than the retain list, so use it if it exists...
String[] bigTableRetainedNames;
if (smallTableIndicesSize > 0) {
smallTableOutputCount = smallTableIndicesSize;
bigTableRetainedNames = new String[smallTableOutputCount];
for (int i = 0; i < smallTableIndicesSize; i++) {
if (smallTableIndices[i] >= 0) {
// Zero and above numbers indicate a big table key is needed for
// small table result "area".
int keyIndex = smallTableIndices[i];
// Since bigTableKeyExpressions may do a calculation and produce a scratch column, we
// need to map the right column.
int batchKeyColumn = bigTableKeyColumnMap[keyIndex];
bigTableRetainedNames[i] = bigTableKeyColumnNames[keyIndex];
TypeInfo typeInfo = bigTableKeyTypeInfos[keyIndex];
if (!isOuterJoin) {
// Optimize inner join keys of small table results.
// Project the big table key into the small table result "area".
projectionMapping.add(nextOutputColumn, batchKeyColumn, typeInfo);
if (!bigTableRetainedMapping.containsOutputColumn(batchKeyColumn)) {
// If necessary, copy the big table key into the overflow batch's small table
// result "area".
bigTableRetainedMapping.add(batchKeyColumn, batchKeyColumn, typeInfo);
}
} else {
// For outer joins, since the small table key can be null when there is no match,
// we must have a physical (scratch) column for those keys. We cannot use the
// projection optimization used by inner joins above.
int scratchColumn = vContext.allocateScratchColumn(typeInfo);
projectionMapping.add(nextOutputColumn, scratchColumn, typeInfo);
bigTableRetainedMapping.add(batchKeyColumn, scratchColumn, typeInfo);
bigTableOuterKeyMapping.add(batchKeyColumn, scratchColumn, typeInfo);
}
} else {
// Negative numbers indicate a column to be deserialized from the small table's
// LazyBinary value row.
int smallTableValueIndex = -smallTableIndices[i] - 1;
ExprNodeDesc smallTableExprNode = smallTableExprs.get(i);
if (!validateExprNodeDesc(smallTableExprNode, "Small Table")) {
clearNotVectorizedReason();
smallTableExprVectorizes = false;
}
bigTableRetainedNames[i] = smallTableExprNode.toString();
TypeInfo typeInfo = smallTableExprNode.getTypeInfo();
// Make a new big table scratch column for the small table value.
int scratchColumn = vContext.allocateScratchColumn(typeInfo);
projectionMapping.add(nextOutputColumn, scratchColumn, typeInfo);
smallTableMapping.add(smallTableValueIndex, scratchColumn, typeInfo);
}
nextOutputColumn++;
}
} else if (smallTableRetainSize > 0) {
smallTableOutputCount = smallTableRetainSize;
bigTableRetainedNames = new String[smallTableOutputCount];
for (int i = 0; i < smallTableRetainSize; i++) {
int smallTableValueIndex = smallTableRetainList.get(i);
ExprNodeDesc smallTableExprNode = smallTableExprs.get(i);
if (!validateExprNodeDesc(smallTableExprNode, "Small Table")) {
clearNotVectorizedReason();
smallTableExprVectorizes = false;
}
bigTableRetainedNames[i] = smallTableExprNode.toString();
// Make a new big table scratch column for the small table value.
TypeInfo typeInfo = smallTableExprNode.getTypeInfo();
int scratchColumn = vContext.allocateScratchColumn(typeInfo);
projectionMapping.add(nextOutputColumn, scratchColumn, typeInfo);
smallTableMapping.add(smallTableValueIndex, scratchColumn, typeInfo);
nextOutputColumn++;
}
} else {
bigTableRetainedNames = new String[0];
}
boolean useOptimizedTable = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE);
// Remember the condition variables for EXPLAIN regardless of whether we specialize or not.
vectorDesc.setUseOptimizedTable(useOptimizedTable);
vectorDesc.setIsVectorizationMapJoinNativeEnabled(isVectorizationMapJoinNativeEnabled);
vectorDesc.setEngine(engine);
vectorDesc.setOneMapJoinCondition(oneMapJoinCondition);
vectorDesc.setHasNullSafes(hasNullSafes);
vectorDesc.setSmallTableExprVectorizes(smallTableExprVectorizes);
vectorDesc.setIsFastHashTableEnabled(isFastHashTableEnabled);
vectorDesc.setIsHybridHashJoin(isHybridHashJoin);
vectorDesc.setSupportsKeyTypes(supportsKeyTypes);
if (!supportsKeyTypes) {
vectorDesc.setNotSupportedKeyTypes(new ArrayList<String>(notSupportedKeyTypes));
}
// Check common conditions for both Optimized and Fast Hash Tables.
// Start optimistic: assume we can specialize, and clear the result below if any condition fails.
boolean result = true;
if (!useOptimizedTable || !isVectorizationMapJoinNativeEnabled || !isTezOrSpark || !oneMapJoinCondition || hasNullSafes || !smallTableExprVectorizes) {
result = false;
}
if (!isFastHashTableEnabled) {
// Check optimized-only hash table restrictions.
if (!supportsKeyTypes) {
result = false;
}
} else {
if (isHybridHashJoin) {
result = false;
}
}
// Convert dynamic arrays and maps to simple arrays.
bigTableRetainedMapping.finalize();
bigTableOuterKeyMapping.finalize();
smallTableMapping.finalize();
vectorMapJoinInfo.setBigTableRetainedMapping(bigTableRetainedMapping);
vectorMapJoinInfo.setBigTableOuterKeyMapping(bigTableOuterKeyMapping);
vectorMapJoinInfo.setSmallTableMapping(smallTableMapping);
projectionMapping.finalize();
// Verify we added an entry for each output.
assert projectionMapping.isSourceSequenceGood();
vectorMapJoinInfo.setProjectionMapping(projectionMapping);
return result;
}
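For reference, here is a minimal standalone sketch (not part of the Hive source above) of the unsupported-key-type reporting used in canSpecializeMapJoin: for a non-primitive type it records the Category name, and for a primitive type it records the PrimitiveCategory of the PrimitiveTypeInfo. The class name DescribeKeyTypeSketch and the sample type strings are made up for illustration; only the serde2 typeinfo APIs already used above are assumed.

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class DescribeKeyTypeSketch {

  // Mirror the notSupportedKeyTypes naming above: the Category for complex types,
  // the PrimitiveCategory for primitive types.
  static String describe(TypeInfo typeInfo) {
    Category category = typeInfo.getCategory();
    return category != Category.PRIMITIVE
        ? category.toString()
        : ((PrimitiveTypeInfo) typeInfo).getPrimitiveCategory().toString();
  }

  public static void main(String[] args) {
    System.out.println(describe(TypeInfoFactory.intTypeInfo));                                // INT
    System.out.println(describe(TypeInfoUtils.getTypeInfoFromTypeString("map<string,int>"))); // MAP
  }
}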
Use of org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo in project hive by apache.
The class Vectorizer, method specializeReduceSinkOperator.
private Operator<? extends OperatorDesc> specializeReduceSinkOperator(Operator<? extends OperatorDesc> op, VectorizationContext vContext, ReduceSinkDesc desc, VectorReduceSinkInfo vectorReduceSinkInfo) throws HiveException {
Operator<? extends OperatorDesc> vectorOp = null;
Class<? extends Operator<?>> opClass = null;
Type[] reduceSinkKeyColumnVectorTypes = vectorReduceSinkInfo.getReduceSinkKeyColumnVectorTypes();
// By default, we can always use the multi-key class.
VectorReduceSinkDesc.ReduceSinkKeyType reduceSinkKeyType = VectorReduceSinkDesc.ReduceSinkKeyType.MULTI_KEY;
// Look for single column optimization.
if (reduceSinkKeyColumnVectorTypes.length == 1) {
LOG.info("Vectorizer vectorizeOperator groupby typeName " + vectorReduceSinkInfo.getReduceSinkKeyTypeInfos()[0]);
Type columnVectorType = reduceSinkKeyColumnVectorTypes[0];
switch(columnVectorType) {
case LONG:
{
PrimitiveCategory primitiveCategory = ((PrimitiveTypeInfo) vectorReduceSinkInfo.getReduceSinkKeyTypeInfos()[0]).getPrimitiveCategory();
switch(primitiveCategory) {
case BOOLEAN:
case BYTE:
case SHORT:
case INT:
case LONG:
reduceSinkKeyType = VectorReduceSinkDesc.ReduceSinkKeyType.LONG;
break;
default:
// Other integer types not supported yet.
break;
}
}
break;
case BYTES:
reduceSinkKeyType = VectorReduceSinkDesc.ReduceSinkKeyType.STRING;
break;
default:
// Stay with multi-key.
break;
}
}
switch(reduceSinkKeyType) {
case LONG:
opClass = VectorReduceSinkLongOperator.class;
break;
case STRING:
opClass = VectorReduceSinkStringOperator.class;
break;
case MULTI_KEY:
opClass = VectorReduceSinkMultiKeyOperator.class;
break;
default:
throw new HiveException("Unknown reduce sink key type " + reduceSinkKeyType);
}
VectorReduceSinkDesc vectorDesc = (VectorReduceSinkDesc) desc.getVectorDesc();
vectorDesc.setReduceSinkKeyType(reduceSinkKeyType);
vectorDesc.setVectorReduceSinkInfo(vectorReduceSinkInfo);
vectorOp = OperatorFactory.getVectorOperator(opClass, op.getCompilationOpContext(), op.getConf(), vContext);
LOG.info("Vectorizer vectorizeOperator reduce sink class " + vectorOp.getClass().getSimpleName());
return vectorOp;
}
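The single-column LONG specialization above hinges on the key's PrimitiveCategory. Below is a minimal sketch of that decision in isolation, with a hypothetical class name ReduceSinkKeySketch; it assumes only the serde2 typeinfo APIs used in the snippet.

import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class ReduceSinkKeySketch {

  // A single-column key stored in a LONG ColumnVector can use the specialized
  // long-key operator only for these integer-like categories.
  static boolean isLongKeyCategory(PrimitiveCategory primitiveCategory) {
    switch (primitiveCategory) {
      case BOOLEAN:
      case BYTE:
      case SHORT:
      case INT:
      case LONG:
        return true;
      default:
        return false;
    }
  }

  public static void main(String[] args) {
    PrimitiveTypeInfo smallintType = TypeInfoFactory.shortTypeInfo;
    PrimitiveTypeInfo timestampType = TypeInfoFactory.timestampTypeInfo;
    System.out.println(isLongKeyCategory(smallintType.getPrimitiveCategory()));  // true
    System.out.println(isLongKeyCategory(timestampType.getPrimitiveCategory())); // false
  }
}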
Use of org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo in project hive by apache.
The class ReplicationSemanticAnalyzer, method genPartSpecs.
private Map<Integer, List<ExprNodeGenericFuncDesc>> genPartSpecs(Table table, List<Map<String, String>> partitions) throws SemanticException {
Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs = new HashMap<Integer, List<ExprNodeGenericFuncDesc>>();
int partPrefixLength = 0;
if ((partitions != null) && (partitions.size() > 0)) {
partPrefixLength = partitions.get(0).size();
// pick the length of the first ptn, we expect all ptns listed to have the same number of
// key-vals.
}
List<ExprNodeGenericFuncDesc> ptnDescs = new ArrayList<ExprNodeGenericFuncDesc>();
for (Map<String, String> ptn : partitions) {
// convert each key-value-map to appropriate expression.
ExprNodeGenericFuncDesc expr = null;
for (Map.Entry<String, String> kvp : ptn.entrySet()) {
String key = kvp.getKey();
Object val = kvp.getValue();
String type = table.getPartColByName(key).getType();
PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo(type);
ExprNodeColumnDesc column = new ExprNodeColumnDesc(pti, key, null, true);
ExprNodeGenericFuncDesc op = DDLSemanticAnalyzer.makeBinaryPredicate("=", column, new ExprNodeConstantDesc(pti, val));
expr = (expr == null) ? op : DDLSemanticAnalyzer.makeBinaryPredicate("and", expr, op);
}
if (expr != null) {
ptnDescs.add(expr);
}
}
if (ptnDescs.size() > 0) {
partSpecs.put(partPrefixLength, ptnDescs);
}
return partSpecs;
}
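Here is a minimal sketch of the per-key step inside genPartSpecs: resolve the PrimitiveTypeInfo from the partition column's declared type string, then build the column and constant descriptors that form the two sides of the "=" predicate. The class name, the "ds" key, and its value are invented for the example, and the DDLSemanticAnalyzer.makeBinaryPredicate call is omitted.

import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class PartSpecSketch {

  public static void main(String[] args) {
    // Hypothetical partition key "ds" declared as string, with value "2024-01-01".
    String key = "ds";
    String declaredType = "string";
    String val = "2024-01-01";

    // Resolve the serde2 PrimitiveTypeInfo for the declared type string, then
    // build the descriptors used on both sides of the "=" predicate above.
    PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo(declaredType);
    ExprNodeColumnDesc column = new ExprNodeColumnDesc(pti, key, null, true);
    ExprNodeConstantDesc constant = new ExprNodeConstantDesc(pti, val);

    System.out.println(column.getExprString() + " = " + constant.getExprString());
  }
}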
Use of org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo in project hive by apache.
The class VectorExtractRow, method initEntry.
/*
* Initialize one column's array entries.
*/
private void initEntry(int logicalColumnIndex, int projectionColumnNum, TypeInfo typeInfo) {
projectionColumnNums[logicalColumnIndex] = projectionColumnNum;
Category category = typeInfo.getCategory();
categories[logicalColumnIndex] = category;
if (category == Category.PRIMITIVE) {
PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) typeInfo;
PrimitiveCategory primitiveCategory = primitiveTypeInfo.getPrimitiveCategory();
primitiveCategories[logicalColumnIndex] = primitiveCategory;
switch(primitiveCategory) {
case CHAR:
maxLengths[logicalColumnIndex] = ((CharTypeInfo) primitiveTypeInfo).getLength();
break;
case VARCHAR:
maxLengths[logicalColumnIndex] = ((VarcharTypeInfo) primitiveTypeInfo).getLength();
break;
default:
// No additional data type specific setting.
break;
}
primitiveWritables[logicalColumnIndex] = VectorizedBatchUtil.getPrimitiveWritable(primitiveCategory);
}
}
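The only type-specific state initEntry extracts from a PrimitiveTypeInfo is the declared maximum length of char/varchar columns. A small sketch of just that piece, with a hypothetical class name; it assumes only the typeinfo classes already used in the snippet.

import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;

public class MaxLengthSketch {

  // Declared max length for char/varchar, -1 for every other primitive,
  // mirroring the CHAR/VARCHAR branches of initEntry above.
  static int maxLength(PrimitiveTypeInfo primitiveTypeInfo) {
    switch (primitiveTypeInfo.getPrimitiveCategory()) {
      case CHAR:
        return ((CharTypeInfo) primitiveTypeInfo).getLength();
      case VARCHAR:
        return ((VarcharTypeInfo) primitiveTypeInfo).getLength();
      default:
        return -1;
    }
  }

  public static void main(String[] args) {
    System.out.println(maxLength(TypeInfoFactory.getCharTypeInfo(10)));    // 10
    System.out.println(maxLength(TypeInfoFactory.getVarcharTypeInfo(25))); // 25
    System.out.println(maxLength(TypeInfoFactory.stringTypeInfo));         // -1
  }
}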
Use of org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo in project hive by apache.
The class VectorizedRowBatchCtx, method addPartitionColsToBatch.
/**
* Add the partition values to the batch as repeating column values.
*
* @param batch the vectorized row batch to populate
* @param partitionValues one value per partition column, in schema order
*/
public void addPartitionColsToBatch(VectorizedRowBatch batch, Object[] partitionValues) {
if (partitionValues != null) {
for (int i = 0; i < partitionColumnCount; i++) {
Object value = partitionValues[i];
int colIndex = dataColumnCount + i;
String partitionColumnName = rowColumnNames[colIndex];
PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) rowColumnTypeInfos[colIndex];
switch(primitiveTypeInfo.getPrimitiveCategory()) {
case BOOLEAN:
{
LongColumnVector lcv = (LongColumnVector) batch.cols[colIndex];
if (value == null) {
lcv.noNulls = false;
lcv.isNull[0] = true;
lcv.isRepeating = true;
} else {
lcv.fill((Boolean) value == true ? 1 : 0);
lcv.isNull[0] = false;
}
}
break;
case BYTE:
{
LongColumnVector lcv = (LongColumnVector) batch.cols[colIndex];
if (value == null) {
lcv.noNulls = false;
lcv.isNull[0] = true;
lcv.isRepeating = true;
} else {
lcv.fill((Byte) value);
lcv.isNull[0] = false;
}
}
break;
case SHORT:
{
LongColumnVector lcv = (LongColumnVector) batch.cols[colIndex];
if (value == null) {
lcv.noNulls = false;
lcv.isNull[0] = true;
lcv.isRepeating = true;
} else {
lcv.fill((Short) value);
lcv.isNull[0] = false;
}
}
break;
case INT:
{
LongColumnVector lcv = (LongColumnVector) batch.cols[colIndex];
if (value == null) {
lcv.noNulls = false;
lcv.isNull[0] = true;
lcv.isRepeating = true;
} else {
lcv.fill((Integer) value);
lcv.isNull[0] = false;
}
}
break;
case LONG:
{
LongColumnVector lcv = (LongColumnVector) batch.cols[colIndex];
if (value == null) {
lcv.noNulls = false;
lcv.isNull[0] = true;
lcv.isRepeating = true;
} else {
lcv.fill((Long) value);
lcv.isNull[0] = false;
}
}
break;
case DATE:
{
LongColumnVector lcv = (LongColumnVector) batch.cols[colIndex];
if (value == null) {
lcv.noNulls = false;
lcv.isNull[0] = true;
lcv.isRepeating = true;
} else {
lcv.fill(DateWritable.dateToDays((Date) value));
lcv.isNull[0] = false;
}
}
break;
case TIMESTAMP:
{
TimestampColumnVector lcv = (TimestampColumnVector) batch.cols[colIndex];
if (value == null) {
lcv.noNulls = false;
lcv.isNull[0] = true;
lcv.isRepeating = true;
} else {
lcv.fill((Timestamp) value);
lcv.isNull[0] = false;
}
}
break;
case INTERVAL_YEAR_MONTH:
{
LongColumnVector lcv = (LongColumnVector) batch.cols[colIndex];
if (value == null) {
lcv.noNulls = false;
lcv.isNull[0] = true;
lcv.isRepeating = true;
} else {
lcv.fill(((HiveIntervalYearMonth) value).getTotalMonths());
lcv.isNull[0] = false;
}
}
break;
case INTERVAL_DAY_TIME:
{
IntervalDayTimeColumnVector icv = (IntervalDayTimeColumnVector) batch.cols[colIndex];
if (value == null) {
icv.noNulls = false;
icv.isNull[0] = true;
icv.isRepeating = true;
} else {
icv.fill(((HiveIntervalDayTime) value));
icv.isNull[0] = false;
}
}
break;
case FLOAT:
{
DoubleColumnVector dcv = (DoubleColumnVector) batch.cols[colIndex];
if (value == null) {
dcv.noNulls = false;
dcv.isNull[0] = true;
dcv.isRepeating = true;
} else {
dcv.fill((Float) value);
dcv.isNull[0] = false;
}
}
break;
case DOUBLE:
{
DoubleColumnVector dcv = (DoubleColumnVector) batch.cols[colIndex];
if (value == null) {
dcv.noNulls = false;
dcv.isNull[0] = true;
dcv.isRepeating = true;
} else {
dcv.fill((Double) value);
dcv.isNull[0] = false;
}
}
break;
case DECIMAL:
{
DecimalColumnVector dv = (DecimalColumnVector) batch.cols[colIndex];
if (value == null) {
dv.noNulls = false;
dv.isNull[0] = true;
dv.isRepeating = true;
} else {
HiveDecimal hd = (HiveDecimal) value;
dv.set(0, hd);
dv.isRepeating = true;
dv.isNull[0] = false;
}
}
break;
case BINARY:
{
BytesColumnVector bcv = (BytesColumnVector) batch.cols[colIndex];
byte[] bytes = (byte[]) value;
if (bytes == null) {
bcv.noNulls = false;
bcv.isNull[0] = true;
bcv.isRepeating = true;
} else {
bcv.fill(bytes);
bcv.isNull[0] = false;
}
}
break;
case STRING:
case CHAR:
case VARCHAR:
{
BytesColumnVector bcv = (BytesColumnVector) batch.cols[colIndex];
String sVal = (value == null ? null : value.toString());
if (sVal == null) {
bcv.noNulls = false;
bcv.isNull[0] = true;
bcv.isRepeating = true;
} else {
bcv.setVal(0, sVal.getBytes());
bcv.isRepeating = true;
bcv.isNull[0] = false;
}
}
break;
default:
throw new RuntimeException("Unable to recognize the partition type " + primitiveTypeInfo.getPrimitiveCategory() + " for column " + partitionColumnName);
}
}
}
}
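To show the pattern addPartitionColsToBatch applies per PrimitiveCategory, here is a minimal sketch of the INT branch in isolation: a null partition value marks the whole column null, a non-null value is filled as a repeating long. The class name and batch size are invented; it assumes the LongColumnVector from Hive's storage-api, as used above.

import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class PartitionColumnFillSketch {

  // INT branch of addPartitionColsToBatch: fill the vector with one repeating
  // value, or mark the column as all-null when the partition value is null.
  static void fillIntPartitionColumn(LongColumnVector lcv, Object value) {
    if (value == null) {
      lcv.noNulls = false;
      lcv.isNull[0] = true;
      lcv.isRepeating = true;
    } else {
      lcv.fill((Integer) value);
      lcv.isNull[0] = false;
    }
  }

  public static void main(String[] args) {
    PrimitiveTypeInfo intTypeInfo = TypeInfoFactory.intTypeInfo;
    System.out.println("Partition column category: " + intTypeInfo.getPrimitiveCategory());

    LongColumnVector lcv = new LongColumnVector(1024);
    fillIntPartitionColumn(lcv, 42);
    System.out.println(lcv.isRepeating + " " + lcv.vector[0]); // true 42
  }
}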