Use of org.apache.hadoop.hive.serde2.io.DateWritable in project hive by apache.
The class TestGenericUDFDateAdd, method testTimestampToDate.
public void testTimestampToDate() throws HiveException {
GenericUDFDateAdd udf = new GenericUDFDateAdd();
ObjectInspector valueOI1 = PrimitiveObjectInspectorFactory.writableTimestampObjectInspector;
ObjectInspector valueOI2 = PrimitiveObjectInspectorFactory.javaIntObjectInspector;
ObjectInspector[] arguments = { valueOI1, valueOI2 };
udf.initialize(arguments);
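// Note: the deprecated java.sql.Timestamp constructor below takes year-1900 and a
// 0-based month, so (109, 06, 20, 4, 17, 52, 0) is 2009-07-20 04:17:52.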
DeferredObject valueObj1 = new DeferredJavaObject(new TimestampWritable(new Timestamp(109, 06, 20, 4, 17, 52, 0)));
DeferredObject valueObj2 = new DeferredJavaObject(new Integer("3"));
DeferredObject[] args = { valueObj1, valueObj2 };
DateWritable output = (DateWritable) udf.evaluate(args);
assertEquals("date_add() test for TIMESTAMP failed ", "2009-07-23", output.toString());
// Test with null args
args = new DeferredObject[] { new DeferredJavaObject(null), valueObj2 };
assertNull("date_add() 1st arg null", udf.evaluate(args));
args = new DeferredObject[] { valueObj1, new DeferredJavaObject(null) };
assertNull("date_add() 2nd arg null", udf.evaluate(args));
args = new DeferredObject[] { new DeferredJavaObject(null), new DeferredJavaObject(null) };
assertNull("date_add() both args null", udf.evaluate(args));
}
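The assertion above holds because DateWritable stores a date as the number of days since the Unix epoch, so date_add reduces to integer arithmetic on that day count. A minimal standalone sketch of that idea (not part of the Hive test suite; the class name is illustrative):
import java.sql.Date;
import org.apache.hadoop.hive.serde2.io.DateWritable;
public class DateAddSketch {
public static void main(String[] args) {
DateWritable start = new DateWritable(Date.valueOf("2009-07-20"));
// date_add(date, 3): add three days to the epoch-day count.
DateWritable result = new DateWritable(start.getDays() + 3);
System.out.println(result); // prints 2009-07-23, matching the assertion above
}
}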
Use of org.apache.hadoop.hive.serde2.io.DateWritable in project hive by apache.
The class VectorAssignRow, method assignConvertRowColumn.
/**
* Convert a row's column object and then assign it to the ColumnVector at batchIndex
* in the VectorizedRowBatch.
*
* Public so VectorDeserializeRow can use this method to convert a row's column object.
*
* @param batch
* @param batchIndex
* @param logicalColumnIndex
* @param object The row column object whose type is the VectorAssignRow.initConversion
* source data type.
*
*/
public void assignConvertRowColumn(VectorizedRowBatch batch, int batchIndex, int logicalColumnIndex, Object object) {
Preconditions.checkState(isConvert[logicalColumnIndex]);
Category targetCategory = targetCategories[logicalColumnIndex];
if (targetCategory == null) {
/*
* This is a column that we don't want (i.e. not included) -- we are done.
*/
return;
}
final int projectionColumnNum = projectionColumnNums[logicalColumnIndex];
if (object == null) {
VectorizedBatchUtil.setNullColIsNullValue(batch.cols[projectionColumnNum], batchIndex);
return;
}
try {
switch(targetCategory) {
case PRIMITIVE:
PrimitiveCategory targetPrimitiveCategory = targetPrimitiveCategories[logicalColumnIndex];
switch(targetPrimitiveCategory) {
case VOID:
VectorizedBatchUtil.setNullColIsNullValue(batch.cols[projectionColumnNum], batchIndex);
return;
case BOOLEAN:
((LongColumnVector) batch.cols[projectionColumnNum]).vector[batchIndex] = (PrimitiveObjectInspectorUtils.getBoolean(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]) ? 1 : 0);
break;
case BYTE:
((LongColumnVector) batch.cols[projectionColumnNum]).vector[batchIndex] = PrimitiveObjectInspectorUtils.getByte(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
break;
case SHORT:
((LongColumnVector) batch.cols[projectionColumnNum]).vector[batchIndex] = PrimitiveObjectInspectorUtils.getShort(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
break;
case INT:
((LongColumnVector) batch.cols[projectionColumnNum]).vector[batchIndex] = PrimitiveObjectInspectorUtils.getInt(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
break;
case LONG:
((LongColumnVector) batch.cols[projectionColumnNum]).vector[batchIndex] = PrimitiveObjectInspectorUtils.getLong(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
break;
case TIMESTAMP:
{
Timestamp timestamp = PrimitiveObjectInspectorUtils.getTimestamp(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
if (timestamp == null) {
VectorizedBatchUtil.setNullColIsNullValue(batch.cols[projectionColumnNum], batchIndex);
return;
}
((TimestampColumnVector) batch.cols[projectionColumnNum]).set(batchIndex, timestamp);
}
break;
case DATE:
{
Date date = PrimitiveObjectInspectorUtils.getDate(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
if (date == null) {
VectorizedBatchUtil.setNullColIsNullValue(batch.cols[projectionColumnNum], batchIndex);
return;
}
DateWritable dateWritable = (DateWritable) convertTargetWritables[logicalColumnIndex];
dateWritable.set(date);
((LongColumnVector) batch.cols[projectionColumnNum]).vector[batchIndex] = dateWritable.getDays();
}
break;
case FLOAT:
((DoubleColumnVector) batch.cols[projectionColumnNum]).vector[batchIndex] = PrimitiveObjectInspectorUtils.getFloat(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
break;
case DOUBLE:
((DoubleColumnVector) batch.cols[projectionColumnNum]).vector[batchIndex] = PrimitiveObjectInspectorUtils.getDouble(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
break;
case BINARY:
{
BytesWritable bytesWritable = PrimitiveObjectInspectorUtils.getBinary(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
if (bytesWritable == null) {
VectorizedBatchUtil.setNullColIsNullValue(batch.cols[projectionColumnNum], batchIndex);
return;
}
((BytesColumnVector) batch.cols[projectionColumnNum]).setVal(batchIndex, bytesWritable.getBytes(), 0, bytesWritable.getLength());
}
break;
case STRING:
{
String string = PrimitiveObjectInspectorUtils.getString(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
if (string == null) {
VectorizedBatchUtil.setNullColIsNullValue(batch.cols[projectionColumnNum], batchIndex);
return;
}
Text text = (Text) convertTargetWritables[logicalColumnIndex];
text.set(string);
((BytesColumnVector) batch.cols[projectionColumnNum]).setVal(batchIndex, text.getBytes(), 0, text.getLength());
}
break;
case VARCHAR:
{
// UNDONE: Performance problem with conversion to String, then bytes...
HiveVarchar hiveVarchar = PrimitiveObjectInspectorUtils.getHiveVarchar(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
if (hiveVarchar == null) {
VectorizedBatchUtil.setNullColIsNullValue(batch.cols[projectionColumnNum], batchIndex);
return;
}
// TODO: Do we need maxLength checking?
byte[] bytes = hiveVarchar.getValue().getBytes();
((BytesColumnVector) batch.cols[projectionColumnNum]).setVal(batchIndex, bytes, 0, bytes.length);
}
break;
case CHAR:
{
// UNDONE: Performance problem with conversion to String, then bytes...
HiveChar hiveChar = PrimitiveObjectInspectorUtils.getHiveChar(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
if (hiveChar == null) {
VectorizedBatchUtil.setNullColIsNullValue(batch.cols[projectionColumnNum], batchIndex);
return;
}
// We store CHAR in vector row batch with padding stripped.
// TODO: Do we need maxLength checking?
byte[] bytes = hiveChar.getStrippedValue().getBytes();
((BytesColumnVector) batch.cols[projectionColumnNum]).setVal(batchIndex, bytes, 0, bytes.length);
}
break;
case DECIMAL:
{
HiveDecimal hiveDecimal = PrimitiveObjectInspectorUtils.getHiveDecimal(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
if (hiveDecimal == null) {
VectorizedBatchUtil.setNullColIsNullValue(batch.cols[projectionColumnNum], batchIndex);
return;
}
((DecimalColumnVector) batch.cols[projectionColumnNum]).set(batchIndex, hiveDecimal);
}
break;
case INTERVAL_YEAR_MONTH:
{
HiveIntervalYearMonth intervalYearMonth = PrimitiveObjectInspectorUtils.getHiveIntervalYearMonth(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
if (intervalYearMonth == null) {
VectorizedBatchUtil.setNullColIsNullValue(batch.cols[projectionColumnNum], batchIndex);
return;
}
((LongColumnVector) batch.cols[projectionColumnNum]).vector[batchIndex] = intervalYearMonth.getTotalMonths();
}
break;
case INTERVAL_DAY_TIME:
{
HiveIntervalDayTime intervalDayTime = PrimitiveObjectInspectorUtils.getHiveIntervalDayTime(object, convertSourcePrimitiveObjectInspectors[logicalColumnIndex]);
if (intervalDayTime == null) {
VectorizedBatchUtil.setNullColIsNullValue(batch.cols[projectionColumnNum], batchIndex);
return;
}
((IntervalDayTimeColumnVector) batch.cols[projectionColumnNum]).set(batchIndex, intervalDayTime);
}
break;
default:
throw new RuntimeException("Primitive category " + targetPrimitiveCategory.name() + " not supported");
}
break;
default:
throw new RuntimeException("Category " + targetCategory.name() + " not supported");
}
} catch (NumberFormatException e) {
// Some of the conversion methods throw this exception on numeric parsing errors.
VectorizedBatchUtil.setNullColIsNullValue(batch.cols[projectionColumnNum], batchIndex);
return;
}
// We always set the null flag to false when there is a value.
batch.cols[projectionColumnNum].isNull[batchIndex] = false;
}
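The DATE branch above is worth isolating: every source representation is funneled through a reused scratch DateWritable, and only the epoch-day count is stored in a long-backed vector. A hedged sketch of that pattern (the class and field names here are illustrative, not Hive's):
import java.sql.Date;
import org.apache.hadoop.hive.serde2.io.DateWritable;
public class DateAssignSketch {
// Reused per column, as VectorAssignRow reuses its convertTargetWritables entries.
private final DateWritable scratch = new DateWritable();
// Stands in for LongColumnVector.vector.
private final long[] vector = new long[1024];
void assignDate(int batchIndex, Date date) {
scratch.set(date); // normalize to days since 1970-01-01
vector[batchIndex] = scratch.getDays(); // DATE is vectorized as a long of epoch days
}
}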
Use of org.apache.hadoop.hive.serde2.io.DateWritable in project hive by apache.
The class VectorColumnAssignFactory, method buildObjectAssign.
public static VectorColumnAssign buildObjectAssign(VectorizedRowBatch outputBatch, int outColIndex, PrimitiveCategory category) throws HiveException {
VectorColumnAssign outVCA = null;
ColumnVector destCol = outputBatch.cols[outColIndex];
if (destCol == null) {
switch(category) {
case VOID:
outVCA = new VectorLongColumnAssign() {
// This is a dummy assigner
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
// This is a no-op; there is no column to assign to and val is expected to be null.
assert (val == null);
}
};
break;
default:
throw new HiveException("Incompatible (null) vector column and primitive category " + category);
}
} else if (destCol instanceof LongColumnVector) {
switch(category) {
case BOOLEAN:
outVCA = new VectorLongColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
BooleanWritable bw = (BooleanWritable) val;
assignLong(bw.get() ? 1 : 0, destIndex);
}
}
}.init(outputBatch, (LongColumnVector) destCol);
break;
case BYTE:
outVCA = new VectorLongColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
ByteWritable bw = (ByteWritable) val;
assignLong(bw.get(), destIndex);
}
}
}.init(outputBatch, (LongColumnVector) destCol);
break;
case SHORT:
outVCA = new VectorLongColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
ShortWritable bw = (ShortWritable) val;
assignLong(bw.get(), destIndex);
}
}
}.init(outputBatch, (LongColumnVector) destCol);
break;
case INT:
outVCA = new VectorLongColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
IntWritable bw = (IntWritable) val;
assignLong(bw.get(), destIndex);
}
}
}.init(outputBatch, (LongColumnVector) destCol);
break;
case LONG:
outVCA = new VectorLongColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
LongWritable bw = (LongWritable) val;
assignLong(bw.get(), destIndex);
}
}
}.init(outputBatch, (LongColumnVector) destCol);
break;
case TIMESTAMP:
outVCA = new VectorTimestampColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
assignTimestamp((TimestampWritable) val, destIndex);
}
}
}.init(outputBatch, (TimestampColumnVector) destCol);
break;
case DATE:
outVCA = new VectorLongColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
DateWritable bw = (DateWritable) val;
assignLong(bw.getDays(), destIndex);
}
}
}.init(outputBatch, (LongColumnVector) destCol);
break;
case INTERVAL_YEAR_MONTH:
outVCA = new VectorLongColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
HiveIntervalYearMonthWritable bw = (HiveIntervalYearMonthWritable) val;
assignLong(bw.getHiveIntervalYearMonth().getTotalMonths(), destIndex);
}
}
}.init(outputBatch, (LongColumnVector) destCol);
break;
case INTERVAL_DAY_TIME:
outVCA = new VectorIntervalDayTimeColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
HiveIntervalDayTimeWritable bw = (HiveIntervalDayTimeWritable) val;
assignIntervalDayTime(bw.getHiveIntervalDayTime(), destIndex);
}
}
}.init(outputBatch, (IntervalDayTimeColumnVector) destCol);
break;
default:
throw new HiveException("Incompatible Long vector column and primitive category " + category);
}
} else if (destCol instanceof DoubleColumnVector) {
switch(category) {
case DOUBLE:
outVCA = new VectorDoubleColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
DoubleWritable bw = (DoubleWritable) val;
assignDouble(bw.get(), destIndex);
}
}
}.init(outputBatch, (DoubleColumnVector) destCol);
break;
case FLOAT:
outVCA = new VectorDoubleColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
FloatWritable bw = (FloatWritable) val;
assignDouble(bw.get(), destIndex);
}
}
}.init(outputBatch, (DoubleColumnVector) destCol);
break;
default:
throw new HiveException("Incompatible Double vector column and primitive category " + category);
}
} else if (destCol instanceof BytesColumnVector) {
switch(category) {
case BINARY:
outVCA = new VectorBytesColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
BytesWritable bw = (BytesWritable) val;
byte[] bytes = bw.getBytes();
assignBytes(bytes, 0, bw.getLength(), destIndex);
}
}
}.init(outputBatch, (BytesColumnVector) destCol);
break;
case STRING:
outVCA = new VectorBytesColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
Text bw = (Text) val;
byte[] bytes = bw.getBytes();
assignBytes(bytes, 0, bw.getLength(), destIndex);
}
}
}.init(outputBatch, (BytesColumnVector) destCol);
break;
case VARCHAR:
outVCA = new VectorBytesColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
// We store VARCHAR type stripped of pads.
HiveVarchar hiveVarchar;
if (val instanceof HiveVarchar) {
hiveVarchar = (HiveVarchar) val;
} else {
hiveVarchar = ((HiveVarcharWritable) val).getHiveVarchar();
}
byte[] bytes = hiveVarchar.getValue().getBytes();
assignBytes(bytes, 0, bytes.length, destIndex);
}
}
}.init(outputBatch, (BytesColumnVector) destCol);
break;
case CHAR:
outVCA = new VectorBytesColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
// We store CHAR type stripped of pads.
HiveChar hiveChar;
if (val instanceof HiveChar) {
hiveChar = (HiveChar) val;
} else {
hiveChar = ((HiveCharWritable) val).getHiveChar();
}
byte[] bytes = hiveChar.getStrippedValue().getBytes();
assignBytes(bytes, 0, bytes.length, destIndex);
}
}
}.init(outputBatch, (BytesColumnVector) destCol);
break;
default:
throw new HiveException("Incompatible Bytes vector column and primitive category " + category);
}
} else if (destCol instanceof DecimalColumnVector) {
switch(category) {
case DECIMAL:
outVCA = new VectorDecimalColumnAssign() {
@Override
public void assignObjectValue(Object val, int destIndex) throws HiveException {
if (val == null) {
assignNull(destIndex);
} else {
if (val instanceof HiveDecimal) {
assignDecimal((HiveDecimal) val, destIndex);
} else {
assignDecimal((HiveDecimalWritable) val, destIndex);
}
}
}
}.init(outputBatch, (DecimalColumnVector) destCol);
break;
default:
throw new HiveException("Incompatible Decimal vector column and primitive category " + category);
}
} else {
throw new HiveException("Unknown vector column type " + destCol.getClass().getName());
}
return outVCA;
}
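A hedged usage sketch of the factory: build a DATE assigner for column 0 of a batch, then push one value and one null through assignObjectValue. The package paths and setup are assumptions based on Hive's vectorization code, not taken from this page:
import java.sql.Date;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorColumnAssign;
import org.apache.hadoop.hive.ql.exec.vector.VectorColumnAssignFactory;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
public class BuildObjectAssignSketch {
public static void main(String[] args) throws HiveException {
VectorizedRowBatch batch = new VectorizedRowBatch(1);
batch.cols[0] = new LongColumnVector();
VectorColumnAssign vca = VectorColumnAssignFactory.buildObjectAssign(batch, 0, PrimitiveCategory.DATE);
vca.assignObjectValue(new DateWritable(Date.valueOf("2009-07-20")), 0); // stored as epoch days
vca.assignObjectValue(null, 1); // null path: assignNull marks isNull[1]
System.out.println(((LongColumnVector) batch.cols[0]).vector[0]); // 14445 (days since epoch)
}
}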
Use of org.apache.hadoop.hive.serde2.io.DateWritable in project hive by apache.
The class VectorDeserializeRow, method convertRowColumn.
/**
* Convert one row column value that is the current value in deserializeRead.
*
* We deserialize into a writable and then pass that writable to an instance of VectorAssignRow
* to convert the writable to the target data type and assign it into the VectorizedRowBatch.
*
* @param batch
* @param batchIndex
* @param logicalColumnIndex
* @throws IOException
*/
private void convertRowColumn(VectorizedRowBatch batch, int batchIndex, int logicalColumnIndex) throws IOException {
final int projectionColumnNum = projectionColumnNums[logicalColumnIndex];
Writable convertSourceWritable = convertSourceWritables[logicalColumnIndex];
switch(sourceCategories[logicalColumnIndex]) {
case PRIMITIVE:
{
switch(sourcePrimitiveCategories[logicalColumnIndex]) {
case VOID:
convertSourceWritable = null;
break;
case BOOLEAN:
((BooleanWritable) convertSourceWritable).set(deserializeRead.currentBoolean);
break;
case BYTE:
((ByteWritable) convertSourceWritable).set(deserializeRead.currentByte);
break;
case SHORT:
((ShortWritable) convertSourceWritable).set(deserializeRead.currentShort);
break;
case INT:
((IntWritable) convertSourceWritable).set(deserializeRead.currentInt);
break;
case LONG:
((LongWritable) convertSourceWritable).set(deserializeRead.currentLong);
break;
case TIMESTAMP:
((TimestampWritable) convertSourceWritable).set(deserializeRead.currentTimestampWritable);
break;
case DATE:
((DateWritable) convertSourceWritable).set(deserializeRead.currentDateWritable);
break;
case FLOAT:
((FloatWritable) convertSourceWritable).set(deserializeRead.currentFloat);
break;
case DOUBLE:
((DoubleWritable) convertSourceWritable).set(deserializeRead.currentDouble);
break;
case BINARY:
if (deserializeRead.currentBytes == null) {
LOG.info("null binary entry: batchIndex " + batchIndex + " projection column num " + projectionColumnNum);
}
((BytesWritable) convertSourceWritable).set(deserializeRead.currentBytes, deserializeRead.currentBytesStart, deserializeRead.currentBytesLength);
break;
case STRING:
if (deserializeRead.currentBytes == null) {
throw new RuntimeException("null string entry: batchIndex " + batchIndex + " projection column num " + projectionColumnNum);
}
// Use org.apache.hadoop.io.Text as our helper to go from byte[] to String.
((Text) convertSourceWritable).set(deserializeRead.currentBytes, deserializeRead.currentBytesStart, deserializeRead.currentBytesLength);
break;
case VARCHAR:
{
// Use the basic STRING bytes read to get access, then use our optimal truncate/trim method
// that does not use Java String objects.
if (deserializeRead.currentBytes == null) {
throw new RuntimeException("null varchar entry: batchIndex " + batchIndex + " projection column num " + projectionColumnNum);
}
int adjustedLength = StringExpr.truncate(deserializeRead.currentBytes, deserializeRead.currentBytesStart, deserializeRead.currentBytesLength, maxLengths[logicalColumnIndex]);
((HiveVarcharWritable) convertSourceWritable).set(new String(deserializeRead.currentBytes, deserializeRead.currentBytesStart, adjustedLength, Charsets.UTF_8), -1);
}
break;
case CHAR:
{
// Use the basic STRING bytes read to get access, then use our optimal truncate/trim method
// that does not use Java String objects.
if (deserializeRead.currentBytes == null) {
throw new RuntimeException("null char entry: batchIndex " + batchIndex + " projection column num " + projectionColumnNum);
}
int adjustedLength = StringExpr.rightTrimAndTruncate(deserializeRead.currentBytes, deserializeRead.currentBytesStart, deserializeRead.currentBytesLength, maxLengths[logicalColumnIndex]);
((HiveCharWritable) convertSourceWritable).set(new String(deserializeRead.currentBytes, deserializeRead.currentBytesStart, adjustedLength, Charsets.UTF_8), -1);
}
break;
case DECIMAL:
((HiveDecimalWritable) convertSourceWritable).set(deserializeRead.currentHiveDecimalWritable);
break;
case INTERVAL_YEAR_MONTH:
((HiveIntervalYearMonthWritable) convertSourceWritable).set(deserializeRead.currentHiveIntervalYearMonthWritable);
break;
case INTERVAL_DAY_TIME:
((HiveIntervalDayTimeWritable) convertSourceWritable).set(deserializeRead.currentHiveIntervalDayTimeWritable);
break;
default:
throw new RuntimeException("Primitive category " + sourcePrimitiveCategories[logicalColumnIndex] + " not supported");
}
}
break;
default:
throw new RuntimeException("Category " + sourceCategories[logicalColumnIndex] + " not supported");
}
/*
* Convert our source object we just read into the target object and store that in the
* VectorizedRowBatch.
*/
convertVectorAssignRow.assignConvertRowColumn(batch, batchIndex, logicalColumnIndex, convertSourceWritable);
}
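The VARCHAR and CHAR cases above enforce the declared maximum length on the raw UTF-8 bytes before any String is built. A small sketch of that step using the same StringExpr.truncate call (the sample values are illustrative):
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hive.ql.exec.vector.expressions.StringExpr;
import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable;
public class VarcharTruncateSketch {
public static void main(String[] args) {
byte[] bytes = "hello world".getBytes(StandardCharsets.UTF_8);
int maxLength = 5; // as if the target column were VARCHAR(5)
// Counts UTF-8 characters directly on the bytes; returns the adjusted byte length.
int adjustedLength = StringExpr.truncate(bytes, 0, bytes.length, maxLength);
HiveVarcharWritable writable = new HiveVarcharWritable();
writable.set(new String(bytes, 0, adjustedLength, StandardCharsets.UTF_8), -1);
System.out.println(writable.getHiveVarchar()); // "hello"
}
}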
Use of org.apache.hadoop.hive.serde2.io.DateWritable in project hive by apache.
The class VectorExtractRow, method extractRowColumn.
/**
* Extract a row's column object from the ColumnVector at batchIndex in the VectorizedRowBatch.
*
* @param batch
* @param batchIndex
* @param logicalColumnIndex
* @return the column value as a Writable object, or null if the value is null
*/
public Object extractRowColumn(VectorizedRowBatch batch, int batchIndex, int logicalColumnIndex) {
final int projectionColumnNum = projectionColumnNums[logicalColumnIndex];
ColumnVector colVector = batch.cols[projectionColumnNum];
if (colVector == null) {
// The planner will not include unneeded columns for reading, but other parts of execution
// may ask for them.
return null;
}
int adjustedIndex = (colVector.isRepeating ? 0 : batchIndex);
if (!colVector.noNulls && colVector.isNull[adjustedIndex]) {
return null;
}
Category category = categories[logicalColumnIndex];
switch(category) {
case PRIMITIVE:
{
Writable primitiveWritable = primitiveWritables[logicalColumnIndex];
PrimitiveCategory primitiveCategory = primitiveCategories[logicalColumnIndex];
switch(primitiveCategory) {
case VOID:
return null;
case BOOLEAN:
((BooleanWritable) primitiveWritable).set(((LongColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex] == 0 ? false : true);
return primitiveWritable;
case BYTE:
((ByteWritable) primitiveWritable).set((byte) ((LongColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
return primitiveWritable;
case SHORT:
((ShortWritable) primitiveWritable).set((short) ((LongColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
return primitiveWritable;
case INT:
((IntWritable) primitiveWritable).set((int) ((LongColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
return primitiveWritable;
case LONG:
((LongWritable) primitiveWritable).set(((LongColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
return primitiveWritable;
case TIMESTAMP:
((TimestampWritable) primitiveWritable).set(((TimestampColumnVector) batch.cols[projectionColumnNum]).asScratchTimestamp(adjustedIndex));
return primitiveWritable;
case DATE:
((DateWritable) primitiveWritable).set((int) ((LongColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
return primitiveWritable;
case FLOAT:
((FloatWritable) primitiveWritable).set((float) ((DoubleColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
return primitiveWritable;
case DOUBLE:
((DoubleWritable) primitiveWritable).set(((DoubleColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
return primitiveWritable;
case BINARY:
{
BytesColumnVector bytesColVector = ((BytesColumnVector) batch.cols[projectionColumnNum]);
byte[] bytes = bytesColVector.vector[adjustedIndex];
int start = bytesColVector.start[adjustedIndex];
int length = bytesColVector.length[adjustedIndex];
if (bytes == null) {
LOG.info("null binary entry: batchIndex " + batchIndex + " projection column num " + projectionColumnNum);
}
BytesWritable bytesWritable = (BytesWritable) primitiveWritable;
bytesWritable.set(bytes, start, length);
return primitiveWritable;
}
case STRING:
{
BytesColumnVector bytesColVector = ((BytesColumnVector) batch.cols[projectionColumnNum]);
byte[] bytes = bytesColVector.vector[adjustedIndex];
int start = bytesColVector.start[adjustedIndex];
int length = bytesColVector.length[adjustedIndex];
if (bytes == null) {
nullBytesReadError(primitiveCategory, batchIndex, projectionColumnNum);
}
// Use org.apache.hadoop.io.Text as our helper to go from byte[] to String.
((Text) primitiveWritable).set(bytes, start, length);
return primitiveWritable;
}
case VARCHAR:
{
BytesColumnVector bytesColVector = ((BytesColumnVector) batch.cols[projectionColumnNum]);
byte[] bytes = bytesColVector.vector[adjustedIndex];
int start = bytesColVector.start[adjustedIndex];
int length = bytesColVector.length[adjustedIndex];
if (bytes == null) {
nullBytesReadError(primitiveCategory, batchIndex, projectionColumnNum);
}
int adjustedLength = StringExpr.truncate(bytes, start, length, maxLengths[logicalColumnIndex]);
HiveVarcharWritable hiveVarcharWritable = (HiveVarcharWritable) primitiveWritable;
hiveVarcharWritable.set(new String(bytes, start, adjustedLength, Charsets.UTF_8), -1);
return primitiveWritable;
}
case CHAR:
{
BytesColumnVector bytesColVector = ((BytesColumnVector) batch.cols[projectionColumnNum]);
byte[] bytes = bytesColVector.vector[adjustedIndex];
int start = bytesColVector.start[adjustedIndex];
int length = bytesColVector.length[adjustedIndex];
if (bytes == null) {
nullBytesReadError(primitiveCategory, batchIndex, projectionColumnNum);
}
int adjustedLength = StringExpr.rightTrimAndTruncate(bytes, start, length, maxLengths[logicalColumnIndex]);
HiveCharWritable hiveCharWritable = (HiveCharWritable) primitiveWritable;
hiveCharWritable.set(new String(bytes, start, adjustedLength, Charsets.UTF_8), maxLengths[logicalColumnIndex]);
return primitiveWritable;
}
case DECIMAL:
// The HiveDecimalWritable set method will quickly copy the deserialized decimal writable fields.
((HiveDecimalWritable) primitiveWritable).set(((DecimalColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
return primitiveWritable;
case INTERVAL_YEAR_MONTH:
((HiveIntervalYearMonthWritable) primitiveWritable).set((int) ((LongColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
return primitiveWritable;
case INTERVAL_DAY_TIME:
((HiveIntervalDayTimeWritable) primitiveWritable).set(((IntervalDayTimeColumnVector) batch.cols[projectionColumnNum]).asScratchIntervalDayTime(adjustedIndex));
return primitiveWritable;
default:
throw new RuntimeException("Primitive category " + primitiveCategory.name() + " not supported");
}
}
default:
throw new RuntimeException("Category " + category.name() + " not supported");
}
}
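Going the other way, the DATE case above rebuilds a DateWritable from the epoch-day long stored in the vector. A standalone sketch (the column sizing and sample value are illustrative):
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.serde2.io.DateWritable;
public class DateExtractSketch {
public static void main(String[] args) {
LongColumnVector col = new LongColumnVector(); // default batch size
col.vector[0] = 14445; // 14445 days since 1970-01-01 == 2009-07-20
DateWritable dateWritable = new DateWritable();
dateWritable.set((int) col.vector[0]); // same call as the DATE branch above
System.out.println(dateWritable); // 2009-07-20
}
}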