use of org.apache.hadoop.hive.serde2.io.TimestampWritable in project hive by apache.
the class VectorExtractRow method extractRowColumn.
/**
 * Extract a row's column object from the ColumnVector at batchIndex in the VectorizedRowBatch.
 *
 * @param batch              the VectorizedRowBatch to read from
 * @param batchIndex         the index of the row within the batch
 * @param logicalColumnIndex the logical (projected) column index
 * @return the column value as a reused Writable object, or null when the value is null
 */
public Object extractRowColumn(VectorizedRowBatch batch, int batchIndex, int logicalColumnIndex) {
  final int projectionColumnNum = projectionColumnNums[logicalColumnIndex];
  ColumnVector colVector = batch.cols[projectionColumnNum];
  if (colVector == null) {
    // The planner will not include unneeded columns for reading, but other parts of execution
    // may ask for them.
    return null;
  }
  int adjustedIndex = (colVector.isRepeating ? 0 : batchIndex);
  if (!colVector.noNulls && colVector.isNull[adjustedIndex]) {
    return null;
  }
  Category category = categories[logicalColumnIndex];
  switch(category) {
  case PRIMITIVE:
    {
      Writable primitiveWritable = primitiveWritables[logicalColumnIndex];
      PrimitiveCategory primitiveCategory = primitiveCategories[logicalColumnIndex];
      switch(primitiveCategory) {
      case VOID:
        return null;
      case BOOLEAN:
        ((BooleanWritable) primitiveWritable).set(((LongColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex] == 0 ? false : true);
        return primitiveWritable;
      case BYTE:
        ((ByteWritable) primitiveWritable).set((byte) ((LongColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
        return primitiveWritable;
      case SHORT:
        ((ShortWritable) primitiveWritable).set((short) ((LongColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
        return primitiveWritable;
      case INT:
        ((IntWritable) primitiveWritable).set((int) ((LongColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
        return primitiveWritable;
      case LONG:
        ((LongWritable) primitiveWritable).set(((LongColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
        return primitiveWritable;
      case TIMESTAMP:
        ((TimestampWritable) primitiveWritable).set(((TimestampColumnVector) batch.cols[projectionColumnNum]).asScratchTimestamp(adjustedIndex));
        return primitiveWritable;
      case DATE:
        ((DateWritable) primitiveWritable).set((int) ((LongColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
        return primitiveWritable;
      case FLOAT:
        ((FloatWritable) primitiveWritable).set((float) ((DoubleColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
        return primitiveWritable;
      case DOUBLE:
        ((DoubleWritable) primitiveWritable).set(((DoubleColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
        return primitiveWritable;
      case BINARY:
        {
          BytesColumnVector bytesColVector = ((BytesColumnVector) batch.cols[projectionColumnNum]);
          byte[] bytes = bytesColVector.vector[adjustedIndex];
          int start = bytesColVector.start[adjustedIndex];
          int length = bytesColVector.length[adjustedIndex];
          if (bytes == null) {
            LOG.info("null binary entry: batchIndex " + batchIndex + " projection column num " + projectionColumnNum);
          }
          BytesWritable bytesWritable = (BytesWritable) primitiveWritable;
          bytesWritable.set(bytes, start, length);
          return primitiveWritable;
        }
      case STRING:
        {
          BytesColumnVector bytesColVector = ((BytesColumnVector) batch.cols[projectionColumnNum]);
          byte[] bytes = bytesColVector.vector[adjustedIndex];
          int start = bytesColVector.start[adjustedIndex];
          int length = bytesColVector.length[adjustedIndex];
          if (bytes == null) {
            nullBytesReadError(primitiveCategory, batchIndex, projectionColumnNum);
          }
          // Use org.apache.hadoop.io.Text as our helper to go from byte[] to String.
          ((Text) primitiveWritable).set(bytes, start, length);
          return primitiveWritable;
        }
      case VARCHAR:
        {
          BytesColumnVector bytesColVector = ((BytesColumnVector) batch.cols[projectionColumnNum]);
          byte[] bytes = bytesColVector.vector[adjustedIndex];
          int start = bytesColVector.start[adjustedIndex];
          int length = bytesColVector.length[adjustedIndex];
          if (bytes == null) {
            nullBytesReadError(primitiveCategory, batchIndex, projectionColumnNum);
          }
          int adjustedLength = StringExpr.truncate(bytes, start, length, maxLengths[logicalColumnIndex]);
          HiveVarcharWritable hiveVarcharWritable = (HiveVarcharWritable) primitiveWritable;
          hiveVarcharWritable.set(new String(bytes, start, adjustedLength, Charsets.UTF_8), -1);
          return primitiveWritable;
        }
      case CHAR:
        {
          BytesColumnVector bytesColVector = ((BytesColumnVector) batch.cols[projectionColumnNum]);
          byte[] bytes = bytesColVector.vector[adjustedIndex];
          int start = bytesColVector.start[adjustedIndex];
          int length = bytesColVector.length[adjustedIndex];
          if (bytes == null) {
            nullBytesReadError(primitiveCategory, batchIndex, projectionColumnNum);
          }
          int adjustedLength = StringExpr.rightTrimAndTruncate(bytes, start, length, maxLengths[logicalColumnIndex]);
          HiveCharWritable hiveCharWritable = (HiveCharWritable) primitiveWritable;
          hiveCharWritable.set(new String(bytes, start, adjustedLength, Charsets.UTF_8), maxLengths[logicalColumnIndex]);
          return primitiveWritable;
        }
      case DECIMAL:
        // The HiveDecimalWritable set method will quickly copy the deserialized decimal writable fields.
        ((HiveDecimalWritable) primitiveWritable).set(((DecimalColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
        return primitiveWritable;
      case INTERVAL_YEAR_MONTH:
        ((HiveIntervalYearMonthWritable) primitiveWritable).set((int) ((LongColumnVector) batch.cols[projectionColumnNum]).vector[adjustedIndex]);
        return primitiveWritable;
      case INTERVAL_DAY_TIME:
        ((HiveIntervalDayTimeWritable) primitiveWritable).set(((IntervalDayTimeColumnVector) batch.cols[projectionColumnNum]).asScratchIntervalDayTime(adjustedIndex));
        return primitiveWritable;
      default:
        throw new RuntimeException("Primitive category " + primitiveCategory.name() + " not supported");
      }
    }
  default:
    throw new RuntimeException("Category " + category.name() + " not supported");
  }
}
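For context, a minimal sketch of how a caller might drive extractRowColumn across a whole batch. The dumpBatch helper and the projectionCount parameter are illustrative names (not Hive API), and the VectorExtractRow instance is assumed to be already initialized for the batch's schema.
// Illustrative sketch (assumed caller, not part of VectorExtractRow): read every projected
// column of every row as a Writable. extractRowColumn reuses the per-column Writable
// objects, so each value must be consumed (or copied) before advancing to the next row.
static void dumpBatch(VectorExtractRow vectorExtractRow, VectorizedRowBatch batch, int projectionCount) {
  for (int batchIndex = 0; batchIndex < batch.size; batchIndex++) {
    StringBuilder row = new StringBuilder();
    for (int columnIndex = 0; columnIndex < projectionCount; columnIndex++) {
      Object value = vectorExtractRow.extractRowColumn(batch, batchIndex, columnIndex);
      row.append(columnIndex == 0 ? "" : "\t").append(value);
    }
    System.out.println(row);
  }
}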
use of org.apache.hadoop.hive.serde2.io.TimestampWritable in project hive by apache.
the class TestGenericUDFOPMinus method testDateMinusIntervalDayTime.
@Test
public void testDateMinusIntervalDayTime() throws Exception {
  GenericUDFOPMinus udf = new GenericUDFOPMinus();
  DateWritable left = new DateWritable(Date.valueOf("2001-01-01"));
  HiveIntervalDayTimeWritable right = new HiveIntervalDayTimeWritable(HiveIntervalDayTime.valueOf("1 0:0:0.555"));
  ObjectInspector[] inputOIs = { PrimitiveObjectInspectorFactory.writableDateObjectInspector, PrimitiveObjectInspectorFactory.writableHiveIntervalDayTimeObjectInspector };
  DeferredObject[] args = { new DeferredJavaObject(left), new DeferredJavaObject(right) };
  PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs);
  Assert.assertEquals(TypeInfoFactory.timestampTypeInfo, oi.getTypeInfo());
  TimestampWritable res = (TimestampWritable) udf.evaluate(args);
  // 2001-01-01 00:00:00 minus (1 day 0:0:0.555) = 2000-12-30 23:59:59.445.
  Assert.assertEquals(Timestamp.valueOf("2000-12-30 23:59:59.445"), res.getTimestamp());
}
use of org.apache.hadoop.hive.serde2.io.TimestampWritable in project hive by apache.
the class TestGenericUDFOPMinus method testTimestampMinusIntervalYearMonth.
@Test
public void testTimestampMinusIntervalYearMonth() throws Exception {
  GenericUDFOPMinus udf = new GenericUDFOPMinus();
  TimestampWritable left = new TimestampWritable(Timestamp.valueOf("2004-01-15 01:02:03.123456789"));
  HiveIntervalYearMonthWritable right = new HiveIntervalYearMonthWritable(HiveIntervalYearMonth.valueOf("2-2"));
  ObjectInspector[] inputOIs = { PrimitiveObjectInspectorFactory.writableTimestampObjectInspector, PrimitiveObjectInspectorFactory.writableHiveIntervalYearMonthObjectInspector };
  DeferredObject[] args = { new DeferredJavaObject(left), new DeferredJavaObject(right) };
  PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs);
  Assert.assertEquals(TypeInfoFactory.timestampTypeInfo, oi.getTypeInfo());
  TimestampWritable res = (TimestampWritable) udf.evaluate(args);
  Assert.assertEquals(Timestamp.valueOf("2001-11-15 01:02:03.123456789"), res.getTimestamp());
}
use of org.apache.hadoop.hive.serde2.io.TimestampWritable in project hive by apache.
the class TestGenericUDFOPPlus method testIntervalYearMonthPlusTimestamp.
@Test
public void testIntervalYearMonthPlusTimestamp() throws Exception {
  GenericUDFOPPlus udf = new GenericUDFOPPlus();
  HiveIntervalYearMonthWritable left = new HiveIntervalYearMonthWritable(HiveIntervalYearMonth.valueOf("2-2"));
  TimestampWritable right = new TimestampWritable(Timestamp.valueOf("2001-11-15 01:02:03.123456789"));
  ObjectInspector[] inputOIs = { PrimitiveObjectInspectorFactory.writableHiveIntervalYearMonthObjectInspector, PrimitiveObjectInspectorFactory.writableTimestampObjectInspector };
  DeferredObject[] args = { new DeferredJavaObject(left), new DeferredJavaObject(right) };
  PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs);
  Assert.assertEquals(TypeInfoFactory.timestampTypeInfo, oi.getTypeInfo());
  TimestampWritable res = (TimestampWritable) udf.evaluate(args);
  Assert.assertEquals(Timestamp.valueOf("2004-01-15 01:02:03.123456789"), res.getTimestamp());
}
use of org.apache.hadoop.hive.serde2.io.TimestampWritable in project hive by apache.
the class TestGenericUDFLastDay method runAndVerifyTs.
private void runAndVerifyTs(String str, String expResult, GenericUDF udf) throws HiveException {
  DeferredObject valueObj0 = new DeferredJavaObject(str != null ? new TimestampWritable(Timestamp.valueOf(str)) : null);
  DeferredObject[] args = { valueObj0 };
  Text output = (Text) udf.evaluate(args);
  assertEquals("last_day() test ", expResult, output != null ? output.toString() : null);
}
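For illustration, a hypothetical test that exercises this helper. The method name testLastDayTs and the argument values are chosen here for the example, assuming last_day() returns the last calendar day of the input timestamp's month as a yyyy-MM-dd Text value.
// Hypothetical usage sketch of the helper above; values are illustrative, not from the original test.
@Test
public void testLastDayTs() throws HiveException {
  GenericUDFLastDay udf = new GenericUDFLastDay();
  udf.initialize(new ObjectInspector[] { PrimitiveObjectInspectorFactory.writableTimestampObjectInspector });
  // last_day("2015-02-05 10:30:50") -> "2015-02-28" (February 2015 has 28 days).
  runAndVerifyTs("2015-02-05 10:30:50", "2015-02-28", udf);
  // A null input should produce a null result.
  runAndVerifyTs(null, null, udf);
}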