Use of org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector in project hive by apache.
The class TestVectorTypeCasts, method testCastTimestampToDouble.
@Test
public void testCastTimestampToDouble() {
  double[] doubleValues = new double[500];
  VectorizedRowBatch b = TestVectorMathFunctions.getVectorizedRowBatchTimestampInDoubleOut(doubleValues);
  TimestampColumnVector inV = (TimestampColumnVector) b.cols[0];
  DoubleColumnVector resultV = (DoubleColumnVector) b.cols[1];
  b.cols[0].noNulls = true;
  VectorExpression expr = new CastTimestampToDouble(0, 1);
  expr.evaluate(b);
  for (int i = 0; i < doubleValues.length; i++) {
    double actual = resultV.vector[i];
    double doubleValue = TimestampUtils.getDouble(inV.asScratchTimestamp(i));
    assertEquals(actual, doubleValue, 0.000000001F);
  }
}
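For context, the cast this test verifies treats a timestamp as seconds since the epoch with a fractional part. The sketch below is not from the Hive test suite; secondsAsDouble is a hypothetical helper, assumed to mirror what TimestampUtils.getDouble computes (floored whole seconds plus nanos as the fraction).

// Minimal sketch: populate a TimestampColumnVector and render each entry
// as a double. secondsAsDouble is a hypothetical stand-in, not Hive's API.
import java.sql.Timestamp;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;

public class TimestampToDoubleSketch {
  // Assumed conversion: floored epoch seconds plus the nanos fraction.
  static double secondsAsDouble(long epochMillis, int nanos) {
    long seconds = Math.floorDiv(epochMillis, 1000L);
    return seconds + nanos / 1_000_000_000.0;
  }

  public static void main(String[] args) {
    TimestampColumnVector tcv = new TimestampColumnVector(2);
    tcv.set(0, new Timestamp(1500L));   // 1.5 s after the epoch
    tcv.set(1, new Timestamp(2250L));   // 2.25 s after the epoch
    for (int i = 0; i < 2; i++) {
      // getTime(i) exposes the stored epoch millis, getNanos(i) the sub-second part
      System.out.println(secondsAsDouble(tcv.getTime(i), tcv.getNanos(i)));  // 1.5, 2.25
    }
  }
}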
Use of org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector in project hive by apache.
The class TestVectorizedORCReader, method checkVectorizedReader.
private void checkVectorizedReader() throws Exception {
  Reader vreader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
  Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf));
  RecordReaderImpl vrr = (RecordReaderImpl) vreader.rows();
  RecordReaderImpl rr = (RecordReaderImpl) reader.rows();
  VectorizedRowBatch batch = reader.getSchema().createRowBatch();
  OrcStruct row = null;
  // Check the vectorized ORC reader against the row-at-a-time ORC reader
  while (vrr.nextBatch(batch)) {
    for (int i = 0; i < batch.size; i++) {
      row = (OrcStruct) rr.next(row);
      for (int j = 0; j < batch.cols.length; j++) {
        Object a = row.getFieldValue(j);
        ColumnVector cv = batch.cols[j];
        // if the value is repeating, use row 0
        int rowId = cv.isRepeating ? 0 : i;
        // make sure the null flags agree
        if (a == null) {
          Assert.assertEquals(true, !cv.noNulls && cv.isNull[rowId]);
        } else if (a instanceof BooleanWritable) {
          // Boolean values are stored as 1's and 0's, so convert and compare
          Long temp = (long) (((BooleanWritable) a).get() ? 1 : 0);
          long b = ((LongColumnVector) cv).vector[rowId];
          Assert.assertEquals(temp.toString(), Long.toString(b));
        } else if (a instanceof TimestampWritable) {
          // Timestamps are stored as millis plus nanos, so compare as Timestamp objects
          TimestampWritable t = (TimestampWritable) a;
          TimestampColumnVector tcv = (TimestampColumnVector) cv;
          Assert.assertEquals(t.getTimestamp(), tcv.asScratchTimestamp(rowId));
        } else if (a instanceof DateWritable) {
          // Dates are stored as days since the epoch, so convert to millis and compare
          DateWritable adt = (DateWritable) a;
          long b = ((LongColumnVector) cv).vector[rowId];
          Assert.assertEquals(adt.get().getTime(), DateWritable.daysToMillis((int) b));
        } else if (a instanceof HiveDecimalWritable) {
          // Decimals are stored as HiveDecimalWritable, so compare directly
          HiveDecimalWritable dec = (HiveDecimalWritable) a;
          HiveDecimalWritable b = ((DecimalColumnVector) cv).vector[rowId];
          Assert.assertEquals(dec, b);
        } else if (a instanceof DoubleWritable) {
          double b = ((DoubleColumnVector) cv).vector[rowId];
          assertEquals(a.toString(), Double.toString(b));
        } else if (a instanceof Text) {
          BytesColumnVector bcv = (BytesColumnVector) cv;
          Text b = new Text();
          b.set(bcv.vector[rowId], bcv.start[rowId], bcv.length[rowId]);
          assertEquals(a, b);
        } else if (a instanceof IntWritable || a instanceof LongWritable
            || a instanceof ByteWritable || a instanceof ShortWritable) {
          assertEquals(a.toString(), Long.toString(((LongColumnVector) cv).vector[rowId]));
        } else {
          // fail on any type this comparison does not handle
          assertEquals("huh", a.getClass().getName());
        }
      }
    }
    // Check repeating
    Assert.assertEquals(false, batch.cols[0].isRepeating);
    Assert.assertEquals(false, batch.cols[1].isRepeating);
    Assert.assertEquals(false, batch.cols[2].isRepeating);
    Assert.assertEquals(true, batch.cols[3].isRepeating);
    Assert.assertEquals(false, batch.cols[4].isRepeating);
    Assert.assertEquals(false, batch.cols[5].isRepeating);
    Assert.assertEquals(false, batch.cols[6].isRepeating);
    Assert.assertEquals(false, batch.cols[7].isRepeating);
    Assert.assertEquals(false, batch.cols[8].isRepeating);
    Assert.assertEquals(false, batch.cols[9].isRepeating);
    // Check noNulls
    Assert.assertEquals(false, batch.cols[0].noNulls);
    Assert.assertEquals(false, batch.cols[1].noNulls);
    Assert.assertEquals(true, batch.cols[2].noNulls);
    Assert.assertEquals(true, batch.cols[3].noNulls);
    Assert.assertEquals(false, batch.cols[4].noNulls);
    Assert.assertEquals(false, batch.cols[5].noNulls);
    Assert.assertEquals(false, batch.cols[6].noNulls);
    Assert.assertEquals(false, batch.cols[7].noNulls);
    Assert.assertEquals(false, batch.cols[8].noNulls);
    Assert.assertEquals(false, batch.cols[9].noNulls);
  }
  Assert.assertEquals(false, rr.nextBatch(batch));
}
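Outside the test harness, the same vectorized read path is available through the standalone ORC reader API. A minimal sketch, assuming a hypothetical file /tmp/ts.orc whose first column is a TIMESTAMP (the test above uses Hive's internal RecordReaderImpl wrapper instead):

// Sketch: iterate batches from an ORC file and pull timestamps out of a
// TimestampColumnVector, honoring the isRepeating and null conventions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.RecordReader;

public class VectorizedOrcScan {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical file; assume its schema has a TIMESTAMP as column 0.
    Reader reader = OrcFile.createReader(new Path("/tmp/ts.orc"),
        OrcFile.readerOptions(conf));
    RecordReader rows = reader.rows();
    VectorizedRowBatch batch = reader.getSchema().createRowBatch();
    while (rows.nextBatch(batch)) {
      TimestampColumnVector tcv = (TimestampColumnVector) batch.cols[0];
      for (int i = 0; i < batch.size; i++) {
        int row = tcv.isRepeating ? 0 : i;   // repeating vectors only populate row 0
        if (tcv.noNulls || !tcv.isNull[row]) {
          System.out.println(tcv.asScratchTimestamp(row));
        }
      }
    }
    rows.close();
  }
}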
Use of org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector in project hive by apache.
The class VectorizedRowGroupGenUtil, method generateTimestampColumnVector.
public static TimestampColumnVector generateTimestampColumnVector(
    boolean nulls, boolean repeating, int size, Random rand, Timestamp[] timestampValues) {
  TimestampColumnVector tcv = new TimestampColumnVector(size);
  tcv.noNulls = !nulls;
  tcv.isRepeating = repeating;
  Timestamp repeatingTimestamp = RandomTypeUtil.getRandTimestamp(rand);
  int nullFrequency = generateNullFrequency(rand);
  for (int i = 0; i < size; i++) {
    if (nulls && (repeating || i % nullFrequency == 0)) {
      tcv.isNull[i] = true;
      tcv.setNullValue(i);
      timestampValues[i] = null;
    } else {
      tcv.isNull[i] = false;
      if (!repeating) {
        Timestamp randomTimestamp = RandomTypeUtil.getRandTimestamp(rand);
        tcv.set(i, randomTimestamp);
        timestampValues[i] = randomTimestamp;
      } else {
        tcv.set(i, repeatingTimestamp);
        timestampValues[i] = repeatingTimestamp;
      }
    }
  }
  return tcv;
}
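A sketch of how a test might call this generator (the size and seed are arbitrary, and the snippet is assumed to live in the same package as VectorizedRowGroupGenUtil): the returned vector and the parallel timestampValues array stay in sync, with null entries mirrored in both.

// Usage sketch for the generator above; run with JVM assertions enabled (-ea).
import java.sql.Timestamp;
import java.util.Random;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;

public class GenerateTimestampsSketch {
  public static void main(String[] args) {
    Random rand = new Random(42);
    Timestamp[] expected = new Timestamp[1024];
    TimestampColumnVector tcv = VectorizedRowGroupGenUtil.generateTimestampColumnVector(
        true /* nulls */, false /* repeating */, 1024, rand, expected);
    for (int i = 0; i < 1024; i++) {
      if (expected[i] == null) {
        assert !tcv.noNulls && tcv.isNull[i];               // null mirrored in the vector
      } else {
        assert expected[i].equals(tcv.asScratchTimestamp(i)); // value mirrored in the vector
      }
    }
  }
}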
Use of org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector in project hive by apache.
The class VectorizedPrimitiveColumnReader, method decodeDictionaryIds.
/**
 * Reads `num` values into column, decoding the values from `dictionaryIds` and `dictionary`.
 */
private void decodeDictionaryIds(int rowId, int num, ColumnVector column,
    TypeInfo columnType, LongColumnVector dictionaryIds) {
  System.arraycopy(dictionaryIds.isNull, rowId, column.isNull, rowId, num);
  if (column.noNulls) {
    column.noNulls = dictionaryIds.noNulls;
  }
  column.isRepeating = column.isRepeating && dictionaryIds.isRepeating;
  PrimitiveTypeInfo primitiveColumnType = (PrimitiveTypeInfo) columnType;
  switch (primitiveColumnType.getPrimitiveCategory()) {
    case INT:
    case BYTE:
    case SHORT:
      for (int i = rowId; i < rowId + num; ++i) {
        ((LongColumnVector) column).vector[i] =
            dictionary.readInteger((int) dictionaryIds.vector[i]);
        if (!dictionary.isValid(((LongColumnVector) column).vector[i])) {
          setNullValue(column, i);
          ((LongColumnVector) column).vector[i] = 0;
        }
      }
      break;
    case DATE:
    case INTERVAL_YEAR_MONTH:
    case LONG:
      for (int i = rowId; i < rowId + num; ++i) {
        ((LongColumnVector) column).vector[i] =
            dictionary.readLong((int) dictionaryIds.vector[i]);
      }
      break;
    case BOOLEAN:
      for (int i = rowId; i < rowId + num; ++i) {
        ((LongColumnVector) column).vector[i] =
            dictionary.readBoolean((int) dictionaryIds.vector[i]) ? 1 : 0;
      }
      break;
    case DOUBLE:
      for (int i = rowId; i < rowId + num; ++i) {
        ((DoubleColumnVector) column).vector[i] =
            dictionary.readDouble((int) dictionaryIds.vector[i]);
      }
      break;
    case BINARY:
      for (int i = rowId; i < rowId + num; ++i) {
        ((BytesColumnVector) column).setVal(i, dictionary.readBytes((int) dictionaryIds.vector[i]));
      }
      break;
    case STRING:
      for (int i = rowId; i < rowId + num; ++i) {
        ((BytesColumnVector) column).setVal(i, dictionary.readString((int) dictionaryIds.vector[i]));
      }
      break;
    case VARCHAR:
      for (int i = rowId; i < rowId + num; ++i) {
        ((BytesColumnVector) column).setVal(i, dictionary.readVarchar((int) dictionaryIds.vector[i]));
      }
      break;
    case CHAR:
      for (int i = rowId; i < rowId + num; ++i) {
        ((BytesColumnVector) column).setVal(i, dictionary.readChar((int) dictionaryIds.vector[i]));
      }
      break;
    case FLOAT:
      for (int i = rowId; i < rowId + num; ++i) {
        ((DoubleColumnVector) column).vector[i] =
            dictionary.readFloat((int) dictionaryIds.vector[i]);
      }
      break;
    case DECIMAL:
      decimalTypeCheck(type);
      DecimalColumnVector decimalColumnVector = (DecimalColumnVector) column;
      decimalColumnVector.precision =
          (short) type.asPrimitiveType().getDecimalMetadata().getPrecision();
      decimalColumnVector.scale =
          (short) type.asPrimitiveType().getDecimalMetadata().getScale();
      for (int i = rowId; i < rowId + num; ++i) {
        decimalColumnVector.vector[i].set(
            dictionary.readDecimal((int) dictionaryIds.vector[i]), decimalColumnVector.scale);
      }
      break;
    case TIMESTAMP:
      for (int i = rowId; i < rowId + num; ++i) {
        ((TimestampColumnVector) column).set(i, dictionary.readTimestamp((int) dictionaryIds.vector[i]));
      }
      break;
    case INTERVAL_DAY_TIME:
    default:
      throw new UnsupportedOperationException("Unsupported type: " + type);
  }
}
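The essence of the dictionary decode above, in miniature: each row stores an index into a shared value table, and decoding materializes table[id] into the output vector. The sketch below uses a plain long[] table rather than Parquet's Dictionary abstraction, so it is an illustration of the pattern, not the reader's actual API.

// Dictionary-decode pattern in miniature, with a plain array as the dictionary.
import java.util.Arrays;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

public class DictionaryDecodeSketch {
  public static void main(String[] args) {
    long[] table = {1000L, 2000L, 3000L};   // shared dictionary values
    int[] ids = {0, 2, 2, 1};               // per-row dictionary indexes
    LongColumnVector out = new LongColumnVector(ids.length);
    for (int i = 0; i < ids.length; i++) {
      out.vector[i] = table[ids[i]];        // materialize the dictionary value
    }
    System.out.println(Arrays.toString(out.vector));  // [1000, 3000, 3000, 2000]
  }
}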
Use of org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector in project hive by apache.
The class VectorUDFTimestampFieldTimestamp, method evaluate.
@Override
public void evaluate(VectorizedRowBatch batch) {
  Preconditions.checkState(
      ((PrimitiveTypeInfo) inputTypeInfos[0]).getPrimitiveCategory() == PrimitiveCategory.TIMESTAMP);
  if (childExpressions != null) {
    super.evaluateChildren(batch);
  }
  LongColumnVector outV = (LongColumnVector) batch.cols[outputColumnNum];
  ColumnVector inputColVec = batch.cols[this.colNum];
  /* every line below this is identical for evaluateLong & evaluateString */
  final int n = inputColVec.isRepeating ? 1 : batch.size;
  int[] sel = batch.selected;
  final boolean selectedInUse = !inputColVec.isRepeating && batch.selectedInUse;
  if (batch.size == 0) {
    /* n != batch.size when isRepeating */
    return;
  }
  // We do not need to do a column reset since we are carefully changing the output.
  outV.isRepeating = false;
  TimestampColumnVector timestampColVector = (TimestampColumnVector) inputColVec;
  if (inputColVec.isRepeating) {
    if (inputColVec.noNulls || !inputColVec.isNull[0]) {
      outV.isNull[0] = false;
      outV.vector[0] = getTimestampField(timestampColVector, 0);
    } else {
      outV.isNull[0] = true;
      outV.noNulls = false;
    }
    outV.isRepeating = true;
    return;
  }
  if (inputColVec.noNulls) {
    if (selectedInUse) {
      for (int j = 0; j < n; j++) {
        int i = sel[j];
        outV.isNull[i] = false;
        outV.vector[i] = getTimestampField(timestampColVector, i);
      }
    } else {
      Arrays.fill(outV.isNull, 0, n, false);
      for (int i = 0; i < n; i++) {
        outV.vector[i] = getTimestampField(timestampColVector, i);
      }
    }
  } else {
    // There are nulls in the input column vector; carefully handle them.
    outV.noNulls = false;
    if (selectedInUse) {
      for (int j = 0; j < n; j++) {
        int i = sel[j];
        outV.isNull[i] = inputColVec.isNull[i];
        if (!inputColVec.isNull[i]) {
          outV.vector[i] = getTimestampField(timestampColVector, i);
        }
      }
    } else {
      for (int i = 0; i < n; i++) {
        outV.isNull[i] = inputColVec.isNull[i];
        if (!inputColVec.isNull[i]) {
          outV.vector[i] = getTimestampField(timestampColVector, i);
        }
      }
    }
  }
}
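The branching above is the standard vectorized-expression contract: a repeating input collapses to row 0, a no-nulls input skips the isNull bookkeeping, and a selection vector restricts which rows are touched. Below is a condensed sketch of the same shape for a hypothetical extractor that pulls epoch seconds out of the vector; the production code unrolls the no-nulls and selected cases into separate loops for speed, while this sketch folds them into one for brevity.

// Condensed sketch of the repeating / selected / null handling contract.
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;

public class EpochSecondsSketch {
  // Hypothetical field extractor: floored epoch seconds for one row.
  static long seconds(TimestampColumnVector in, int i) {
    return Math.floorDiv(in.getTime(i), 1000L);
  }

  static void evaluate(TimestampColumnVector in, LongColumnVector out,
                       int size, int[] sel, boolean selectedInUse) {
    if (in.isRepeating) {                    // one physical row covers the whole batch
      out.isRepeating = true;
      if (in.noNulls || !in.isNull[0]) {
        out.isNull[0] = false;
        out.vector[0] = seconds(in, 0);
      } else {
        out.isNull[0] = true;
        out.noNulls = false;
      }
      return;
    }
    out.isRepeating = false;
    for (int j = 0; j < size; j++) {
      int i = selectedInUse ? sel[j] : j;    // selection vector picks which rows to touch
      if (in.noNulls || !in.isNull[i]) {
        out.isNull[i] = false;
        out.vector[i] = seconds(in, i);
      } else {
        out.isNull[i] = true;
        out.noNulls = false;
      }
    }
  }
}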