Use of org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector in the Apache Hive project.
Class VectorizedPrimitiveColumnReader, method decodeDictionaryIds.
/**
 * Reads `num` values into `column`, decoding them from `dictionaryIds` and `dictionary`.
 */
private void decodeDictionaryIds(int rowId, int num, ColumnVector column, LongColumnVector dictionaryIds) {
  System.arraycopy(dictionaryIds.isNull, rowId, column.isNull, rowId, num);
  if (column.noNulls) {
    column.noNulls = dictionaryIds.noNulls;
  }
  column.isRepeating = column.isRepeating && dictionaryIds.isRepeating;
  switch (descriptor.getType()) {
    case INT32:
      for (int i = rowId; i < rowId + num; ++i) {
        ((LongColumnVector) column).vector[i] = dictionary.decodeToInt((int) dictionaryIds.vector[i]);
      }
      break;
    case INT64:
      for (int i = rowId; i < rowId + num; ++i) {
        ((LongColumnVector) column).vector[i] = dictionary.decodeToLong((int) dictionaryIds.vector[i]);
      }
      break;
    case FLOAT:
      for (int i = rowId; i < rowId + num; ++i) {
        ((DoubleColumnVector) column).vector[i] = dictionary.decodeToFloat((int) dictionaryIds.vector[i]);
      }
      break;
    case DOUBLE:
      for (int i = rowId; i < rowId + num; ++i) {
        ((DoubleColumnVector) column).vector[i] = dictionary.decodeToDouble((int) dictionaryIds.vector[i]);
      }
      break;
    case INT96:
      final Calendar calendar;
      if (Strings.isNullOrEmpty(this.conversionTimeZone)) {
        // Local time should be used if no time zone is specified
        calendar = Calendar.getInstance();
      } else {
        calendar = Calendar.getInstance(TimeZone.getTimeZone(this.conversionTimeZone));
      }
      for (int i = rowId; i < rowId + num; ++i) {
        // An INT96 timestamp is 12 bytes, little-endian: 8 bytes of nanos-of-day, then a 4-byte Julian day.
        ByteBuffer buf = dictionary.decodeToBinary((int) dictionaryIds.vector[i]).toByteBuffer();
        buf.order(ByteOrder.LITTLE_ENDIAN);
        long timeOfDayNanos = buf.getLong();
        int julianDay = buf.getInt();
        NanoTime nt = new NanoTime(julianDay, timeOfDayNanos);
        Timestamp ts = NanoTimeUtils.getTimestamp(nt, calendar);
        ((TimestampColumnVector) column).set(i, ts);
      }
      break;
    case BINARY:
    case FIXED_LEN_BYTE_ARRAY:
      if (column instanceof BytesColumnVector) {
        for (int i = rowId; i < rowId + num; ++i) {
          ((BytesColumnVector) column).setVal(i, dictionary.decodeToBinary((int) dictionaryIds.vector[i]).getBytesUnsafe());
        }
      } else {
        DecimalColumnVector decimalColumnVector = (DecimalColumnVector) column;
        decimalColumnVector.precision = (short) type.asPrimitiveType().getDecimalMetadata().getPrecision();
        decimalColumnVector.scale = (short) type.asPrimitiveType().getDecimalMetadata().getScale();
        for (int i = rowId; i < rowId + num; ++i) {
          decimalColumnVector.vector[i].set(dictionary.decodeToBinary((int) dictionaryIds.vector[i]).getBytesUnsafe(), decimalColumnVector.scale);
        }
      }
      break;
    default:
      throw new UnsupportedOperationException("Unsupported type: " + descriptor.getType());
  }
}
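For intuition, the INT96 branch above boils down to Julian-day arithmetic on the (julianDay, timeOfDayNanos) pair. Below is a minimal, self-contained sketch of that conversion, independent of Hive's NanoTimeUtils (which additionally applies the supplied Calendar's time zone); the helper is hypothetical and assumes UTC. The constant 2440588 is the Julian day number of the Unix epoch, 1970-01-01.

import java.sql.Timestamp;

public class Int96TimestampSketch {
  // Julian day number of the Unix epoch, 1970-01-01.
  private static final long JULIAN_DAY_OF_EPOCH = 2440588L;

  // Hypothetical UTC-only conversion of a Parquet INT96 (julianDay, timeOfDayNanos) pair.
  static Timestamp toTimestamp(int julianDay, long timeOfDayNanos) {
    long epochSeconds = (julianDay - JULIAN_DAY_OF_EPOCH) * 86400L + timeOfDayNanos / 1_000_000_000L;
    Timestamp ts = new Timestamp(epochSeconds * 1000L);
    ts.setNanos((int) (timeOfDayNanos % 1_000_000_000L)); // keep full nanosecond precision
    return ts;
  }

  public static void main(String[] args) {
    // Julian day 2440588 at zero nanos is exactly the epoch.
    System.out.println(toTimestamp(2440588, 0).getTime()); // prints 0
  }
}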
Use of org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector in the Apache Hive project.
Class VectorUDAFVarSampTimestamp, method aggregateInputSelection.
@Override
public void aggregateInputSelection(VectorAggregationBufferRow[] aggregationBufferSets, int aggregateIndex, VectorizedRowBatch batch) throws HiveException {
  inputExpression.evaluate(batch);
  TimestampColumnVector inputColVector = (TimestampColumnVector) batch.cols[this.inputExpression.getOutputColumn()];
  int batchSize = batch.size;
  if (batchSize == 0) {
    return;
  }
  if (inputColVector.isRepeating) {
    if (inputColVector.noNulls || !inputColVector.isNull[0]) {
      iterateRepeatingNoNullsWithAggregationSelection(aggregationBufferSets, aggregateIndex, inputColVector.getDouble(0), batchSize);
    }
  } else if (!batch.selectedInUse && inputColVector.noNulls) {
    iterateNoSelectionNoNullsWithAggregationSelection(aggregationBufferSets, aggregateIndex, inputColVector, batchSize);
  } else if (!batch.selectedInUse) {
    iterateNoSelectionHasNullsWithAggregationSelection(aggregationBufferSets, aggregateIndex, inputColVector, batchSize, inputColVector.isNull);
  } else if (inputColVector.noNulls) {
    iterateSelectionNoNullsWithAggregationSelection(aggregationBufferSets, aggregateIndex, inputColVector, batchSize, batch.selected);
  } else {
    iterateSelectionHasNullsWithAggregationSelection(aggregationBufferSets, aggregateIndex, inputColVector, batchSize, inputColVector.isNull, batch.selected);
  }
}
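The iterate*WithAggregationSelection helpers route each row's value into the aggregation buffer of that row's group. What each of them ultimately does per value is an incremental variance update; a minimal sketch of that state and update follows (field names are assumptions; the recurrence is the standard single-pass formula that Hive's variance UDAFs are built on).

final class VarianceBuffer {
  long count;
  double sum;
  double variance; // running sum of squared deviations

  // Fold one value (e.g. TimestampColumnVector.getDouble(i)) into the running state.
  void updateValue(double value) {
    count++;
    sum += value;
    if (count > 1) {
      double t = count * value - sum;
      variance += (t * t) / ((double) count * (count - 1));
    }
  }

  double varSamp() {
    return count > 1 ? variance / (count - 1) : 0.0;
  }
}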
Use of org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector in the Apache Hive project.
Class VectorUDAFAvgTimestamp, method aggregateInputSelection.
@Override
public void aggregateInputSelection(VectorAggregationBufferRow[] aggregationBufferSets, int bufferIndex, VectorizedRowBatch batch) throws HiveException {
  int batchSize = batch.size;
  if (batchSize == 0) {
    return;
  }
  inputExpression.evaluate(batch);
  TimestampColumnVector inputColVector = (TimestampColumnVector) batch.cols[this.inputExpression.getOutputColumn()];
  if (inputColVector.noNulls) {
    if (inputColVector.isRepeating) {
      iterateNoNullsRepeatingWithAggregationSelection(aggregationBufferSets, bufferIndex, inputColVector.getDouble(0), batchSize);
    } else {
      if (batch.selectedInUse) {
        iterateNoNullsSelectionWithAggregationSelection(aggregationBufferSets, bufferIndex, inputColVector, batch.selected, batchSize);
      } else {
        iterateNoNullsWithAggregationSelection(aggregationBufferSets, bufferIndex, inputColVector, batchSize);
      }
    }
  } else {
    if (inputColVector.isRepeating) {
      if (batch.selectedInUse) {
        iterateHasNullsRepeatingSelectionWithAggregationSelection(aggregationBufferSets, bufferIndex, inputColVector.getDouble(0), batchSize, batch.selected, inputColVector.isNull);
      } else {
        iterateHasNullsRepeatingWithAggregationSelection(aggregationBufferSets, bufferIndex, inputColVector.getDouble(0), batchSize, inputColVector.isNull);
      }
    } else {
      if (batch.selectedInUse) {
        iterateHasNullsSelectionWithAggregationSelection(aggregationBufferSets, bufferIndex, inputColVector, batchSize, batch.selected, inputColVector.isNull);
      } else {
        iterateHasNullsWithAggregationSelection(aggregationBufferSets, bufferIndex, inputColVector, batchSize, inputColVector.isNull);
      }
    }
  }
}
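Both the average and variance aggregates consume each timestamp as a double of seconds via getDouble(0)/getDouble(i). A rough sketch of that conversion follows; it assumes the vector stores epoch milliseconds plus a separate sub-second nanoseconds component (an assumption of this sketch, mirroring TimestampColumnVector's time[] and nanos[] fields), yielding whole seconds plus fractional nanoseconds.

final class TimestampDoubleSketch {
  // Sketch of getDouble(i): whole epoch seconds plus fractional nanoseconds.
  // Assumes time[i] holds epoch millis and nanos[i] the sub-second nanoseconds.
  static double timestampAsDouble(long[] time, int[] nanos, int i) {
    long seconds = Math.floorDiv(time[i], 1000L); // truncate millis to whole seconds
    return seconds + nanos[i] / 1_000_000_000.0;
  }
}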
Use of org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector in the Apache Hive project.
Class VectorUDAFStdPopTimestamp, method aggregateInputSelection.
@Override
public void aggregateInputSelection(VectorAggregationBufferRow[] aggregationBufferSets, int aggregateIndex, VectorizedRowBatch batch) throws HiveException {
  inputExpression.evaluate(batch);
  TimestampColumnVector inputColVector = (TimestampColumnVector) batch.cols[this.inputExpression.getOutputColumn()];
  int batchSize = batch.size;
  if (batchSize == 0) {
    return;
  }
  if (inputColVector.isRepeating) {
    if (inputColVector.noNulls || !inputColVector.isNull[0]) {
      iterateRepeatingNoNullsWithAggregationSelection(aggregationBufferSets, aggregateIndex, inputColVector.getDouble(0), batchSize);
    }
  } else if (!batch.selectedInUse && inputColVector.noNulls) {
    iterateNoSelectionNoNullsWithAggregationSelection(aggregationBufferSets, aggregateIndex, inputColVector, batchSize);
  } else if (!batch.selectedInUse) {
    iterateNoSelectionHasNullsWithAggregationSelection(aggregationBufferSets, aggregateIndex, inputColVector, batchSize, inputColVector.isNull);
  } else if (inputColVector.noNulls) {
    iterateSelectionNoNullsWithAggregationSelection(aggregationBufferSets, aggregateIndex, inputColVector, batchSize, batch.selected);
  } else {
    iterateSelectionHasNullsWithAggregationSelection(aggregationBufferSets, aggregateIndex, inputColVector, batchSize, inputColVector.isNull, batch.selected);
  }
}
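The four-way branch above (selection vector in use or not, nulls present or not) recurs in every generated aggregate. A generic sketch of the pattern the specialized iterate* methods unroll is shown below (hypothetical helper, not Hive source); the generated code splits it into separate methods precisely so each hot loop avoids re-testing these flags per row.

import java.util.function.IntConsumer;

final class IterationSketch {
  // One loop covering all four cases; Hive's generated code specializes this
  // into four methods so the flags are not re-checked on every row.
  static void forEachNonNull(int batchSize, boolean selectedInUse, int[] selected,
                             boolean noNulls, boolean[] isNull, IntConsumer body) {
    for (int j = 0; j < batchSize; j++) {
      int i = selectedInUse ? selected[j] : j; // logical row -> physical row
      if (noNulls || !isNull[i]) {
        body.accept(i);
      }
    }
  }
}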
Use of org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector in the Apache Hive project.
Class VectorUDAFStdPopTimestamp, method aggregateInput.
@Override
public void aggregateInput(AggregationBuffer agg, VectorizedRowBatch batch) throws HiveException {
  inputExpression.evaluate(batch);
  TimestampColumnVector inputColVector = (TimestampColumnVector) batch.cols[this.inputExpression.getOutputColumn()];
  int batchSize = batch.size;
  if (batchSize == 0) {
    return;
  }
  Aggregation myagg = (Aggregation) agg;
  if (inputColVector.isRepeating) {
    // Aggregate the repeated value unless it is null, mirroring aggregateInputSelection above;
    // checking only noNulls would drop valid repeated values whenever the nulls flag is set.
    if (inputColVector.noNulls || !inputColVector.isNull[0]) {
      iterateRepeatingNoNulls(myagg, inputColVector.getDouble(0), batchSize);
    }
  } else if (!batch.selectedInUse && inputColVector.noNulls) {
    iterateNoSelectionNoNulls(myagg, inputColVector, batchSize);
  } else if (!batch.selectedInUse) {
    iterateNoSelectionHasNulls(myagg, inputColVector, batchSize, inputColVector.isNull);
  } else if (inputColVector.noNulls) {
    iterateSelectionNoNulls(myagg, inputColVector, batchSize, batch.selected);
  } else {
    iterateSelectionHasNulls(myagg, inputColVector, batchSize, inputColVector.isNull, batch.selected);
  }
}
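The running (count, sum, variance) state is shared across the whole variance family; only the terminate step differs per flavor. Below is a sketch of those final expressions, reusing the count/variance fields from the VarianceBuffer sketch earlier; it is simplified in that the real UDAFs return NULL rather than 0.0 for empty or degenerate groups.

final class VarianceFinalizeSketch {
  // Final expressions per flavor; count and variance are the running state
  // from the VarianceBuffer sketch above. Edge-case handling is simplified.
  static double finish(String flavor, long count, double variance) {
    switch (flavor) {
      case "var_pop":  return count > 0 ? variance / count : 0.0;
      case "var_samp": return count > 1 ? variance / (count - 1) : 0.0;
      case "std_pop":  return count > 0 ? Math.sqrt(variance / count) : 0.0;
      case "std_samp": return count > 1 ? Math.sqrt(variance / (count - 1)) : 0.0;
      default: throw new IllegalArgumentException("Unknown flavor: " + flavor);
    }
  }
}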