Use of org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector in project hive by apache.
Class FilterStructColumnInList, method evaluate.
@Override
public void evaluate(VectorizedRowBatch batch) throws HiveException {
  final int logicalSize = batch.size;
  if (logicalSize == 0) {
    return;
  }
  if (buffer == null) {
    buffer = new Output();
    binarySortableSerializeWrite = new BinarySortableSerializeWrite(structColumnMap.length);
  }
  for (VectorExpression ve : structExpressions) {
    ve.evaluate(batch);
  }
  BytesColumnVector scratchBytesColumnVector = (BytesColumnVector) batch.cols[scratchBytesColumn];
  try {
    boolean selectedInUse = batch.selectedInUse;
    int[] selected = batch.selected;
    for (int logical = 0; logical < logicalSize; logical++) {
      int batchIndex = (selectedInUse ? selected[logical] : logical);
      binarySortableSerializeWrite.set(buffer);
      for (int f = 0; f < structColumnMap.length; f++) {
        int fieldColumn = structColumnMap[f];
        ColumnVector colVec = batch.cols[fieldColumn];
        int adjustedIndex = (colVec.isRepeating ? 0 : batchIndex);
        if (colVec.noNulls || !colVec.isNull[adjustedIndex]) {
          switch (fieldVectorColumnTypes[f]) {
          case BYTES:
            {
              BytesColumnVector bytesColVec = (BytesColumnVector) colVec;
              byte[] bytes = bytesColVec.vector[adjustedIndex];
              int start = bytesColVec.start[adjustedIndex];
              int length = bytesColVec.length[adjustedIndex];
              binarySortableSerializeWrite.writeString(bytes, start, length);
            }
            break;
          case LONG:
            binarySortableSerializeWrite.writeLong(((LongColumnVector) colVec).vector[adjustedIndex]);
            break;
          case DOUBLE:
            binarySortableSerializeWrite.writeDouble(((DoubleColumnVector) colVec).vector[adjustedIndex]);
            break;
          case DECIMAL:
            DecimalColumnVector decColVector = ((DecimalColumnVector) colVec);
            binarySortableSerializeWrite.writeHiveDecimal(decColVector.vector[adjustedIndex], decColVector.scale);
            break;
          default:
            throw new RuntimeException("Unexpected vector column type " + fieldVectorColumnTypes[f].name());
          }
        } else {
          binarySortableSerializeWrite.writeNull();
        }
      }
      scratchBytesColumnVector.setVal(batchIndex, buffer.getData(), 0, buffer.getLength());
    }
    // Now, take the serialized keys we just wrote into our scratch column and look them
    // up in the IN list.
    super.evaluate(batch);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
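For context, here is a minimal, self-contained sketch of the DECIMAL serialization step in isolation: it fills one slot of a DecimalColumnVector, writes it with BinarySortableSerializeWrite, and leaves the key bytes in an Output buffer, mirroring what the filter does per struct field before scratchBytesColumnVector.setVal. The class name and the sample precision, scale, and value are illustrative, not part of the Hive source.

import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.serde2.ByteStream.Output;
import org.apache.hadoop.hive.serde2.binarysortable.fast.BinarySortableSerializeWrite;

public class DecimalKeySerializationSketch {
  public static void main(String[] args) throws Exception {
    // One decimal column with precision 10, scale 2 (sample sizes).
    DecimalColumnVector decCol = new DecimalColumnVector(1024, 10, 2);
    decCol.vector[0].set(HiveDecimal.create("123.45"));

    // Serialize row 0 of this single field, as the filter does for each struct field.
    Output buffer = new Output();
    BinarySortableSerializeWrite serializeWrite = new BinarySortableSerializeWrite(1);
    serializeWrite.set(buffer);
    if (decCol.noNulls || !decCol.isNull[0]) {
      serializeWrite.writeHiveDecimal(decCol.vector[0], decCol.scale);
    } else {
      serializeWrite.writeNull();
    }

    // buffer.getData() / buffer.getLength() now hold the binary-sortable key bytes
    // that the filter copies into its scratch BytesColumnVector.
    System.out.println("serialized key length: " + buffer.getLength());
  }
}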
Use of org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector in project hive by apache.
Class FuncLongToDecimal, method evaluate.
@Override
public void evaluate(VectorizedRowBatch batch) throws HiveException {
  if (childExpressions != null) {
    super.evaluateChildren(batch);
  }
  LongColumnVector inputColVector = (LongColumnVector) batch.cols[inputColumnNum[0]];
  int[] sel = batch.selected;
  int n = batch.size;
  DecimalColumnVector outputColVector = (DecimalColumnVector) batch.cols[outputColumnNum];
  boolean[] inputIsNull = inputColVector.isNull;
  boolean[] outputIsNull = outputColVector.isNull;
  if (n == 0) {
    // Nothing to do
    return;
  }
  // We do not need to do a column reset since we are carefully changing the output.
  outputColVector.isRepeating = false;
  if (inputColVector.isRepeating) {
    if (inputColVector.noNulls || !inputIsNull[0]) {
      // Set isNull before the call in case it changes its mind.
      outputIsNull[0] = false;
      func(outputColVector, inputColVector, 0);
    } else {
      outputIsNull[0] = true;
      outputColVector.noNulls = false;
    }
    outputColVector.isRepeating = true;
    return;
  }
  if (inputColVector.noNulls) {
    if (batch.selectedInUse) {
      if (!outputColVector.noNulls) {
        for (int j = 0; j != n; j++) {
          final int i = sel[j];
          // Set isNull before the call in case it changes its mind.
          outputIsNull[i] = false;
          func(outputColVector, inputColVector, i);
        }
      } else {
        for (int j = 0; j != n; j++) {
          final int i = sel[j];
          func(outputColVector, inputColVector, i);
        }
      }
    } else {
      if (!outputColVector.noNulls) {
        // Assume it is almost always a performance win to fill all of isNull so we can
        // safely reset noNulls.
        Arrays.fill(outputIsNull, false);
        outputColVector.noNulls = true;
      }
      for (int i = 0; i != n; i++) {
        func(outputColVector, inputColVector, i);
      }
    }
  } else /* there are nulls in the inputColVector */ {
    // Carefully handle NULLs...
    outputColVector.noNulls = false;
    if (batch.selectedInUse) {
      for (int j = 0; j != n; j++) {
        int i = sel[j];
        outputColVector.isNull[i] = inputColVector.isNull[i];
        if (!inputColVector.isNull[i]) {
          func(outputColVector, inputColVector, i);
        }
      }
    } else {
      System.arraycopy(inputColVector.isNull, 0, outputColVector.isNull, 0, n);
      for (int i = 0; i != n; i++) {
        if (!inputColVector.isNull[i]) {
          func(outputColVector, inputColVector, i);
        }
      }
    }
  }
}
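FuncLongToDecimal delegates the per-row conversion to the abstract func hook it calls above. A minimal sketch of that conversion, using only public ColumnVector APIs (the class name and sample values are assumptions; Hive's actual cast subclasses may additionally enforce the output column's precision and scale):

import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;

public class LongToDecimalFuncSketch {
  // Mirrors the shape of func(outputColVector, inputColVector, i):
  // convert the long at row i into the decimal writable at row i.
  static void func(DecimalColumnVector outputColVector, LongColumnVector inputColVector, int i) {
    outputColVector.vector[i].setFromLong(inputColVector.vector[i]);
  }

  public static void main(String[] args) {
    LongColumnVector in = new LongColumnVector(1024);
    DecimalColumnVector out = new DecimalColumnVector(1024, 10, 2);
    in.vector[0] = 42L;
    func(out, in, 0);
    // Prints the converted decimal value held in the output writable.
    System.out.println(out.vector[0].getHiveDecimal());
  }
}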
Use of org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector in project hive by apache.
Class VectorUDAFComputeBitVectorDecimal, method aggregateInputSelection.
@Override
public void aggregateInputSelection(VectorAggregationBufferRow[] aggregationBufferSets, int aggregateIndex, VectorizedRowBatch batch) throws HiveException {
  inputExpression.evaluate(batch);
  DecimalColumnVector inputColumn = (DecimalColumnVector) batch.cols[this.inputExpression.getOutputColumnNum()];
  int batchSize = batch.size;
  if (batchSize == 0) {
    return;
  }
  if (inputColumn.noNulls) {
    if (inputColumn.isRepeating) {
      for (int i = 0; i < batchSize; i++) {
        Aggregation myagg = getAggregation(aggregationBufferSets, i, aggregateIndex);
        myagg.estimator.addToEstimator(inputColumn.vector[0].doubleValue());
      }
    } else {
      if (batch.selectedInUse) {
        for (int s = 0; s < batchSize; s++) {
          int i = batch.selected[s];
          Aggregation myagg = getAggregation(aggregationBufferSets, s, aggregateIndex);
          myagg.estimator.addToEstimator(inputColumn.vector[i].doubleValue());
        }
      } else {
        for (int i = 0; i < batchSize; i++) {
          Aggregation myagg = getAggregation(aggregationBufferSets, i, aggregateIndex);
          myagg.estimator.addToEstimator(inputColumn.vector[i].doubleValue());
        }
      }
    }
  } else {
    if (inputColumn.isRepeating) {
      if (!inputColumn.isNull[0]) {
        for (int i = 0; i < batchSize; i++) {
          Aggregation myagg = getAggregation(aggregationBufferSets, i, aggregateIndex);
          myagg.estimator.addToEstimator(inputColumn.vector[0].doubleValue());
        }
      }
    } else {
      if (batch.selectedInUse) {
        for (int s = 0; s < batchSize; s++) {
          int i = batch.selected[s];
          if (!inputColumn.isNull[i]) {
            Aggregation myagg = getAggregation(aggregationBufferSets, s, aggregateIndex);
            myagg.estimator.addToEstimator(inputColumn.vector[i].doubleValue());
          }
        }
      } else {
        for (int i = 0; i < batchSize; i++) {
          if (!inputColumn.isNull[i]) {
            Aggregation myagg = getAggregation(aggregationBufferSets, i, aggregateIndex);
            myagg.estimator.addToEstimator(inputColumn.vector[i].doubleValue());
          }
        }
      }
    }
  }
}
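These aggregation loops are all specializations of one iteration pattern over the column's noNulls and isRepeating flags and the batch's selected array. A compact, self-contained sketch of that pattern for a DecimalColumnVector (the helper name and the DoubleConsumer callback are illustrative, not Hive code):

import java.util.function.DoubleConsumer;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;

public class DecimalIterationSketch {
  // Feed every non-null decimal value in the batch to the consumer, honoring the
  // isRepeating / noNulls / selectedInUse fast paths the UDAFs above branch on.
  static void forEachNonNull(VectorizedRowBatch batch, DecimalColumnVector col, DoubleConsumer consumer) {
    int n = batch.size;
    if (n == 0) {
      return;
    }
    if (col.isRepeating) {
      // A repeating column stores a single value in slot 0 for every logical row.
      if (col.noNulls || !col.isNull[0]) {
        double value = col.vector[0].doubleValue();
        for (int i = 0; i < n; i++) {
          consumer.accept(value);
        }
      }
      return;
    }
    for (int j = 0; j < n; j++) {
      int i = batch.selectedInUse ? batch.selected[j] : j;
      if (col.noNulls || !col.isNull[i]) {
        consumer.accept(col.vector[i].doubleValue());
      }
    }
  }
}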
Use of org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector in project hive by apache.
Class VectorUDAFComputeBitVectorDecimal, method aggregateInput.
@Override
public void aggregateInput(AggregationBuffer agg, VectorizedRowBatch batch) throws HiveException {
  inputExpression.evaluate(batch);
  DecimalColumnVector inputColumn = (DecimalColumnVector) batch.cols[this.inputExpression.getOutputColumnNum()];
  int batchSize = batch.size;
  if (batchSize == 0) {
    return;
  }
  Aggregation myagg = (Aggregation) agg;
  myagg.prepare();
  if (inputColumn.noNulls) {
    if (inputColumn.isRepeating) {
      myagg.estimator.addToEstimator(inputColumn.vector[0].doubleValue());
    } else {
      if (batch.selectedInUse) {
        for (int s = 0; s < batchSize; s++) {
          int i = batch.selected[s];
          myagg.estimator.addToEstimator(inputColumn.vector[i].doubleValue());
        }
      } else {
        for (int i = 0; i < batchSize; i++) {
          myagg.estimator.addToEstimator(inputColumn.vector[i].doubleValue());
        }
      }
    }
  } else {
    if (inputColumn.isRepeating) {
      if (!inputColumn.isNull[0]) {
        myagg.estimator.addToEstimator(inputColumn.vector[0].doubleValue());
      }
    } else {
      if (batch.selectedInUse) {
        for (int j = 0; j < batchSize; ++j) {
          int i = batch.selected[j];
          if (!inputColumn.isNull[i]) {
            myagg.estimator.addToEstimator(inputColumn.vector[i].doubleValue());
          }
        }
      } else {
        for (int i = 0; i < batchSize; i++) {
          if (!inputColumn.isNull[i]) {
            myagg.estimator.addToEstimator(inputColumn.vector[i].doubleValue());
          }
        }
      }
    }
  }
}
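As the signatures show, aggregateInput folds the whole batch into a single AggregationBuffer, while aggregateInputSelection routes each row to its own VectorAggregationBufferRow (the grouped path). A small sketch of the kind of batch both methods receive after inputExpression.evaluate(batch); the sample values and the class name are made up for illustration:

import org.apache.hadoop.hive.common.type.HiveDecimal;
import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;

public class DecimalBatchSketch {
  public static void main(String[] args) {
    // One-column batch holding decimal(10,2) values.
    VectorizedRowBatch batch = new VectorizedRowBatch(1, 1024);
    DecimalColumnVector col = new DecimalColumnVector(1024, 10, 2);
    batch.cols[0] = col;

    col.vector[0].set(HiveDecimal.create("1.10"));
    col.vector[1].set(HiveDecimal.create("2.20"));
    col.isNull[2] = true;
    col.noNulls = false;      // signal that isNull must be consulted
    col.isRepeating = false;
    batch.size = 3;
    batch.selectedInUse = false;
    // A batch shaped like this drives the noNulls / isRepeating / selectedInUse
    // branches seen in the aggregation methods above.
  }
}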
Use of org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector in project hive by apache.
Class VectorUDAFSumDecimal, method aggregateInputSelection.
@Override
public void aggregateInputSelection(VectorAggregationBufferRow[] aggregationBufferSets, int aggregateIndex, VectorizedRowBatch batch) throws HiveException {
  int batchSize = batch.size;
  if (batchSize == 0) {
    return;
  }
  inputExpression.evaluate(batch);
  DecimalColumnVector inputVector = (DecimalColumnVector) batch.cols[this.inputExpression.getOutputColumnNum()];
  HiveDecimalWritable[] vector = inputVector.vector;
  if (inputVector.noNulls) {
    if (inputVector.isRepeating) {
      iterateNoNullsRepeatingWithAggregationSelection(aggregationBufferSets, aggregateIndex, vector[0], batchSize);
    } else {
      if (batch.selectedInUse) {
        iterateNoNullsSelectionWithAggregationSelection(aggregationBufferSets, aggregateIndex, vector, batch.selected, batchSize);
      } else {
        iterateNoNullsWithAggregationSelection(aggregationBufferSets, aggregateIndex, vector, batchSize);
      }
    }
  } else {
    if (inputVector.isRepeating) {
      iterateHasNullsRepeatingWithAggregationSelection(aggregationBufferSets, aggregateIndex, vector[0], batchSize, inputVector.isNull);
    } else {
      if (batch.selectedInUse) {
        iterateHasNullsSelectionWithAggregationSelection(aggregationBufferSets, aggregateIndex, vector, batchSize, batch.selected, inputVector.isNull);
      } else {
        iterateHasNullsWithAggregationSelection(aggregationBufferSets, aggregateIndex, vector, batchSize, inputVector.isNull);
      }
    }
  }
}
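The iterate* helpers are defined elsewhere in VectorUDAFSumDecimal and are not shown here. A rough sketch of what the no-nulls, selection-in-use variant could look like; the buffer-lookup helper, the Aggregation field names, and the running-sum field are assumptions for illustration, not the actual Hive implementation:

// Hypothetical sketch: each logical row can belong to a different GROUP BY key,
// so each row resolves its own aggregation buffer and adds into its running sum.
private void iterateNoNullsSelectionWithAggregationSelection(
    VectorAggregationBufferRow[] aggregationBufferSets,
    int aggregateIndex,
    HiveDecimalWritable[] values,
    int[] selected,
    int batchSize) {
  for (int row = 0; row < batchSize; row++) {
    Aggregation myagg = getCurrentAggregationBuffer(aggregationBufferSets, aggregateIndex, row); // assumed helper
    myagg.sum.mutateAdd(values[selected[row]]);   // assumed HiveDecimalWritable running-sum field
  }
}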