Search in sources :

Example 1 with Decimal64ColumnVector

use of org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector in project hive by apache.

From the class VectorUDAFSumDecimal64, the method assignRowColumn:

@Override
public void assignRowColumn(VectorizedRowBatch batch, int batchIndex, int columnNum, AggregationBuffer agg) throws HiveException {
    // Writes this buffer's running sum into one row of the Decimal64 output column.
    final Aggregation sumBuffer = (Aggregation) agg;
    final Decimal64ColumnVector outCol = (Decimal64ColumnVector) batch.cols[columnNum];
    // A sum that never saw a value, or that overflowed, is reported as SQL NULL.
    final boolean resultIsNull = sumBuffer.isNull || sumBuffer.isOverflowed;
    if (resultIsNull) {
        outCol.noNulls = false;
        outCol.isNull[batchIndex] = true;
    } else {
        outCol.isNull[batchIndex] = false;
        outCol.vector[batchIndex] = sumBuffer.sum;
    }
}
Also used : Decimal64ColumnVector(org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector)

Example 2 with Decimal64ColumnVector

use of org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector in project hive by apache.

From the class RecordReaderImpl, the method nextDecimal:

static HiveDecimalWritable nextDecimal(ColumnVector vector, int row, Object previous) {
    // A repeating vector stores its single shared value at index 0.
    final int idx = vector.isRepeating ? 0 : row;
    // Null entry for this row: hand back null rather than a writable.
    if (!vector.noNulls && vector.isNull[idx]) {
        return null;
    }
    // Reuse the caller-supplied writable when it is exactly a HiveDecimalWritable,
    // otherwise allocate a fresh one.
    final HiveDecimalWritable result;
    if (previous != null && previous.getClass() == HiveDecimalWritable.class) {
        result = (HiveDecimalWritable) previous;
    } else {
        result = new HiveDecimalWritable();
    }
    if (vector instanceof Decimal64ColumnVector) {
        // Decimal64 columns keep a scaled long; rebuild the decimal from value + scale.
        final Decimal64ColumnVector d64 = (Decimal64ColumnVector) vector;
        result.deserialize64(d64.vector[idx], d64.scale);
    } else {
        result.set(((DecimalColumnVector) vector).vector[idx]);
    }
    return result;
}
Also used : Decimal64ColumnVector(org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector) DecimalColumnVector(org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector) HiveDecimalWritable(org.apache.hadoop.hive.serde2.io.HiveDecimalWritable)

Example 3 with Decimal64ColumnVector

use of org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector in project hive by apache.

From the class BatchToRowReader, the method nextDecimal:

public static HiveDecimalWritable nextDecimal(ColumnVector vector, int row, Object previous) {
    // Collapse repeating vectors onto their single stored slot (index 0).
    final int effectiveRow = vector.isRepeating ? 0 : row;
    if (vector.noNulls || !vector.isNull[effectiveRow]) {
        // Recycle the previous object only when it is exactly HiveDecimalWritable;
        // anything else (including null) forces a new instance.
        final boolean canReuse =
                previous != null && previous.getClass() == HiveDecimalWritable.class;
        final HiveDecimalWritable result =
                canReuse ? (HiveDecimalWritable) previous : new HiveDecimalWritable();
        if (vector instanceof Decimal64ColumnVector) {
            final Decimal64ColumnVector d64 = (Decimal64ColumnVector) vector;
            // Decimal64 encodes the value as a scaled long.
            result.deserialize64(d64.vector[effectiveRow], d64.scale);
        } else {
            result.set(((DecimalColumnVector) vector).vector[effectiveRow]);
        }
        return result;
    }
    // Row is null in the column vector.
    return null;
}
Also used : Decimal64ColumnVector(org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector) DecimalColumnVector(org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector) HiveDecimalWritable(org.apache.hadoop.hive.serde2.io.HiveDecimalWritable)

Example 4 with Decimal64ColumnVector

use of org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector in project hive by apache.

From the class EncodedTreeReaderFactory, the method getPrimitiveTreeReader:

/**
 * Builds the stream-reader for a single primitive ORC column, dispatching on the
 * column's type category. Each branch wires the relevant encoded streams (present,
 * data, dictionary, lengths, secondary) into the matching reader builder.
 *
 * @param columnIndex index of the column in the schema
 * @param columnType ORC type description of the column (must be a primitive category)
 * @param codec compression codec for the encoded streams, possibly null
 * @param columnEncoding the ORC column encoding for this column
 * @param present presence (null) bitmap stream
 * @param data primary data stream
 * @param dictionary dictionary stream (string-family types)
 * @param lengths lengths stream (binary/string-family types)
 * @param secondary secondary stream (decimal scale, timestamp nanos)
 * @param context reader context; getFileFormat() == null indicates the LLAP IO text path
 * @param vectors pre-materialized vectors (LLAP IO text path), or null when reading streams
 * @param useDecimal64ColumnVectors whether decimal64 vectors are enabled by configuration
 * @return a TreeReader for the column
 * @throws IOException if a reader cannot be constructed
 * @throws AssertionError if columnType is not a primitive category
 */
private static TreeReader getPrimitiveTreeReader(final int columnIndex, TypeDescription columnType, CompressionCodec codec, OrcProto.ColumnEncoding columnEncoding, ColumnStreamData present, ColumnStreamData data, ColumnStreamData dictionary, ColumnStreamData lengths, ColumnStreamData secondary, Context context, List<ColumnVector> vectors, final boolean useDecimal64ColumnVectors) throws IOException {
    switch(columnType.getCategory()) {
        case BINARY:
            return BinaryStreamReader.builder().setColumnIndex(columnIndex).setPresentStream(present).setDataStream(data).setLengthStream(lengths).setCompressionCodec(codec).setColumnEncoding(columnEncoding).setVectors(vectors).setContext(context).build();
        case BOOLEAN:
            return BooleanStreamReader.builder().setColumnIndex(columnIndex).setPresentStream(present).setDataStream(data).setCompressionCodec(codec).setVectors(vectors).build();
        case BYTE:
            return ByteStreamReader.builder().setColumnIndex(columnIndex).setPresentStream(present).setDataStream(data).setCompressionCodec(codec).setVectors(vectors).build();
        case SHORT:
            return ShortStreamReader.builder().setColumnIndex(columnIndex).setPresentStream(present).setDataStream(data).setCompressionCodec(codec).setColumnEncoding(columnEncoding).setVectors(vectors).setContext(context).build();
        case INT:
            return IntStreamReader.builder().setColumnIndex(columnIndex).setPresentStream(present).setDataStream(data).setCompressionCodec(codec).setColumnEncoding(columnEncoding).setVectors(vectors).setContext(context).build();
        case LONG:
            return LongStreamReader.builder().setColumnIndex(columnIndex).setPresentStream(present).setDataStream(data).setCompressionCodec(codec).setColumnEncoding(columnEncoding).setVectors(vectors).setContext(context).build();
        case FLOAT:
            return FloatStreamReader.builder().setColumnIndex(columnIndex).setPresentStream(present).setDataStream(data).setCompressionCodec(codec).setVectors(vectors).build();
        case DOUBLE:
            return DoubleStreamReader.builder().setColumnIndex(columnIndex).setPresentStream(present).setDataStream(data).setCompressionCodec(codec).setVectors(vectors).build();
        case CHAR:
            return CharStreamReader.builder().setColumnIndex(columnIndex).setMaxLength(columnType.getMaxLength()).setPresentStream(present).setDataStream(data).setLengthStream(lengths).setDictionaryStream(dictionary).setCompressionCodec(codec).setColumnEncoding(columnEncoding).setVectors(vectors).build();
        case VARCHAR:
            return VarcharStreamReader.builder().setColumnIndex(columnIndex).setMaxLength(columnType.getMaxLength()).setPresentStream(present).setDataStream(data).setLengthStream(lengths).setDictionaryStream(dictionary).setCompressionCodec(codec).setColumnEncoding(columnEncoding).setVectors(vectors).build();
        case STRING:
            return StringStreamReader.builder().setColumnIndex(columnIndex).setPresentStream(present).setDataStream(data).setLengthStream(lengths).setDictionaryStream(dictionary).setCompressionCodec(codec).setColumnEncoding(columnEncoding).setVectors(vectors).build();
        case DECIMAL:
            // Special handling for the serde reader (text) in LLAP IO:
            // if the file format version is null we are processing a text input format in
            // LLAP IO, and we get vectors instead of streams. If those vectors contain a
            // Decimal64ColumnVector we must use Decimal64StreamReader (which acts as a
            // wrapper around the vectors). The && short-circuits so the vector scan only
            // runs when the cheap preconditions already hold, matching the original flow.
            boolean useDecimal64Reader = context.getFileFormat() == null && vectors != null && useDecimal64ColumnVectors && containsDecimal64ColumnVector(vectors);
            if ((context.getFileFormat() == OrcFile.Version.UNSTABLE_PRE_2_0 || useDecimal64Reader) && columnType.getPrecision() <= TypeDescription.MAX_DECIMAL64_PRECISION) {
                return Decimal64StreamReader.builder().setColumnIndex(columnIndex).setPrecision(columnType.getPrecision()).setScale(columnType.getScale()).setPresentStream(present).setValueStream(data).setCompressionCodec(codec).setColumnEncoding(columnEncoding).setVectors(vectors).setContext(context).build();
            } else {
                return DecimalStreamReader.builder().setColumnIndex(columnIndex).setPrecision(columnType.getPrecision()).setScale(columnType.getScale()).setPresentStream(present).setValueStream(data).setScaleStream(secondary).setCompressionCodec(codec).setColumnEncoding(columnEncoding).setVectors(vectors).setContext(context).build();
            }
        case TIMESTAMP:
            return TimestampStreamReader.builder().setColumnIndex(columnIndex).setPresentStream(present).setSecondsStream(data).setNanosStream(secondary).setCompressionCodec(codec).setColumnEncoding(columnEncoding).setVectors(vectors).setContext(context).setIsInstant(false).build();
        case DATE:
            return DateStreamReader.builder().setColumnIndex(columnIndex).setPresentStream(present).setDataStream(data).setCompressionCodec(codec).setColumnEncoding(columnEncoding).setVectors(vectors).setContext(context).build();
        default:
            throw new AssertionError("Not a primitive category: " + columnType.getCategory());
    }
}

/** Returns true if any of the given column vectors is a {@link Decimal64ColumnVector}. */
private static boolean containsDecimal64ColumnVector(List<ColumnVector> vectors) {
    for (ColumnVector vector : vectors) {
        if (vector instanceof Decimal64ColumnVector) {
            return true;
        }
    }
    return false;
}
Also used : Decimal64ColumnVector(org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector) ColumnVector(org.apache.hadoop.hive.ql.exec.vector.ColumnVector) Decimal64ColumnVector(org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector)

Example 5 with Decimal64ColumnVector

use of org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector in project hive by apache.

From the class CastLongToDecimal64, the method evaluate:

@Override
public void evaluate(VectorizedRowBatch batch) throws HiveException {
    // Casts a long input column to a Decimal64 output column by multiplying each
    // value by 10^scale of the output decimal type (a "scale up"). Follows the
    // standard vectorized-expression template: handle the repeating case first,
    // then split on noNulls / selectedInUse to avoid per-row null checks where
    // they are not needed.
    if (childExpressions != null) {
        super.evaluateChildren(batch);
    }
    LongColumnVector inputColVector = (LongColumnVector) batch.cols[inputColumnNum[0]];
    int[] sel = batch.selected;
    int n = batch.size;
    Decimal64ColumnVector outputColVector = (Decimal64ColumnVector) batch.cols[outputColumnNum];
    // Scale factor is 10^scale of the target decimal type, looked up from a table.
    int outputScale = ((DecimalTypeInfo) outputTypeInfo).scale();
    final long scaleFactor = powerOfTenTable[outputScale];
    boolean[] inputIsNull = inputColVector.isNull;
    boolean[] outputIsNull = outputColVector.isNull;
    if (n == 0) {
        // Nothing to do
        return;
    }
    // We do not need to do a column reset since we are carefully changing the output.
    outputColVector.isRepeating = false;
    if (inputColVector.isRepeating) {
        // Repeating input: compute slot 0 once and mark the output repeating.
        if (inputColVector.noNulls || !inputIsNull[0]) {
            // Set isNull before the call in case it changes its mind.
            outputIsNull[0] = false;
            scaleUp(outputColVector, inputColVector, 0, scaleFactor);
        } else {
            outputIsNull[0] = true;
            outputColVector.noNulls = false;
        }
        outputColVector.isRepeating = true;
        return;
    }
    if (inputColVector.noNulls) {
        if (batch.selectedInUse) {
            if (!outputColVector.noNulls) {
                // Output may carry stale nulls from a previous use: clear each
                // selected slot individually.
                for (int j = 0; j != n; j++) {
                    final int i = sel[j];
                    // Set isNull before the call in case it changes its mind.
                    outputIsNull[i] = false;
                    scaleUp(outputColVector, inputColVector, i, scaleFactor);
                }
            } else {
                for (int j = 0; j != n; j++) {
                    final int i = sel[j];
                    scaleUp(outputColVector, inputColVector, i, scaleFactor);
                }
            }
        } else {
            if (!outputColVector.noNulls) {
                // Assume it is almost always a performance win to fill all of isNull so we can
                // safely reset noNulls.
                Arrays.fill(outputIsNull, false);
                outputColVector.noNulls = true;
            }
            for (int i = 0; i != n; i++) {
                scaleUp(outputColVector, inputColVector, i, scaleFactor);
            }
        }
    } else /* there are nulls in the inputColVector */
    {
        // Carefully handle NULLs...
        outputColVector.noNulls = false;
        if (batch.selectedInUse) {
            // Propagate the null flag per selected row; convert only non-null rows.
            for (int j = 0; j != n; j++) {
                int i = sel[j];
                outputColVector.isNull[i] = inputColVector.isNull[i];
                if (!inputColVector.isNull[i]) {
                    scaleUp(outputColVector, inputColVector, i, scaleFactor);
                }
            }
        } else {
            // Bulk-copy the null flags for the dense case, then convert non-null rows.
            System.arraycopy(inputColVector.isNull, 0, outputColVector.isNull, 0, n);
            for (int i = 0; i != n; i++) {
                if (!inputColVector.isNull[i]) {
                    scaleUp(outputColVector, inputColVector, i, scaleFactor);
                }
            }
        }
    }
}
Also used : DecimalTypeInfo(org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo) Decimal64ColumnVector(org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector) LongColumnVector(org.apache.hadoop.hive.ql.exec.vector.LongColumnVector)

Aggregations

Decimal64ColumnVector (org.apache.hadoop.hive.ql.exec.vector.Decimal64ColumnVector)9 DecimalColumnVector (org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector)2 HiveDecimalWritable (org.apache.hadoop.hive.serde2.io.HiveDecimalWritable)2 ColumnVector (org.apache.hadoop.hive.ql.exec.vector.ColumnVector)1 LongColumnVector (org.apache.hadoop.hive.ql.exec.vector.LongColumnVector)1 DecimalTypeInfo (org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo)1