Example 1 with DoubleType

Use of org.apache.spark.sql.types.DoubleType in the project carbondata by apache.

The fillRow method of the class SafeVariableLengthDimensionDataChunkStore:

@Override
public void fillRow(int rowId, CarbonColumnVector vector, int vectorRow) {
    // if the column was explicitly sorted, map the rowId through the reverse inverted index
    if (isExplictSorted) {
        rowId = invertedIndexReverse[rowId];
    }
    // To read this row from the memory block:
    // 1. get the current row's data offset
    // 2. if this is not the last row, the length is the next row's offset minus
    //    (the current offset + 2 bytes that store the data length);
    //    for the last row, the length is the total data length minus the current offset
    int currentDataOffset = dataOffsets[rowId];
    short length = 0;
    // calculating the length of data
    if (rowId < numberOfRows - 1) {
        length = (short) (dataOffsets[rowId + 1] - (currentDataOffset + CarbonCommonConstants.SHORT_SIZE_IN_BYTE));
    } else {
        // for last record
        length = (short) (this.data.length - currentDataOffset);
    }
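    // the stored bytes match the member default value, which marks a null cell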
    if (ByteUtil.UnsafeComparer.INSTANCE.equals(CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY, 0,
            CarbonCommonConstants.MEMBER_DEFAULT_VAL_ARRAY.length, data, currentDataOffset, length)) {
        vector.putNull(vectorRow);
    } else {
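        // otherwise decode the payload according to the Spark data type of the target vector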
        DataType dt = vector.getType();
        if (dt instanceof StringType) {
            vector.putBytes(vectorRow, currentDataOffset, length, data);
        } else if (dt instanceof BooleanType) {
            vector.putBoolean(vectorRow, ByteUtil.toBoolean(data[currentDataOffset]));
        } else if (dt instanceof ShortType) {
            vector.putShort(vectorRow, ByteUtil.toShort(data, currentDataOffset, length));
        } else if (dt instanceof IntegerType) {
            vector.putInt(vectorRow, ByteUtil.toInt(data, currentDataOffset, length));
        } else if (dt instanceof FloatType) {
            vector.putFloat(vectorRow, ByteUtil.toFloat(data, currentDataOffset));
        } else if (dt instanceof DoubleType) {
            vector.putDouble(vectorRow, ByteUtil.toDouble(data, currentDataOffset));
        } else if (dt instanceof LongType) {
            vector.putLong(vectorRow, ByteUtil.toLong(data, currentDataOffset, length));
        } else if (dt instanceof DecimalType) {
            vector.putDecimal(vectorRow, Decimal.apply(ByteUtil.toBigDecimal(data, currentDataOffset, length)), DecimalType.MAX_PRECISION());
        }
    }
}
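The offset and length arithmetic described in the comments above is easier to see on a toy layout. The sketch below is not CarbonData code; it is a minimal illustration assuming each row is stored as a 2-byte length prefix followed by its payload, with dataOffsets[i] pointing at the start of row i's payload, which is the layout the computation in fillRow implies. The class and method names (VariableLengthLayoutSketch, pack, lengthOf) are made up for the example.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Illustration only: assumed layout is [short length][payload] per row, with
// dataOffsets[i] pointing at the start of row i's payload (just past the prefix).
public class VariableLengthLayoutSketch {

    static byte[] data;
    static int[] dataOffsets;
    static int numberOfRows;

    // pack a few rows the way the computation in fillRow expects them
    static void pack(String... rows) {
        numberOfRows = rows.length;
        dataOffsets = new int[numberOfRows];
        int total = 0;
        for (String row : rows) {
            total += 2 + row.getBytes(StandardCharsets.UTF_8).length;
        }
        ByteBuffer buffer = ByteBuffer.allocate(total);
        for (int i = 0; i < numberOfRows; i++) {
            byte[] payload = rows[i].getBytes(StandardCharsets.UTF_8);
            buffer.putShort((short) payload.length);  // 2-byte length prefix
            dataOffsets[i] = buffer.position();       // offset of the payload, not the prefix
            buffer.put(payload);
        }
        data = buffer.array();
    }

    // same computation as fillRow: gap to the next payload minus the 2-byte prefix,
    // or the remainder of the array for the last row
    static int lengthOf(int rowId) {
        int currentDataOffset = dataOffsets[rowId];
        if (rowId < numberOfRows - 1) {
            return dataOffsets[rowId + 1] - (currentDataOffset + 2);
        }
        return data.length - currentDataOffset;
    }

    public static void main(String[] args) {
        pack("ab", "cdef", "g");
        for (int i = 0; i < numberOfRows; i++) {
            System.out.println("row " + i + ": length=" + lengthOf(i) + " value="
                + new String(data, dataOffsets[i], lengthOf(i), StandardCharsets.UTF_8));
        }
    }
}

Running it prints lengths 2, 4 and 1 and the values ab, cdef and g, matching the rule in the comments: every row except the last subtracts the 2-byte prefix of the following record, and the last row takes whatever remains of the array.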
Also used: IntegerType (org.apache.spark.sql.types.IntegerType), LongType (org.apache.spark.sql.types.LongType), StringType (org.apache.spark.sql.types.StringType), DoubleType (org.apache.spark.sql.types.DoubleType), ShortType (org.apache.spark.sql.types.ShortType), BooleanType (org.apache.spark.sql.types.BooleanType), DataType (org.apache.spark.sql.types.DataType), DecimalType (org.apache.spark.sql.types.DecimalType), FloatType (org.apache.spark.sql.types.FloatType)
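Since this example is indexed under DoubleType, the branch of interest above is vector.putDouble(vectorRow, ByteUtil.toDouble(data, currentDataOffset)). The helper below is a hypothetical stand-in for that decoding step, assuming the payload holds the eight bytes of a big-endian IEEE-754 double; the actual ByteUtil implementation is not shown on this page and may differ.

import java.nio.ByteBuffer;

// Hypothetical stand-in for ByteUtil.toDouble(data, offset): the eight bytes
// starting at offset are interpreted as a big-endian IEEE-754 double.
public final class DoubleDecodeSketch {

    static double readDouble(byte[] data, int offset) {
        return ByteBuffer.wrap(data, offset, Double.BYTES).getDouble();
    }

    public static void main(String[] args) {
        // pack 3.14 into a standalone 8-byte payload and read it back at offset 0
        byte[] payload = ByteBuffer.allocate(Double.BYTES).putDouble(3.14).array();
        System.out.println(readDouble(payload, 0));  // prints 3.14
    }
}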

Aggregations

BooleanType (org.apache.spark.sql.types.BooleanType): 1
DataType (org.apache.spark.sql.types.DataType): 1
DecimalType (org.apache.spark.sql.types.DecimalType): 1
DoubleType (org.apache.spark.sql.types.DoubleType): 1
FloatType (org.apache.spark.sql.types.FloatType): 1
IntegerType (org.apache.spark.sql.types.IntegerType): 1
LongType (org.apache.spark.sql.types.LongType): 1
ShortType (org.apache.spark.sql.types.ShortType): 1
StringType (org.apache.spark.sql.types.StringType): 1