Use of org.apache.hadoop.hive.serde2.io.HiveDecimalWritable in project hive by apache.
The class VectorColumnAssignFactory, method buildObjectAssign.
public static VectorColumnAssign buildObjectAssign(VectorizedRowBatch outputBatch, int outColIndex, PrimitiveCategory category) throws HiveException {
  VectorColumnAssign outVCA = null;
  ColumnVector destCol = outputBatch.cols[outColIndex];
  if (destCol == null) {
    switch (category) {
      case VOID:
        outVCA = new VectorLongColumnAssign() {
          // This is a dummy assigner.
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            // This is a no-op; there is no column to assign to, and val is expected to be null.
            assert (val == null);
          }
        };
        break;
      default:
        throw new HiveException("Incompatible (null) vector column and primitive category " + category);
    }
  } else if (destCol instanceof LongColumnVector) {
    switch (category) {
      case BOOLEAN:
        outVCA = new VectorLongColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              BooleanWritable bw = (BooleanWritable) val;
              assignLong(bw.get() ? 1 : 0, destIndex);
            }
          }
        }.init(outputBatch, (LongColumnVector) destCol);
        break;
      case BYTE:
        outVCA = new VectorLongColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              ByteWritable bw = (ByteWritable) val;
              assignLong(bw.get(), destIndex);
            }
          }
        }.init(outputBatch, (LongColumnVector) destCol);
        break;
      case SHORT:
        outVCA = new VectorLongColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              ShortWritable bw = (ShortWritable) val;
              assignLong(bw.get(), destIndex);
            }
          }
        }.init(outputBatch, (LongColumnVector) destCol);
        break;
      case INT:
        outVCA = new VectorLongColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              IntWritable bw = (IntWritable) val;
              assignLong(bw.get(), destIndex);
            }
          }
        }.init(outputBatch, (LongColumnVector) destCol);
        break;
      case LONG:
        outVCA = new VectorLongColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              LongWritable bw = (LongWritable) val;
              assignLong(bw.get(), destIndex);
            }
          }
        }.init(outputBatch, (LongColumnVector) destCol);
        break;
      case TIMESTAMP:
        outVCA = new VectorTimestampColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              assignTimestamp((TimestampWritable) val, destIndex);
            }
          }
        }.init(outputBatch, (TimestampColumnVector) destCol);
        break;
      case DATE:
        outVCA = new VectorLongColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              DateWritable bw = (DateWritable) val;
              assignLong(bw.getDays(), destIndex);
            }
          }
        }.init(outputBatch, (LongColumnVector) destCol);
        break;
      case INTERVAL_YEAR_MONTH:
        outVCA = new VectorLongColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              HiveIntervalYearMonthWritable bw = (HiveIntervalYearMonthWritable) val;
              assignLong(bw.getHiveIntervalYearMonth().getTotalMonths(), destIndex);
            }
          }
        }.init(outputBatch, (LongColumnVector) destCol);
        break;
      case INTERVAL_DAY_TIME:
        outVCA = new VectorIntervalDayTimeColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              HiveIntervalDayTimeWritable bw = (HiveIntervalDayTimeWritable) val;
              assignIntervalDayTime(bw.getHiveIntervalDayTime(), destIndex);
            }
          }
        }.init(outputBatch, (IntervalDayTimeColumnVector) destCol);
        break;
      default:
        throw new HiveException("Incompatible Long vector column and primitive category " + category);
    }
  } else if (destCol instanceof DoubleColumnVector) {
    switch (category) {
      case DOUBLE:
        outVCA = new VectorDoubleColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              DoubleWritable bw = (DoubleWritable) val;
              assignDouble(bw.get(), destIndex);
            }
          }
        }.init(outputBatch, (DoubleColumnVector) destCol);
        break;
      case FLOAT:
        outVCA = new VectorDoubleColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              FloatWritable bw = (FloatWritable) val;
              assignDouble(bw.get(), destIndex);
            }
          }
        }.init(outputBatch, (DoubleColumnVector) destCol);
        break;
      default:
        throw new HiveException("Incompatible Double vector column and primitive category " + category);
    }
  } else if (destCol instanceof BytesColumnVector) {
    switch (category) {
      case BINARY:
        outVCA = new VectorBytesColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              BytesWritable bw = (BytesWritable) val;
              byte[] bytes = bw.getBytes();
              assignBytes(bytes, 0, bw.getLength(), destIndex);
            }
          }
        }.init(outputBatch, (BytesColumnVector) destCol);
        break;
      case STRING:
        outVCA = new VectorBytesColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              Text bw = (Text) val;
              byte[] bytes = bw.getBytes();
              assignBytes(bytes, 0, bw.getLength(), destIndex);
            }
          }
        }.init(outputBatch, (BytesColumnVector) destCol);
        break;
      case VARCHAR:
        outVCA = new VectorBytesColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              // We store VARCHAR type stripped of pads.
              HiveVarchar hiveVarchar;
              if (val instanceof HiveVarchar) {
                hiveVarchar = (HiveVarchar) val;
              } else {
                hiveVarchar = ((HiveVarcharWritable) val).getHiveVarchar();
              }
              byte[] bytes = hiveVarchar.getValue().getBytes();
              assignBytes(bytes, 0, bytes.length, destIndex);
            }
          }
        }.init(outputBatch, (BytesColumnVector) destCol);
        break;
      case CHAR:
        outVCA = new VectorBytesColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              // We store CHAR type stripped of pads.
              HiveChar hiveChar;
              if (val instanceof HiveChar) {
                hiveChar = (HiveChar) val;
              } else {
                hiveChar = ((HiveCharWritable) val).getHiveChar();
              }
              byte[] bytes = hiveChar.getStrippedValue().getBytes();
              assignBytes(bytes, 0, bytes.length, destIndex);
            }
          }
        }.init(outputBatch, (BytesColumnVector) destCol);
        break;
      default:
        throw new HiveException("Incompatible Bytes vector column and primitive category " + category);
    }
  } else if (destCol instanceof DecimalColumnVector) {
    switch (category) {
      case DECIMAL:
        outVCA = new VectorDecimalColumnAssign() {
          @Override
          public void assignObjectValue(Object val, int destIndex) throws HiveException {
            if (val == null) {
              assignNull(destIndex);
            } else {
              if (val instanceof HiveDecimal) {
                assignDecimal((HiveDecimal) val, destIndex);
              } else {
                assignDecimal((HiveDecimalWritable) val, destIndex);
              }
            }
          }
        }.init(outputBatch, (DecimalColumnVector) destCol);
        break;
      default:
        throw new HiveException("Incompatible Decimal vector column and primitive category " + category);
    }
  } else {
    throw new HiveException("Unknown vector column type " + destCol.getClass().getName());
  }
  return outVCA;
}
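For orientation, here is a minimal usage sketch, not taken from the Hive sources above: it assumes a one-column batch whose column 0 is a LongColumnVector, so the INT category is served by the LongColumnVector branch of buildObjectAssign; the method name assignIntExample is hypothetical.

// Minimal usage sketch; the batch setup and values are illustrative assumptions.
static void assignIntExample() throws HiveException {
  VectorizedRowBatch batch = new VectorizedRowBatch(1);
  batch.cols[0] = new LongColumnVector();
  VectorColumnAssign vca = VectorColumnAssignFactory.buildObjectAssign(batch, 0, PrimitiveCategory.INT);
  vca.assignObjectValue(new IntWritable(42), 0); // stores 42 into ((LongColumnVector) batch.cols[0]).vector[0]
  vca.assignObjectValue(null, 1); // marks row 1 as null via assignNull
  batch.size = 2;
}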
Use of org.apache.hadoop.hive.serde2.io.HiveDecimalWritable in project hive by apache.
The class VectorDeserializeRow, method convertRowColumn.
/**
* Convert one row column value that is the current value in deserializeRead.
*
* We deserialize into a writable and then pass that writable to an instance of VectorAssignRow
* to convert the writable to the target data type and assign it into the VectorizedRowBatch.
*
* @param batch
* @param batchIndex
* @param logicalColumnIndex
* @throws IOException
*/
private void convertRowColumn(VectorizedRowBatch batch, int batchIndex, int logicalColumnIndex) throws IOException {
  final int projectionColumnNum = projectionColumnNums[logicalColumnIndex];
  Writable convertSourceWritable = convertSourceWritables[logicalColumnIndex];
  switch (sourceCategories[logicalColumnIndex]) {
    case PRIMITIVE:
      {
        switch (sourcePrimitiveCategories[logicalColumnIndex]) {
          case VOID:
            convertSourceWritable = null;
            break;
          case BOOLEAN:
            ((BooleanWritable) convertSourceWritable).set(deserializeRead.currentBoolean);
            break;
          case BYTE:
            ((ByteWritable) convertSourceWritable).set(deserializeRead.currentByte);
            break;
          case SHORT:
            ((ShortWritable) convertSourceWritable).set(deserializeRead.currentShort);
            break;
          case INT:
            ((IntWritable) convertSourceWritable).set(deserializeRead.currentInt);
            break;
          case LONG:
            ((LongWritable) convertSourceWritable).set(deserializeRead.currentLong);
            break;
          case TIMESTAMP:
            ((TimestampWritable) convertSourceWritable).set(deserializeRead.currentTimestampWritable);
            break;
          case DATE:
            ((DateWritable) convertSourceWritable).set(deserializeRead.currentDateWritable);
            break;
          case FLOAT:
            ((FloatWritable) convertSourceWritable).set(deserializeRead.currentFloat);
            break;
          case DOUBLE:
            ((DoubleWritable) convertSourceWritable).set(deserializeRead.currentDouble);
            break;
          case BINARY:
            if (deserializeRead.currentBytes == null) {
              LOG.info("null binary entry: batchIndex " + batchIndex + " projection column num " + projectionColumnNum);
            }
            ((BytesWritable) convertSourceWritable).set(deserializeRead.currentBytes, deserializeRead.currentBytesStart, deserializeRead.currentBytesLength);
            break;
          case STRING:
            if (deserializeRead.currentBytes == null) {
              throw new RuntimeException("null string entry: batchIndex " + batchIndex + " projection column num " + projectionColumnNum);
            }
            // Use org.apache.hadoop.io.Text as our helper to go from byte[] to String.
            ((Text) convertSourceWritable).set(deserializeRead.currentBytes, deserializeRead.currentBytesStart, deserializeRead.currentBytesLength);
            break;
          case VARCHAR:
            {
              // Use the basic STRING bytes read to get access, then use our optimal truncate
              // method that does not use Java String objects.
              if (deserializeRead.currentBytes == null) {
                throw new RuntimeException("null varchar entry: batchIndex " + batchIndex + " projection column num " + projectionColumnNum);
              }
              int adjustedLength = StringExpr.truncate(deserializeRead.currentBytes, deserializeRead.currentBytesStart, deserializeRead.currentBytesLength, maxLengths[logicalColumnIndex]);
              ((HiveVarcharWritable) convertSourceWritable).set(new String(deserializeRead.currentBytes, deserializeRead.currentBytesStart, adjustedLength, Charsets.UTF_8), -1);
            }
            break;
          case CHAR:
            {
              // Use the basic STRING bytes read to get access, then use our optimal trim/truncate
              // method that does not use Java String objects.
              if (deserializeRead.currentBytes == null) {
                throw new RuntimeException("null char entry: batchIndex " + batchIndex + " projection column num " + projectionColumnNum);
              }
              int adjustedLength = StringExpr.rightTrimAndTruncate(deserializeRead.currentBytes, deserializeRead.currentBytesStart, deserializeRead.currentBytesLength, maxLengths[logicalColumnIndex]);
              ((HiveCharWritable) convertSourceWritable).set(new String(deserializeRead.currentBytes, deserializeRead.currentBytesStart, adjustedLength, Charsets.UTF_8), -1);
            }
            break;
          case DECIMAL:
            ((HiveDecimalWritable) convertSourceWritable).set(deserializeRead.currentHiveDecimalWritable);
            break;
          case INTERVAL_YEAR_MONTH:
            ((HiveIntervalYearMonthWritable) convertSourceWritable).set(deserializeRead.currentHiveIntervalYearMonthWritable);
            break;
          case INTERVAL_DAY_TIME:
            ((HiveIntervalDayTimeWritable) convertSourceWritable).set(deserializeRead.currentHiveIntervalDayTimeWritable);
            break;
          default:
            throw new RuntimeException("Primitive category " + sourcePrimitiveCategories[logicalColumnIndex] + " not supported");
        }
      }
      break;
    default:
      throw new RuntimeException("Category " + sourceCategories[logicalColumnIndex] + " not supported");
  }
  /*
   * Convert our source object we just read into the target object and store that in the
   * VectorizedRowBatch.
   */
  convertVectorAssignRow.assignConvertRowColumn(batch, batchIndex, logicalColumnIndex, convertSourceWritable);
}
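Condensing the DECIMAL path above, the per-row hand-off the Javadoc describes looks like this (same field names as the method; shown only as a sketch):

// Sketch of the writable hand-off for a DECIMAL column: the reusable writable is
// filled from deserializeRead, then converted and assigned into the batch, which
// avoids allocating a new object per row.
HiveDecimalWritable reusable = (HiveDecimalWritable) convertSourceWritables[logicalColumnIndex];
reusable.set(deserializeRead.currentHiveDecimalWritable); // copy the current field's value
convertVectorAssignRow.assignConvertRowColumn(batch, batchIndex, logicalColumnIndex, reusable);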
Use of org.apache.hadoop.hive.serde2.io.HiveDecimalWritable in project presto by prestodb.
The class ParquetShortDecimalColumnReader, method readValue.
@Override
protected void readValue(BlockBuilder blockBuilder, Type type) {
  if (definitionLevel == columnDescriptor.getMaxDefinitionLevel()) {
    long decimalValue;
    if (columnDescriptor.getType().equals(INT32)) {
      HiveDecimalWritable hiveDecimalWritable = new HiveDecimalWritable(HiveDecimal.create(valuesReader.readInteger()));
      decimalValue = getShortDecimalValue(hiveDecimalWritable, ((DecimalType) type).getScale());
    } else if (columnDescriptor.getType().equals(INT64)) {
      HiveDecimalWritable hiveDecimalWritable = new HiveDecimalWritable(HiveDecimal.create(valuesReader.readLong()));
      decimalValue = getShortDecimalValue(hiveDecimalWritable, ((DecimalType) type).getScale());
    } else {
      decimalValue = getShortDecimalValue(valuesReader.readBytes().getBytes());
    }
    type.writeLong(blockBuilder, decimalValue);
  } else {
    blockBuilder.appendNull();
  }
}
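A "short" decimal here is one whose unscaled value fits in a Java long. The helper getShortDecimalValue is not shown above; as a hedged sketch, it is assumed to rescale the decimal to the column's scale and return the unscaled long, roughly like the hypothetical toShortDecimal below (the real Presto helper may differ in rounding and overflow handling):

// Hypothetical re-implementation sketch of getShortDecimalValue(writable, scale).
static long toShortDecimal(HiveDecimalWritable writable, int scale) {
  java.math.BigDecimal rescaled =
      writable.getHiveDecimal().bigDecimalValue().setScale(scale, java.math.RoundingMode.HALF_UP);
  return rescaled.unscaledValue().longValueExact(); // fits in a long for short decimals
}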
Use of org.apache.hadoop.hive.serde2.io.HiveDecimalWritable in project hive by apache.
The class VectorPTFEvaluatorDecimalMin, method evaluateGroupBatch.
public void evaluateGroupBatch(VectorizedRowBatch batch, boolean isLastGroupBatch) {
  evaluateInputExpr(batch);
  // Determine minimum of all non-null decimal column values; maintain isGroupResultNull.
  // We do not filter when PTF is in reducer.
  Preconditions.checkState(!batch.selectedInUse);
  final int size = batch.size;
  if (size == 0) {
    return;
  }
  DecimalColumnVector decimalColVector = ((DecimalColumnVector) batch.cols[inputColumnNum]);
  if (decimalColVector.isRepeating) {
    if (decimalColVector.noNulls || !decimalColVector.isNull[0]) {
      if (isGroupResultNull) {
        min.set(decimalColVector.vector[0]);
        isGroupResultNull = false;
      } else {
        HiveDecimalWritable repeatedMin = decimalColVector.vector[0];
        if (repeatedMin.compareTo(min) == -1) {
          min.set(repeatedMin);
        }
      }
    }
  } else if (decimalColVector.noNulls) {
    HiveDecimalWritable[] vector = decimalColVector.vector;
    if (isGroupResultNull) {
      min.set(vector[0]);
      isGroupResultNull = false;
    } else {
      final HiveDecimalWritable dec = vector[0];
      if (dec.compareTo(min) == -1) {
        min.set(dec);
      }
    }
    for (int i = 1; i < size; i++) {
      final HiveDecimalWritable dec = vector[i];
      if (dec.compareTo(min) == -1) {
        min.set(dec);
      }
    }
  } else {
    boolean[] batchIsNull = decimalColVector.isNull;
    int i = 0;
    while (batchIsNull[i]) {
      if (++i >= size) {
        return;
      }
    }
    HiveDecimalWritable[] vector = decimalColVector.vector;
    if (isGroupResultNull) {
      min.set(vector[i++]);
      isGroupResultNull = false;
    } else {
      final HiveDecimalWritable dec = vector[i++];
      if (dec.compareTo(min) == -1) {
        min.set(dec);
      }
    }
    for (; i < size; i++) {
      if (!batchIsNull[i]) {
        final HiveDecimalWritable dec = vector[i];
        if (dec.compareTo(min) == -1) {
          min.set(dec);
        }
      }
    }
  }
}
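The group minimum is maintained with a compare-and-copy idiom on HiveDecimalWritable; here is a standalone illustration with made-up values (the method name minIdiomExample is hypothetical):

// Illustration only: compareTo returns -1 when the candidate is smaller, and
// min.set(...) copies the value instead of allocating a new writable per row.
static void minIdiomExample() {
  HiveDecimalWritable min = new HiveDecimalWritable(HiveDecimal.create("7.25"));
  HiveDecimalWritable candidate = new HiveDecimalWritable(HiveDecimal.create("3.50"));
  if (candidate.compareTo(min) == -1) {
    min.set(candidate); // min now holds 3.50
  }
}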
Use of org.apache.hadoop.hive.serde2.io.HiveDecimalWritable in project hive by apache.
The class DruidSerDe, method deserialize.
@Override
public Object deserialize(Writable writable) throws SerDeException {
  final DruidWritable input = (DruidWritable) writable;
  final List<Object> output = Lists.newArrayListWithExpectedSize(columns.length);
  for (int i = 0; i < columns.length; i++) {
    final Object value = input.getValue().get(columns[i]);
    if (value == null) {
      output.add(null);
      continue;
    }
    switch (types[i].getPrimitiveCategory()) {
      case TIMESTAMP:
        output.add(new TimestampWritable(Timestamp.valueOf(
            ZonedDateTime.ofInstant(Instant.ofEpochMilli(((Number) value).longValue()), tsTZTypeInfo.timeZone())
                .format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")))));
        break;
      case TIMESTAMPLOCALTZ:
        output.add(new TimestampLocalTZWritable(new TimestampTZ(
            ZonedDateTime.ofInstant(Instant.ofEpochMilli(((Number) value).longValue()), ((TimestampLocalTZTypeInfo) types[i]).timeZone()))));
        break;
      case BYTE:
        output.add(new ByteWritable(((Number) value).byteValue()));
        break;
      case SHORT:
        output.add(new ShortWritable(((Number) value).shortValue()));
        break;
      case INT:
        output.add(new IntWritable(((Number) value).intValue()));
        break;
      case LONG:
        output.add(new LongWritable(((Number) value).longValue()));
        break;
      case FLOAT:
        output.add(new FloatWritable(((Number) value).floatValue()));
        break;
      case DOUBLE:
        output.add(new DoubleWritable(((Number) value).doubleValue()));
        break;
      case DECIMAL:
        output.add(new HiveDecimalWritable(HiveDecimal.create(((Number) value).doubleValue())));
        break;
      case CHAR:
        output.add(new HiveCharWritable(new HiveChar(value.toString(), ((CharTypeInfo) types[i]).getLength())));
        break;
      case VARCHAR:
        output.add(new HiveVarcharWritable(new HiveVarchar(value.toString(), ((VarcharTypeInfo) types[i]).getLength())));
        break;
      case STRING:
        output.add(new Text(value.toString()));
        break;
      case BOOLEAN:
        output.add(new BooleanWritable(Boolean.valueOf(value.toString())));
        break;
      default:
        throw new SerDeException("Unknown type: " + types[i].getPrimitiveCategory());
    }
  }
  return output;
}
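Note that the DECIMAL branch converts through doubleValue(), so values needing more than double precision (roughly 15-16 significant digits) can lose digits. A standalone illustration of that construction, with a made-up sample value and hypothetical method name:

// Illustration of the DECIMAL conversion above; the sample value is made up.
static HiveDecimalWritable druidDecimalExample() {
  Number value = 1234.56d;
  return new HiveDecimalWritable(HiveDecimal.create(value.doubleValue()));
}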