Use of org.apache.hadoop.hive.common.type.Timestamp in project hive by apache.
The evaluate method of the class UDFHour.
@Override
public Object evaluate(DeferredObject[] arguments) throws HiveException {
  switch (inputTypes[0]) {
    case INTERVAL_DAY_TIME:
      HiveIntervalDayTime intervalDayTime = getIntervalDayTimeValue(arguments, 0, inputTypes, converters);
      if (intervalDayTime == null) {
        return null;
      }
      output.set(intervalDayTime.getHours());
      break;
    case STRING:
    case CHAR:
    case VARCHAR:
    case DATE:
    case TIMESTAMP:
    case TIMESTAMPLOCALTZ:
    case VOID:
      Timestamp ts = getTimestampValue(arguments, 0, converters);
      if (ts == null) {
        return null;
      }
      calendar.setTimeInMillis(ts.toEpochMilli());
      output.set(calendar.get(Calendar.HOUR_OF_DAY));
  }
  return output;
}
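The hour extraction itself is plain Calendar arithmetic on epoch milliseconds. A minimal standalone sketch of that step, assuming a bare epoch-millis value in place of Hive's Timestamp and converters (the epoch value and the UTC zone are illustrative; in the UDF the Calendar is set up during initialization):

import java.util.Calendar;
import java.util.TimeZone;

public class HourFromMillisSketch {
  public static void main(String[] args) {
    // Hypothetical input: 2021-03-15T13:45:30 UTC expressed as epoch milliseconds.
    long epochMillis = 1615815930000L;
    // Mirror the UDF: load the millis into a Calendar and read HOUR_OF_DAY (0-23).
    Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
    calendar.setTimeInMillis(epochMillis);
    System.out.println(calendar.get(Calendar.HOUR_OF_DAY)); // prints 13
  }
}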
Use of org.apache.hadoop.hive.common.type.Timestamp in project hive by apache.
The evaluate method of the class GenericUDFFromUtcTimestamp.
@Override
public Object evaluate(DeferredObject[] arguments) throws HiveException {
  Object o0 = arguments[0].get();
  if (o0 == null) {
    return null;
  }
  Object o1 = arguments[1].get();
  if (o1 == null) {
    return null;
  }
  Object converted_o0 = timestampConverter.convert(o0);
  if (converted_o0 == null) {
    return null;
  }
  Timestamp inputTs = ((TimestampWritableV2) converted_o0).getTimestamp();
  String tzStr = textConverter.convert(o1).toString();
  TimeZone timezone = TimeZone.getTimeZone(tzStr);
  TimeZone fromTz;
  TimeZone toTz;
  if (invert()) {
    fromTz = timezone;
    toTz = tzUTC;
  } else {
    fromTz = tzUTC;
    toTz = timezone;
  }
  // inputTs is the year/month/day/hour/minute/second in the local timezone.
  // For this UDF we want it in the timezone represented by fromTz
  TimestampTZ fromTs = TimestampTZUtil.parse(inputTs.toString(), fromTz.toZoneId());
  if (fromTs == null) {
    return null;
  }
  // Now output this timestamp's millis value to the equivalent toTz.
  Timestamp result = Timestamp.valueOf(
      fromTs.getZonedDateTime().withZoneSameInstant(toTz.toZoneId()).toLocalDateTime().toString());
  return result;
}
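The core of the conversion is the withZoneSameInstant shift. A small sketch of the same idea with plain java.time types (the sample value and zone names are illustrative; the UDF itself goes through TimestampTZUtil and Hive's own Timestamp class):

import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.ZonedDateTime;

public class FromUtcShiftSketch {
  public static void main(String[] args) {
    // Hypothetical input: the wall-clock value the UDF receives, interpreted in fromTz (here UTC).
    LocalDateTime input = LocalDateTime.parse("2021-03-15T13:45:30");
    ZoneId fromTz = ZoneId.of("UTC");
    ZoneId toTz = ZoneId.of("America/Los_Angeles");
    // Same instant, re-expressed in the target zone, then dropped back to a local value,
    // which is what Timestamp.valueOf(...) receives in the UDF.
    ZonedDateTime shifted = input.atZone(fromTz).withZoneSameInstant(toTz);
    System.out.println(shifted.toLocalDateTime()); // 2021-03-15T06:45:30
  }
}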
Use of org.apache.hadoop.hive.common.type.Timestamp in project hive by apache.
The evaluate method of the class GenericUDFInBloomFilter.
@Override
public Object evaluate(DeferredObject[] arguments) throws HiveException {
  // Return if either of the arguments is null
  if (arguments[0].get() == null || arguments[1].get() == null) {
    return null;
  }
  if (!initializedBloomFilter) {
    // Setup the bloom filter once
    InputStream in = null;
    try {
      BytesWritable bw = (BytesWritable) arguments[1].get();
      byte[] bytes = new byte[bw.getLength()];
      System.arraycopy(bw.getBytes(), 0, bytes, 0, bw.getLength());
      in = new NonSyncByteArrayInputStream(bytes);
      bloomFilter = BloomKFilter.deserialize(in);
    } catch (IOException e) {
      throw new HiveException(e);
    } finally {
      IOUtils.closeStream(in);
    }
    initializedBloomFilter = true;
  }
  // Check if the value is in bloom filter
  switch (((PrimitiveObjectInspector) valObjectInspector).getTypeInfo().getPrimitiveCategory()) {
    case BOOLEAN:
      boolean vBoolean = ((BooleanObjectInspector) valObjectInspector).get(arguments[0].get());
      return bloomFilter.testLong(vBoolean ? 1 : 0);
    case BYTE:
      byte vByte = ((ByteObjectInspector) valObjectInspector).get(arguments[0].get());
      return bloomFilter.testLong(vByte);
    case SHORT:
      short vShort = ((ShortObjectInspector) valObjectInspector).get(arguments[0].get());
      return bloomFilter.testLong(vShort);
    case INT:
      int vInt = ((IntObjectInspector) valObjectInspector).get(arguments[0].get());
      return bloomFilter.testLong(vInt);
    case LONG:
      long vLong = ((LongObjectInspector) valObjectInspector).get(arguments[0].get());
      return bloomFilter.testLong(vLong);
    case FLOAT:
      float vFloat = ((FloatObjectInspector) valObjectInspector).get(arguments[0].get());
      return bloomFilter.testDouble(vFloat);
    case DOUBLE:
      double vDouble = ((DoubleObjectInspector) valObjectInspector).get(arguments[0].get());
      return bloomFilter.testDouble(vDouble);
    case DECIMAL:
      HiveDecimalWritable vDecimal = ((HiveDecimalObjectInspector) valObjectInspector).getPrimitiveWritableObject(arguments[0].get());
      int startIdx = vDecimal.toBytes(scratchBuffer);
      return bloomFilter.testBytes(scratchBuffer, startIdx, scratchBuffer.length - startIdx);
    case DATE:
      DateWritableV2 vDate = ((DateObjectInspector) valObjectInspector).getPrimitiveWritableObject(arguments[0].get());
      return bloomFilter.testLong(vDate.getDays());
    case TIMESTAMP:
      Timestamp vTimeStamp = ((TimestampObjectInspector) valObjectInspector).getPrimitiveJavaObject(arguments[0].get());
      return bloomFilter.testLong(vTimeStamp.toEpochMilli());
    case CHAR:
      Text vChar = ((HiveCharObjectInspector) valObjectInspector).getPrimitiveWritableObject(arguments[0].get()).getStrippedValue();
      return bloomFilter.testBytes(vChar.getBytes(), 0, vChar.getLength());
    case VARCHAR:
      Text vVarchar = ((HiveVarcharObjectInspector) valObjectInspector).getPrimitiveWritableObject(arguments[0].get()).getTextValue();
      return bloomFilter.testBytes(vVarchar.getBytes(), 0, vVarchar.getLength());
    case STRING:
      Text vString = ((StringObjectInspector) valObjectInspector).getPrimitiveWritableObject(arguments[0].get());
      return bloomFilter.testBytes(vString.getBytes(), 0, vString.getLength());
    case BINARY:
      BytesWritable vBytes = ((BinaryObjectInspector) valObjectInspector).getPrimitiveWritableObject(arguments[0].get());
      return bloomFilter.testBytes(vBytes.getBytes(), 0, vBytes.getLength());
throw new UDFArgumentTypeException(0, "Bad primitive category " + ((PrimitiveTypeInfo) valObjectInspector).getPrimitiveCategory());
}
}
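The arraycopy before deserialization matters because BytesWritable.getBytes() returns the backing buffer, which can be longer than the valid data. A sketch of that trimming step in isolation, assuming only Hadoop's BytesWritable (the bloom filter itself is omitted and the sample bytes are illustrative):

import java.util.Arrays;
import org.apache.hadoop.io.BytesWritable;

public class TrimBytesWritableSketch {
  public static void main(String[] args) {
    BytesWritable bw = new BytesWritable(new byte[] { 1, 2, 3, 4 });
    // Shrinking the logical size does not shrink the backing array.
    bw.setSize(2);
    // Copy only the valid prefix, as the UDF does before BloomKFilter.deserialize(...).
    byte[] bytes = new byte[bw.getLength()];
    System.arraycopy(bw.getBytes(), 0, bytes, 0, bw.getLength());
    System.out.println(Arrays.toString(bytes)); // [1, 2]
  }
}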
Use of org.apache.hadoop.hive.common.type.Timestamp in project hive by apache.
The evaluate method of the class GenericUDFDateAdd.
@Override
public Object evaluate(DeferredObject[] arguments) throws HiveException {
  if (arguments[0].get() == null) {
    return null;
  }
  Object daysWritableObject = daysConverter.convert(arguments[1].get());
  if (daysWritableObject == null) {
    return null;
  }
  int toBeAdded;
  if (daysWritableObject instanceof ByteWritable) {
    toBeAdded = ((ByteWritable) daysWritableObject).get();
  } else if (daysWritableObject instanceof ShortWritable) {
    toBeAdded = ((ShortWritable) daysWritableObject).get();
  } else if (daysWritableObject instanceof IntWritable) {
    toBeAdded = ((IntWritable) daysWritableObject).get();
  } else {
    return null;
  }
  // Convert the first param into a DateWritableV2 value
  switch (inputType1) {
    case STRING:
      String dateString = dateConverter.convert(arguments[0].get()).toString();
      if (DateParser.parseDate(dateString, dateVal)) {
        output.set(dateVal);
      } else {
        return null;
      }
      break;
    case TIMESTAMP:
      Timestamp ts = ((TimestampWritableV2) dateConverter.convert(arguments[0].get())).getTimestamp();
      output.set(DateWritableV2.millisToDays(ts.toEpochMilli()));
      break;
    case DATE:
      DateWritableV2 dw = (DateWritableV2) dateConverter.convert(arguments[0].get());
      output.set(dw.getDays());
      break;
    default:
      throw new UDFArgumentException(
          "DATE_ADD() only takes STRING/TIMESTAMP/DATEWRITABLE types, got " + inputType1);
  }
  int newDays = output.getDays() + (signModifier * toBeAdded);
  output.set(newDays);
  return output;
}
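Once the first argument has been normalized, the date arithmetic is just integer addition on epoch days. A sketch of the same idea with java.time, where LocalDate stands in for DateWritableV2 and its converters (the sample date and offset are illustrative; signModifier is +1 for date_add and -1 for date_sub):

import java.time.LocalDate;

public class DateAddSketch {
  public static void main(String[] args) {
    int signModifier = 1;   // +1 models DATE_ADD, -1 models DATE_SUB
    int toBeAdded = 10;
    // Work in epoch days, as the UDF does via DateWritableV2.getDays().
    long days = LocalDate.parse("2021-03-15").toEpochDay();
    long newDays = days + (long) signModifier * toBeAdded;
    System.out.println(LocalDate.ofEpochDay(newDays)); // 2021-03-25
  }
}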
Use of org.apache.hadoop.hive.common.type.Timestamp in project hive by apache.
The evaluate method of the class GenericUDFDateFormat.
@Override
public Object evaluate(DeferredObject[] arguments) throws HiveException {
  if (formatter == null) {
    return null;
  }
  // the function should support both short date and full timestamp format
  // time part of the timestamp should not be skipped
  Timestamp ts = getTimestampValue(arguments, 0, tsConverters);
  if (ts == null) {
    return null;
  }
  Instant instant = Instant.ofEpochSecond(ts.toEpochSecond(), ts.getNanos());
  ZonedDateTime zonedDateTime = ZonedDateTime.ofInstant(instant, ZoneOffset.UTC);
  String res = formatter.format(zonedDateTime.withZoneSameLocal(timeZone));
  output.set(res);
  return output;
}
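The subtle step here is withZoneSameLocal: because Hive's Timestamp carries no zone, the UDF first pins its fields at UTC and then re-labels the same wall-clock value with the session zone before formatting. A sketch with plain java.time (the pattern, zone, and epoch value are illustrative; the UDF builds its formatter from the second argument during initialization):

import java.time.Instant;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;

public class DateFormatSketch {
  public static void main(String[] args) {
    DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss z");
    ZoneId timeZone = ZoneId.of("Europe/Berlin");
    // Interpret the timezone-agnostic timestamp fields at UTC first...
    ZonedDateTime atUtc = ZonedDateTime.ofInstant(Instant.ofEpochSecond(1615815930L, 0), ZoneOffset.UTC);
    // ...then keep the wall-clock fields and merely re-tag them with the target zone.
    System.out.println(formatter.format(atUtc.withZoneSameLocal(timeZone))); // 2021-03-15 13:45:30 CET
  }
}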