Use of org.apache.hadoop.hive.common.type.Timestamp in project hive by apache.
From the class RandomTypeUtil, method getRandTimestamp:
/**
 * Generates a random {@code Timestamp} whose year falls in the inclusive range
 * [minYear, maxYear].
 *
 * The fractional-second part is randomized over four equally likely cases:
 * none, full nanosecond precision, millisecond precision, and
 * sub-millisecond precision only. Day-of-month is capped at 28 so the
 * generated date is valid in every month.
 *
 * @param r       source of randomness
 * @param minYear inclusive lower bound for the year
 * @param maxYear inclusive upper bound for the year
 * @return the parsed random timestamp
 * @throws RuntimeException (rethrown) if the generated string fails to parse;
 *         the offending string is printed to stderr first
 */
public static Timestamp getRandTimestamp(Random r, int minYear, int maxYear) {
  String optionalNanos = "";
  switch (r.nextInt(4)) {
  case 0:
    // No fractional seconds.
    break;
  case 1:
    // Full nanosecond precision.
    optionalNanos = String.format(".%09d", r.nextInt((int) NANOSECONDS_PER_SECOND));
    break;
  case 2:
    // Limit to milliseconds only...
    optionalNanos = String.format(".%09d", r.nextInt((int) MILLISECONDS_PER_SECOND) * NANOSECONDS_PER_MILLISECOND);
    break;
  case 3:
    // Limit to below milliseconds only...
    optionalNanos = String.format(".%09d", r.nextInt((int) NANOSECONDS_PER_MILLISECOND));
    break;
  }
  String timestampStr = String.format("%04d-%02d-%02d %02d:%02d:%02d%s",
      minYear + r.nextInt(maxYear - minYear + 1), // year
      1 + r.nextInt(12),                          // month
      1 + r.nextInt(28),                          // day (capped at 28; valid in any month)
      r.nextInt(24),                              // hour
      r.nextInt(60),                              // minute
      r.nextInt(60),                              // second
      optionalNanos);                             // optional fractional seconds
  Timestamp timestampVal;
  try {
    timestampVal = Timestamp.valueOf(timestampStr);
  } catch (Exception e) {
    // Surface the exact string so a failed parse in a randomized test is diagnosable.
    System.err.println("Timestamp string " + timestampStr + " did not parse");
    throw e;
  }
  return timestampVal;
}
Use of org.apache.hadoop.hive.common.type.Timestamp in project hive by apache.
From the class AvroDeserializer, method deserializePrimitive:
/**
 * Converts a deserialized Avro primitive {@code datum} into the Hive object
 * that corresponds to {@code columnType}.
 *
 * @param datum        the raw value produced by the Avro reader (e.g. Utf8,
 *                     ByteBuffer, Fixed, Integer, Long)
 * @param fileSchema   the writer's (file) schema for this field; required for
 *                     DECIMAL/CHAR/VARCHAR, which carry scale/length props
 * @param recordSchema the reader's resolved schema for this field
 * @param columnType   the Hive primitive type to produce
 * @return the converted Hive value, or {@code datum} unchanged for categories
 *         that need no conversion
 * @throws AvroSerdeException if the schema type is unexpected or a required
 *         schema property is missing/unreadable
 */
private Object deserializePrimitive(Object datum, Schema fileSchema, Schema recordSchema, PrimitiveTypeInfo columnType) throws AvroSerdeException {
  switch(columnType.getPrimitiveCategory()) {
  case STRING:
    // Avro hands back a Utf8 instance rather than a java.lang.String;
    // convert it to a String here.
    return datum.toString();
  case BINARY:
    if (recordSchema.getType() == Type.FIXED) {
      Fixed fixed = (Fixed) datum;
      return fixed.bytes();
    } else if (recordSchema.getType() == Type.BYTES) {
      return AvroSerdeUtils.getBytesFromByteBuffer((ByteBuffer) datum);
    } else {
      throw new AvroSerdeException("Unexpected Avro schema for Binary TypeInfo: " + recordSchema.getType());
    }
  case DECIMAL:
    // Scale lives in the writer's schema properties; without it the bytes
    // cannot be interpreted.
    if (fileSchema == null) {
      throw new AvroSerdeException("File schema is missing for decimal field. Reader schema is " + columnType);
    }
    int scale = 0;
    try {
      scale = AvroSerdeUtils.getIntFromSchema(fileSchema, AvroSerDe.AVRO_PROP_SCALE);
    } catch (Exception ex) {
      throw new AvroSerdeException("Failed to obtain scale value from file schema: " + fileSchema, ex);
    }
    HiveDecimal dec = AvroSerdeUtils.getHiveDecimalFromByteBuffer((ByteBuffer) datum, scale);
    // Enforce the reader's precision/scale via the object inspector.
    JavaHiveDecimalObjectInspector oi = (JavaHiveDecimalObjectInspector) PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector((DecimalTypeInfo) columnType);
    return oi.set(null, dec);
  case CHAR:
    // maxLength is a writer-schema property; HiveChar pads/truncates to it.
    if (fileSchema == null) {
      throw new AvroSerdeException("File schema is missing for char field. Reader schema is " + columnType);
    }
    int maxLength = 0;
    try {
      maxLength = AvroSerdeUtils.getIntFromSchema(fileSchema, AvroSerDe.AVRO_PROP_MAX_LENGTH);
    } catch (Exception ex) {
      throw new AvroSerdeException("Failed to obtain maxLength value for char field from file schema: " + fileSchema, ex);
    }
    String str = datum.toString();
    HiveChar hc = new HiveChar(str, maxLength);
    return hc;
  case VARCHAR:
    if (fileSchema == null) {
      throw new AvroSerdeException("File schema is missing for varchar field. Reader schema is " + columnType);
    }
    // Note: reuses the maxLength/str locals declared in the CHAR case above
    // (switch cases share one scope).
    maxLength = 0;
    try {
      maxLength = AvroSerdeUtils.getIntFromSchema(fileSchema, AvroSerDe.AVRO_PROP_MAX_LENGTH);
    } catch (Exception ex) {
      throw new AvroSerdeException("Failed to obtain maxLength value for varchar field from file schema: " + fileSchema, ex);
    }
    str = datum.toString();
    HiveVarchar hvc = new HiveVarchar(str, maxLength);
    return hvc;
  case DATE:
    {
      if (recordSchema.getType() != Type.INT) {
        throw new AvroSerdeException("Unexpected Avro schema for Date TypeInfo: " + recordSchema.getType());
      }
      // Decide whether the stored day count is already proleptic Gregorian:
      // writer metadata wins; otherwise fall back to configuration/default.
      final boolean skipProlepticConversion;
      if (writerProleptic != null) {
        skipProlepticConversion = writerProleptic;
      } else {
        if (configuration != null) {
          skipProlepticConversion = HiveConf.getBoolVar(configuration, HiveConf.ConfVars.HIVE_AVRO_PROLEPTIC_GREGORIAN_DEFAULT);
        } else {
          skipProlepticConversion = HiveConf.ConfVars.HIVE_AVRO_PROLEPTIC_GREGORIAN_DEFAULT.defaultBoolVal;
        }
      }
      return Date.ofEpochMilli(DateWritableV2.daysToMillis(skipProlepticConversion ? (Integer) datum : CalendarUtils.convertDateToProleptic((Integer) datum)));
    }
  case TIMESTAMP:
    {
      if (recordSchema.getType() != Type.LONG) {
        // Fixed copy-paste error: this is the Timestamp branch, not Date.
        throw new AvroSerdeException("Unexpected Avro schema for Timestamp TypeInfo: " + recordSchema.getType());
      }
      // If a time zone is found in file metadata (property name: writer.time.zone), convert the
      // timestamp to that (writer) time zone in order to emulate time zone agnostic behavior.
      // If not, then the file was written by an older version of hive, so we convert the timestamp
      // to the server's (reader) time zone for backwards compatibility reasons - unless the
      // session level configuration hive.avro.timestamp.skip.conversion is set to true, in which
      // case we assume it was written by a time zone agnostic writer, so we don't convert it.
      final boolean skipUTCConversion;
      if (configuration != null) {
        skipUTCConversion = HiveConf.getBoolVar(configuration, HiveConf.ConfVars.HIVE_AVRO_TIMESTAMP_SKIP_CONVERSION);
      } else {
        skipUTCConversion = HiveConf.ConfVars.HIVE_AVRO_TIMESTAMP_SKIP_CONVERSION.defaultBoolVal;
      }
      // Legacy zone conversion: writer metadata > presence of writer zone
      // (implies modern writer) > configuration > built-in default.
      final boolean legacyConversion;
      if (writerZoneConversionLegacy != null) {
        legacyConversion = writerZoneConversionLegacy;
      } else if (writerTimezone != null) {
        legacyConversion = false;
      } else if (configuration != null) {
        legacyConversion = HiveConf.getBoolVar(configuration, HiveConf.ConfVars.HIVE_AVRO_TIMESTAMP_LEGACY_CONVERSION_ENABLED);
      } else {
        legacyConversion = HiveConf.ConfVars.HIVE_AVRO_TIMESTAMP_LEGACY_CONVERSION_ENABLED.defaultBoolVal;
      }
      ZoneId convertToTimeZone;
      if (writerTimezone != null) {
        convertToTimeZone = writerTimezone;
      } else if (skipUTCConversion) {
        convertToTimeZone = ZoneOffset.UTC;
      } else {
        convertToTimeZone = TimeZone.getDefault().toZoneId();
      }
      // Same proleptic-calendar decision as the DATE case above.
      final boolean skipProlepticConversion;
      if (writerProleptic != null) {
        skipProlepticConversion = writerProleptic;
      } else {
        if (configuration != null) {
          skipProlepticConversion = HiveConf.getBoolVar(configuration, HiveConf.ConfVars.HIVE_AVRO_PROLEPTIC_GREGORIAN_DEFAULT);
        } else {
          skipProlepticConversion = HiveConf.ConfVars.HIVE_AVRO_PROLEPTIC_GREGORIAN_DEFAULT.defaultBoolVal;
        }
      }
      Timestamp timestamp = TimestampTZUtil.convertTimestampToZone(Timestamp.ofEpochMilli((Long) datum), ZoneOffset.UTC, convertToTimeZone, legacyConversion);
      if (!skipProlepticConversion) {
        timestamp = Timestamp.ofEpochMilli(CalendarUtils.convertTimeToProleptic(timestamp.toEpochMilli()));
      }
      return timestamp;
    }
  default:
    // Remaining primitive categories (ints, longs, doubles, booleans, ...)
    // need no conversion.
    return datum;
  }
}
Use of org.apache.hadoop.hive.common.type.Timestamp in project hive by apache.
From the class TimestampWritableV2, method createTimestamp:
/**
 * Deserializes a {@code Timestamp} from its binary form starting at
 * {@code offset} within {@code bytes}.
 *
 * @param bytes  serialized timestamp data
 * @param offset index of the first byte of the serialized value
 * @return a new Timestamp populated from the bytes
 */
public static Timestamp createTimestamp(byte[] bytes, int offset) {
  final Timestamp result = new Timestamp();
  // Fill the fresh instance in place from the serialized representation.
  TimestampWritableV2.setTimestamp(result, bytes, offset);
  return result;
}
Use of org.apache.hadoop.hive.common.type.Timestamp in project hive by apache.
From the class TimestampUtils, method timestampColumnVectorWritable:
/**
 * Copies the timestamp at {@code elementNum} from the column vector into the
 * supplied writable, converting from java.sql.Timestamp to the Hive
 * Timestamp type. A null scratch value sets the writable to null.
 *
 * @param timestampColVector source column vector
 * @param elementNum         row index within the vector
 * @param timestampWritable  writable to populate (reused by the caller)
 * @return the same {@code timestampWritable}, for call chaining
 */
public static TimestampWritableV2 timestampColumnVectorWritable(TimestampColumnVector timestampColVector, int elementNum, TimestampWritableV2 timestampWritable) {
  final java.sql.Timestamp scratch = timestampColVector.asScratchTimestamp(elementNum);
  if (scratch != null) {
    // Carry over both millisecond epoch and the full nanosecond component.
    timestampWritable.set(Timestamp.ofEpochMilli(scratch.getTime(), scratch.getNanos()));
  } else {
    timestampWritable.set((Timestamp) null);
  }
  return timestampWritable;
}
Use of org.apache.hadoop.hive.common.type.Timestamp in project hive by apache.
From the class NanoTimeUtils, method getTimestamp:
/**
* Converts a nanotime representation in UTC, to a timestamp in the specified timezone.
*
* @param legacyConversion when true the conversion to the target timezone is done with legacy (backwards compatible)
* method.
*/
/**
 * Converts a nanotime representation in UTC, to a timestamp in the specified timezone.
 *
 * @param nt               the Julian day plus nanos-of-day pair, interpreted as UTC
 * @param targetZone       time zone the resulting Timestamp should represent
 * @param legacyConversion when true the conversion to the target timezone is done with legacy
 *                         (backwards compatible) method.
 * @return the converted timestamp, nanosecond precision preserved
 */
public static Timestamp getTimestamp(NanoTime nt, ZoneId targetZone, boolean legacyConversion) {
  int julianDay = nt.getJulianDay();
  long remainder = nt.getTimeOfDayNanos();
  // Normalize so that remainder lies in [0, NANOS_PER_DAY), carrying whole
  // days (positive or negative) into julianDay.
  julianDay += remainder / NANOS_PER_DAY;
  remainder %= NANOS_PER_DAY;
  if (remainder < 0) {
    remainder += NANOS_PER_DAY;
    julianDay--;
  }
  // Convert the Julian day once; the original recomputed toLocalDateTime()
  // for every calendar field.
  java.time.LocalDateTime jLocal = JulianDate.of((double) julianDay).toLocalDateTime();
  Calendar calendar = getGMTCalendar();
  calendar.set(Calendar.YEAR, jLocal.getYear());
  // java.util.Calendar months are 0-based while java.time months are 1-based.
  calendar.set(Calendar.MONTH, jLocal.getMonth().getValue() - 1);
  calendar.set(Calendar.DAY_OF_MONTH, jLocal.getDayOfMonth());
  // Split the nanos-of-day into wall-clock components.
  int hour = (int) (remainder / (NANOS_PER_HOUR));
  remainder = remainder % (NANOS_PER_HOUR);
  int minutes = (int) (remainder / (NANOS_PER_MINUTE));
  remainder = remainder % (NANOS_PER_MINUTE);
  int seconds = (int) (remainder / (NANOS_PER_SECOND));
  long nanos = remainder % NANOS_PER_SECOND;
  calendar.set(Calendar.HOUR_OF_DAY, hour);
  calendar.set(Calendar.MINUTE, minutes);
  calendar.set(Calendar.SECOND, seconds);
  // Build a UTC timestamp, then shift it into the target zone.
  Timestamp ts = Timestamp.ofEpochMilli(calendar.getTimeInMillis(), (int) nanos);
  ts = TimestampTZUtil.convertTimestampToZone(ts, ZoneOffset.UTC, targetZone, legacyConversion);
  return ts;
}
Aggregations