Use of io.trino.spi.type.TimestampWithTimeZoneType in project trino by trinodb: class PostgreSqlClient, method toWriteMapping.
@Override
public WriteMapping toWriteMapping(ConnectorSession session, Type type) {
    if (type == BOOLEAN) {
        return WriteMapping.booleanMapping("boolean", booleanWriteFunction());
    }
    if (type == TINYINT) {
        // PostgreSQL has no type corresponding to tinyint
        return WriteMapping.longMapping("smallint", tinyintWriteFunction());
    }
    if (type == SMALLINT) {
        return WriteMapping.longMapping("smallint", smallintWriteFunction());
    }
    if (type == INTEGER) {
        return WriteMapping.longMapping("integer", integerWriteFunction());
    }
    if (type == BIGINT) {
        return WriteMapping.longMapping("bigint", bigintWriteFunction());
    }
    if (type == REAL) {
        return WriteMapping.longMapping("real", realWriteFunction());
    }
    if (type == DOUBLE) {
        return WriteMapping.doubleMapping("double precision", doubleWriteFunction());
    }
    if (type instanceof DecimalType) {
        DecimalType decimalType = (DecimalType) type;
        String dataType = format("decimal(%s, %s)", decimalType.getPrecision(), decimalType.getScale());
        if (decimalType.isShort()) {
            return WriteMapping.longMapping(dataType, shortDecimalWriteFunction(decimalType));
        }
        return WriteMapping.objectMapping(dataType, longDecimalWriteFunction(decimalType));
    }
    if (type instanceof CharType) {
        return WriteMapping.sliceMapping("char(" + ((CharType) type).getLength() + ")", charWriteFunction());
    }
    if (type instanceof VarcharType) {
        VarcharType varcharType = (VarcharType) type;
        String dataType;
        if (varcharType.isUnbounded()) {
            dataType = "varchar";
        } else {
            dataType = "varchar(" + varcharType.getBoundedLength() + ")";
        }
        return WriteMapping.sliceMapping(dataType, varcharWriteFunction());
    }
    if (VARBINARY.equals(type)) {
        return WriteMapping.sliceMapping("bytea", varbinaryWriteFunction());
    }
    if (type == DATE) {
        return WriteMapping.longMapping("date", dateWriteFunctionUsingLocalDate());
    }
    if (type instanceof TimeType) {
        TimeType timeType = (TimeType) type;
        if (timeType.getPrecision() <= POSTGRESQL_MAX_SUPPORTED_TIMESTAMP_PRECISION) {
            return WriteMapping.longMapping(format("time(%s)", timeType.getPrecision()), timeWriteFunction(timeType.getPrecision()));
        }
        return WriteMapping.longMapping(format("time(%s)", POSTGRESQL_MAX_SUPPORTED_TIMESTAMP_PRECISION), timeWriteFunction(POSTGRESQL_MAX_SUPPORTED_TIMESTAMP_PRECISION));
    }
    if (type instanceof TimestampType) {
        TimestampType timestampType = (TimestampType) type;
        if (timestampType.getPrecision() <= POSTGRESQL_MAX_SUPPORTED_TIMESTAMP_PRECISION) {
            verify(timestampType.getPrecision() <= TimestampType.MAX_SHORT_PRECISION);
            return WriteMapping.longMapping(format("timestamp(%s)", timestampType.getPrecision()), PostgreSqlClient::shortTimestampWriteFunction);
        }
        verify(timestampType.getPrecision() > TimestampType.MAX_SHORT_PRECISION);
        return WriteMapping.objectMapping(format("timestamp(%s)", POSTGRESQL_MAX_SUPPORTED_TIMESTAMP_PRECISION), longTimestampWriteFunction());
    }
    if (type instanceof TimestampWithTimeZoneType) {
        TimestampWithTimeZoneType timestampWithTimeZoneType = (TimestampWithTimeZoneType) type;
        if (timestampWithTimeZoneType.getPrecision() <= POSTGRESQL_MAX_SUPPORTED_TIMESTAMP_PRECISION) {
            String dataType = format("timestamptz(%d)", timestampWithTimeZoneType.getPrecision());
            if (timestampWithTimeZoneType.getPrecision() <= TimestampWithTimeZoneType.MAX_SHORT_PRECISION) {
                return WriteMapping.longMapping(dataType, shortTimestampWithTimeZoneWriteFunction());
            }
            return WriteMapping.objectMapping(dataType, longTimestampWithTimeZoneWriteFunction());
        }
        return WriteMapping.objectMapping(format("timestamptz(%d)", POSTGRESQL_MAX_SUPPORTED_TIMESTAMP_PRECISION), longTimestampWithTimeZoneWriteFunction());
    }
    if (type.equals(jsonType)) {
        return WriteMapping.sliceMapping("jsonb", typedVarcharWriteFunction("json"));
    }
    if (type.equals(uuidType)) {
        return WriteMapping.sliceMapping("uuid", uuidWriteFunction());
    }
    if (type instanceof ArrayType && getArrayMapping(session) == AS_ARRAY) {
        Type elementType = ((ArrayType) type).getElementType();
        String elementDataType = toWriteMapping(session, elementType).getDataType();
        return WriteMapping.objectMapping(elementDataType + "[]", arrayWriteFunction(session, elementType, getArrayElementPgTypeName(session, this, elementType)));
    }
    throw new TrinoException(NOT_SUPPORTED, "Unsupported column type: " + type.getDisplayName());
}
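Each branch pairs a PostgreSQL type name with a write function that binds a Trino value to a JDBC PreparedStatement parameter. As a rough sketch of what one such write function could look like, assuming the Trino JDBC SPI's LongWriteFunction shape (an illustration, not the actual PostgreSqlClient implementation):

private static LongWriteFunction timeWriteFunction(int precision) {
    checkArgument(precision <= POSTGRESQL_MAX_SUPPORTED_TIMESTAMP_PRECISION, "Unsupported precision: %s", precision);
    return (statement, index, picosOfDay) -> {
        // Trino represents short TIME values as picoseconds of day;
        // convert to nanoseconds for java.time.LocalTime
        statement.setObject(index, LocalTime.ofNanoOfDay(picosOfDay / 1_000));
    };
}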
Use of io.trino.spi.type.TimestampWithTimeZoneType in project trino by trinodb: class PostgreSqlClient, method timestampWithTimeZoneColumnMapping.
private static ColumnMapping timestampWithTimeZoneColumnMapping(int precision) {
    // PostgreSQL supports timestamptz precision up to microseconds
    checkArgument(precision <= POSTGRESQL_MAX_SUPPORTED_TIMESTAMP_PRECISION, "unsupported precision value %s", precision);
    TimestampWithTimeZoneType prestoType = createTimestampWithTimeZoneType(precision);
    if (precision <= TimestampWithTimeZoneType.MAX_SHORT_PRECISION) {
        return ColumnMapping.longMapping(prestoType, shortTimestampWithTimeZoneReadFunction(), shortTimestampWithTimeZoneWriteFunction());
    }
    return ColumnMapping.objectMapping(prestoType, longTimestampWithTimeZoneReadFunction(), longTimestampWithTimeZoneWriteFunction());
}
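For the short case, the matching read function presumably fetches an OffsetDateTime from the PostgreSQL driver and packs the instant plus a zone key into Trino's single-long encoding. A minimal sketch, assuming the standard io.trino.spi.type.DateTimeEncoding helpers (not the verbatim project code):

private static LongReadFunction shortTimestampWithTimeZoneReadFunction() {
    return (resultSet, columnIndex) -> {
        // PostgreSQL's JDBC driver materializes timestamptz as OffsetDateTime
        OffsetDateTime value = resultSet.getObject(columnIndex, OffsetDateTime.class);
        return packDateTimeWithZone(value.toInstant().toEpochMilli(), UTC_KEY);
    };
}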
Use of io.trino.spi.type.TimestampWithTimeZoneType in project TiBigData by tidb-incubator: class TypeHelpers, method toSqlString.
public static String toSqlString(Type type) {
    if (type instanceof TimeWithTimeZoneType || type instanceof TimestampWithTimeZoneType) {
        throw new TrinoException(NOT_SUPPORTED, "Unsupported column type: " + type.getDisplayName());
    }
    if (type instanceof TimestampType) {
        return format("timestamp(%s)", ((TimestampType) type).getPrecision());
    }
    if (type instanceof VarcharType) {
        VarcharType varcharType = (VarcharType) type;
        if (varcharType.isUnbounded()) {
            return "longtext";
        }
        Integer length = varcharType.getLength().orElseThrow(IllegalStateException::new);
        if (length <= 255) {
            return "tinytext";
        }
        if (length <= 65535) {
            return "text";
        }
        if (length <= 16777215) {
            return "mediumtext";
        }
        return "longtext";
    }
    if (type instanceof CharType) {
        int length = ((CharType) type).getLength();
        if (length <= 255) {
            return "char(" + length + ")";
        }
        return "text";
    }
    if (type instanceof DecimalType) {
        return format("decimal(%s, %s)", ((DecimalType) type).getPrecision(), ((DecimalType) type).getScale());
    }
    if (type instanceof TimeType) {
        return format("time(%s)", ((TimeType) type).getPrecision());
    }
    String sqlType = SQL_TYPES.get(type);
    if (sqlType != null) {
        return sqlType;
    }
    return type.getDisplayName();
}
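The trailing SQL_TYPES lookup covers the fixed-width primitives that need no parameterization. A plausible shape for that map, with illustrative entries that may not match TiBigData's actual table:

private static final Map<Type, String> SQL_TYPES = ImmutableMap.<Type, String>builder()
        .put(BOOLEAN, "boolean")
        .put(TINYINT, "tinyint")
        .put(SMALLINT, "smallint")
        .put(INTEGER, "int")
        .put(BIGINT, "bigint")
        .put(REAL, "float")
        .put(DOUBLE, "double")
        .put(DATE, "date")
        .put(VARBINARY, "mediumblob")
        .build();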
Use of io.trino.spi.type.TimestampWithTimeZoneType in project trino by trinodb: class DeltaLakeParquetStatisticsUtils, method getMax.
private static Optional<Object> getMax(Type type, Statistics<?> statistics) {
    if (statistics.genericGetMax() == null) {
        return Optional.empty();
    }
    if (type.equals(DateType.DATE)) {
        checkArgument(statistics instanceof IntStatistics, "Column with DATE type contained invalid statistics: %s", statistics);
        IntStatistics intStatistics = (IntStatistics) statistics;
        LocalDate date = LocalDate.ofEpochDay(intStatistics.genericGetMax());
        return Optional.of(date.format(ISO_LOCAL_DATE));
    }
    if (type instanceof TimestampWithTimeZoneType) {
        if (statistics instanceof LongStatistics) {
            Instant ts = Instant.ofEpochMilli(((LongStatistics) statistics).genericGetMax());
            return Optional.of(ISO_INSTANT.format(ZonedDateTime.ofInstant(ts, UTC)));
        } else if (statistics instanceof BinaryStatistics) {
            DecodedTimestamp decodedTimestamp = decodeInt96Timestamp(((BinaryStatistics) statistics).genericGetMax());
            Instant ts = Instant.ofEpochSecond(decodedTimestamp.getEpochSeconds(), decodedTimestamp.getNanosOfSecond());
            ZonedDateTime zonedDateTime = ZonedDateTime.ofInstant(ts, UTC);
            ZonedDateTime truncatedToMillis = zonedDateTime.truncatedTo(MILLIS);
            if (truncatedToMillis.isBefore(zonedDateTime)) {
                truncatedToMillis = truncatedToMillis.plus(1, MILLIS);
            }
            return Optional.of(ISO_INSTANT.format(truncatedToMillis));
        }
    }
    if (type.equals(BIGINT) || type.equals(TINYINT) || type.equals(SMALLINT) || type.equals(INTEGER)) {
        checkArgument(statistics instanceof IntStatistics || statistics instanceof LongStatistics, "Column with %s type contained invalid statistics: %s", type, statistics);
        return Optional.of(statistics.genericGetMax());
    }
    if (type.equals(REAL)) {
        checkArgument(statistics instanceof FloatStatistics, "Column with REAL type contained invalid statistics: %s", statistics);
        return Optional.of(((FloatStatistics) statistics).genericGetMax());
    }
    if (type.equals(DOUBLE)) {
        checkArgument(statistics instanceof DoubleStatistics, "Column with DOUBLE type contained invalid statistics: %s", statistics);
        return Optional.of(((DoubleStatistics) statistics).genericGetMax());
    }
    if (type instanceof DecimalType) {
        LogicalTypeAnnotation logicalType = statistics.type().getLogicalTypeAnnotation();
        checkArgument(logicalType instanceof LogicalTypeAnnotation.DecimalLogicalTypeAnnotation, "DECIMAL column had invalid Parquet Logical Type: %s", logicalType);
        int scale = ((LogicalTypeAnnotation.DecimalLogicalTypeAnnotation) logicalType).getScale();
        BigDecimal max;
        if (statistics instanceof IntStatistics) {
            max = BigDecimal.valueOf(((IntStatistics) statistics).getMax()).movePointLeft(scale);
            return Optional.of(max.toPlainString());
        } else if (statistics instanceof LongStatistics) {
            max = BigDecimal.valueOf(((LongStatistics) statistics).getMax()).movePointLeft(scale);
            return Optional.of(max.toPlainString());
        } else if (statistics instanceof BinaryStatistics) {
            BigInteger base = new BigInteger(((BinaryStatistics) statistics).genericGetMax().getBytes());
            max = new BigDecimal(base, scale);
            return Optional.of(max.toPlainString());
        }
    }
    if (type instanceof VarcharType) {
        return Optional.of(new String(((BinaryStatistics) statistics).genericGetMax().getBytes(), UTF_8));
    }
    if (type.equals(BOOLEAN)) {
        // Boolean columns do not collect min/max stats
        return Optional.empty();
    }
    LOG.warn("Accumulating Parquet statistics with Trino type: %s and Parquet statistics of type: %s is not supported", type, statistics);
    return Optional.empty();
}
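Note how the INT96 branch rounds the maximum up to millisecond precision rather than simply truncating: dropping sub-millisecond digits from a max would under-report it, so the code adds one millisecond whenever truncation lost anything. A standalone illustration of that ceiling step (the values are invented for the example):

Instant ts = Instant.ofEpochSecond(1_600_000_000L, 123_456_789); // fractional part = 123.456789 ms
ZonedDateTime value = ZonedDateTime.ofInstant(ts, ZoneOffset.UTC);
ZonedDateTime max = value.truncatedTo(ChronoUnit.MILLIS);        // .123, now below the true value
if (max.isBefore(value)) {
    max = max.plus(1, ChronoUnit.MILLIS);                        // .124, a safe upper bound
}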
Use of io.trino.spi.type.TimestampWithTimeZoneType in project trino by trinodb: class DeltaLakePageSink, method createParquetFileWriter.
private FileWriter createParquetFileWriter(Path path) {
    ParquetWriterOptions parquetWriterOptions = ParquetWriterOptions.builder()
            .setMaxBlockSize(getParquetWriterBlockSize(session))
            .setMaxPageSize(getParquetWriterPageSize(session))
            .build();
    CompressionCodecName compressionCodecName = getCompressionCodec(session).getParquetCompressionCodec();
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(session.getIdentity(), path, conf);
        Callable<Void> rollbackAction = () -> {
            fileSystem.delete(path, false);
            return null;
        };
        List<Type> parquetTypes = dataColumnTypes.stream()
                .map(type -> {
                    if (type instanceof TimestampWithTimeZoneType) {
                        verify(((TimestampWithTimeZoneType) type).getPrecision() == 3, "Unsupported type: %s", type);
                        return TIMESTAMP_MILLIS;
                    }
                    return type;
                })
                .collect(toImmutableList());
        // we use identity column mapping; input page already contains only data columns per
        // DeltaLakePageSink.getDataPage()
        int[] identityMapping = new int[dataColumnTypes.size()];
        for (int i = 0; i < identityMapping.length; ++i) {
            identityMapping[i] = i;
        }
        ParquetSchemaConverter schemaConverter = new ParquetSchemaConverter(parquetTypes, dataColumnNames);
        return new ParquetFileWriter(fileSystem.create(path), rollbackAction, parquetTypes, schemaConverter.getMessageType(), schemaConverter.getPrimitiveTypes(), parquetWriterOptions, identityMapping, compressionCodecName, trinoVersion);
    } catch (IOException e) {
        throw new TrinoException(DELTA_LAKE_BAD_WRITE, "Error creating Parquet file", e);
    }
}
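As an aside, the identity-mapping loop could equally be written with a stream; an equivalent one-liner (a stylistic alternative, not the project's code):

int[] identityMapping = IntStream.range(0, dataColumnTypes.size()).toArray();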