Use of io.trino.plugin.jdbc.DecimalConfig.DecimalMapping.ALLOW_OVERFLOW in project trino by trinodb.
Class ClickHouseClient, method toColumnMapping.
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle)
{
    String jdbcTypeName = typeHandle.getJdbcTypeName()
            .orElseThrow(() -> new TrinoException(JDBC_ERROR, "Type name is missing: " + typeHandle));
    Optional<ColumnMapping> mapping = getForcedMappingToVarchar(typeHandle);
    if (mapping.isPresent()) {
        return mapping;
    }
    ClickHouseColumn column = ClickHouseColumn.of("", jdbcTypeName);
    ClickHouseDataType columnDataType = column.getDataType();
    switch (columnDataType) {
        case UInt8:
            return Optional.of(ColumnMapping.longMapping(SMALLINT, ResultSet::getShort, uInt8WriteFunction()));
        case UInt16:
            return Optional.of(ColumnMapping.longMapping(INTEGER, ResultSet::getInt, uInt16WriteFunction()));
        case UInt32:
            return Optional.of(ColumnMapping.longMapping(BIGINT, ResultSet::getLong, uInt32WriteFunction()));
        case UInt64:
            return Optional.of(ColumnMapping.objectMapping(UINT64_TYPE, longDecimalReadFunction(UINT64_TYPE, UNNECESSARY), uInt64WriteFunction()));
        case IPv4:
            return Optional.of(ipAddressColumnMapping("IPv4StringToNum(?)"));
        case IPv6:
            return Optional.of(ipAddressColumnMapping("IPv6StringToNum(?)"));
        case Enum8:
        case Enum16:
            return Optional.of(ColumnMapping.sliceMapping(
                    createUnboundedVarcharType(),
                    varcharReadFunction(createUnboundedVarcharType()),
                    varcharWriteFunction(),
                    // TODO (https://github.com/trinodb/trino/issues/7100) Currently pushdown would not work and may require a custom bind expression
                    DISABLE_PUSHDOWN));
        case FixedString: // FixedString(n)
        case String:
            if (isMapStringAsVarchar(session)) {
                return Optional.of(ColumnMapping.sliceMapping(createUnboundedVarcharType(), varcharReadFunction(createUnboundedVarcharType()), varcharWriteFunction(), DISABLE_PUSHDOWN));
            }
            // TODO (https://github.com/trinodb/trino/issues/7100) test & enable predicate pushdown
            return Optional.of(varbinaryColumnMapping());
        case UUID:
            return Optional.of(uuidColumnMapping());
        default:
    }
    switch (typeHandle.getJdbcType()) {
        case Types.TINYINT:
            return Optional.of(tinyintColumnMapping());
        case Types.SMALLINT:
            return Optional.of(smallintColumnMapping());
        case Types.INTEGER:
            return Optional.of(integerColumnMapping());
        case Types.BIGINT:
            return Optional.of(bigintColumnMapping());
        case Types.FLOAT:
        case Types.REAL:
            return Optional.of(ColumnMapping.longMapping(REAL, (resultSet, columnIndex) -> floatToRawIntBits(resultSet.getFloat(columnIndex)), realWriteFunction(), DISABLE_PUSHDOWN));
        case Types.DOUBLE:
            return Optional.of(doubleColumnMapping());
        case Types.DECIMAL:
            int decimalDigits = typeHandle.getRequiredDecimalDigits();
            int precision = typeHandle.getRequiredColumnSize();
            ColumnMapping decimalColumnMapping;
            if (getDecimalRounding(session) == ALLOW_OVERFLOW && precision > Decimals.MAX_PRECISION) {
                int scale = Math.min(decimalDigits, getDecimalDefaultScale(session));
                decimalColumnMapping = decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, scale), getDecimalRoundingMode(session));
            }
            else {
                decimalColumnMapping = decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0)));
            }
            return Optional.of(new ColumnMapping(
                    decimalColumnMapping.getType(),
                    decimalColumnMapping.getReadFunction(),
                    decimalColumnMapping.getWriteFunction(),
                    // TODO (https://github.com/trinodb/trino/issues/7100) fix, enable and test decimal pushdown
                    DISABLE_PUSHDOWN));
        case Types.DATE:
            return Optional.of(dateColumnMappingUsingLocalDate());
        case Types.TIMESTAMP:
            if (columnDataType == ClickHouseDataType.DateTime) {
                verify(typeHandle.getRequiredDecimalDigits() == 0, "Expected 0 as timestamp precision, but got %s", typeHandle.getRequiredDecimalDigits());
                return Optional.of(ColumnMapping.longMapping(TIMESTAMP_SECONDS, timestampReadFunction(TIMESTAMP_SECONDS), timestampSecondsWriteFunction()));
            }
            // TODO (https://github.com/trinodb/trino/issues/10537) Add support for Datetime64 type
            return Optional.of(timestampColumnMappingUsingSqlTimestampWithRounding(TIMESTAMP_MILLIS));
    }
    return Optional.empty();
}
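Each client in this listing applies the same ALLOW_OVERFLOW rule in its decimal branch: when the source precision exceeds Decimals.MAX_PRECISION (38), the mapping keeps precision 38, caps the scale at the session's decimal default scale, and rounds values on read with the session's rounding mode. The sketch below isolates that capping rule; DecimalShape, capForOverflow, and MAX_PRECISION are names invented for this example and are not Trino API.

    // Illustrative sketch of the precision/scale capping done by the ALLOW_OVERFLOW branches
    // in this listing. All names here are hypothetical; MAX_PRECISION mirrors
    // io.trino.spi.type.Decimals.MAX_PRECISION (38).
    record DecimalShape(int precision, int scale) {}

    static final int MAX_PRECISION = 38;

    static DecimalShape capForOverflow(int sourcePrecision, int sourceScale, int decimalDefaultScale)
    {
        if (sourcePrecision <= MAX_PRECISION) {
            // Fits into a Trino decimal as-is (negative scales are normalized separately).
            return new DecimalShape(sourcePrecision, Math.max(sourceScale, 0));
        }
        // Precision overflows: keep the widest Trino decimal and cap the scale at the
        // session's decimal default scale; values are rounded on read.
        return new DecimalShape(MAX_PRECISION, Math.min(sourceScale, decimalDefaultScale));
    }

For example, a ClickHouse Decimal(76, 10) read with a default scale of 4 would surface as decimal(38, 4).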
Use of io.trino.plugin.jdbc.DecimalConfig.DecimalMapping.ALLOW_OVERFLOW in project trino by trinodb.
Class PostgreSqlClient, method toColumnMapping.
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle)
{
    String jdbcTypeName = typeHandle.getJdbcTypeName()
            .orElseThrow(() -> new TrinoException(JDBC_ERROR, "Type name is missing: " + typeHandle));
    Optional<ColumnMapping> mapping = getForcedMappingToVarchar(typeHandle);
    if (mapping.isPresent()) {
        return mapping;
    }
    switch (jdbcTypeName) {
        case "money":
            return Optional.of(moneyColumnMapping());
        case "uuid":
            return Optional.of(uuidColumnMapping());
        case "jsonb":
        case "json":
            return Optional.of(jsonColumnMapping());
        case "timestamptz":
            // PostgreSQL's "timestamp with time zone" is reported as Types.TIMESTAMP rather than Types.TIMESTAMP_WITH_TIMEZONE
            int decimalDigits = typeHandle.getRequiredDecimalDigits();
            return Optional.of(timestampWithTimeZoneColumnMapping(decimalDigits));
        case "hstore":
            return Optional.of(hstoreColumnMapping(session));
    }
    switch (typeHandle.getJdbcType()) {
        case Types.BIT:
            return Optional.of(booleanColumnMapping());
        case Types.SMALLINT:
            return Optional.of(smallintColumnMapping());
        case Types.INTEGER:
            return Optional.of(integerColumnMapping());
        case Types.BIGINT:
            return Optional.of(bigintColumnMapping());
        case Types.REAL:
            return Optional.of(realColumnMapping());
        case Types.DOUBLE:
            return Optional.of(doubleColumnMapping());
        case Types.NUMERIC: {
            int columnSize = typeHandle.getRequiredColumnSize();
            int precision;
            int decimalDigits = typeHandle.getDecimalDigits().orElse(0);
            if (getDecimalRounding(session) == ALLOW_OVERFLOW) {
                if (columnSize == PRECISION_OF_UNSPECIFIED_DECIMAL) {
                    // decimal type with unspecified scale - up to 131072 digits before the decimal point; up to 16383 digits after the decimal point
                    return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, getDecimalDefaultScale(session)), getDecimalRoundingMode(session)));
                }
                precision = columnSize;
                if (precision > Decimals.MAX_PRECISION) {
                    int scale = min(decimalDigits, getDecimalDefaultScale(session));
                    return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, scale), getDecimalRoundingMode(session)));
                }
            }
            // Map decimal(p, -s) (negative scale) to decimal(p+s, 0).
            precision = columnSize + max(-decimalDigits, 0);
            if (columnSize == PRECISION_OF_UNSPECIFIED_DECIMAL || precision > Decimals.MAX_PRECISION) {
                break;
            }
            return Optional.of(decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0)), UNNECESSARY));
        }
        case Types.CHAR:
            return Optional.of(charColumnMapping(typeHandle.getRequiredColumnSize()));
        case Types.VARCHAR:
            if (!jdbcTypeName.equals("varchar")) {
                // This can be e.g. an ENUM
                return Optional.of(typedVarcharColumnMapping(jdbcTypeName));
            }
            return Optional.of(varcharColumnMapping(typeHandle.getRequiredColumnSize()));
        case Types.BINARY:
            return Optional.of(varbinaryColumnMapping());
        case Types.DATE:
            return Optional.of(ColumnMapping.longMapping(DATE, (resultSet, index) -> LocalDate.parse(resultSet.getString(index), DATE_FORMATTER).toEpochDay(), dateWriteFunctionUsingLocalDate()));
        case Types.TIME:
            return Optional.of(timeColumnMapping(typeHandle.getRequiredDecimalDigits()));
        case Types.TIMESTAMP:
            TimestampType timestampType = createTimestampType(typeHandle.getRequiredDecimalDigits());
            return Optional.of(ColumnMapping.longMapping(timestampType, timestampReadFunction(timestampType), PostgreSqlClient::shortTimestampWriteFunction));
        case Types.ARRAY:
            Optional<ColumnMapping> columnMapping = arrayToTrinoType(session, connection, typeHandle);
            if (columnMapping.isPresent()) {
                return columnMapping;
            }
            break;
    }
    if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
        return mapToUnboundedVarchar(typeHandle);
    }
    return Optional.empty();
}
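A PostgreSQL-specific wrinkle in the Types.NUMERIC block above is the unconstrained numeric type (columnSize equal to PRECISION_OF_UNSPECIFIED_DECIMAL): it only receives a Trino type when ALLOW_OVERFLOW is enabled; otherwise the branch breaks out and the column falls through to the unsupported-type handling. The following is a simplified, hypothetical model of that decision with invented names; it is not the Trino API and it omits the read/write functions.

    import java.util.Optional;

    // Simplified, hypothetical model of the Types.NUMERIC decision above. MAX_PRECISION stands
    // for Decimals.MAX_PRECISION (38) and unspecifiedPrecision for PRECISION_OF_UNSPECIFIED_DECIMAL.
    final class PostgresNumericMappingSketch
    {
        static final int MAX_PRECISION = 38;

        static Optional<String> mapNumeric(int columnSize, int decimalDigits, boolean allowOverflow, int defaultScale, int unspecifiedPrecision)
        {
            if (allowOverflow) {
                if (columnSize == unspecifiedPrecision) {
                    // Unconstrained numeric (up to 131072 digits before and 16383 after the point)
                    // is mapped to the widest Trino decimal with the configured default scale.
                    return Optional.of("decimal(" + MAX_PRECISION + ", " + defaultScale + ")");
                }
                if (columnSize > MAX_PRECISION) {
                    return Optional.of("decimal(" + MAX_PRECISION + ", " + Math.min(decimalDigits, defaultScale) + ")");
                }
            }
            int precision = columnSize + Math.max(-decimalDigits, 0); // decimal(p, -s) -> decimal(p+s, 0)
            if (columnSize == unspecifiedPrecision || precision > MAX_PRECISION) {
                return Optional.empty(); // falls through to unsupported-type handling
            }
            return Optional.of("decimal(" + precision + ", " + Math.max(decimalDigits, 0) + ")");
        }
    }

Under ALLOW_OVERFLOW with a default scale of 16, a plain numeric (no precision) maps to decimal(38, 16) and numeric(50, 20) also maps to decimal(38, 16); without ALLOW_OVERFLOW, both fall through to the unsupported-type handling below.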
Use of io.trino.plugin.jdbc.DecimalConfig.DecimalMapping.ALLOW_OVERFLOW in project trino by trinodb.
Class MySqlClient, method toColumnMapping.
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle)
{
    String jdbcTypeName = typeHandle.getJdbcTypeName()
            .orElseThrow(() -> new TrinoException(JDBC_ERROR, "Type name is missing: " + typeHandle));
    Optional<ColumnMapping> mapping = getForcedMappingToVarchar(typeHandle);
    if (mapping.isPresent()) {
        return mapping;
    }
    switch (jdbcTypeName.toLowerCase(ENGLISH)) {
        case "tinyint unsigned":
            return Optional.of(smallintColumnMapping());
        case "smallint unsigned":
            return Optional.of(integerColumnMapping());
        case "int unsigned":
            return Optional.of(bigintColumnMapping());
        case "bigint unsigned":
            return Optional.of(decimalColumnMapping(createDecimalType(20)));
        case "json":
            return Optional.of(jsonColumnMapping());
    }
    switch (typeHandle.getJdbcType()) {
        case Types.BIT:
            return Optional.of(booleanColumnMapping());
        case Types.TINYINT:
            return Optional.of(tinyintColumnMapping());
        case Types.SMALLINT:
            return Optional.of(smallintColumnMapping());
        case Types.INTEGER:
            return Optional.of(integerColumnMapping());
        case Types.BIGINT:
            return Optional.of(bigintColumnMapping());
        case Types.REAL:
            // attempts to treat them as exact in comparisons may lead to problems
            return Optional.of(ColumnMapping.longMapping(REAL, (resultSet, columnIndex) -> floatToRawIntBits(resultSet.getFloat(columnIndex)), realWriteFunction(), DISABLE_PUSHDOWN));
        case Types.DOUBLE:
            return Optional.of(doubleColumnMapping());
        case Types.NUMERIC:
        case Types.DECIMAL:
            int decimalDigits = typeHandle.getDecimalDigits().orElseThrow(() -> new IllegalStateException("decimal digits not present"));
            int precision = typeHandle.getRequiredColumnSize();
            if (getDecimalRounding(session) == ALLOW_OVERFLOW && precision > Decimals.MAX_PRECISION) {
                int scale = min(decimalDigits, getDecimalDefaultScale(session));
                return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, scale), getDecimalRoundingMode(session)));
            }
            // TODO does mysql support negative scale?
            // Map decimal(p, -s) (negative scale) to decimal(p+s, 0).
            precision = precision + max(-decimalDigits, 0);
            if (precision > Decimals.MAX_PRECISION) {
                break;
            }
            return Optional.of(decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0))));
        case Types.CHAR:
            return Optional.of(defaultCharColumnMapping(typeHandle.getRequiredColumnSize(), false));
        // TODO not all these type constants are necessarily used by the JDBC driver
        case Types.VARCHAR:
        case Types.NVARCHAR:
        case Types.LONGVARCHAR:
        case Types.LONGNVARCHAR:
            return Optional.of(defaultVarcharColumnMapping(typeHandle.getRequiredColumnSize(), false));
        case Types.BINARY:
        case Types.VARBINARY:
        case Types.LONGVARBINARY:
            return Optional.of(ColumnMapping.sliceMapping(VARBINARY, varbinaryReadFunction(), varbinaryWriteFunction(), FULL_PUSHDOWN));
        case Types.DATE:
            return Optional.of(dateColumnMappingUsingLocalDate());
        case Types.TIME:
            TimeType timeType = createTimeType(getTimePrecision(typeHandle.getRequiredColumnSize()));
            return Optional.of(timeColumnMapping(timeType));
        case Types.TIMESTAMP:
            TimestampType timestampType = createTimestampType(getTimestampPrecision(typeHandle.getRequiredColumnSize()));
            return Optional.of(timestampColumnMapping(timestampType));
    }
    if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
        return mapToUnboundedVarchar(typeHandle);
    }
    return Optional.empty();
}
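Before giving up on a wide MySQL decimal, the NUMERIC/DECIMAL branch above first normalizes negative scales: decimal(p, -s) is widened to decimal(p+s, 0), and only if the widened precision still exceeds Decimals.MAX_PRECISION does the column fall through to the unsupported-type handling. A small sketch of that normalization follows; the helper name is invented for this example.

    // Hypothetical helper showing the negative-scale normalization performed above:
    // decimal(p, -s) becomes decimal(p + s, 0); non-negative scales are kept unchanged.
    static int[] normalizeScale(int precision, int scale)
    {
        int adjustedPrecision = precision + Math.max(-scale, 0);
        int adjustedScale = Math.max(scale, 0);
        return new int[] {adjustedPrecision, adjustedScale};
    }

For example, decimal(10, -3) becomes decimal(13, 0), while decimal(10, 2) is left unchanged.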
Use of io.trino.plugin.jdbc.DecimalConfig.DecimalMapping.ALLOW_OVERFLOW in project trino by trinodb.
Class SingleStoreClient, method toColumnMapping.
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle)
{
    String jdbcTypeName = typeHandle.getJdbcTypeName()
            .orElseThrow(() -> new TrinoException(JDBC_ERROR, "Type name is missing: " + typeHandle));
    Optional<ColumnMapping> mapping = getForcedMappingToVarchar(typeHandle);
    if (mapping.isPresent()) {
        return mapping;
    }
    Optional<ColumnMapping> unsignedMapping = getUnsignedMapping(typeHandle);
    if (unsignedMapping.isPresent()) {
        return unsignedMapping;
    }
    if (jdbcTypeName.equalsIgnoreCase("json")) {
        return Optional.of(jsonColumnMapping());
    }
    switch (typeHandle.getJdbcType()) {
        case Types.BIT:
        case Types.BOOLEAN:
            return Optional.of(booleanColumnMapping());
        case Types.TINYINT:
            return Optional.of(tinyintColumnMapping());
        case Types.SMALLINT:
            return Optional.of(smallintColumnMapping());
        case Types.INTEGER:
            return Optional.of(integerColumnMapping());
        case Types.BIGINT:
            return Optional.of(bigintColumnMapping());
        case Types.REAL:
            // attempts to treat them as exact in comparisons may lead to problems
            return Optional.of(ColumnMapping.longMapping(REAL, (resultSet, columnIndex) -> floatToRawIntBits(resultSet.getFloat(columnIndex)), realWriteFunction(), DISABLE_PUSHDOWN));
        case Types.DOUBLE:
            return Optional.of(doubleColumnMapping());
        case Types.CHAR:
        case Types.NCHAR: // TODO it is dummy copied from StandardColumnMappings, verify if it is proper mapping
            return Optional.of(defaultCharColumnMapping(typeHandle.getRequiredColumnSize(), false));
        case Types.VARCHAR:
        case Types.LONGVARCHAR:
            return Optional.of(defaultVarcharColumnMapping(typeHandle.getRequiredColumnSize(), false));
        case Types.DECIMAL:
            int precision = typeHandle.getRequiredColumnSize();
            int decimalDigits = typeHandle.getRequiredDecimalDigits();
            if (getDecimalRounding(session) == ALLOW_OVERFLOW && precision > Decimals.MAX_PRECISION) {
                int scale = min(decimalDigits, getDecimalDefaultScale(session));
                return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, scale), getDecimalRoundingMode(session)));
            }
            if (precision > Decimals.MAX_PRECISION) {
                break;
            }
            return Optional.of(decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0))));
        case Types.BINARY:
        case Types.VARBINARY:
        case Types.LONGVARBINARY:
            return Optional.of(varbinaryColumnMapping());
        case Types.DATE:
            return Optional.of(ColumnMapping.longMapping(DATE, dateReadFunctionUsingLocalDate(), dateWriteFunction()));
        case Types.TIME:
            TimeType timeType = createTimeType(getTimePrecision(typeHandle.getRequiredColumnSize()));
            return Optional.of(ColumnMapping.longMapping(timeType, singleStoreTimeReadFunction(timeType), timeWriteFunction(timeType.getPrecision())));
        case Types.TIMESTAMP:
            // TODO (https://github.com/trinodb/trino/issues/5450) Fix DST handling
            TimestampType timestampType = createTimestampType(getTimestampPrecision(typeHandle.getRequiredColumnSize()));
            return Optional.of(timestampColumnMapping(timestampType));
    }
    if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
        return mapToUnboundedVarchar(typeHandle);
    }
    return Optional.empty();
}
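SingleStore, like MySQL, accepts DECIMAL precisions wider than Trino's maximum of 38, so the fate of such a column is decided entirely by the session settings read above: the ALLOW_OVERFLOW mapping, the CONVERT_TO_VARCHAR fallback, or no mapping at all. The sketch below (invented names, not Trino code) enumerates the three possibilities.

    import java.util.Optional;

    // Hypothetical sketch of the three outcomes for a DECIMAL column whose precision exceeds
    // Decimals.MAX_PRECISION (38), as decided by the method above.
    final class OverflowingDecimalOutcome
    {
        static Optional<String> trinoTypeFor(int scale, boolean allowOverflow, int defaultScale, boolean convertUnsupportedToVarchar)
        {
            if (allowOverflow) {
                // Decimal rounding ALLOW_OVERFLOW: widest Trino decimal, scale capped, values rounded on read.
                return Optional.of("decimal(38, " + Math.min(scale, defaultScale) + ")");
            }
            if (convertUnsupportedToVarchar) {
                // Unsupported-type handling CONVERT_TO_VARCHAR: exposed as unbounded varchar.
                return Optional.of("varchar");
            }
            // Otherwise the column is simply not exposed by the connector.
            return Optional.empty();
        }
    }

For a DECIMAL(65, 30) with a default scale of 16, this yields decimal(38, 16), varchar, or an absent column, respectively.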