Use of io.trino.plugin.jdbc.ColumnMapping in project trino by trinodb.
Class ClickHouseClient, method toColumnMapping:
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle)
{
    String jdbcTypeName = typeHandle.getJdbcTypeName()
            .orElseThrow(() -> new TrinoException(JDBC_ERROR, "Type name is missing: " + typeHandle));
    Optional<ColumnMapping> mapping = getForcedMappingToVarchar(typeHandle);
    if (mapping.isPresent()) {
        return mapping;
    }
    ClickHouseColumn column = ClickHouseColumn.of("", jdbcTypeName);
    ClickHouseDataType columnDataType = column.getDataType();
    switch (columnDataType) {
        case UInt8:
            // UInt8 (0..255) does not fit in Trino's signed TINYINT, so map to SMALLINT
            return Optional.of(ColumnMapping.longMapping(SMALLINT, ResultSet::getShort, uInt8WriteFunction()));
        case UInt16:
            return Optional.of(ColumnMapping.longMapping(INTEGER, ResultSet::getInt, uInt16WriteFunction()));
        case UInt32:
            return Optional.of(ColumnMapping.longMapping(BIGINT, ResultSet::getLong, uInt32WriteFunction()));
        case UInt64:
            // UInt64 does not fit in a signed 64-bit long, so it is surfaced as a decimal type
            return Optional.of(ColumnMapping.objectMapping(UINT64_TYPE, longDecimalReadFunction(UINT64_TYPE, UNNECESSARY), uInt64WriteFunction()));
        case IPv4:
            return Optional.of(ipAddressColumnMapping("IPv4StringToNum(?)"));
        case IPv6:
            return Optional.of(ipAddressColumnMapping("IPv6StringToNum(?)"));
        case Enum8:
        case Enum16:
            return Optional.of(ColumnMapping.sliceMapping(
                    createUnboundedVarcharType(),
                    varcharReadFunction(createUnboundedVarcharType()),
                    varcharWriteFunction(),
                    // TODO (https://github.com/trinodb/trino/issues/7100) Currently pushdown would not work and may require a custom bind expression
                    DISABLE_PUSHDOWN));
        // FixedString(n)
        case FixedString:
        case String:
            if (isMapStringAsVarchar(session)) {
                return Optional.of(ColumnMapping.sliceMapping(
                        createUnboundedVarcharType(),
                        varcharReadFunction(createUnboundedVarcharType()),
                        varcharWriteFunction(),
                        DISABLE_PUSHDOWN));
            }
            // TODO (https://github.com/trinodb/trino/issues/7100) test & enable predicate pushdown
            return Optional.of(varbinaryColumnMapping());
        case UUID:
            return Optional.of(uuidColumnMapping());
        default:
            // no ClickHouse-specific mapping; fall back to the generic JDBC type switch below
    }
    switch (typeHandle.getJdbcType()) {
        case Types.TINYINT:
            return Optional.of(tinyintColumnMapping());
        case Types.SMALLINT:
            return Optional.of(smallintColumnMapping());
        case Types.INTEGER:
            return Optional.of(integerColumnMapping());
        case Types.BIGINT:
            return Optional.of(bigintColumnMapping());
        case Types.FLOAT:
        case Types.REAL:
            return Optional.of(ColumnMapping.longMapping(
                    REAL,
                    (resultSet, columnIndex) -> floatToRawIntBits(resultSet.getFloat(columnIndex)),
                    realWriteFunction(),
                    DISABLE_PUSHDOWN));
        case Types.DOUBLE:
            return Optional.of(doubleColumnMapping());
        case Types.DECIMAL:
            int decimalDigits = typeHandle.getRequiredDecimalDigits();
            int precision = typeHandle.getRequiredColumnSize();
            ColumnMapping decimalColumnMapping;
            if (getDecimalRounding(session) == ALLOW_OVERFLOW && precision > Decimals.MAX_PRECISION) {
                // precision exceeds Trino's maximum; round into the widest supported decimal
                int scale = Math.min(decimalDigits, getDecimalDefaultScale(session));
                decimalColumnMapping = decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, scale), getDecimalRoundingMode(session));
            }
            else {
                decimalColumnMapping = decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0)));
            }
            return Optional.of(new ColumnMapping(
                    decimalColumnMapping.getType(),
                    decimalColumnMapping.getReadFunction(),
                    decimalColumnMapping.getWriteFunction(),
                    // TODO (https://github.com/trinodb/trino/issues/7100) fix, enable and test decimal pushdown
                    DISABLE_PUSHDOWN));
        case Types.DATE:
            return Optional.of(dateColumnMappingUsingLocalDate());
        case Types.TIMESTAMP:
            if (columnDataType == ClickHouseDataType.DateTime) {
                verify(typeHandle.getRequiredDecimalDigits() == 0, "Expected 0 as timestamp precision, but got %s", typeHandle.getRequiredDecimalDigits());
                return Optional.of(ColumnMapping.longMapping(TIMESTAMP_SECONDS, timestampReadFunction(TIMESTAMP_SECONDS), timestampSecondsWriteFunction()));
            }
            // TODO (https://github.com/trinodb/trino/issues/10537) Add support for Datetime64 type
            return Optional.of(timestampColumnMappingUsingSqlTimestampWithRounding(TIMESTAMP_MILLIS));
    }
    return Optional.empty();
}
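The unsigned-integer write helpers referenced above (uInt8WriteFunction and friends) are not shown on this page. As a minimal sketch, assuming Trino's io.trino.plugin.jdbc.LongWriteFunction functional interface, such a helper might look like the following; the actual ClickHouseClient implementation may differ:

private static LongWriteFunction uInt8WriteFunction()
{
    return (statement, index, value) -> {
        // UInt8 holds 0..255; reject out-of-range values before binding the parameter
        if (value < 0 || value > 255) {
            throw new IllegalArgumentException("Value out of range for ClickHouse UInt8: " + value);
        }
        statement.setShort(index, (short) value);
    };
}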
Use of io.trino.plugin.jdbc.ColumnMapping in project trino by trinodb.
Class PhoenixClient, method toColumnMapping:
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle)
{
    Optional<ColumnMapping> mapping = getForcedMappingToVarchar(typeHandle);
    if (mapping.isPresent()) {
        return mapping;
    }
    switch (typeHandle.getJdbcType()) {
        case Types.BOOLEAN:
            return Optional.of(booleanColumnMapping());
        case Types.TINYINT:
            return Optional.of(tinyintColumnMapping());
        case Types.SMALLINT:
            return Optional.of(smallintColumnMapping());
        case Types.INTEGER:
            return Optional.of(integerColumnMapping());
        case Types.BIGINT:
            return Optional.of(bigintColumnMapping());
        case Types.FLOAT:
            return Optional.of(realColumnMapping());
        case Types.DOUBLE:
            return Optional.of(doubleColumnMapping());
        case Types.DECIMAL:
            Optional<Integer> columnSize = typeHandle.getColumnSize();
            int precision = columnSize.orElse(DEFAULT_PRECISION);
            int decimalDigits = typeHandle.getDecimalDigits().orElse(DEFAULT_SCALE);
            if (getDecimalRounding(session) == ALLOW_OVERFLOW) {
                if (columnSize.isEmpty()) {
                    return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, getDecimalDefaultScale(session)), getDecimalRoundingMode(session)));
                }
            }
            // TODO does phoenix support negative scale?
            // Map decimal(p, -s) (negative scale) to decimal(p+s, 0).
            precision = precision + max(-decimalDigits, 0);
            if (precision > Decimals.MAX_PRECISION) {
                break;
            }
            return Optional.of(decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0)), UNNECESSARY));
        case Types.CHAR:
            return Optional.of(defaultCharColumnMapping(typeHandle.getRequiredColumnSize(), true));
        case Types.VARCHAR:
        case Types.NVARCHAR:
        case Types.LONGVARCHAR:
        case Types.LONGNVARCHAR:
            if (typeHandle.getColumnSize().isEmpty()) {
                return Optional.of(varcharColumnMapping(createUnboundedVarcharType(), true));
            }
            return Optional.of(defaultVarcharColumnMapping(typeHandle.getRequiredColumnSize(), true));
        case Types.BINARY:
        case Types.VARBINARY:
            return Optional.of(varbinaryColumnMapping());
        case Types.DATE:
            return Optional.of(ColumnMapping.longMapping(DATE, dateReadFunction(), dateWriteFunctionUsingString()));
        // TODO add support for TIMESTAMP after Phoenix adds support for LocalDateTime
        case Types.TIMESTAMP:
        case Types.TIME_WITH_TIMEZONE:
        case Types.TIMESTAMP_WITH_TIMEZONE:
            if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
                return mapToUnboundedVarchar(typeHandle);
            }
            return Optional.empty();
        case Types.ARRAY:
            JdbcTypeHandle elementTypeHandle = getArrayElementTypeHandle(typeHandle);
            if (elementTypeHandle.getJdbcType() == Types.VARBINARY) {
                // arrays of VARBINARY elements are not supported
                return Optional.empty();
            }
            // map the element type recursively, then wrap it in a Trino ARRAY type
            return toColumnMapping(session, connection, elementTypeHandle).map(elementMapping -> {
                ArrayType trinoArrayType = new ArrayType(elementMapping.getType());
                String jdbcTypeName = elementTypeHandle.getJdbcTypeName()
                        .orElseThrow(() -> new TrinoException(PHOENIX_METADATA_ERROR, "Type name is missing for jdbc type: " + JDBCType.valueOf(elementTypeHandle.getJdbcType())));
                // TODO (https://github.com/trinodb/trino/issues/11132) Enable predicate pushdown on ARRAY(CHAR) type in Phoenix
                PredicatePushdownController pushdownController = elementTypeHandle.getJdbcType() == Types.CHAR ? DISABLE_PUSHDOWN : FULL_PUSHDOWN;
                return arrayColumnMapping(session, trinoArrayType, jdbcTypeName, pushdownController);
            });
    }
    if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
        return mapToUnboundedVarchar(typeHandle);
    }
    return Optional.empty();
}
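The negative-scale handling in the Types.DECIMAL branch is plain arithmetic: a decimal(p, -s) column only stores multiples of 10^s, so the scale is folded into the precision. A worked example of the two max(...) expressions:

// decimal(10, -2): every stored value is a multiple of 100
int decimalDigits = -2;
int precision = 10 + max(-decimalDigits, 0); // 10 + 2 = 12
int scale = max(decimalDigits, 0);           // 0
// => the column is surfaced in Trino as decimal(12, 0)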
Use of io.trino.plugin.jdbc.ColumnMapping in project trino by trinodb.
Class PostgreSqlClient, method toColumnMapping:
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle)
{
    String jdbcTypeName = typeHandle.getJdbcTypeName()
            .orElseThrow(() -> new TrinoException(JDBC_ERROR, "Type name is missing: " + typeHandle));
    Optional<ColumnMapping> mapping = getForcedMappingToVarchar(typeHandle);
    if (mapping.isPresent()) {
        return mapping;
    }
    // PostgreSQL-specific type names are matched first, before the generic JDBC type codes
    switch (jdbcTypeName) {
        case "money":
            return Optional.of(moneyColumnMapping());
        case "uuid":
            return Optional.of(uuidColumnMapping());
        case "jsonb":
        case "json":
            return Optional.of(jsonColumnMapping());
        case "timestamptz":
            // PostgreSQL's "timestamp with time zone" is reported as Types.TIMESTAMP rather than Types.TIMESTAMP_WITH_TIMEZONE
            int decimalDigits = typeHandle.getRequiredDecimalDigits();
            return Optional.of(timestampWithTimeZoneColumnMapping(decimalDigits));
        case "hstore":
            return Optional.of(hstoreColumnMapping(session));
    }
    switch (typeHandle.getJdbcType()) {
        case Types.BIT:
            // PostgreSQL reports BOOLEAN columns as Types.BIT
            return Optional.of(booleanColumnMapping());
        case Types.SMALLINT:
            return Optional.of(smallintColumnMapping());
        case Types.INTEGER:
            return Optional.of(integerColumnMapping());
        case Types.BIGINT:
            return Optional.of(bigintColumnMapping());
        case Types.REAL:
            return Optional.of(realColumnMapping());
        case Types.DOUBLE:
            return Optional.of(doubleColumnMapping());
        case Types.NUMERIC: {
            int columnSize = typeHandle.getRequiredColumnSize();
            int precision;
            int decimalDigits = typeHandle.getDecimalDigits().orElse(0);
            if (getDecimalRounding(session) == ALLOW_OVERFLOW) {
                if (columnSize == PRECISION_OF_UNSPECIFIED_DECIMAL) {
                    // decimal type with unspecified scale - up to 131072 digits before the decimal point and up to 16383 digits after it
                    return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, getDecimalDefaultScale(session)), getDecimalRoundingMode(session)));
                }
                precision = columnSize;
                if (precision > Decimals.MAX_PRECISION) {
                    int scale = min(decimalDigits, getDecimalDefaultScale(session));
                    return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, scale), getDecimalRoundingMode(session)));
                }
            }
            // Map decimal(p, -s) (negative scale) to decimal(p+s, 0).
            precision = columnSize + max(-decimalDigits, 0);
            if (columnSize == PRECISION_OF_UNSPECIFIED_DECIMAL || precision > Decimals.MAX_PRECISION) {
                break;
            }
            return Optional.of(decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0)), UNNECESSARY));
        }
        case Types.CHAR:
            return Optional.of(charColumnMapping(typeHandle.getRequiredColumnSize()));
        case Types.VARCHAR:
            if (!jdbcTypeName.equals("varchar")) {
                // This can be e.g. an ENUM
                return Optional.of(typedVarcharColumnMapping(jdbcTypeName));
            }
            return Optional.of(varcharColumnMapping(typeHandle.getRequiredColumnSize()));
        case Types.BINARY:
            return Optional.of(varbinaryColumnMapping());
        case Types.DATE:
            return Optional.of(ColumnMapping.longMapping(
                    DATE,
                    (resultSet, index) -> LocalDate.parse(resultSet.getString(index), DATE_FORMATTER).toEpochDay(),
                    dateWriteFunctionUsingLocalDate()));
        case Types.TIME:
            return Optional.of(timeColumnMapping(typeHandle.getRequiredDecimalDigits()));
        case Types.TIMESTAMP:
            TimestampType timestampType = createTimestampType(typeHandle.getRequiredDecimalDigits());
            return Optional.of(ColumnMapping.longMapping(timestampType, timestampReadFunction(timestampType), PostgreSqlClient::shortTimestampWriteFunction));
        case Types.ARRAY:
            Optional<ColumnMapping> columnMapping = arrayToTrinoType(session, connection, typeHandle);
            if (columnMapping.isPresent()) {
                return columnMapping;
            }
            break;
    }
    if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
        return mapToUnboundedVarchar(typeHandle);
    }
    return Optional.empty();
}
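For context, here is a hypothetical caller-side sketch (client and typeHandle are assumed names, not part of the code above) showing how an empty result from toColumnMapping is typically treated as an unsupported column:

ColumnMapping columnMapping = client.toColumnMapping(session, connection, typeHandle)
        .orElseThrow(() -> new TrinoException(NOT_SUPPORTED, "Unsupported column type: " + typeHandle));
// the Trino type the column is exposed as
Type trinoType = columnMapping.getType();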
Use of io.trino.plugin.jdbc.ColumnMapping in project trino by trinodb.
Class PostgreSqlClient, method arrayToTrinoType:
private Optional<ColumnMapping> arrayToTrinoType(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle)
{
    checkArgument(typeHandle.getJdbcType() == Types.ARRAY, "Not array type");
    ArrayMapping arrayMapping = getArrayMapping(session);
    if (arrayMapping == DISABLED) {
        return Optional.empty();
    }
    // resolve and map the base array element type
    JdbcTypeHandle baseElementTypeHandle = getArrayElementTypeHandle(connection, typeHandle);
    String baseElementTypeName = baseElementTypeHandle.getJdbcTypeName()
            .orElseThrow(() -> new TrinoException(JDBC_ERROR, "Element type name is missing: " + baseElementTypeHandle));
    if (baseElementTypeHandle.getJdbcType() == Types.BINARY) {
        // arrays of binary elements are not supported, see https://github.com/pgjdbc/pgjdbc/pull/1184
        return Optional.empty();
    }
    Optional<ColumnMapping> baseElementMapping = toColumnMapping(session, connection, baseElementTypeHandle);
    if (arrayMapping == AS_ARRAY) {
        if (typeHandle.getArrayDimensions().isEmpty()) {
            return Optional.empty();
        }
        return baseElementMapping.map(elementMapping -> {
            ArrayType trinoArrayType = new ArrayType(elementMapping.getType());
            ColumnMapping arrayColumnMapping = arrayColumnMapping(session, trinoArrayType, elementMapping, baseElementTypeName);
            // wrap the mapping once per additional array dimension
            int arrayDimensions = typeHandle.getArrayDimensions().get();
            for (int i = 1; i < arrayDimensions; i++) {
                trinoArrayType = new ArrayType(trinoArrayType);
                arrayColumnMapping = arrayColumnMapping(session, trinoArrayType, arrayColumnMapping, baseElementTypeName);
            }
            return arrayColumnMapping;
        });
    }
    if (arrayMapping == AS_JSON) {
        return baseElementMapping.map(elementMapping -> arrayAsJsonColumnMapping(session, elementMapping));
    }
    throw new IllegalStateException("Unsupported array mapping type: " + arrayMapping);
}
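The wrapping loop starts at i = 1 because the first ArrayType level is created before the loop. For a three-dimensional PostgreSQL column such as integer[][][], the Trino type is built up as follows (illustration only):

Type trinoType = new ArrayType(INTEGER); // dimension 1, created before the loop
trinoType = new ArrayType(trinoType);    // dimension 2, loop iteration i = 1
trinoType = new ArrayType(trinoType);    // dimension 3, loop iteration i = 2
// => array(array(array(integer)))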