Use of io.trino.plugin.jdbc.JdbcErrorCode.JDBC_ERROR in project trino by trinodb: the toColumnMapping method of the ClickHouseClient class.
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle) {
    String jdbcTypeName = typeHandle.getJdbcTypeName().orElseThrow(() -> new TrinoException(JDBC_ERROR, "Type name is missing: " + typeHandle));
    Optional<ColumnMapping> mapping = getForcedMappingToVarchar(typeHandle);
    if (mapping.isPresent()) {
        return mapping;
    }
    ClickHouseColumn column = ClickHouseColumn.of("", jdbcTypeName);
    ClickHouseDataType columnDataType = column.getDataType();
    switch (columnDataType) {
        case UInt8:
            return Optional.of(ColumnMapping.longMapping(SMALLINT, ResultSet::getShort, uInt8WriteFunction()));
        case UInt16:
            return Optional.of(ColumnMapping.longMapping(INTEGER, ResultSet::getInt, uInt16WriteFunction()));
        case UInt32:
            return Optional.of(ColumnMapping.longMapping(BIGINT, ResultSet::getLong, uInt32WriteFunction()));
        case UInt64:
            return Optional.of(ColumnMapping.objectMapping(UINT64_TYPE, longDecimalReadFunction(UINT64_TYPE, UNNECESSARY), uInt64WriteFunction()));
        case IPv4:
            return Optional.of(ipAddressColumnMapping("IPv4StringToNum(?)"));
        case IPv6:
            return Optional.of(ipAddressColumnMapping("IPv6StringToNum(?)"));
        case Enum8:
        case Enum16:
            return Optional.of(ColumnMapping.sliceMapping(
                    createUnboundedVarcharType(),
                    varcharReadFunction(createUnboundedVarcharType()),
                    varcharWriteFunction(),
                    // TODO (https://github.com/trinodb/trino/issues/7100) Currently pushdown would not work and may require a custom bind expression
                    DISABLE_PUSHDOWN));
        // FixedString(n)
        case FixedString:
        case String:
            if (isMapStringAsVarchar(session)) {
                return Optional.of(ColumnMapping.sliceMapping(createUnboundedVarcharType(), varcharReadFunction(createUnboundedVarcharType()), varcharWriteFunction(), DISABLE_PUSHDOWN));
            }
            // TODO (https://github.com/trinodb/trino/issues/7100) test & enable predicate pushdown
            return Optional.of(varbinaryColumnMapping());
        case UUID:
            return Optional.of(uuidColumnMapping());
        default:
    }
    switch (typeHandle.getJdbcType()) {
        case Types.TINYINT:
            return Optional.of(tinyintColumnMapping());
        case Types.SMALLINT:
            return Optional.of(smallintColumnMapping());
        case Types.INTEGER:
            return Optional.of(integerColumnMapping());
        case Types.BIGINT:
            return Optional.of(bigintColumnMapping());
        case Types.FLOAT:
        case Types.REAL:
            return Optional.of(ColumnMapping.longMapping(REAL, (resultSet, columnIndex) -> floatToRawIntBits(resultSet.getFloat(columnIndex)), realWriteFunction(), DISABLE_PUSHDOWN));
        case Types.DOUBLE:
            return Optional.of(doubleColumnMapping());
        case Types.DECIMAL:
            int decimalDigits = typeHandle.getRequiredDecimalDigits();
            int precision = typeHandle.getRequiredColumnSize();
            ColumnMapping decimalColumnMapping;
            if (getDecimalRounding(session) == ALLOW_OVERFLOW && precision > Decimals.MAX_PRECISION) {
                int scale = Math.min(decimalDigits, getDecimalDefaultScale(session));
                decimalColumnMapping = decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, scale), getDecimalRoundingMode(session));
            } else {
                decimalColumnMapping = decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0)));
            }
            return Optional.of(new ColumnMapping(
                    decimalColumnMapping.getType(),
                    decimalColumnMapping.getReadFunction(),
                    decimalColumnMapping.getWriteFunction(),
                    // TODO (https://github.com/trinodb/trino/issues/7100) fix, enable and test decimal pushdown
                    DISABLE_PUSHDOWN));
        case Types.DATE:
            return Optional.of(dateColumnMappingUsingLocalDate());
        case Types.TIMESTAMP:
            if (columnDataType == ClickHouseDataType.DateTime) {
                verify(typeHandle.getRequiredDecimalDigits() == 0, "Expected 0 as timestamp precision, but got %s", typeHandle.getRequiredDecimalDigits());
                return Optional.of(ColumnMapping.longMapping(TIMESTAMP_SECONDS, timestampReadFunction(TIMESTAMP_SECONDS), timestampSecondsWriteFunction()));
            }
            // TODO (https://github.com/trinodb/trino/issues/10537) Add support for Datetime64 type
            return Optional.of(timestampColumnMappingUsingSqlTimestampWithRounding(TIMESTAMP_MILLIS));
    }
    return Optional.empty();
}
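The unsigned mappings above rely on write functions such as uInt8WriteFunction() that are defined elsewhere in ClickHouseClient and not shown here. As an illustration of the idea only (not the actual Trino implementation), a write path for a UInt8 column needs a range check before binding the value through the JDBC driver; the helper name below is hypothetical.

// Illustrative sketch only, not taken from ClickHouseClient: validate the 0..255 range
// of a ClickHouse UInt8 value before binding it as a JDBC short.
static void setUInt8(java.sql.PreparedStatement statement, int parameterIndex, long value)
        throws java.sql.SQLException {
    if (value < 0 || value > 255) {
        // Reject out-of-range values instead of silently truncating them
        throw new IllegalArgumentException("Value out of range for ClickHouse UInt8: " + value);
    }
    statement.setShort(parameterIndex, (short) value);
}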
Use of io.trino.plugin.jdbc.JdbcErrorCode.JDBC_ERROR in project trino by trinodb: the toColumnMapping method of the PostgreSqlClient class.
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle) {
    String jdbcTypeName = typeHandle.getJdbcTypeName().orElseThrow(() -> new TrinoException(JDBC_ERROR, "Type name is missing: " + typeHandle));
    Optional<ColumnMapping> mapping = getForcedMappingToVarchar(typeHandle);
    if (mapping.isPresent()) {
        return mapping;
    }
    switch (jdbcTypeName) {
        case "money":
            return Optional.of(moneyColumnMapping());
        case "uuid":
            return Optional.of(uuidColumnMapping());
        case "jsonb":
        case "json":
            return Optional.of(jsonColumnMapping());
        case "timestamptz":
            // PostgreSQL's "timestamp with time zone" is reported as Types.TIMESTAMP rather than Types.TIMESTAMP_WITH_TIMEZONE
            int decimalDigits = typeHandle.getRequiredDecimalDigits();
            return Optional.of(timestampWithTimeZoneColumnMapping(decimalDigits));
        case "hstore":
            return Optional.of(hstoreColumnMapping(session));
    }
    switch (typeHandle.getJdbcType()) {
        case Types.BIT:
            return Optional.of(booleanColumnMapping());
        case Types.SMALLINT:
            return Optional.of(smallintColumnMapping());
        case Types.INTEGER:
            return Optional.of(integerColumnMapping());
        case Types.BIGINT:
            return Optional.of(bigintColumnMapping());
        case Types.REAL:
            return Optional.of(realColumnMapping());
        case Types.DOUBLE:
            return Optional.of(doubleColumnMapping());
        case Types.NUMERIC: {
            int columnSize = typeHandle.getRequiredColumnSize();
            int precision;
            int decimalDigits = typeHandle.getDecimalDigits().orElse(0);
            if (getDecimalRounding(session) == ALLOW_OVERFLOW) {
                if (columnSize == PRECISION_OF_UNSPECIFIED_DECIMAL) {
                    // decimal type with unspecified scale (up to 131072 digits before the decimal point; up to 16383 digits after the decimal point)
                    return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, getDecimalDefaultScale(session)), getDecimalRoundingMode(session)));
                }
                precision = columnSize;
                if (precision > Decimals.MAX_PRECISION) {
                    int scale = min(decimalDigits, getDecimalDefaultScale(session));
                    return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, scale), getDecimalRoundingMode(session)));
                }
            }
            // Map decimal(p, -s) (negative scale) to decimal(p+s, 0).
            precision = columnSize + max(-decimalDigits, 0);
            if (columnSize == PRECISION_OF_UNSPECIFIED_DECIMAL || precision > Decimals.MAX_PRECISION) {
                break;
            }
            return Optional.of(decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0)), UNNECESSARY));
        }
        case Types.CHAR:
            return Optional.of(charColumnMapping(typeHandle.getRequiredColumnSize()));
        case Types.VARCHAR:
            if (!jdbcTypeName.equals("varchar")) {
                // This can be e.g. an ENUM
                return Optional.of(typedVarcharColumnMapping(jdbcTypeName));
            }
            return Optional.of(varcharColumnMapping(typeHandle.getRequiredColumnSize()));
        case Types.BINARY:
            return Optional.of(varbinaryColumnMapping());
        case Types.DATE:
            return Optional.of(ColumnMapping.longMapping(DATE, (resultSet, index) -> LocalDate.parse(resultSet.getString(index), DATE_FORMATTER).toEpochDay(), dateWriteFunctionUsingLocalDate()));
        case Types.TIME:
            return Optional.of(timeColumnMapping(typeHandle.getRequiredDecimalDigits()));
        case Types.TIMESTAMP:
            TimestampType timestampType = createTimestampType(typeHandle.getRequiredDecimalDigits());
            return Optional.of(ColumnMapping.longMapping(timestampType, timestampReadFunction(timestampType), PostgreSqlClient::shortTimestampWriteFunction));
        case Types.ARRAY:
            Optional<ColumnMapping> columnMapping = arrayToTrinoType(session, connection, typeHandle);
            if (columnMapping.isPresent()) {
                return columnMapping;
            }
            break;
    }
    if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
        return mapToUnboundedVarchar(typeHandle);
    }
    return Optional.empty();
}
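The Types.NUMERIC branch above widens a PostgreSQL numeric with a negative scale into a Trino decimal with scale 0. A small self-contained example of that arithmetic, using an illustrative column declared numeric(5, -3) (this is not Trino code, just the same computation spelled out):

// Illustrative arithmetic for the "Map decimal(p, -s) to decimal(p+s, 0)" comment above.
public static void main(String[] args) {
    int columnSize = 5;      // reported precision for numeric(5, -3)
    int decimalDigits = -3;  // reported (negative) scale
    int precision = columnSize + Math.max(-decimalDigits, 0); // 5 + 3 = 8
    int scale = Math.max(decimalDigits, 0);                   // 0
    System.out.printf("numeric(5, -3) -> decimal(%d, %d)%n", precision, scale); // decimal(8, 0)
}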
Use of io.trino.plugin.jdbc.JdbcErrorCode.JDBC_ERROR in project trino by trinodb: the setTableProperties method of the ClickHouseClient class.
@Override
public void setTableProperties(ConnectorSession session, JdbcTableHandle handle, Map<String, Optional<Object>> nullableProperties) {
    // TODO: Support other table properties
    checkArgument(nullableProperties.size() == 1 && nullableProperties.containsKey(SAMPLE_BY_PROPERTY), "Only support setting 'sample_by' property");
    // TODO: Support sampling key removal when we support a newer version of ClickHouse. See https://github.com/ClickHouse/ClickHouse/pull/30180.
    checkArgument(nullableProperties.values().stream().noneMatch(Optional::isEmpty), "Setting a property to null is not supported");
    Map<String, Object> properties = nullableProperties.entrySet().stream()
            .filter(entry -> entry.getValue().isPresent())
            .collect(toImmutableMap(Entry::getKey, entry -> entry.getValue().orElseThrow()));
    ImmutableList.Builder<String> tableOptions = ImmutableList.builder();
    ClickHouseTableProperties.getSampleBy(properties).ifPresent(value -> tableOptions.add("SAMPLE BY " + value));
    try (Connection connection = connectionFactory.openConnection(session)) {
        String sql = format("ALTER TABLE %s MODIFY %s", quoted(handle.asPlainTable().getRemoteTableName()), join(" ", tableOptions.build()));
        execute(connection, sql);
    } catch (SQLException e) {
        throw new TrinoException(JDBC_ERROR, e);
    }
}
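Every snippet on this page ends with the same convention: a SQLException thrown by the driver is rethrown as a TrinoException carrying the JDBC_ERROR error code. A stripped-down sketch of that pattern, assuming the connectionFactory and session values available inside a JDBC-based client (the SQL text below is a placeholder, not what setTableProperties actually builds):

// Minimal sketch of the shared error-handling pattern: open a connection through the
// connector's ConnectionFactory and convert SQLException into TrinoException(JDBC_ERROR, e).
try (Connection connection = connectionFactory.openConnection(session);
        Statement statement = connection.createStatement()) {
    // Placeholder statement; real call sites build the SQL from table handles and properties
    statement.execute("ALTER TABLE example_table MODIFY SAMPLE BY example_column");
}
catch (SQLException e) {
    // Surface driver failures with the connector's standard JDBC error code
    throw new TrinoException(JDBC_ERROR, e);
}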
Use of io.trino.plugin.jdbc.JdbcErrorCode.JDBC_ERROR in project trino by trinodb: the toColumnMapping method of the MySqlClient class.
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle) {
    String jdbcTypeName = typeHandle.getJdbcTypeName().orElseThrow(() -> new TrinoException(JDBC_ERROR, "Type name is missing: " + typeHandle));
    Optional<ColumnMapping> mapping = getForcedMappingToVarchar(typeHandle);
    if (mapping.isPresent()) {
        return mapping;
    }
    switch (jdbcTypeName.toLowerCase(ENGLISH)) {
        case "tinyint unsigned":
            return Optional.of(smallintColumnMapping());
        case "smallint unsigned":
            return Optional.of(integerColumnMapping());
        case "int unsigned":
            return Optional.of(bigintColumnMapping());
        case "bigint unsigned":
            return Optional.of(decimalColumnMapping(createDecimalType(20)));
        case "json":
            return Optional.of(jsonColumnMapping());
    }
    switch (typeHandle.getJdbcType()) {
        case Types.BIT:
            return Optional.of(booleanColumnMapping());
        case Types.TINYINT:
            return Optional.of(tinyintColumnMapping());
        case Types.SMALLINT:
            return Optional.of(smallintColumnMapping());
        case Types.INTEGER:
            return Optional.of(integerColumnMapping());
        case Types.BIGINT:
            return Optional.of(bigintColumnMapping());
        case Types.REAL:
            // Disable pushdown: REAL values are approximate, and attempts to treat them as exact in comparisons may lead to problems
            return Optional.of(ColumnMapping.longMapping(REAL, (resultSet, columnIndex) -> floatToRawIntBits(resultSet.getFloat(columnIndex)), realWriteFunction(), DISABLE_PUSHDOWN));
        case Types.DOUBLE:
            return Optional.of(doubleColumnMapping());
        case Types.NUMERIC:
        case Types.DECIMAL:
            int decimalDigits = typeHandle.getDecimalDigits().orElseThrow(() -> new IllegalStateException("decimal digits not present"));
            int precision = typeHandle.getRequiredColumnSize();
            if (getDecimalRounding(session) == ALLOW_OVERFLOW && precision > Decimals.MAX_PRECISION) {
                int scale = min(decimalDigits, getDecimalDefaultScale(session));
                return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, scale), getDecimalRoundingMode(session)));
            }
            // TODO does mysql support negative scale?
            // Map decimal(p, -s) (negative scale) to decimal(p+s, 0).
            precision = precision + max(-decimalDigits, 0);
            if (precision > Decimals.MAX_PRECISION) {
                break;
            }
            return Optional.of(decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0))));
        case Types.CHAR:
            return Optional.of(defaultCharColumnMapping(typeHandle.getRequiredColumnSize(), false));
        // TODO not all these type constants are necessarily used by the JDBC driver
        case Types.VARCHAR:
        case Types.NVARCHAR:
        case Types.LONGVARCHAR:
        case Types.LONGNVARCHAR:
            return Optional.of(defaultVarcharColumnMapping(typeHandle.getRequiredColumnSize(), false));
        case Types.BINARY:
        case Types.VARBINARY:
        case Types.LONGVARBINARY:
            return Optional.of(ColumnMapping.sliceMapping(VARBINARY, varbinaryReadFunction(), varbinaryWriteFunction(), FULL_PUSHDOWN));
        case Types.DATE:
            return Optional.of(dateColumnMappingUsingLocalDate());
        case Types.TIME:
            TimeType timeType = createTimeType(getTimePrecision(typeHandle.getRequiredColumnSize()));
            return Optional.of(timeColumnMapping(timeType));
        case Types.TIMESTAMP:
            TimestampType timestampType = createTimestampType(getTimestampPrecision(typeHandle.getRequiredColumnSize()));
            return Optional.of(timestampColumnMapping(timestampType));
    }
    if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
        return mapToUnboundedVarchar(typeHandle);
    }
    return Optional.empty();
}
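The first switch above widens each unsigned MySQL type to the next larger Trino type so the full unsigned range still fits; bigint unsigned needs 20 decimal digits, which is why it maps to decimal(20). A short, self-contained check of those bounds (illustrative only, not Trino code):

// Illustrative only: the unsigned value ranges behind the widening choices above.
public static void main(String[] args) {
    long tinyintUnsignedMax = 255L;              // fits in SMALLINT (max 32767)
    long smallintUnsignedMax = 65_535L;          // fits in INTEGER (max 2147483647)
    long intUnsignedMax = 4_294_967_295L;        // fits in BIGINT (max 2^63 - 1)
    java.math.BigInteger bigintUnsignedMax =
            java.math.BigInteger.TWO.pow(64).subtract(java.math.BigInteger.ONE);
    // 18446744073709551615 has 20 digits, hence "bigint unsigned" -> decimal(20)
    System.out.printf("max values: %d, %d, %d, %s (%d digits)%n",
            tinyintUnsignedMax, smallintUnsignedMax, intUnsignedMax,
            bigintUnsignedMax, bigintUnsignedMax.toString().length());
}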
Use of io.trino.plugin.jdbc.JdbcErrorCode.JDBC_ERROR in project trino by trinodb: the toColumnMapping method of the OracleClient class.
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle) {
    String jdbcTypeName = typeHandle.getJdbcTypeName().orElseThrow(() -> new TrinoException(JDBC_ERROR, "Type name is missing: " + typeHandle));
    Optional<ColumnMapping> mappingToVarchar = getForcedMappingToVarchar(typeHandle);
    if (mappingToVarchar.isPresent()) {
        return mappingToVarchar;
    }
    if (jdbcTypeName.equalsIgnoreCase("date")) {
        return Optional.of(ColumnMapping.longMapping(TIMESTAMP_SECONDS, oracleTimestampReadFunction(), trinoTimestampToOracleDateWriteFunction(), FULL_PUSHDOWN));
    }
    switch (typeHandle.getJdbcType()) {
        case Types.SMALLINT:
            return Optional.of(ColumnMapping.longMapping(SMALLINT, ResultSet::getShort, smallintWriteFunction(), FULL_PUSHDOWN));
        case OracleTypes.BINARY_FLOAT:
            return Optional.of(ColumnMapping.longMapping(REAL, (resultSet, columnIndex) -> floatToRawIntBits(resultSet.getFloat(columnIndex)), oracleRealWriteFunction(), FULL_PUSHDOWN));
        case OracleTypes.BINARY_DOUBLE:
        case OracleTypes.FLOAT:
            return Optional.of(ColumnMapping.doubleMapping(DOUBLE, ResultSet::getDouble, oracleDoubleWriteFunction(), FULL_PUSHDOWN));
        case OracleTypes.NUMBER:
            int actualPrecision = typeHandle.getRequiredColumnSize();
            int decimalDigits = typeHandle.getRequiredDecimalDigits();
            // Map negative scale to decimal(p+s, 0).
            int precision = actualPrecision + max(-decimalDigits, 0);
            int scale = max(decimalDigits, 0);
            Optional<Integer> numberDefaultScale = getNumberDefaultScale(session);
            RoundingMode roundingMode = getNumberRoundingMode(session);
            if (precision < scale) {
                if (roundingMode == RoundingMode.UNNECESSARY) {
                    break;
                }
                scale = min(Decimals.MAX_PRECISION, scale);
                precision = scale;
            } else if (numberDefaultScale.isPresent() && precision == PRECISION_OF_UNSPECIFIED_NUMBER) {
                precision = Decimals.MAX_PRECISION;
                scale = numberDefaultScale.get();
            } else if (precision > Decimals.MAX_PRECISION || actualPrecision <= 0) {
                break;
            }
            DecimalType decimalType = createDecimalType(precision, scale);
            // JDBC driver can return BigDecimal with lower scale than column's scale when there are trailing zeroes
            if (decimalType.isShort()) {
                return Optional.of(ColumnMapping.longMapping(decimalType, shortDecimalReadFunction(decimalType, roundingMode), shortDecimalWriteFunction(decimalType), FULL_PUSHDOWN));
            }
            return Optional.of(ColumnMapping.objectMapping(decimalType, longDecimalReadFunction(decimalType, roundingMode), longDecimalWriteFunction(decimalType), FULL_PUSHDOWN));
        case OracleTypes.CHAR:
        case OracleTypes.NCHAR:
            CharType charType = createCharType(typeHandle.getRequiredColumnSize());
            return Optional.of(ColumnMapping.sliceMapping(charType, charReadFunction(charType), oracleCharWriteFunction(), FULL_PUSHDOWN));
        case OracleTypes.VARCHAR:
        case OracleTypes.NVARCHAR:
            return Optional.of(ColumnMapping.sliceMapping(createVarcharType(typeHandle.getRequiredColumnSize()), (varcharResultSet, varcharColumnIndex) -> utf8Slice(varcharResultSet.getString(varcharColumnIndex)), varcharWriteFunction(), FULL_PUSHDOWN));
        case OracleTypes.CLOB:
        case OracleTypes.NCLOB:
            return Optional.of(ColumnMapping.sliceMapping(createUnboundedVarcharType(), (resultSet, columnIndex) -> utf8Slice(resultSet.getString(columnIndex)), varcharWriteFunction(), DISABLE_PUSHDOWN));
        // Oracle's RAW(n)
        case OracleTypes.VARBINARY:
        case OracleTypes.BLOB:
            return Optional.of(ColumnMapping.sliceMapping(VARBINARY, (resultSet, columnIndex) -> wrappedBuffer(resultSet.getBytes(columnIndex)), varbinaryWriteFunction(), DISABLE_PUSHDOWN));
        case OracleTypes.TIMESTAMP:
            return Optional.of(ColumnMapping.longMapping(TIMESTAMP_MILLIS, oracleTimestampReadFunction(), trinoTimestampToOracleTimestampWriteFunction(), FULL_PUSHDOWN));
        case OracleTypes.TIMESTAMPTZ:
            return Optional.of(oracleTimestampWithTimeZoneColumnMapping());
    }
    if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
        return mapToUnboundedVarchar(typeHandle);
    }
    return Optional.empty();
}
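The OracleTypes.NUMBER branch above has to cope with Oracle NUMBER columns whose scale exceeds their precision, for example NUMBER(2, 5), which stores values like 0.00012. A worked example of that branch's arithmetic, with 38 standing in for Trino's Decimals.MAX_PRECISION (illustrative only, not Trino code):

// Illustrative only: how a NUMBER(2, 5) column is widened when the configured rounding
// mode is something other than UNNECESSARY.
public static void main(String[] args) {
    int actualPrecision = 2;
    int decimalDigits = 5;
    int precision = actualPrecision + Math.max(-decimalDigits, 0); // 2
    int scale = Math.max(decimalDigits, 0);                        // 5
    if (precision < scale) {
        scale = Math.min(38, scale); // 38 mirrors Decimals.MAX_PRECISION
        precision = scale;
    }
    System.out.printf("NUMBER(2, 5) -> decimal(%d, %d)%n", precision, scale); // decimal(5, 5)
}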