Example usage of io.trino.plugin.jdbc.PredicatePushdownController in the Trino project (trinodb): the PhoenixClient class, method toColumnMapping.
// Maps a Phoenix JDBC type descriptor to a Trino ColumnMapping (Trino type plus read/write
// functions and predicate-pushdown policy). Returns Optional.empty() for unsupported types,
// unless the session is configured to convert unsupported types to unbounded VARCHAR.
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle) {
// A per-type forced mapping to VARCHAR takes precedence over every standard mapping below.
Optional<ColumnMapping> mapping = getForcedMappingToVarchar(typeHandle);
if (mapping.isPresent()) {
return mapping;
}
switch(typeHandle.getJdbcType()) {
case Types.BOOLEAN:
return Optional.of(booleanColumnMapping());
case Types.TINYINT:
return Optional.of(tinyintColumnMapping());
case Types.SMALLINT:
return Optional.of(smallintColumnMapping());
case Types.INTEGER:
return Optional.of(integerColumnMapping());
case Types.BIGINT:
return Optional.of(bigintColumnMapping());
case Types.FLOAT:
// NOTE(review): JDBC FLOAT is mapped to Trino REAL (not DOUBLE) — presumably Phoenix
// FLOAT is single-precision; confirm against the Phoenix type system docs.
return Optional.of(realColumnMapping());
case Types.DOUBLE:
return Optional.of(doubleColumnMapping());
case Types.DECIMAL:
Optional<Integer> columnSize = typeHandle.getColumnSize();
int precision = columnSize.orElse(DEFAULT_PRECISION);
int decimalDigits = typeHandle.getDecimalDigits().orElse(DEFAULT_SCALE);
// When overflow rounding is enabled and the remote column declares no precision,
// map to the widest Trino decimal using the session's default scale and rounding mode.
if (getDecimalRounding(session) == ALLOW_OVERFLOW) {
if (columnSize.isEmpty()) {
return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, getDecimalDefaultScale(session)), getDecimalRoundingMode(session)));
}
}
// TODO does phoenix support negative scale?
// Map decimal(p, -s) (negative scale) to decimal(p+s, 0).
precision = precision + max(-decimalDigits, 0);
if (precision > Decimals.MAX_PRECISION) {
// Wider than Trino's decimal supports: break out of the switch so the
// unsupported-type handling at the bottom of the method applies.
break;
}
return Optional.of(decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0)), UNNECESSARY));
case Types.CHAR:
// NOTE(review): the trailing boolean is forwarded to defaultCharColumnMapping —
// presumably a case-sensitivity flag for pushdown; confirm against its signature.
return Optional.of(defaultCharColumnMapping(typeHandle.getRequiredColumnSize(), true));
case VARCHAR:
case NVARCHAR:
case LONGVARCHAR:
case LONGNVARCHAR:
// No declared length means an unbounded VARCHAR; otherwise bound it to the column size.
if (typeHandle.getColumnSize().isEmpty()) {
return Optional.of(varcharColumnMapping(createUnboundedVarcharType(), true));
}
return Optional.of(defaultVarcharColumnMapping(typeHandle.getRequiredColumnSize(), true));
case Types.BINARY:
case Types.VARBINARY:
return Optional.of(varbinaryColumnMapping());
case Types.DATE:
// Dates are written as string literals — NOTE(review): presumably to sidestep
// java.sql.Date timezone pitfalls; confirm against dateWriteFunctionUsingString.
return Optional.of(ColumnMapping.longMapping(DATE, dateReadFunction(), dateWriteFunctionUsingString()));
// TODO add support for TIMESTAMP after Phoenix adds support for LocalDateTime
case TIMESTAMP:
case TIME_WITH_TIMEZONE:
case TIMESTAMP_WITH_TIMEZONE:
// Deliberately unsupported (see TODO above); optionally degrade to VARCHAR.
if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
return mapToUnboundedVarchar(typeHandle);
}
return Optional.empty();
case ARRAY:
JdbcTypeHandle elementTypeHandle = getArrayElementTypeHandle(typeHandle);
// Arrays with VARBINARY elements are not supported at all.
if (elementTypeHandle.getJdbcType() == Types.VARBINARY) {
return Optional.empty();
}
// Recursively map the element type; an unmappable element makes the array unmappable.
return toColumnMapping(session, connection, elementTypeHandle).map(elementMapping -> {
ArrayType trinoArrayType = new ArrayType(elementMapping.getType());
String jdbcTypeName = elementTypeHandle.getJdbcTypeName().orElseThrow(() -> new TrinoException(PHOENIX_METADATA_ERROR, "Type name is missing for jdbc type: " + JDBCType.valueOf(elementTypeHandle.getJdbcType())));
// TODO (https://github.com/trinodb/trino/issues/11132) Enable predicate pushdown on ARRAY(CHAR) type in Phoenix
PredicatePushdownController pushdownController = elementTypeHandle.getJdbcType() == Types.CHAR ? DISABLE_PUSHDOWN : FULL_PUSHDOWN;
return arrayColumnMapping(session, trinoArrayType, jdbcTypeName, pushdownController);
});
}
// Reached for types with no case above, and for DECIMAL precision overflow (the break).
if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
return mapToUnboundedVarchar(typeHandle);
}
return Optional.empty();
}
A second, near-identical occurrence of the same example: PhoenixClient.toColumnMapping in the Trino project (trinodb), again demonstrating io.trino.plugin.jdbc.PredicatePushdownController.
// Resolves the Trino ColumnMapping for a Phoenix JDBC type handle: Trino type, read/write
// functions, and predicate-pushdown behavior. Unsupported types yield Optional.empty()
// unless the session degrades them to unbounded VARCHAR.
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle) {
    // Forced-to-VARCHAR overrides win over every standard mapping.
    Optional<ColumnMapping> forcedVarchar = getForcedMappingToVarchar(typeHandle);
    if (forcedVarchar.isPresent()) {
        return forcedVarchar;
    }

    switch (typeHandle.getJdbcType()) {
        case Types.BOOLEAN:
            return Optional.of(booleanColumnMapping());

        case Types.TINYINT:
            return Optional.of(tinyintColumnMapping());

        case Types.SMALLINT:
            return Optional.of(smallintColumnMapping());

        case Types.INTEGER:
            return Optional.of(integerColumnMapping());

        case Types.BIGINT:
            return Optional.of(bigintColumnMapping());

        case Types.FLOAT:
            // JDBC FLOAT maps to Trino REAL here, not DOUBLE.
            return Optional.of(realColumnMapping());

        case Types.DOUBLE:
            return Optional.of(doubleColumnMapping());

        case Types.DECIMAL: {
            Optional<Integer> reportedPrecision = typeHandle.getColumnSize();
            int scale = typeHandle.getDecimalDigits().orElse(DEFAULT_SCALE);
            // Overflow rounding enabled + no declared precision: widest decimal with the
            // session's default scale and rounding mode.
            if (getDecimalRounding(session) == ALLOW_OVERFLOW && reportedPrecision.isEmpty()) {
                return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, getDecimalDefaultScale(session)), getDecimalRoundingMode(session)));
            }
            // TODO does phoenix support negative scale?
            // A negative scale -s widens the precision: decimal(p, -s) -> decimal(p+s, 0).
            int effectivePrecision = reportedPrecision.orElse(DEFAULT_PRECISION) + max(-scale, 0);
            if (effectivePrecision > Decimals.MAX_PRECISION) {
                // Too wide for Trino decimal; escape to the unsupported-type handling below.
                break;
            }
            return Optional.of(decimalColumnMapping(createDecimalType(effectivePrecision, max(scale, 0)), UNNECESSARY));
        }

        case Types.CHAR:
            return Optional.of(defaultCharColumnMapping(typeHandle.getRequiredColumnSize(), true));

        case Types.VARCHAR:
        case Types.NVARCHAR:
        case Types.LONGVARCHAR:
        case Types.LONGNVARCHAR:
            // Unbounded when no length is declared, bounded otherwise.
            return Optional.of(typeHandle.getColumnSize().isEmpty()
                    ? varcharColumnMapping(createUnboundedVarcharType(), true)
                    : defaultVarcharColumnMapping(typeHandle.getRequiredColumnSize(), true));

        case Types.BINARY:
        case Types.VARBINARY:
            return Optional.of(varbinaryColumnMapping());

        case Types.DATE:
            return Optional.of(ColumnMapping.longMapping(DATE, dateReadFunction(), dateWriteFunctionUsingString()));

        // TODO add support for TIMESTAMP after Phoenix adds support for LocalDateTime
        case Types.TIMESTAMP:
        case Types.TIME_WITH_TIMEZONE:
        case Types.TIMESTAMP_WITH_TIMEZONE:
            if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
                return mapToUnboundedVarchar(typeHandle);
            }
            return Optional.empty();

        case Types.ARRAY: {
            JdbcTypeHandle elementHandle = getArrayElementTypeHandle(typeHandle);
            // VARBINARY element arrays are not supported.
            if (elementHandle.getJdbcType() == Types.VARBINARY) {
                return Optional.empty();
            }
            // Map the element type recursively; if it is unmappable, so is the array.
            return toColumnMapping(session, connection, elementHandle).map(elementMapping -> {
                String elementTypeName = elementHandle.getJdbcTypeName()
                        .orElseThrow(() -> new TrinoException(PHOENIX_METADATA_ERROR, "Type name is missing for jdbc type: " + JDBCType.valueOf(elementHandle.getJdbcType())));
                // TODO (https://github.com/trinodb/trino/issues/11132) Enable predicate pushdown on ARRAY(CHAR) type in Phoenix
                PredicatePushdownController controller = elementHandle.getJdbcType() == Types.CHAR
                        ? DISABLE_PUSHDOWN
                        : FULL_PUSHDOWN;
                return arrayColumnMapping(session, new ArrayType(elementMapping.getType()), elementTypeName, controller);
            });
        }
    }

    // Unknown types, and DECIMAL precision overflow, land here.
    if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
        return mapToUnboundedVarchar(typeHandle);
    }
    return Optional.empty();
}
Aggregations: no aggregation usages are listed for this example.