Use of io.trino.spi.type.DateType.DATE in project trino by trinodb.
From class TupleDomainParquetPredicate, method getDomain.
/**
* Get a domain for the ranges defined by each pair of elements from {@code minimums} and {@code maximums}.
* Both arrays must have the same length.
*/
private static Domain getDomain(ColumnDescriptor column, Type type, List<Object> minimums, List<Object> maximums, boolean hasNullValue, DateTimeZone timeZone) {
checkArgument(minimums.size() == maximums.size(), "Expected minimums and maximums to have the same size");
if (type.equals(BOOLEAN)) {
boolean hasTrueValues = minimums.stream().anyMatch(value -> (boolean) value) || maximums.stream().anyMatch(value -> (boolean) value);
boolean hasFalseValues = minimums.stream().anyMatch(value -> !(boolean) value) || maximums.stream().anyMatch(value -> !(boolean) value);
if (hasTrueValues && hasFalseValues) {
return Domain.all(type);
}
if (hasTrueValues) {
return Domain.create(ValueSet.of(type, true), hasNullValue);
}
if (hasFalseValues) {
return Domain.create(ValueSet.of(type, false), hasNullValue);
}
// All nulls case is handled earlier
throw new VerifyException("Impossible boolean statistics");
}
if (type.equals(BIGINT) || type.equals(INTEGER) || type.equals(DATE) || type.equals(SMALLINT) || type.equals(TINYINT)) {
List<Range> ranges = new ArrayList<>();
for (int i = 0; i < minimums.size(); i++) {
long min = asLong(minimums.get(i));
long max = asLong(maximums.get(i));
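// Min/max that fall outside the type's value range indicate overflowed statistics, so fall back to an unconstrained domain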
if (isStatisticsOverflow(type, min, max)) {
return Domain.create(ValueSet.all(type), hasNullValue);
}
ranges.add(Range.range(type, min, true, max, true));
}
return Domain.create(ValueSet.ofRanges(ranges), hasNullValue);
}
if (type instanceof DecimalType) {
DecimalType decimalType = (DecimalType) type;
List<Range> ranges = new ArrayList<>();
if (decimalType.isShort()) {
for (int i = 0; i < minimums.size(); i++) {
Object min = minimums.get(i);
Object max = maximums.get(i);
long minValue = min instanceof Binary ? getShortDecimalValue(((Binary) min).getBytes()) : asLong(min);
long maxValue = max instanceof Binary ? getShortDecimalValue(((Binary) max).getBytes()) : asLong(max);
if (isStatisticsOverflow(type, minValue, maxValue)) {
return Domain.create(ValueSet.all(type), hasNullValue);
}
ranges.add(Range.range(type, minValue, true, maxValue, true));
}
} else {
for (int i = 0; i < minimums.size(); i++) {
Int128 min = Int128.fromBigEndian(((Binary) minimums.get(i)).getBytes());
Int128 max = Int128.fromBigEndian(((Binary) maximums.get(i)).getBytes());
ranges.add(Range.range(type, min, true, max, true));
}
}
return Domain.create(ValueSet.ofRanges(ranges), hasNullValue);
}
if (type.equals(REAL)) {
List<Range> ranges = new ArrayList<>();
for (int i = 0; i < minimums.size(); i++) {
Float min = (Float) minimums.get(i);
Float max = (Float) maximums.get(i);
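// NaN has no meaningful ordering, so statistics containing NaN cannot be used for pruning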
if (min.isNaN() || max.isNaN()) {
return Domain.create(ValueSet.all(type), hasNullValue);
}
ranges.add(Range.range(type, (long) floatToRawIntBits(min), true, (long) floatToRawIntBits(max), true));
}
return Domain.create(ValueSet.ofRanges(ranges), hasNullValue);
}
if (type.equals(DOUBLE)) {
List<Range> ranges = new ArrayList<>();
for (int i = 0; i < minimums.size(); i++) {
Double min = (Double) minimums.get(i);
Double max = (Double) maximums.get(i);
if (min.isNaN() || max.isNaN()) {
return Domain.create(ValueSet.all(type), hasNullValue);
}
ranges.add(Range.range(type, min, true, max, true));
}
return Domain.create(ValueSet.ofRanges(ranges), hasNullValue);
}
if (type instanceof VarcharType) {
List<Range> ranges = new ArrayList<>();
for (int i = 0; i < minimums.size(); i++) {
Slice min = Slices.wrappedBuffer(((Binary) minimums.get(i)).toByteBuffer());
Slice max = Slices.wrappedBuffer(((Binary) maximums.get(i)).toByteBuffer());
ranges.add(Range.range(type, min, true, max, true));
}
return Domain.create(ValueSet.ofRanges(ranges), hasNullValue);
}
if (type instanceof TimestampType) {
if (column.getPrimitiveType().getPrimitiveTypeName().equals(INT96)) {
TrinoTimestampEncoder<?> timestampEncoder = createTimestampEncoder((TimestampType) type, timeZone);
List<Object> values = new ArrayList<>();
for (int i = 0; i < minimums.size(); i++) {
Object min = minimums.get(i);
Object max = maximums.get(i);
// INT96 min/max statistics are only trusted when both are present and equal; otherwise keep the full domain
if (!(min instanceof Binary) || !(max instanceof Binary) || !min.equals(max)) {
return Domain.create(ValueSet.all(type), hasNullValue);
}
values.add(timestampEncoder.getTimestamp(decodeInt96Timestamp((Binary) min)));
}
return Domain.multipleValues(type, values, hasNullValue);
}
if (column.getPrimitiveType().getPrimitiveTypeName().equals(INT64)) {
LogicalTypeAnnotation logicalTypeAnnotation = column.getPrimitiveType().getLogicalTypeAnnotation();
if (!(logicalTypeAnnotation instanceof TimestampLogicalTypeAnnotation)) {
// Invalid statistics. Unit and UTC adjustment are not known
return Domain.create(ValueSet.all(type), hasNullValue);
}
TimestampLogicalTypeAnnotation timestampTypeAnnotation = (TimestampLogicalTypeAnnotation) logicalTypeAnnotation;
// Bail out if the precision is not known
if (timestampTypeAnnotation.getUnit() == null) {
return Domain.create(ValueSet.all(type), hasNullValue);
}
TrinoTimestampEncoder<?> timestampEncoder = createTimestampEncoder((TimestampType) type, DateTimeZone.UTC);
List<Range> ranges = new ArrayList<>();
for (int i = 0; i < minimums.size(); i++) {
long min = (long) minimums.get(i);
long max = (long) maximums.get(i);
ranges.add(Range.range(type, timestampEncoder.getTimestamp(decodeInt64Timestamp(min, timestampTypeAnnotation.getUnit())), true, timestampEncoder.getTimestamp(decodeInt64Timestamp(max, timestampTypeAnnotation.getUnit())), true));
}
return Domain.create(ValueSet.ofRanges(ranges), hasNullValue);
}
}
return Domain.create(ValueSet.all(type), hasNullValue);
}
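For reference, a minimal, self-contained sketch of how the integer branch above turns per-column min/max statistics into a Trino Domain. The class name and the sample statistics are assumptions for illustration; only the Domain, ValueSet, and Range calls mirror the snippet.

import static io.trino.spi.type.BigintType.BIGINT;

import io.trino.spi.predicate.Domain;
import io.trino.spi.predicate.Range;
import io.trino.spi.predicate.ValueSet;
import java.util.ArrayList;
import java.util.List;

public class DomainFromStatsSketch {
    public static void main(String[] args) {
        // Hypothetical per-group min/max statistics for a BIGINT column
        List<Long> minimums = List.of(10L, 500L);
        List<Long> maximums = List.of(99L, 800L);
        boolean hasNullValue = false;

        List<Range> ranges = new ArrayList<>();
        for (int i = 0; i < minimums.size(); i++) {
            // Each min/max pair contributes one closed range [min, max]
            ranges.add(Range.range(BIGINT, minimums.get(i), true, maximums.get(i), true));
        }
        // The resulting domain admits only values covered by some range (no nulls here)
        Domain domain = Domain.create(ValueSet.ofRanges(ranges), hasNullValue);
        System.out.println(domain);
    }
}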
Use of io.trino.spi.type.DateType.DATE in project trino by trinodb.
From class OracleClient, method toColumnMapping.
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle) {
String jdbcTypeName = typeHandle.getJdbcTypeName().orElseThrow(() -> new TrinoException(JDBC_ERROR, "Type name is missing: " + typeHandle));
Optional<ColumnMapping> mappingToVarchar = getForcedMappingToVarchar(typeHandle);
if (mappingToVarchar.isPresent()) {
return mappingToVarchar;
}
if (jdbcTypeName.equalsIgnoreCase("date")) {
return Optional.of(ColumnMapping.longMapping(TIMESTAMP_SECONDS, oracleTimestampReadFunction(), trinoTimestampToOracleDateWriteFunction(), FULL_PUSHDOWN));
}
switch(typeHandle.getJdbcType()) {
case Types.SMALLINT:
return Optional.of(ColumnMapping.longMapping(SMALLINT, ResultSet::getShort, smallintWriteFunction(), FULL_PUSHDOWN));
case OracleTypes.BINARY_FLOAT:
return Optional.of(ColumnMapping.longMapping(REAL, (resultSet, columnIndex) -> floatToRawIntBits(resultSet.getFloat(columnIndex)), oracleRealWriteFunction(), FULL_PUSHDOWN));
case OracleTypes.BINARY_DOUBLE:
case OracleTypes.FLOAT:
return Optional.of(ColumnMapping.doubleMapping(DOUBLE, ResultSet::getDouble, oracleDoubleWriteFunction(), FULL_PUSHDOWN));
case OracleTypes.NUMBER:
int actualPrecision = typeHandle.getRequiredColumnSize();
int decimalDigits = typeHandle.getRequiredDecimalDigits();
// Map negative scale to decimal(p+s, 0).
int precision = actualPrecision + max(-decimalDigits, 0);
int scale = max(decimalDigits, 0);
Optional<Integer> numberDefaultScale = getNumberDefaultScale(session);
RoundingMode roundingMode = getNumberRoundingMode(session);
if (precision < scale) {
if (roundingMode == RoundingMode.UNNECESSARY) {
break;
}
scale = min(Decimals.MAX_PRECISION, scale);
precision = scale;
} else if (numberDefaultScale.isPresent() && precision == PRECISION_OF_UNSPECIFIED_NUMBER) {
precision = Decimals.MAX_PRECISION;
scale = numberDefaultScale.get();
} else if (precision > Decimals.MAX_PRECISION || actualPrecision <= 0) {
break;
}
DecimalType decimalType = createDecimalType(precision, scale);
// JDBC driver can return BigDecimal with lower scale than column's scale when there are trailing zeroes
if (decimalType.isShort()) {
return Optional.of(ColumnMapping.longMapping(decimalType, shortDecimalReadFunction(decimalType, roundingMode), shortDecimalWriteFunction(decimalType), FULL_PUSHDOWN));
}
return Optional.of(ColumnMapping.objectMapping(decimalType, longDecimalReadFunction(decimalType, roundingMode), longDecimalWriteFunction(decimalType), FULL_PUSHDOWN));
case OracleTypes.CHAR:
case OracleTypes.NCHAR:
CharType charType = createCharType(typeHandle.getRequiredColumnSize());
return Optional.of(ColumnMapping.sliceMapping(charType, charReadFunction(charType), oracleCharWriteFunction(), FULL_PUSHDOWN));
case OracleTypes.VARCHAR:
case OracleTypes.NVARCHAR:
return Optional.of(ColumnMapping.sliceMapping(createVarcharType(typeHandle.getRequiredColumnSize()), (varcharResultSet, varcharColumnIndex) -> utf8Slice(varcharResultSet.getString(varcharColumnIndex)), varcharWriteFunction(), FULL_PUSHDOWN));
case OracleTypes.CLOB:
case OracleTypes.NCLOB:
return Optional.of(ColumnMapping.sliceMapping(createUnboundedVarcharType(), (resultSet, columnIndex) -> utf8Slice(resultSet.getString(columnIndex)), varcharWriteFunction(), DISABLE_PUSHDOWN));
// Oracle's RAW(n)
case OracleTypes.VARBINARY:
case OracleTypes.BLOB:
return Optional.of(ColumnMapping.sliceMapping(VARBINARY, (resultSet, columnIndex) -> wrappedBuffer(resultSet.getBytes(columnIndex)), varbinaryWriteFunction(), DISABLE_PUSHDOWN));
case OracleTypes.TIMESTAMP:
return Optional.of(ColumnMapping.longMapping(TIMESTAMP_MILLIS, oracleTimestampReadFunction(), trinoTimestampToOracleTimestampWriteFunction(), FULL_PUSHDOWN));
case OracleTypes.TIMESTAMPTZ:
return Optional.of(oracleTimestampWithTimeZoneColumnMapping());
}
if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
return mapToUnboundedVarchar(typeHandle);
}
return Optional.empty();
}
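The OracleTypes.NUMBER branch folds a negative Oracle scale into the precision before creating the Trino decimal type. Below is a minimal sketch of that arithmetic, assuming a hypothetical NUMBER(5,-2) column; the class name and sample values are not taken from the snippet, only createDecimalType is from the Trino SPI.

import static io.trino.spi.type.DecimalType.createDecimalType;
import static java.lang.Math.max;

import io.trino.spi.type.DecimalType;

public class OracleNumberMappingSketch {
    public static void main(String[] args) {
        // Hypothetical Oracle column declared as NUMBER(5, -2)
        int actualPrecision = 5;
        int decimalDigits = -2;

        // Negative scale widens the precision: NUMBER(p, -s) becomes decimal(p + s, 0)
        int precision = actualPrecision + max(-decimalDigits, 0);
        int scale = max(decimalDigits, 0);

        DecimalType decimalType = createDecimalType(precision, scale);
        System.out.println(decimalType); // the mapped Trino type is decimal(7,0)
    }
}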
Use of io.trino.spi.type.DateType.DATE in project trino by trinodb.
From class SingleStoreClient, method toColumnMapping.
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle) {
String jdbcTypeName = typeHandle.getJdbcTypeName().orElseThrow(() -> new TrinoException(JDBC_ERROR, "Type name is missing: " + typeHandle));
Optional<ColumnMapping> mapping = getForcedMappingToVarchar(typeHandle);
if (mapping.isPresent()) {
return mapping;
}
Optional<ColumnMapping> unsignedMapping = getUnsignedMapping(typeHandle);
if (unsignedMapping.isPresent()) {
return unsignedMapping;
}
if (jdbcTypeName.equalsIgnoreCase("json")) {
return Optional.of(jsonColumnMapping());
}
switch(typeHandle.getJdbcType()) {
case Types.BIT:
case Types.BOOLEAN:
return Optional.of(booleanColumnMapping());
case Types.TINYINT:
return Optional.of(tinyintColumnMapping());
case Types.SMALLINT:
return Optional.of(smallintColumnMapping());
case Types.INTEGER:
return Optional.of(integerColumnMapping());
case Types.BIGINT:
return Optional.of(bigintColumnMapping());
case Types.REAL:
// REAL values are approximate; treating them as exact in pushed-down comparisons may lead to problems, so pushdown is disabled
return Optional.of(ColumnMapping.longMapping(REAL, (resultSet, columnIndex) -> floatToRawIntBits(resultSet.getFloat(columnIndex)), realWriteFunction(), DISABLE_PUSHDOWN));
case Types.DOUBLE:
return Optional.of(doubleColumnMapping());
case Types.CHAR:
// TODO this is a dummy mapping copied from StandardColumnMappings; verify that it is the proper mapping
case Types.NCHAR:
return Optional.of(defaultCharColumnMapping(typeHandle.getRequiredColumnSize(), false));
case Types.VARCHAR:
case Types.LONGVARCHAR:
return Optional.of(defaultVarcharColumnMapping(typeHandle.getRequiredColumnSize(), false));
case Types.DECIMAL:
int precision = typeHandle.getRequiredColumnSize();
int decimalDigits = typeHandle.getRequiredDecimalDigits();
if (getDecimalRounding(session) == ALLOW_OVERFLOW && precision > Decimals.MAX_PRECISION) {
int scale = min(decimalDigits, getDecimalDefaultScale(session));
return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, scale), getDecimalRoundingMode(session)));
}
if (precision > Decimals.MAX_PRECISION) {
break;
}
return Optional.of(decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0))));
case Types.BINARY:
case Types.VARBINARY:
case Types.LONGVARBINARY:
return Optional.of(varbinaryColumnMapping());
case Types.DATE:
return Optional.of(ColumnMapping.longMapping(DATE, dateReadFunctionUsingLocalDate(), dateWriteFunction()));
case Types.TIME:
TimeType timeType = createTimeType(getTimePrecision(typeHandle.getRequiredColumnSize()));
return Optional.of(ColumnMapping.longMapping(timeType, singleStoreTimeReadFunction(timeType), timeWriteFunction(timeType.getPrecision())));
case Types.TIMESTAMP:
// TODO (https://github.com/trinodb/trino/issues/5450) Fix DST handling
TimestampType timestampType = createTimestampType(getTimestampPrecision(typeHandle.getRequiredColumnSize()));
return Optional.of(timestampColumnMapping(timestampType));
}
if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
return mapToUnboundedVarchar(typeHandle);
}
return Optional.empty();
}
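The Types.DECIMAL branch caps precisions wider than Trino's maximum when decimal rounding is allowed. A minimal sketch of that clamping, assuming a hypothetical decimal(65,10) source column and a session default scale of 16; only Decimals.MAX_PRECISION and createDecimalType come from the Trino SPI, the rest is illustrative.

import static io.trino.spi.type.DecimalType.createDecimalType;
import static java.lang.Math.min;

import io.trino.spi.type.DecimalType;
import io.trino.spi.type.Decimals;

public class DecimalOverflowMappingSketch {
    public static void main(String[] args) {
        // Hypothetical source column: decimal(65, 10), wider than Trino's maximum precision of 38
        int precision = 65;
        int decimalDigits = 10;
        int decimalDefaultScale = 16; // assumed session default scale

        if (precision > Decimals.MAX_PRECISION) {
            // With overflow allowed, cap the precision and clamp the scale to the session default
            int scale = min(decimalDigits, decimalDefaultScale);
            DecimalType mapped = createDecimalType(Decimals.MAX_PRECISION, scale);
            System.out.println(mapped); // the mapped Trino type is decimal(38,10)
        }
    }
}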