Use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.
Class ClickHouseClient, method toColumnMapping:
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle) {
    String jdbcTypeName = typeHandle.getJdbcTypeName()
            .orElseThrow(() -> new TrinoException(JDBC_ERROR, "Type name is missing: " + typeHandle));
    Optional<ColumnMapping> mapping = getForcedMappingToVarchar(typeHandle);
    if (mapping.isPresent()) {
        return mapping;
    }
    ClickHouseColumn column = ClickHouseColumn.of("", jdbcTypeName);
    ClickHouseDataType columnDataType = column.getDataType();
    switch (columnDataType) {
        case UInt8:
            return Optional.of(ColumnMapping.longMapping(SMALLINT, ResultSet::getShort, uInt8WriteFunction()));
        case UInt16:
            return Optional.of(ColumnMapping.longMapping(INTEGER, ResultSet::getInt, uInt16WriteFunction()));
        case UInt32:
            return Optional.of(ColumnMapping.longMapping(BIGINT, ResultSet::getLong, uInt32WriteFunction()));
        case UInt64:
            return Optional.of(ColumnMapping.objectMapping(UINT64_TYPE, longDecimalReadFunction(UINT64_TYPE, UNNECESSARY), uInt64WriteFunction()));
        case IPv4:
            return Optional.of(ipAddressColumnMapping("IPv4StringToNum(?)"));
        case IPv6:
            return Optional.of(ipAddressColumnMapping("IPv6StringToNum(?)"));
        case Enum8:
        case Enum16:
            return Optional.of(ColumnMapping.sliceMapping(
                    createUnboundedVarcharType(),
                    varcharReadFunction(createUnboundedVarcharType()),
                    varcharWriteFunction(),
                    // TODO (https://github.com/trinodb/trino/issues/7100) Currently pushdown would not work and may require a custom bind expression
                    DISABLE_PUSHDOWN));
        case FixedString: // FixedString(n)
        case String:
            if (isMapStringAsVarchar(session)) {
                return Optional.of(ColumnMapping.sliceMapping(
                        createUnboundedVarcharType(),
                        varcharReadFunction(createUnboundedVarcharType()),
                        varcharWriteFunction(),
                        DISABLE_PUSHDOWN));
            }
            // TODO (https://github.com/trinodb/trino/issues/7100) test & enable predicate pushdown
            return Optional.of(varbinaryColumnMapping());
        case UUID:
            return Optional.of(uuidColumnMapping());
        default:
    }
    switch (typeHandle.getJdbcType()) {
        case Types.TINYINT:
            return Optional.of(tinyintColumnMapping());
        case Types.SMALLINT:
            return Optional.of(smallintColumnMapping());
        case Types.INTEGER:
            return Optional.of(integerColumnMapping());
        case Types.BIGINT:
            return Optional.of(bigintColumnMapping());
        case Types.FLOAT:
        case Types.REAL:
            return Optional.of(ColumnMapping.longMapping(REAL, (resultSet, columnIndex) -> floatToRawIntBits(resultSet.getFloat(columnIndex)), realWriteFunction(), DISABLE_PUSHDOWN));
        case Types.DOUBLE:
            return Optional.of(doubleColumnMapping());
        case Types.DECIMAL:
            int decimalDigits = typeHandle.getRequiredDecimalDigits();
            int precision = typeHandle.getRequiredColumnSize();
            ColumnMapping decimalColumnMapping;
            if (getDecimalRounding(session) == ALLOW_OVERFLOW && precision > Decimals.MAX_PRECISION) {
                int scale = Math.min(decimalDigits, getDecimalDefaultScale(session));
                decimalColumnMapping = decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, scale), getDecimalRoundingMode(session));
            } else {
                decimalColumnMapping = decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0)));
            }
            return Optional.of(new ColumnMapping(
                    decimalColumnMapping.getType(),
                    decimalColumnMapping.getReadFunction(),
                    decimalColumnMapping.getWriteFunction(),
                    // TODO (https://github.com/trinodb/trino/issues/7100) fix, enable and test decimal pushdown
                    DISABLE_PUSHDOWN));
        case Types.DATE:
            return Optional.of(dateColumnMappingUsingLocalDate());
        case Types.TIMESTAMP:
            if (columnDataType == ClickHouseDataType.DateTime) {
                verify(typeHandle.getRequiredDecimalDigits() == 0, "Expected 0 as timestamp precision, but got %s", typeHandle.getRequiredDecimalDigits());
                return Optional.of(ColumnMapping.longMapping(TIMESTAMP_SECONDS, timestampReadFunction(TIMESTAMP_SECONDS), timestampSecondsWriteFunction()));
            }
            // TODO (https://github.com/trinodb/trino/issues/10537) Add support for Datetime64 type
            return Optional.of(timestampColumnMappingUsingSqlTimestampWithRounding(TIMESTAMP_MILLIS));
    }
    return Optional.empty();
}
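The unsigned write functions referenced above (uInt8WriteFunction and friends) are not shown in this snippet. As a minimal sketch, assuming Trino's io.trino.plugin.jdbc.LongWriteFunction functional interface, a range-checked UInt8 writer could look like the following; the actual helper in ClickHouseClient may differ:

import io.trino.plugin.jdbc.LongWriteFunction;
import static com.google.common.base.Preconditions.checkArgument;

// Hypothetical sketch: writes a Trino SMALLINT value into a ClickHouse UInt8 column,
// rejecting values outside the unsigned 8-bit range 0..255
private static LongWriteFunction uInt8WriteFunction() {
    return (statement, index, value) -> {
        checkArgument(value >= 0 && value <= 255, "value out of range for ClickHouse UInt8: %s", value);
        statement.setShort(index, (short) value);
    };
}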
Use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.
Class MapToMapCast, method buildProcessor:
/**
 * The signature of the returned MethodHandle is (Block fromMap, int position, ConnectorSession session, BlockBuilder mapBlockBuilder)void.
 * The processor gets the value from fromMap, casts it, and writes it to mapBlockBuilder.
 */
private MethodHandle buildProcessor(FunctionDependencies functionDependencies, Type fromType, Type toType, boolean isKey) {
    // Get the block-position cast, with an optional leading ConnectorSession argument
    FunctionNullability functionNullability = functionDependencies.getCastNullability(fromType, toType);
    InvocationConvention invocationConvention = new InvocationConvention(
            ImmutableList.of(BLOCK_POSITION),
            functionNullability.isReturnNullable() ? NULLABLE_RETURN : FAIL_ON_NULL,
            true,
            false);
    MethodHandle cast = functionDependencies.getCastInvoker(fromType, toType, invocationConvention).getMethodHandle();
    // Normalize the cast so that the ConnectorSession is its first argument
    if (cast.type().parameterArray()[0] != ConnectorSession.class) {
        cast = MethodHandles.dropArguments(cast, 0, ConnectorSession.class);
    }
    // Change the cast signature to (Block, int, ConnectorSession):T
    cast = permuteArguments(cast, methodType(cast.type().returnType(), Block.class, int.class, ConnectorSession.class), 2, 0, 1);
    // If the key cast function is nullable, check that the result is not null
    if (isKey && functionNullability.isReturnNullable()) {
        cast = compose(nullChecker(cast.type().returnType()), cast);
    }
    // Get the write method with signature (T, BlockBuilder):void
    MethodHandle writer = nativeValueWriter(toType);
    writer = permuteArguments(writer, methodType(void.class, writer.type().parameterArray()[1], BlockBuilder.class), 1, 0);
    // Ensure the cast returns the type expected by the writer
    cast = cast.asType(methodType(writer.type().parameterType(0), cast.type().parameterArray()));
    return compose(writer, cast);
}
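The argument shuffling above relies entirely on java.lang.invoke adapters. A minimal, self-contained sketch of how dropArguments and permuteArguments behave; the String.concat handle is purely illustrative and not from Trino:

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class MethodHandleShuffle {
    public static void main(String[] args) throws Throwable {
        // A handle with signature (String, String) -> String
        MethodHandle concat = MethodHandles.lookup().findVirtual(
                String.class, "concat", MethodType.methodType(String.class, String.class));

        // dropArguments inserts an ignored leading parameter: (Integer, String, String) -> String
        MethodHandle withDummy = MethodHandles.dropArguments(concat, 0, Integer.class);
        System.out.println(withDummy.invoke(42, "foo", "bar")); // foobar

        // permuteArguments reorders parameters: target parameter i is fed from
        // new-signature parameter reorder[i], so {1, 0} swaps the two inputs
        MethodHandle swapped = MethodHandles.permuteArguments(
                concat, MethodType.methodType(String.class, String.class, String.class), 1, 0);
        System.out.println(swapped.invoke("foo", "bar")); // barfoo
    }
}

buildProcessor uses the same two adapters: dropArguments gives every cast a uniform ConnectorSession-first shape, and permuteArguments then rotates that shape into the (Block, int, ConnectorSession) order the processor expects.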
Use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.
Class FormatFunction, method valueConverter:
private static BiFunction<ConnectorSession, Block, Object> valueConverter(FunctionDependencies functionDependencies, Type type, int position) {
    if (type.equals(UNKNOWN)) {
        return (session, block) -> null;
    }
    if (type.equals(BOOLEAN)) {
        return (session, block) -> type.getBoolean(block, position);
    }
    if (type.equals(TINYINT) || type.equals(SMALLINT) || type.equals(INTEGER) || type.equals(BIGINT)) {
        return (session, block) -> type.getLong(block, position);
    }
    if (type.equals(REAL)) {
        return (session, block) -> intBitsToFloat(toIntExact(type.getLong(block, position)));
    }
    if (type.equals(DOUBLE)) {
        return (session, block) -> type.getDouble(block, position);
    }
    if (type.equals(DATE)) {
        return (session, block) -> LocalDate.ofEpochDay(type.getLong(block, position));
    }
    if (type instanceof TimestampWithTimeZoneType) {
        return (session, block) -> toZonedDateTime((TimestampWithTimeZoneType) type, block, position);
    }
    if (type instanceof TimestampType) {
        return (session, block) -> toLocalDateTime((TimestampType) type, block, position);
    }
    if (type instanceof TimeType) {
        return (session, block) -> toLocalTime(type.getLong(block, position));
    }
    // TODO: support TIME WITH TIME ZONE by https://github.com/trinodb/trino/issues/191 + mapping to java.time.OffsetTime
    if (type.equals(JSON)) {
        MethodHandle handle = functionDependencies.getFunctionInvoker(QualifiedName.of("json_format"), ImmutableList.of(JSON), simpleConvention(FAIL_ON_NULL, NEVER_NULL)).getMethodHandle();
        return (session, block) -> convertToString(handle, type.getSlice(block, position));
    }
    if (isShortDecimal(type)) {
        int scale = ((DecimalType) type).getScale();
        return (session, block) -> BigDecimal.valueOf(type.getLong(block, position), scale);
    }
    if (isLongDecimal(type)) {
        int scale = ((DecimalType) type).getScale();
        return (session, block) -> new BigDecimal(((Int128) type.getObject(block, position)).toBigInteger(), scale);
    }
    if (type instanceof VarcharType) {
        return (session, block) -> type.getSlice(block, position).toStringUtf8();
    }
    if (type instanceof CharType) {
        CharType charType = (CharType) type;
        return (session, block) -> padSpaces(type.getSlice(block, position), charType).toStringUtf8();
    }
    // Fall back to reading the native value and casting it to VARCHAR
    BiFunction<ConnectorSession, Block, Object> function;
    if (type.getJavaType() == long.class) {
        function = (session, block) -> type.getLong(block, position);
    } else if (type.getJavaType() == double.class) {
        function = (session, block) -> type.getDouble(block, position);
    } else if (type.getJavaType() == boolean.class) {
        function = (session, block) -> type.getBoolean(block, position);
    } else if (type.getJavaType() == Slice.class) {
        function = (session, block) -> type.getSlice(block, position);
    } else {
        function = (session, block) -> type.getObject(block, position);
    }
    MethodHandle handle = functionDependencies.getCastInvoker(type, VARCHAR, simpleConvention(FAIL_ON_NULL, NEVER_NULL)).getMethodHandle();
    return (session, block) -> convertToString(handle, function.apply(session, block));
}
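The REAL branch above reflects how Trino stores single-precision values on the engine side: as the 32-bit IEEE-754 bit pattern widened into a long. A minimal sketch of the round trip in plain Java, with no Trino types involved:

import static java.lang.Float.floatToRawIntBits;
import static java.lang.Float.intBitsToFloat;
import static java.lang.Math.toIntExact;

public class RealEncoding {
    public static void main(String[] args) {
        float value = 3.14f;

        // Encode: take the raw int bits and widen to long (sign extension
        // preserves the numeric value, so it always fits back into an int)
        long stored = floatToRawIntBits(value);

        // Decode: narrow back to int and reinterpret the bits as a float
        float decoded = intBitsToFloat(toIntExact(stored));

        System.out.println(decoded); // 3.14
    }
}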
Use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.
Class TestHiveGlueMetastore, method testUpdatePartitionedStatisticsOnCreate:
@Test
public void testUpdatePartitionedStatisticsOnCreate() {
    SchemaTableName tableName = temporaryTable("update_partitioned_statistics_create");
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        List<ColumnMetadata> columns = ImmutableList.of(
                new ColumnMetadata("a_column", BigintType.BIGINT),
                new ColumnMetadata("part_column", BigintType.BIGINT));
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, columns, createTableProperties(TEXTFILE, ImmutableList.of("part_column")));
        ConnectorOutputTableHandle createTableHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty(), NO_RETRIES);
        // write data
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, createTableHandle);
        MaterializedResult data = MaterializedResult.resultBuilder(session, BigintType.BIGINT, BigintType.BIGINT)
                .row(1L, 1L)
                .row(2L, 1L)
                .row(3L, 1L)
                .row(4L, 2L)
                .row(5L, 2L)
                .build();
        sink.appendPage(data.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        // prepare statistics
        ComputedStatistics statistics1 = ComputedStatistics.builder(ImmutableList.of("part_column"), ImmutableList.of(singleValueBlock(1)))
                .addTableStatistic(TableStatisticType.ROW_COUNT, singleValueBlock(3))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", MIN_VALUE), singleValueBlock(1))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", MAX_VALUE), singleValueBlock(3))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", NUMBER_OF_DISTINCT_VALUES), singleValueBlock(3))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", NUMBER_OF_NON_NULL_VALUES), singleValueBlock(3))
                .build();
        ComputedStatistics statistics2 = ComputedStatistics.builder(ImmutableList.of("part_column"), ImmutableList.of(singleValueBlock(2)))
                .addTableStatistic(TableStatisticType.ROW_COUNT, singleValueBlock(2))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", MIN_VALUE), singleValueBlock(4))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", MAX_VALUE), singleValueBlock(5))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", NUMBER_OF_DISTINCT_VALUES), singleValueBlock(2))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", NUMBER_OF_NON_NULL_VALUES), singleValueBlock(2))
                .build();
        // finish CTAS
        metadata.finishCreateTable(session, createTableHandle, fragments, ImmutableList.of(statistics1, statistics2));
        transaction.commit();
    } finally {
        dropTable(tableName);
    }
}
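This test (and the next one) builds its statistics blocks with a singleValueBlock helper that is not shown in the snippet. A minimal sketch of what such a helper could look like, assuming it wraps one BIGINT value in a single-position Block; the actual helper in the test class may differ:

import io.trino.spi.block.Block;
import io.trino.spi.block.BlockBuilder;
import static io.trino.spi.type.BigintType.BIGINT;

// Hypothetical helper: a one-position BIGINT block holding the given value
private static Block singleValueBlock(long value) {
    BlockBuilder blockBuilder = BIGINT.createBlockBuilder(null, 1);
    BIGINT.writeLong(blockBuilder, value);
    return blockBuilder.build();
}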
Use of io.trino.spi.connector.ConnectorSession in project trino by trinodb.
Class TestHiveGlueMetastore, method testUpdateStatisticsOnCreate:
@Test
public void testUpdateStatisticsOnCreate() {
    SchemaTableName tableName = temporaryTable("update_statistics_create");
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        List<ColumnMetadata> columns = ImmutableList.of(new ColumnMetadata("a_column", BigintType.BIGINT));
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, columns, createTableProperties(TEXTFILE));
        ConnectorOutputTableHandle createTableHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty(), NO_RETRIES);
        // write data
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, createTableHandle);
        MaterializedResult data = MaterializedResult.resultBuilder(session, BigintType.BIGINT)
                .row(1L)
                .row(2L)
                .row(3L)
                .row(4L)
                .row(5L)
                .build();
        sink.appendPage(data.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        // prepare statistics
        ComputedStatistics statistics = ComputedStatistics.builder(ImmutableList.of(), ImmutableList.of())
                .addTableStatistic(TableStatisticType.ROW_COUNT, singleValueBlock(5))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", MIN_VALUE), singleValueBlock(1))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", MAX_VALUE), singleValueBlock(5))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", NUMBER_OF_DISTINCT_VALUES), singleValueBlock(5))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", NUMBER_OF_NON_NULL_VALUES), singleValueBlock(5))
                .build();
        // finish CTAS
        metadata.finishCreateTable(session, createTableHandle, fragments, ImmutableList.of(statistics));
        transaction.commit();
    } finally {
        dropTable(tableName);
    }
}