Use of io.trino.spi.type.VarcharType.VARCHAR in project trino by trinodb.
Class BaseRaptorConnectorTest, method testTablesSystemTable:
@Test
public void testTablesSystemTable() {
assertUpdate("CREATE TABLE system_tables_test0 (c00 timestamp, c01 varchar, c02 double, c03 bigint, c04 bigint)");
assertUpdate("CREATE TABLE system_tables_test1 (c10 timestamp, c11 varchar, c12 double, c13 bigint, c14 bigint) " +
        "WITH (temporal_column = 'c10')");
assertUpdate("CREATE TABLE system_tables_test2 (c20 timestamp, c21 varchar, c22 double, c23 bigint, c24 bigint) " +
        "WITH (temporal_column = 'c20', ordering = ARRAY['c22', 'c21'])");
assertUpdate("CREATE TABLE system_tables_test3 (c30 timestamp, c31 varchar, c32 double, c33 bigint, c34 bigint) " +
        "WITH (temporal_column = 'c30', bucket_count = 40, bucketed_on = ARRAY ['c34', 'c33'])");
assertUpdate("CREATE TABLE system_tables_test4 (c40 timestamp, c41 varchar, c42 double, c43 bigint, c44 bigint) " +
        "WITH (temporal_column = 'c40', ordering = ARRAY['c41', 'c42'], distribution_name = 'test_distribution', bucket_count = 50, bucketed_on = ARRAY ['c43', 'c44'])");
assertUpdate("CREATE TABLE system_tables_test5 (c50 timestamp, c51 varchar, c52 double, c53 bigint, c54 bigint) " +
        "WITH (ordering = ARRAY['c51', 'c52'], distribution_name = 'test_distribution', bucket_count = 50, bucketed_on = ARRAY ['c53', 'c54'], organized = true)");
MaterializedResult actualResults = computeActual("SELECT * FROM system.tables");
assertEquals(actualResults.getTypes(), ImmutableList.builder()
        .add(VARCHAR)                // table_schema
        .add(VARCHAR)                // table_name
        .add(VARCHAR)                // temporal_column
        .add(new ArrayType(VARCHAR)) // ordering_columns
        .add(VARCHAR)                // distribution_name
        .add(BIGINT)                 // bucket_count
        .add(new ArrayType(VARCHAR)) // bucket_columns
        .add(BOOLEAN)                // organized
        .build());
Map<String, MaterializedRow> map = actualResults.getMaterializedRows().stream()
        .filter(row -> ((String) row.getField(1)).startsWith("system_tables_test"))
        .collect(toImmutableMap(row -> (String) row.getField(1), identity()));
assertEquals(map.size(), 6);
assertEquals(map.get("system_tables_test0").getFields(), asList("tpch", "system_tables_test0", null, null, null, null, null, Boolean.FALSE));
assertEquals(map.get("system_tables_test1").getFields(), asList("tpch", "system_tables_test1", "c10", null, null, null, null, Boolean.FALSE));
assertEquals(map.get("system_tables_test2").getFields(), asList("tpch", "system_tables_test2", "c20", ImmutableList.of("c22", "c21"), null, null, null, Boolean.FALSE));
assertEquals(map.get("system_tables_test3").getFields(), asList("tpch", "system_tables_test3", "c30", null, null, 40L, ImmutableList.of("c34", "c33"), Boolean.FALSE));
assertEquals(map.get("system_tables_test4").getFields(), asList("tpch", "system_tables_test4", "c40", ImmutableList.of("c41", "c42"), "test_distribution", 50L, ImmutableList.of("c43", "c44"), Boolean.FALSE));
assertEquals(map.get("system_tables_test5").getFields(), asList("tpch", "system_tables_test5", null, ImmutableList.of("c51", "c52"), "test_distribution", 50L, ImmutableList.of("c53", "c54"), Boolean.TRUE));
actualResults = computeActual("SELECT * FROM system.tables WHERE table_schema = 'tpch'");
long actualRowCount = actualResults.getMaterializedRows().stream()
        .filter(row -> ((String) row.getField(1)).startsWith("system_tables_test"))
        .count();
assertEquals(actualRowCount, 6);
actualResults = computeActual("SELECT * FROM system.tables WHERE table_name = 'system_tables_test3'");
assertEquals(actualResults.getMaterializedRows().size(), 1);
actualResults = computeActual("SELECT * FROM system.tables WHERE table_schema = 'tpch' and table_name = 'system_tables_test3'");
assertEquals(actualResults.getMaterializedRows().size(), 1);
actualResults = computeActual("SELECT distribution_name, bucket_count, bucketing_columns, ordering_columns, temporal_column, organized " +
        "FROM system.tables " +
        "WHERE table_schema = 'tpch' and table_name = 'system_tables_test3'");
assertEquals(actualResults.getTypes(), ImmutableList.of(VARCHAR, BIGINT, new ArrayType(VARCHAR), new ArrayType(VARCHAR), VARCHAR, BOOLEAN));
assertEquals(actualResults.getMaterializedRows().size(), 1);
assertUpdate("DROP TABLE system_tables_test0");
assertUpdate("DROP TABLE system_tables_test1");
assertUpdate("DROP TABLE system_tables_test2");
assertUpdate("DROP TABLE system_tables_test3");
assertUpdate("DROP TABLE system_tables_test4");
assertUpdate("DROP TABLE system_tables_test5");
assertEquals(computeActual("SELECT * FROM system.tables WHERE table_schema IN ('foo', 'bar')").getRowCount(), 0);
}
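The lookup-by-name pattern above is useful whenever a test needs to assert on individual rows of a metadata table. Here is the same idea factored into a helper, as a minimal sketch assuming a QueryRunner is in scope (the method name fetchTablesByName is hypothetical, not part of the test):

// Hypothetical helper: index system.tables rows by table_name (field 1),
// mirroring the stream pipeline in the test above
private static Map<String, MaterializedRow> fetchTablesByName(QueryRunner runner, String prefix)
{
    return runner.execute("SELECT * FROM system.tables").getMaterializedRows().stream()
            .filter(row -> ((String) row.getField(1)).startsWith(prefix))
            .collect(toImmutableMap(row -> (String) row.getField(1), identity()));
}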
Use of io.trino.spi.type.VarcharType.VARCHAR in project trino by trinodb.
Class PostgreSqlClient, method toColumnMapping:
@Override
public Optional<ColumnMapping> toColumnMapping(ConnectorSession session, Connection connection, JdbcTypeHandle typeHandle) {
String jdbcTypeName = typeHandle.getJdbcTypeName().orElseThrow(() -> new TrinoException(JDBC_ERROR, "Type name is missing: " + typeHandle));
Optional<ColumnMapping> mapping = getForcedMappingToVarchar(typeHandle);
if (mapping.isPresent()) {
return mapping;
}
switch (jdbcTypeName) {
case "money":
return Optional.of(moneyColumnMapping());
case "uuid":
return Optional.of(uuidColumnMapping());
case "jsonb":
case "json":
return Optional.of(jsonColumnMapping());
case "timestamptz":
// PostgreSQL's "timestamp with time zone" is reported as Types.TIMESTAMP rather than Types.TIMESTAMP_WITH_TIMEZONE
int decimalDigits = typeHandle.getRequiredDecimalDigits();
return Optional.of(timestampWithTimeZoneColumnMapping(decimalDigits));
case "hstore":
return Optional.of(hstoreColumnMapping(session));
}
switch (typeHandle.getJdbcType()) {
case Types.BIT:
return Optional.of(booleanColumnMapping());
case Types.SMALLINT:
return Optional.of(smallintColumnMapping());
case Types.INTEGER:
return Optional.of(integerColumnMapping());
case Types.BIGINT:
return Optional.of(bigintColumnMapping());
case Types.REAL:
return Optional.of(realColumnMapping());
case Types.DOUBLE:
return Optional.of(doubleColumnMapping());
case Types.NUMERIC:
{
int columnSize = typeHandle.getRequiredColumnSize();
int precision;
int decimalDigits = typeHandle.getDecimalDigits().orElse(0);
if (getDecimalRounding(session) == ALLOW_OVERFLOW) {
if (columnSize == PRECISION_OF_UNSPECIFIED_DECIMAL) {
// decimal type with unspecified scale - up to 131072 digits before the decimal point; up to 16383 digits after the decimal point
return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, getDecimalDefaultScale(session)), getDecimalRoundingMode(session)));
}
precision = columnSize;
if (precision > Decimals.MAX_PRECISION) {
int scale = min(decimalDigits, getDecimalDefaultScale(session));
return Optional.of(decimalColumnMapping(createDecimalType(Decimals.MAX_PRECISION, scale), getDecimalRoundingMode(session)));
}
}
// Map decimal(p, -s) (negative scale) to decimal(p+s, 0).
precision = columnSize + max(-decimalDigits, 0);
if (columnSize == PRECISION_OF_UNSPECIFIED_DECIMAL || precision > Decimals.MAX_PRECISION) {
break;
}
return Optional.of(decimalColumnMapping(createDecimalType(precision, max(decimalDigits, 0)), UNNECESSARY));
}
case Types.CHAR:
return Optional.of(charColumnMapping(typeHandle.getRequiredColumnSize()));
case Types.VARCHAR:
if (!jdbcTypeName.equals("varchar")) {
// This can be e.g. an ENUM
return Optional.of(typedVarcharColumnMapping(jdbcTypeName));
}
return Optional.of(varcharColumnMapping(typeHandle.getRequiredColumnSize()));
case Types.BINARY:
return Optional.of(varbinaryColumnMapping());
case Types.DATE:
return Optional.of(ColumnMapping.longMapping(
        DATE,
        (resultSet, index) -> LocalDate.parse(resultSet.getString(index), DATE_FORMATTER).toEpochDay(),
        dateWriteFunctionUsingLocalDate()));
case Types.TIME:
return Optional.of(timeColumnMapping(typeHandle.getRequiredDecimalDigits()));
case Types.TIMESTAMP:
TimestampType timestampType = createTimestampType(typeHandle.getRequiredDecimalDigits());
return Optional.of(ColumnMapping.longMapping(timestampType, timestampReadFunction(timestampType), PostgreSqlClient::shortTimestampWriteFunction));
case Types.ARRAY:
Optional<ColumnMapping> columnMapping = arrayToTrinoType(session, connection, typeHandle);
if (columnMapping.isPresent()) {
return columnMapping;
}
break;
}
if (getUnsupportedTypeHandling(session) == CONVERT_TO_VARCHAR) {
return mapToUnboundedVarchar(typeHandle);
}
return Optional.empty();
}
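The NUMERIC branch is the subtle part: PostgreSQL permits negative scale, which Trino's decimal type does not. A minimal sketch of just that normalization, assuming Trino's Decimals.MAX_PRECISION of 38 (the method itself is hypothetical, for illustration only):

// Hypothetical illustration of the "decimal(p, -s) -> decimal(p+s, 0)" rule above
static String normalizeDecimal(int columnSize, int decimalDigits)
{
    // PostgreSQL decimal(5, -2) stores values like 12300; Trino has no negative
    // scale, so widen the precision instead: decimal(5 + 2, 0) = decimal(7, 0)
    int precision = columnSize + Math.max(-decimalDigits, 0);
    int scale = Math.max(decimalDigits, 0);
    if (precision > 38) {
        return "unsupported without rounding"; // mirrors the break in the method above
    }
    return "decimal(" + precision + ", " + scale + ")";
}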
Use of io.trino.spi.type.VarcharType.VARCHAR in project trino by trinodb.
Class TestQueryTracker, method createQueryRunner:
@Override
protected QueryRunner createQueryRunner() throws Exception {
Session defaultSession = testSessionBuilder()
        .setCatalog("mock")
        .setSchema("default")
        .setSystemProperty(QUERY_MAX_PLANNING_TIME, "2s")
        .build();
DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(defaultSession).build();
queryRunner.installPlugin(new Plugin() {
@Override
public Iterable<ConnectorFactory> getConnectorFactories() {
return ImmutableList.of(MockConnectorFactory.builder()
        .withGetColumns(ignored -> ImmutableList.of(new ColumnMetadata("col", VARCHAR)))
        .withApplyFilter((ignored1, ignored2, ignored3) -> freeze())
        .build());
}
});
queryRunner.createCatalog("mock", "mock");
return queryRunner;
}
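Because applyFilter blocks in freeze(), any query that plans a filter over the mock table should trip the 2s query_max_planning_time limit. A hedged sketch of a test body that could exercise this runner, assuming the class extends AbstractTestQueryFramework; the table name t is arbitrary (the mock connector answers for any table) and the asserted message fragment is illustrative, not verified:

@Test
public void testQueryIsKilledWhenPlanningExceedsLimit()
{
    // Planning never finishes because applyFilter blocks in freeze(),
    // so query_max_planning_time (2s) should abort the query
    assertThatThrownBy(() -> getQueryRunner().execute("SELECT * FROM t WHERE col = 'abc'"))
            .hasMessageContaining("planning time limit");
}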
Use of io.trino.spi.type.VarcharType.VARCHAR in project trino by trinodb.
Class TestRemoveRedundantPredicateAboveTableScan, method doesNotFireOnNotFullyExtractedConjunct:
@Test
public void doesNotFireOnNotFullyExtractedConjunct() {
ColumnHandle columnHandle = new TpchColumnHandle("name", VARCHAR);
tester().assertThat(removeRedundantPredicateAboveTableScan)
        .on(p -> p.filter(
                expression("name LIKE 'LARGE PLATED %'"),
                p.tableScan(
                        nationTableHandle,
                        ImmutableList.of(p.symbol("name", VARCHAR)),
                        ImmutableMap.of(p.symbol("name", VARCHAR), columnHandle),
                        TupleDomain.fromFixedValues(ImmutableMap.of(
                                columnHandle,
                                NullableValue.of(VARCHAR, Slices.utf8Slice("value")))))))
        .doesNotFire();
}
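The rule declines to fire here because name LIKE 'LARGE PLATED %' cannot be fully expressed as a TupleDomain, so removing the predicate would change results. For contrast, this is the fixed-value domain the test attaches to the scan, built in isolation (every call here appears in the test above):

// The enforced constraint on the scan: name = 'value', as a fixed-value domain
ColumnHandle nameColumn = new TpchColumnHandle("name", VARCHAR);
TupleDomain<ColumnHandle> constraint = TupleDomain.fromFixedValues(
        ImmutableMap.of(nameColumn, NullableValue.of(VARCHAR, Slices.utf8Slice("value"))));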
Use of io.trino.spi.type.VarcharType.VARCHAR in project trino by trinodb.
Class AbstractTestHive, method prepareInvalidBuckets:
private void prepareInvalidBuckets(HiveStorageFormat storageFormat, SchemaTableName tableName) throws Exception {
createEmptyTable(
        tableName,
        storageFormat,
        ImmutableList.of(
                new Column("id", HIVE_LONG, Optional.empty()),
                new Column("name", HIVE_STRING, Optional.empty())),
        ImmutableList.of(),
        Optional.of(new HiveBucketProperty(ImmutableList.of("id"), BUCKETING_V1, 8, ImmutableList.of())));
MaterializedResult.Builder dataBuilder = MaterializedResult.resultBuilder(SESSION, BIGINT, VARCHAR);
for (long id = 0; id < 100; id++) {
dataBuilder.row(id, String.valueOf(id));
}
insertData(tableName, dataBuilder.build());
try (Transaction transaction = newTransaction()) {
Set<String> files = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName());
Path bucket2 = files.stream().map(Path::new).filter(path -> path.getName().startsWith("000002_0_")).collect(onlyElement());
Path bucket5 = files.stream().map(Path::new).filter(path -> path.getName().startsWith("000005_0_")).collect(onlyElement());
HdfsContext context = new HdfsContext(newSession());
FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, bucket2);
fileSystem.delete(bucket2, false);
fileSystem.rename(bucket5, bucket2);
}
}
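Renaming the bucket-5 file over the bucket-2 file leaves data whose computed bucket disagrees with the file name, which is exactly what bucket validation should catch. A simplified sketch of Hive bucketing-v1 assignment for a single bigint key, assuming Hive's standard long hash (a hand-rolled illustration, not Trino's actual implementation):

// Simplified bucketing-v1 assignment for one bigint key
// (Hive hashes a long as (int) ((value >>> 32) ^ value))
static int bucketV1(long id, int bucketCount)
{
    int hash = (int) ((id >>> 32) ^ id);
    return (hash & Integer.MAX_VALUE) % bucketCount;
}
// After the rename above, the file named 000002_0_* holds rows where
// bucketV1(id, 8) == 5, so reads that trust the file name see invalid bucketing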