Use of io.trino.spi.type.TestingTypeManager in project trino by trinodb.
From class TestAvroSchemaConverter, method testUnsupportedUnionType:
@Test
public void testUnsupportedUnionType() {
assertThatThrownBy(() -> new AvroSchemaConverter(new TestingTypeManager(), IGNORE)
        .convertAvroSchema(SchemaBuilder.record(RECORD_NAME)
                .fields()
                .name("union_col").type().unionOf().nullType().and().floatType().and().longType().endUnion().noDefault()
                .endRecord()))
        .isInstanceOf(UnsupportedOperationException.class)
        .hasMessageStartingWith("Incompatible UNION type:");
assertThatThrownBy(() -> new AvroSchemaConverter(new TestingTypeManager(), IGNORE)
        .convertAvroSchema(SchemaBuilder.record(RECORD_NAME)
                .fields()
                .name("union_col").type().unionOf().nullType().and().fixed("fixed").size(5).and().stringType().endUnion().noDefault()
                .endRecord()))
        .isInstanceOf(UnsupportedOperationException.class)
        .hasMessageStartingWith("Incompatible UNION type:");
assertThatThrownBy(() -> new AvroSchemaConverter(new TestingTypeManager(), IGNORE)
        .convertAvroSchema(SchemaBuilder.record(RECORD_NAME)
                .fields()
                .name("union_col").type().unionOf().nullType().and().booleanType().and().intType().endUnion().noDefault()
                .endRecord()))
        .isInstanceOf(UnsupportedOperationException.class)
        .hasMessageStartingWith("Incompatible UNION type:");
}
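For contrast, a union that the converter does accept pairs null with types that widen to a single Trino type. A minimal sketch reusing the same builder calls (the DOUBLE mapping for a null/float/double union follows from testConvertSchema below):

// A nullable union of float and double widens cleanly to Trino DOUBLE,
// so convertAvroSchema succeeds instead of throwing.
Schema supported = SchemaBuilder.record(RECORD_NAME)
        .fields()
        .name("union_col").type().unionOf().nullType().and().floatType().and().doubleType().endUnion().noDefault()
        .endRecord();
List<Type> converted = new AvroSchemaConverter(new TestingTypeManager(), IGNORE).convertAvroSchema(supported);
assertEquals(converted, ImmutableList.of(DOUBLE));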
Use of io.trino.spi.type.TestingTypeManager in project trino by trinodb.
From class TestAvroSchemaConverter, method testConvertSchema:
@Test
public void testConvertSchema() {
Schema schema = SchemaBuilder.record(RECORD_NAME)
        .fields()
        .name("bool_col").type().booleanType().noDefault()
        .name("int_col").type().intType().noDefault()
        .name("long_col").type().longType().noDefault()
        .name("float_col").type().floatType().noDefault()
        .name("double_col").type().doubleType().noDefault()
        .name("string_col").type().stringType().noDefault()
        .name("enum_col").type().enumeration("colors").symbols("blue", "red", "yellow").noDefault()
        .name("bytes_col").type().bytesType().noDefault()
        .name("fixed_col").type().fixed("fixed").size(5).noDefault()
        .name("union_col").type().unionOf().nullType().and().floatType().and().doubleType().endUnion().noDefault()
        .name("union_col2").type().unionOf().nullType().and().intType().and().longType().endUnion().noDefault()
        .name("union_col3").type().unionOf().nullType().and().bytesType().and().type("fixed").endUnion().noDefault()
        .name("union_col4").type().unionOf().nullType().and().type("colors").and().stringType().endUnion().noDefault()
        .name("list_col").type().array().items().intType().noDefault()
        .name("map_col").type().map().values().intType().noDefault()
        .name("record_col").type().record("record_col")
                .fields()
                .name("nested_list").type().array().items().map().values().stringType().noDefault()
                .name("nested_map").type().map().values().array().items().stringType().noDefault()
                .endRecord()
                .noDefault()
        .endRecord();
AvroSchemaConverter avroSchemaConverter = new AvroSchemaConverter(new TestingTypeManager(), IGNORE);
List<Type> types = avroSchemaConverter.convertAvroSchema(schema);
List<Type> expected = ImmutableList.<Type>builder()
        .add(BOOLEAN)
        .add(INTEGER)
        .add(BIGINT)
        .add(REAL)
        .add(DOUBLE)
        .add(VARCHAR)
        .add(VARCHAR)
        .add(VARBINARY)
        .add(VARBINARY)
        .add(DOUBLE)
        .add(BIGINT)
        .add(VARBINARY)
        .add(VARCHAR)
        .add(new ArrayType(INTEGER))
        .add(createType(INTEGER))
        .add(RowType.from(ImmutableList.<RowType.Field>builder()
                .add(new RowType.Field(Optional.of("nested_list"), new ArrayType(createType(VARCHAR))))
                .add(new RowType.Field(Optional.of("nested_map"), createType(new ArrayType(VARCHAR))))
                .build()))
        .build();
assertEquals(types, expected);
}
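The createType helper is not part of this snippet. Since Avro map keys are always strings, it presumably wraps its argument in a VARCHAR-keyed Trino map type; a minimal sketch under that assumption (using io.trino.spi.type.MapType and TypeOperators):

// Assumed reconstruction of the createType helper referenced above:
// Avro maps always have string keys, so the Trino type is map(varchar, valueType).
private static Type createType(Type valueType) {
    return new MapType(VARCHAR, valueType, new TypeOperators());
}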
Use of io.trino.spi.type.TestingTypeManager in project trino by trinodb.
From class TestHiveSplit, method testJsonRoundTrip:
@Test
public void testJsonRoundTrip() {
ObjectMapperProvider objectMapperProvider = new ObjectMapperProvider();
objectMapperProvider.setJsonDeserializers(ImmutableMap.of(Type.class, new TypeDeserializer(new TestingTypeManager())));
JsonCodec<HiveSplit> codec = new JsonCodecFactory(objectMapperProvider).jsonCodec(HiveSplit.class);
Properties schema = new Properties();
schema.setProperty("foo", "bar");
schema.setProperty("bar", "baz");
ImmutableList<HivePartitionKey> partitionKeys = ImmutableList.of(new HivePartitionKey("a", "apple"), new HivePartitionKey("b", "42"));
ImmutableList<HostAddress> addresses = ImmutableList.of(HostAddress.fromParts("127.0.0.1", 44), HostAddress.fromParts("127.0.0.1", 45));
AcidInfo.Builder acidInfoBuilder = AcidInfo.builder(new Path("file:///data/fullacid"));
acidInfoBuilder.addDeleteDelta(new Path("file:///data/fullacid/delete_delta_0000004_0000004_0000"));
acidInfoBuilder.addDeleteDelta(new Path("file:///data/fullacid/delete_delta_0000007_0000007_0000"));
AcidInfo acidInfo = acidInfoBuilder.build().get();
HiveSplit expected = new HiveSplit(
        "db",
        "table",
        "partitionId",
        "path",
        42,
        87,
        88,
        Instant.now().toEpochMilli(),
        schema,
        partitionKeys,
        addresses,
        OptionalInt.empty(),
        0,
        true,
        TableToPartitionMapping.mapColumnsByIndex(ImmutableMap.of(1, new HiveTypeName("string"))),
        Optional.of(new HiveSplit.BucketConversion(
                BUCKETING_V1,
                32,
                16,
                ImmutableList.of(createBaseColumn("col", 5, HIVE_LONG, BIGINT, ColumnType.REGULAR, Optional.of("comment"))))),
        Optional.empty(),
        false,
        Optional.of(acidInfo),
        555534,
        // some non-standard value
        SplitWeight.fromProportion(2.0));
String json = codec.toJson(expected);
HiveSplit actual = codec.fromJson(json);
assertEquals(actual.getDatabase(), expected.getDatabase());
assertEquals(actual.getTable(), expected.getTable());
assertEquals(actual.getPartitionName(), expected.getPartitionName());
assertEquals(actual.getPath(), expected.getPath());
assertEquals(actual.getStart(), expected.getStart());
assertEquals(actual.getLength(), expected.getLength());
assertEquals(actual.getEstimatedFileSize(), expected.getEstimatedFileSize());
assertEquals(actual.getSchema(), expected.getSchema());
assertEquals(actual.getPartitionKeys(), expected.getPartitionKeys());
assertEquals(actual.getAddresses(), expected.getAddresses());
assertEquals(actual.getTableToPartitionMapping().getPartitionColumnCoercions(), expected.getTableToPartitionMapping().getPartitionColumnCoercions());
assertEquals(actual.getTableToPartitionMapping().getTableToPartitionColumns(), expected.getTableToPartitionMapping().getTableToPartitionColumns());
assertEquals(actual.getBucketConversion(), expected.getBucketConversion());
assertEquals(actual.isForceLocalScheduling(), expected.isForceLocalScheduling());
assertEquals(actual.isS3SelectPushdownEnabled(), expected.isS3SelectPushdownEnabled());
assertEquals(actual.getAcidInfo().get(), expected.getAcidInfo().get());
assertEquals(actual.getSplitNumber(), expected.getSplitNumber());
assertEquals(actual.getSplitWeight(), expected.getSplitWeight());
}
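The getter-by-getter comparison suggests HiveSplit does not override equals; the serialization itself is the standard airlift JsonCodec round trip, illustrated minimally below with a trivial payload in place of a HiveSplit (names illustrative):

// Minimal illustration of the toJson/fromJson round-trip pattern used above.
JsonCodec<Map<String, String>> mapCodec = JsonCodec.mapJsonCodec(String.class, String.class);
Map<String, String> original = ImmutableMap.of("foo", "bar");
assertEquals(mapCodec.fromJson(mapCodec.toJson(original)), original);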
Use of io.trino.spi.type.TestingTypeManager in project trino by trinodb.
From class TestIcebergOrcMetricsCollection, method createQueryRunner:
@Override
protected QueryRunner createQueryRunner() throws Exception {
Session session = testSessionBuilder()
        .setCatalog("iceberg")
        .setSchema("test_schema")
        .setSystemProperty(TASK_CONCURRENCY, "1")
        .setSystemProperty(TASK_WRITER_COUNT, "1")
        .setSystemProperty(MAX_DRIVERS_PER_TASK, "1")
        .setCatalogSessionProperty("iceberg", "orc_string_statistics_limit", Integer.MAX_VALUE + "B")
        .build();
DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(session).setNodeCount(1).build();
File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
HdfsConfig hdfsConfig = new HdfsConfig();
HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
HiveMetastore metastore = new FileHiveMetastore(
        new NodeVersion("test_version"),
        hdfsEnvironment,
        new MetastoreConfig(),
        new FileHiveMetastoreConfig()
                .setCatalogDirectory(baseDir.toURI().toString())
                .setMetastoreUser("test"));
tableOperationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
trinoCatalog = new TrinoHiveCatalog(
        new CatalogName("catalog"),
        memoizeMetastore(metastore, 1000),
        hdfsEnvironment,
        new TestingTypeManager(),
        tableOperationsProvider,
        "trino-version",
        false,
        false,
        false);
queryRunner.installPlugin(new TestingIcebergPlugin(Optional.of(metastore), Optional.empty(), EMPTY_MODULE));
queryRunner.createCatalog("iceberg", "iceberg");
queryRunner.installPlugin(new TpchPlugin());
queryRunner.createCatalog("tpch", "tpch");
queryRunner.execute("CREATE SCHEMA test_schema");
return queryRunner;
}
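Once built, the runner accepts SQL directly; an illustrative usage under the session configured above (the table name is hypothetical, not taken from the test class):

// Create an ORC-backed Iceberg table from TPC-H data, then query it.
queryRunner.execute("CREATE TABLE test_schema.orders WITH (format = 'ORC') AS SELECT * FROM tpch.tiny.orders");
MaterializedResult result = queryRunner.execute("SELECT count(*) FROM test_schema.orders");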
Use of io.trino.spi.type.TestingTypeManager in project trino by trinodb.
From class TestIcebergMergeAppend, method createQueryRunner:
@Override
protected QueryRunner createQueryRunner() throws Exception {
DistributedQueryRunner queryRunner = IcebergQueryRunner.createIcebergQueryRunner();
HdfsConfig hdfsConfig = new HdfsConfig();
HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hdfsConfig), ImmutableSet.of());
HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hdfsConfig, new NoHdfsAuthentication());
File baseDir = queryRunner.getCoordinator().getBaseDataDir().resolve("iceberg_data").toFile();
HiveMetastore metastore = new FileHiveMetastore(
        new NodeVersion("testversion"),
        hdfsEnvironment,
        new MetastoreConfig(),
        new FileHiveMetastoreConfig()
                .setCatalogDirectory(baseDir.toURI().toString())
                .setMetastoreUser("test"));
tableOperationsProvider = new FileMetastoreTableOperationsProvider(new HdfsFileIoProvider(hdfsEnvironment));
trinoCatalog = new TrinoHiveCatalog(
        new CatalogName("catalog"),
        memoizeMetastore(metastore, 1000),
        hdfsEnvironment,
        new TestingTypeManager(),
        tableOperationsProvider,
        "trino-version",
        false,
        false,
        false);
return queryRunner;
}
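The trinoCatalog and tableOperationsProvider fields initialized here let a test load the backing Iceberg Table and assert on the snapshot an INSERT produces. A hedged sketch; the IcebergUtil.loadIcebergTable call, SESSION constant, and table name are assumptions for illustration, not taken from the test class:

// Sketch: an INSERT on an Iceberg table should register an "append" snapshot.
queryRunner.execute("CREATE TABLE table_to_insert (value integer)");
queryRunner.execute("INSERT INTO table_to_insert VALUES 1");
Table table = IcebergUtil.loadIcebergTable(trinoCatalog, tableOperationsProvider, SESSION,
        new SchemaTableName("tpch", "table_to_insert"));
assertThat(table.currentSnapshot().operation()).isEqualTo("append");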