Use of io.trino.tempto.query.QueryResult in the trinodb/trino project.
Example from class TestKafkaReadsSmokeTest, method testSelectSimpleKeyAndValue.
@Test(groups = { KAFKA, PROFILE_SPECIFIC_TESTS })
@Requires(SimpleKeyAndValueTable.class)
public void testSelectSimpleKeyAndValue() {
    // Query both the key columns and the value columns of the Kafka-backed table through Trino.
    String selectSql = format(
            "select varchar_key, bigint_key, varchar_value, bigint_value from %s.%s.%s",
            KAFKA_CATALOG, SCHEMA_NAME, SIMPLE_KEY_AND_VALUE_TABLE_NAME);
    QueryResult result = onTrino().executeQuery(selectSql);
    // The fixture table is expected to contain exactly these two rows, in any order.
    assertThat(result).containsOnly(
            row("jasio", 1, "ania", 2),
            row("piotr", 3, "kasia", 4));
}
Use of io.trino.tempto.query.QueryResult in the trinodb/trino project.
Example from class TestKuduConnectoKerberosSmokeTest, method kerberosAuthTicketExpiryTest.
@Test(groups = { KUDU, PROFILE_SPECIFIC_TESTS })
public void kerberosAuthTicketExpiryTest() throws InterruptedException {
    // Random suffix keeps concurrent runs from colliding on the same Kudu table name.
    String targetTable = "kudu.default.nation_" + UUID.randomUUID().toString().replace("-", "");
    String sourceTable = "tpch.tiny.nation";
    // Sanity-check the source before copying it into Kudu.
    assertThat(onTrino().executeQuery(format("SELECT count(*) from %s", sourceTable))).containsExactlyInOrder(row(25));
    QueryResult ctasResult = onTrino().executeQuery(format("CREATE TABLE %s AS SELECT * FROM %s", targetTable, sourceTable));
    try {
        assertThat(ctasResult).updatedRowsCountIsEqualTo(25);
        assertThat(onTrino().executeQuery(format("SELECT count(*) FROM %s", targetTable))).containsExactlyInOrder(row(25));
        // Kerberos tickets are configured to expire after 60 seconds, this should expire the ticket
        Thread.sleep(70_000L);
        // A query after expiry verifies the connector re-authenticates transparently.
        assertThat(onTrino().executeQuery(format("SELECT count(*) FROM %s", targetTable))).containsExactlyInOrder(row(25));
    }
    finally {
        // Always clean up the Kudu table, even when an assertion above fails.
        onTrino().executeQuery(format("DROP TABLE %s", targetTable));
    }
}
Use of io.trino.tempto.query.QueryResult in the trinodb/trino project.
Example from class TestCreateTableAsSelect, method testCreateTableAsSelect.
@Test(groups = { MYSQL, PROFILE_SPECIFIC_TESTS })
public void testCreateTableAsSelect() {
    // CTAS from the TPCH nation table into MySQL; the result's single row reports
    // how many rows were copied (the nation table has 25 rows).
    QueryResult result = onTrino().executeQuery(format("CREATE TABLE mysql.%s AS SELECT * FROM tpch.tiny.nation", TABLE_NAME));
    assertThat(result).containsOnly(row(25));
}
Use of io.trino.tempto.query.QueryResult in the trinodb/trino project.
Example from class TestIcebergSparkCompatibility, method testTrinoReadingMigratedNestedData.
@Test(groups = { ICEBERG, PROFILE_SPECIFIC_TESTS }, dataProvider = "storageFormats")
public void testTrinoReadingMigratedNestedData(StorageFormat storageFormat) {
// Verifies that a Hive table with deeply nested types (map of array-of-struct, array of
// map, nested struct) remains readable by both Spark and Trino after being migrated to
// Iceberg via Spark's system.migrate procedure.
String baseTableName = "test_trino_reading_migrated_nested_data_" + randomTableSuffix();
String defaultCatalogTableName = sparkDefaultCatalogTableName(baseTableName);
// Non-Iceberg (plain %s-format) Spark table definition with nested column types.
String sparkTableDefinition = "" + "CREATE TABLE %s (\n" + " doc_id STRING\n" + ", nested_map MAP<STRING, ARRAY<STRUCT<sName: STRING, sNumber: INT>>>\n" + ", nested_array ARRAY<MAP<STRING, ARRAY<STRUCT<mName: STRING, mNumber: INT>>>>\n" + ", nested_struct STRUCT<id:INT, name:STRING, address:STRUCT<street_number:INT, street_name:STRING>>)\n" + " USING %s";
onSpark().executeQuery(format(sparkTableDefinition, defaultCatalogTableName, storageFormat.name()));
// Single row exercising every nested level of the schema above.
String insert = "" + "INSERT INTO TABLE %s SELECT" + " 'Doc213'" + ", map('s1', array(named_struct('sName', 'ASName1', 'sNumber', 201), named_struct('sName', 'ASName2', 'sNumber', 202)))" + ", array(map('m1', array(named_struct('mName', 'MAS1Name1', 'mNumber', 301), named_struct('mName', 'MAS1Name2', 'mNumber', 302)))" + " ,map('m2', array(named_struct('mName', 'MAS2Name1', 'mNumber', 401), named_struct('mName', 'MAS2Name2', 'mNumber', 402))))" + ", named_struct('id', 1, 'name', 'P. Sherman', 'address', named_struct('street_number', 42, 'street_name', 'Wallaby Way'))";
onSpark().executeQuery(format(insert, defaultCatalogTableName));
// Convert the table in place to Iceberg format.
onSpark().executeQuery(format("CALL system.migrate('%s')", defaultCatalogTableName));
String sparkTableName = sparkTableName(baseTableName);
// The one row both engines are expected to project from the nested data below.
Row row = row("Doc213", "ASName2", 201, "MAS2Name1", 302, "P. Sherman", 42, "Wallaby Way");
// Spark array subscripts are 0-based (compare with the 1-based Trino query below,
// which targets the same logical elements and expects the identical row).
String sparkSelect = "SELECT" + " doc_id" + ", nested_map['s1'][1].sName" + ", nested_map['s1'][0].sNumber" + ", nested_array[1]['m2'][0].mName" + ", nested_array[0]['m1'][1].mNumber" + ", nested_struct.name" + ", nested_struct.address.street_number" + ", nested_struct.address.street_name" + " FROM ";
QueryResult sparkResult = onSpark().executeQuery(sparkSelect + sparkTableName);
// The Spark behavior when the default name mapping does not exist is not consistent
assertThat(sparkResult).containsOnly(row);
// Trino array subscripts are 1-based; indices here are the Spark indices shifted by one.
String trinoSelect = "SELECT" + " doc_id" + ", nested_map['s1'][2].sName" + ", nested_map['s1'][1].sNumber" + ", nested_array[2]['m2'][1].mName" + ", nested_array[1]['m1'][2].mNumber" + ", nested_struct.name" + ", nested_struct.address.street_number" + ", nested_struct.address.street_name" + " FROM ";
String trinoTableName = trinoTableName(baseTableName);
QueryResult trinoResult = onTrino().executeQuery(trinoSelect + trinoTableName);
assertThat(trinoResult).containsOnly(row);
// After removing the name mapping, columns from migrated files should be null since they are missing the Iceberg Field IDs
onSpark().executeQuery(format("ALTER TABLE %s UNSET TBLPROPERTIES ('schema.name-mapping.default')", sparkTableName));
assertThat(onTrino().executeQuery(trinoSelect + trinoTableName)).containsOnly(row(null, null, null, null, null, null, null, null));
assertThat(onTrino().executeQuery("SELECT * FROM " + trinoTableName)).containsOnly(row(null, null, null, null));
assertThat(onTrino().executeQuery("SELECT nested_struct.address.street_number, nested_struct.address.street_name FROM " + trinoTableName)).containsOnly(row(null, null));
}
Use of io.trino.tempto.query.QueryResult in the trinodb/trino project.
Example from class TestInsert, method testInsertMin.
@Test(groups = { SQL_SERVER, PROFILE_SPECIFIC_TESTS })
public void testInsertMin() {
    // Insert the MIN_VALUE of each numeric Java type (plus char/varchar/date samples)
    // through Trino into the SQL Server-backed table.
    String insertSql = format(
            "INSERT INTO %s.%s values (BIGINT '%s', SMALLINT '%s', INTEGER '%s', DOUBLE '%s', CHAR 'a ', 'aa', DOUBLE '%s', DATE '%s')",
            SQLSERVER, INSERT_TABLE_NAME,
            Long.MIN_VALUE, Short.MIN_VALUE, Integer.MIN_VALUE, Double.MIN_VALUE,
            Double.MIN_VALUE, Date.valueOf("1970-01-01"));
    onTrino().executeQuery(insertSql);
    // Read back directly through the SQL Server connection to confirm the values
    // round-tripped without loss or truncation.
    String selectSql = format("SELECT * FROM %s.%s", MASTER, INSERT_TABLE_NAME);
    QueryResult result = onSqlServer().executeQuery(selectSql);
    assertThat(result).contains(row(Long.MIN_VALUE, Short.MIN_VALUE, Integer.MIN_VALUE, Double.MIN_VALUE, "a ", "aa", Double.MIN_VALUE, Date.valueOf("1970-01-01")));
}
Aggregations