Example use of io.trino.tempto.assertions.QueryAssert.Row in the trinodb/trino project:
class TestIcebergSparkCompatibility, method testTrinoReadsSparkPartitionedTable.
@Test(groups = { ICEBERG, PROFILE_SPECIFIC_TESTS }, dataProvider = "storageFormatsWithSpecVersion")
public void testTrinoReadsSparkPartitionedTable(StorageFormat storageFormat, int specVersion)
{
    String baseTableName = "test_trino_reads_spark_partitioned_table_" + storageFormat;
    String trinoTableName = trinoTableName(baseTableName);
    String sparkTableName = sparkTableName(baseTableName);

    // Create and populate, via Spark, an Iceberg table partitioned on a string and a varbinary column.
    onSpark().executeQuery("DROP TABLE IF EXISTS " + sparkTableName);
    onSpark().executeQuery(format(
            "CREATE TABLE %s (_string STRING, _varbinary BINARY, _bigint BIGINT) USING ICEBERG PARTITIONED BY (_string, _varbinary) TBLPROPERTIES ('write.format.default'='%s', 'format-version' = %s)",
            sparkTableName,
            storageFormat,
            specVersion));
    onSpark().executeQuery(format("INSERT INTO %s VALUES ('a', X'0ff102f0feff', 1001), ('b', X'0ff102f0fefe', 1002), ('c', X'0ff102fdfeff', 1003)", sparkTableName));

    // Filtering on the string partition column works in both engines
    Row expectedByString = row("a", new byte[] {15, -15, 2, -16, -2, -1}, 1001);
    String selectByString = "SELECT * FROM %s WHERE _string = 'a'";
    assertThat(onSpark().executeQuery(format(selectByString, sparkTableName))).containsOnly(expectedByString);
    assertThat(onTrino().executeQuery(format(selectByString, trinoTableName))).containsOnly(expectedByString);

    // Filtering on the varbinary partition column works in Trino...
    Row expectedByVarbinary = row("c", new byte[] {15, -15, 2, -3, -2, -1}, 1003);
    String selectByVarbinary = "SELECT * FROM %s WHERE _varbinary = X'0ff102fdfeff'";
    assertThat(onTrino().executeQuery(format(selectByVarbinary, trinoTableName))).containsOnly(expectedByVarbinary);
    // ...but for now this fails on Spark, see https://github.com/apache/iceberg/issues/2934
    assertQueryFailure(() -> onSpark().executeQuery(format(selectByVarbinary, sparkTableName)))
            .hasMessageContaining("Cannot convert bytes to SQL literal: java.nio.HeapByteBuffer[pos=0 lim=6 cap=6]");

    onSpark().executeQuery("DROP TABLE " + sparkTableName);
}
Example use of io.trino.tempto.assertions.QueryAssert.Row in the trinodb/trino project:
class TestIcebergSparkCompatibility, method testSparkReadingNestedTrinoData.
@Test(groups = { ICEBERG, PROFILE_SPECIFIC_TESTS }, dataProvider = "storageFormats")
public void testSparkReadingNestedTrinoData(StorageFormat storageFormat)
{
    String baseTableName = "test_spark_reading_nested_trino_data_" + storageFormat;
    String trinoTableName = trinoTableName(baseTableName);
    String sparkTableName = sparkTableName(baseTableName);

    // Table mixing maps, arrays and rows at several nesting depths, written via Trino
    onTrino().executeQuery(format("CREATE TABLE %s (\n"
            + " doc_id VARCHAR\n"
            + ", nested_map MAP(VARCHAR, ARRAY(ROW(sname VARCHAR, snumber INT)))\n"
            + ", nested_array ARRAY(MAP(VARCHAR, ARRAY(ROW(mname VARCHAR, mnumber INT))))\n"
            + ", nested_struct ROW(name VARCHAR, complicated ARRAY(MAP(VARCHAR, ARRAY(ROW(mname VARCHAR, mnumber INT))))))"
            + " WITH (format = '%s')", trinoTableName, storageFormat));
    onTrino().executeQuery(format("INSERT INTO %s SELECT"
            + " 'Doc213'"
            + ", map(array['s1'], array[array[row('ASName1', 201), row('ASName2', 202)]])"
            + ", array[map(array['m1'], array[array[row('MAS1Name1', 301), row('MAS1Name2', 302)]])"
            + " ,map(array['m2'], array[array[row('MAS2Name1', 401), row('MAS2Name2', 402)]])]"
            + ", row('S1'"
            + " ,array[map(array['m1'], array[array[row('SAMA1Name1', 301), row('SAMA1Name2', 302)]])"
            + " ,map(array['m2'], array[array[row('SAMA2Name1', 401), row('SAMA2Name2', 402)]])])", trinoTableName));

    Row expected = row("Doc213", "ASName2", 201, "MAS2Name1", 302, "SAMA1Name1", 402);

    // Trino uses 1-based array indexing
    assertThat(onTrino().executeQuery("SELECT"
            + " doc_id"
            + ", nested_map['s1'][2].sname"
            + ", nested_map['s1'][1].snumber"
            + ", nested_array[2]['m2'][1].mname"
            + ", nested_array[1]['m1'][2].mnumber"
            + ", nested_struct.complicated[1]['m1'][1].mname"
            + ", nested_struct.complicated[2]['m2'][2].mnumber"
            + " FROM " + trinoTableName))
            .containsOnly(expected);

    // Spark uses 0-based array indexing; the same elements must come back
    QueryResult sparkResult = onSpark().executeQuery("SELECT"
            + " doc_id"
            + ", nested_map['s1'][1].sname"
            + ", nested_map['s1'][0].snumber"
            + ", nested_array[1]['m2'][0].mname"
            + ", nested_array[0]['m1'][1].mnumber"
            + ", nested_struct.complicated[0]['m1'][0].mname"
            + ", nested_struct.complicated[1]['m2'][1].mnumber"
            + " FROM " + sparkTableName);
    assertThat(sparkResult).containsOnly(expected);

    onTrino().executeQuery("DROP TABLE " + trinoTableName);
}
Example use of io.trino.tempto.assertions.QueryAssert.Row in the trinodb/trino project:
class TestHiveStorageFormats, method assertResultEqualForLineitemTable.
/**
 * Runs the given query against both the given table and the TPCH {@code lineitem}
 * table (in the schema {@code TPCH_SCHEMA}), asserting that column types and row
 * contents (including order) are equal.
 *
 * @param query a query template with one {@code %s} placeholder for the table name
 * @param tableName fully qualified name of the table under test
 */
private static void assertResultEqualForLineitemTable(String query, String tableName)
{
    QueryResult reference = onTrino().executeQuery(format(query, "tpch." + TPCH_SCHEMA + ".lineitem"));
    List<Row> referenceRows = reference.rows().stream()
            .map(values -> row(values.toArray()))
            .collect(toImmutableList());
    QueryResult result = onTrino().executeQuery(format(query, tableName));
    assertThat(result)
            .hasColumns(reference.getColumnTypes())
            .containsExactlyInOrder(referenceRows);
}
Example use of io.trino.tempto.assertions.QueryAssert.Row in the trinodb/trino project:
class TestHiveStorageFormats, method assertStructTimestamps.
/**
 * Assertions for tables created by {@link #createStructTimestampTable(String, StorageFormat)}.
 *
 * <p>For every {@link HiveTimestampPrecision}, verifies that timestamps nested inside
 * containers (array, map, row, and an array-of-map-of-row combination) are read back
 * with the expected declared type and the expected value, both as VARCHAR and as native
 * timestamps. Soft assertions are used so that all precisions are checked even if one fails.
 *
 * @param tableName table to read from
 * @param data expected timestamp values with their write/read precisions; compared sorted by id
 */
private void assertStructTimestamps(String tableName, Collection<TimestampAndPrecision> data) {
SoftAssertions softly = new SoftAssertions();
for (HiveTimestampPrecision precision : HiveTimestampPrecision.values()) {
// Switch the session's timestamp precision before each round of checks
setTimestampPrecision(precision);
// Check that the correct types are read: each container's element type must reflect the session precision
String type = format("timestamp(%d)", precision.getPrecision());
softly.check(() -> assertThat(onTrino().executeQuery(format("SELECT" + " typeof(arr)," + " typeof(map)," + " typeof(row)," + " typeof(nested)" + " FROM %s" + " LIMIT 1", tableName))).as("timestamp container types").containsOnly(row(format("array(%s)", type), format("map(%1$s, %1$s)", type), format("row(col %s)", type), format("array(map(%1$s, row(col array(%1$s))))", type))));
// Check the values as varchar; map_entries(...)[1][1] is the entry's key, [1][2] its value
softly.check(() -> assertThat(onTrino().executeQuery(format("SELECT" + " id," + " CAST(arr[1] AS VARCHAR)," + // map key
" CAST(map_entries(map)[1][1] AS VARCHAR)," + // map value
" CAST(map_entries(map)[1][2] AS VARCHAR)," + " CAST(row.col AS VARCHAR)," + // nested map key
" CAST(map_entries(nested[1])[1][1] AS VARCHAR)," + // nested map value
" CAST(map_entries(nested[1])[1][2].col[1] AS VARCHAR)" + " FROM %s" + " ORDER BY id", tableName))).as("timestamp containers as varchar").containsExactlyInOrder(data.stream().sorted(comparingInt(TimestampAndPrecision::getId)).map(e -> new Row(Lists.asList(e.getId(), nCopies(6, e.getReadValue(precision)).toArray()))).collect(toList())));
// Check the values directly (as native timestamps rather than strings)
softly.check(() -> assertThat(onTrino().executeQuery(format("SELECT" + " id," + " arr[1]," + // map key
" map_entries(map)[1][1]," + // map value
" map_entries(map)[1][2]," + " row.col," + // nested map key
" map_entries(nested[1])[1][1]," + // nested map value
" map_entries(nested[1])[1][2].col[1]" + " FROM %s" + " ORDER BY id", tableName))).as("timestamp containers").containsExactlyInOrder(data.stream().sorted(comparingInt(TimestampAndPrecision::getId)).map(e -> new Row(Lists.asList(e.getId(), nCopies(6, Timestamp.valueOf(e.getReadValue(precision))).toArray()))).collect(toList())));
}
// Report all collected failures at once
softly.assertAll();
}
Aggregations