Usage example of io.trino.tempto.query.QueryExecutionException from the trinodb/trino project.
Taken from class TestHiveStorageFormats, method testNestedFields.
/**
 * Verifies that nested fields (row, row-of-row, row-of-array) round-trip correctly for the
 * given storage format when written by the given engine (HIVE or TRINO) and read back by
 * Trino — including dereference in projections and in predicates. When the data was written
 * by Trino, additionally verifies that Hive can read it, tolerating a known Avro
 * incompatibility (see TODO below).
 *
 * @param format Hive storage format name (e.g. "ORC", "PARQUET", "AVRO")
 * @param writer engine used to perform the INSERT
 */
private void testNestedFields(String format, Engine writer) {
String tableName = "test_nested_fields_written_by_" + writer.name().toLowerCase(ENGLISH);
onTrino().executeQuery("DROP TABLE IF EXISTS " + tableName);
onTrino().executeQuery("CREATE TABLE " + tableName + " (" + " r row(a int), " + " rr row(r row(a int)), " + " ra row(a array(int)), " + " dummy varchar) WITH (format='" + format + "')");
switch(writer) {
case HIVE:
// Hive cannot INSERT ... VALUES with complex types; SELECT from a one-row helper table instead
ensureDummyExists();
writer.queryExecutor().executeQuery("INSERT INTO " + tableName + " SELECT " + "named_struct('a', 42), " + "named_struct('r', named_struct('a', 43)), " + "named_struct('a', array(11, 22, 33)), " + "'dummy value' " + "FROM dummy");
break;
case TRINO:
writer.queryExecutor().executeQuery("INSERT INTO " + tableName + " VALUES (" + "row(42), " + "row(row(43)), " + "row(ARRAY[11, 22, 33]), " + "'dummy value')");
break;
default:
throw new IllegalStateException("Unsupported writer: " + writer);
}
// read back full rows through Trino
assertThat(onTrino().executeQuery("SELECT * FROM " + tableName)).containsOnly(row(rowBuilder().addField("a", 42).build(), rowBuilder().addField("r", rowBuilder().addField("a", 43).build()).build(), rowBuilder().addField("a", List.of(11, 22, 33)).build(), "dummy value"));
// with dereference
assertThat(onTrino().executeQuery("SELECT r.a, rr.r.a, ra.a[2] FROM " + tableName)).containsOnly(row(42, 43, 22));
// with dereference in predicate
assertThat(onTrino().executeQuery("SELECT dummy FROM " + tableName + " WHERE r.a = 42 AND rr.r.a = 43 AND ra.a[2] = 22")).containsOnly(row("dummy value"));
// verify with Hive if data written by Trino
if (writer != Engine.HIVE) {
QueryResult queryResult = null;
try {
queryResult = onHive().executeQuery("SELECT * FROM " + tableName);
verify(queryResult != null);
} catch (QueryExecutionException e) {
if ("AVRO".equals(format)) {
// TODO (https://github.com/trinodb/trino/issues/9285) Some versions of Hive cannot read Avro nested structs written by Trino
Assertions.assertThat(e.getCause()).hasToString("java.sql.SQLException: java.io.IOException: org.apache.avro.AvroTypeException: Found default.record_1, expecting union");
} else {
throw e;
}
}
// queryResult stays null only when the known Avro failure above was hit and swallowed
if (queryResult != null) {
assertThat(queryResult).containsOnly(row("{\"a\":42}", "{\"r\":{\"a\":43}}", "{\"a\":[11,22,33]}", "dummy value"));
}
}
onTrino().executeQuery("DROP TABLE " + tableName);
}
Usage example of io.trino.tempto.query.QueryExecutionException from the trinodb/trino project.
Taken from class TestAvroSchemaUrl, method isOnHdp.
/**
 * Detects whether the Hive under test is an HDP distribution by probing the
 * HDP-specific {@code system:hdp.version} Hive variable.
 *
 * @return {@code true} when the variable exists and is non-empty; {@code false}
 *         when it is absent, empty, or the query fails
 */
private boolean isOnHdp() {
    try {
        Object versionCell = onHive().executeQuery("SET system:hdp.version").row(0).get(0);
        return !isNullOrEmpty((String) versionCell);
    }
    catch (QueryExecutionException ignored) {
        // the variable does not exist on non-HDP distributions, so the SET query fails
        return false;
    }
}
Usage example of io.trino.tempto.query.QueryExecutionException from the trinodb/trino project.
Taken from class TestTablePartitioningSelect, method testSelectPartitionedHiveTableDifferentFormats.
@Test
// Verifies that selecting a single valid partition succeeds, and that a full-table scan
// (which touches a partition containing invalid data) either returns the expected rows
// (with nulls for the bad partition) or fails with QueryExecutionException — both outcomes
// are acceptable here.
public void testSelectPartitionedHiveTableDifferentFormats() {
String tableNameInDatabase = tablesState.get(TABLE_NAME).getNameInDatabase();
String selectFromOnePartitionsSql = "SELECT * FROM " + tableNameInDatabase + " WHERE part_col = 2";
QueryResult onePartitionQueryResult = onTrino().executeQuery(selectFromOnePartitionsSql);
assertThat(onePartitionQueryResult).containsOnly(row(42, 2));
try {
// This query should fail or return null values for invalid partition data
assertThat(onTrino().executeQuery("SELECT * FROM " + tableNameInDatabase)).containsOnly(row(42, 2), row(null, 1));
} catch (QueryExecutionException expectedDueToInvalidPartitionData) {
// intentionally swallowed: failure on the invalid partition is an accepted outcome
}
}
Usage example of io.trino.tempto.query.QueryExecutionException from the trinodb/trino project.
Taken from class TestIcebergSparkCompatibility, method testTrinoSparkConcurrentInsert.
/**
* @see TestIcebergInsert#testIcebergConcurrentInsert()
*/
@Test(groups = { ICEBERG, PROFILE_SPECIFIC_TESTS }, timeOut = 60_000)
// Runs Trino and Spark INSERTs against the same Iceberg table concurrently, synchronized
// per round by a CyclicBarrier, and verifies no successful insert is lost. Trino inserts
// are allowed to fail on commit conflicts (skipped via `continue`); Spark inserts are
// expected to always succeed.
public void testTrinoSparkConcurrentInsert() throws Exception {
int insertsPerEngine = 7;
String baseTableName = "trino_spark_insert_concurrent_" + randomTableSuffix();
String trinoTableName = trinoTableName(baseTableName);
String sparkTableName = sparkTableName(baseTableName);
onTrino().executeQuery("CREATE TABLE " + trinoTableName + "(e varchar, a bigint)");
ExecutorService executor = Executors.newFixedThreadPool(2);
try {
CyclicBarrier barrier = new CyclicBarrier(2);
// capture executors once; onTrino()/onSpark() may not be safe to call from worker threads
QueryExecutor onTrino = onTrino();
QueryExecutor onSpark = onSpark();
List<Row> allInserted = executor.invokeAll(Stream.of(Engine.TRINO, Engine.SPARK).map(engine -> (Callable<List<Row>>) () -> {
List<Row> inserted = new ArrayList<>();
for (int i = 0; i < insertsPerEngine; i++) {
// align both engines so their INSERTs race within the same round
barrier.await(20, SECONDS);
String engineName = engine.name().toLowerCase(ENGLISH);
long value = i;
switch(engine) {
case TRINO:
try {
onTrino.executeQuery(format("INSERT INTO %s VALUES ('%s', %d)", trinoTableName, engineName, value));
} catch (QueryExecutionException queryExecutionException) {
// Trino may lose the commit race; skip recording this value — next loop iteration
continue;
}
break;
case SPARK:
onSpark.executeQuery(format("INSERT INTO %s VALUES ('%s', %d)", sparkTableName, engineName, value));
break;
default:
throw new UnsupportedOperationException("Unexpected engine: " + engine);
}
// only reached when the INSERT succeeded
inserted.add(row(engineName, value));
}
return inserted;
}).collect(toImmutableList())).stream().map(MoreFutures::getDone).flatMap(List::stream).collect(toImmutableList());
// At least one INSERT per round should succeed
Assertions.assertThat(allInserted).hasSizeBetween(insertsPerEngine, insertsPerEngine * 2);
// All Spark inserts should succeed (and not be obliterated)
assertThat(onTrino().executeQuery("SELECT count(*) FROM " + trinoTableName + " WHERE e = 'spark'")).containsOnly(row(insertsPerEngine));
assertThat(onTrino().executeQuery("SELECT * FROM " + trinoTableName)).containsOnly(allInserted);
onTrino().executeQuery("DROP TABLE " + trinoTableName);
} finally {
executor.shutdownNow();
}
}
Usage example of io.trino.tempto.query.QueryExecutionException from the trinodb/trino project.
Taken from class TestIcebergInsert, method testIcebergConcurrentInsert.
/**
* @see TestIcebergCreateTable#testCreateTable() See TestIcebergCreateTable for a non-concurrent INSERT test coverage.
* @see TestIcebergSparkCompatibility#testTrinoSparkConcurrentInsert()
*/
@Test(groups = { ICEBERG, STORAGE_FORMATS_DETAILED, HMS_ONLY }, timeOut = 60_000)
// Races several Trino threads inserting into the same Iceberg table, with each round
// synchronized by a CyclicBarrier. Individual INSERTs may fail on commit conflicts and
// are skipped; the test verifies that every value whose INSERT reported success is
// actually present in the table (no lost updates).
public void testIcebergConcurrentInsert() throws Exception {
int threads = 3;
int insertsPerThread = 7;
String tableName = "iceberg.default.test_insert_concurrent_" + randomTableSuffix();
onTrino().executeQuery("CREATE TABLE " + tableName + "(a bigint)");
ExecutorService executor = Executors.newFixedThreadPool(threads);
try {
CyclicBarrier barrier = new CyclicBarrier(threads);
// capture the executor once; onTrino() may not be safe to call from worker threads
QueryExecutor onTrino = onTrino();
List<Long> allInserted = executor.invokeAll(IntStream.range(0, threads).mapToObj(thread -> (Callable<List<Long>>) () -> {
List<Long> inserted = new ArrayList<>();
for (int i = 0; i < insertsPerThread; i++) {
// align all threads so their INSERTs race within the same round
barrier.await(20, SECONDS);
// each thread inserts a disjoint range of values, so successes are distinguishable
long value = i + (long) insertsPerThread * thread;
try {
onTrino.executeQuery("INSERT INTO " + tableName + " VALUES " + value);
} catch (QueryExecutionException queryExecutionException) {
// failed to insert
continue;
}
inserted.add(value);
}
return inserted;
}).collect(toImmutableList())).stream().map(MoreFutures::getDone).flatMap(List::stream).collect(toImmutableList());
// At least one INSERT per round should succeed
Assertions.assertThat(allInserted).hasSizeBetween(insertsPerThread, threads * insertsPerThread);
assertThat(onTrino().executeQuery("SELECT * FROM " + tableName)).containsOnly(allInserted.stream().map(QueryAssert.Row::row).toArray(QueryAssert.Row[]::new));
onTrino().executeQuery("DROP TABLE " + tableName);
} finally {
executor.shutdownNow();
}
}
Aggregations