Use of io.trino.tempto.query.QueryExecutor in the project trino by trinodb.
From the class TestSqlStandardAccessControlChecks, the method testAccessControlSetHiveViewAuthorization.
@Test(groups = { AUTHORIZATION, PROFILE_SPECIFIC_TESTS })
public void testAccessControlSetHiveViewAuthorization() {
    // Create a backing table and a view over it directly through Hive.
    onHive().executeQuery("CREATE TABLE test_hive_table (col1 int)");
    onHive().executeQuery("CREATE VIEW test_hive_view AS SELECT * FROM test_hive_table");
    QueryExecutor adminExecutor = connectToTrino("hdfs@presto");
    // Before ownership is granted, bob must be denied both SET AUTHORIZATION and DROP.
    assertQueryFailure(() -> bobExecutor.executeQuery("ALTER VIEW test_hive_view SET AUTHORIZATION bob"))
            .hasMessageContaining("Access Denied: Cannot set authorization for view default.test_hive_view to USER bob");
    assertQueryFailure(() -> bobExecutor.executeQuery("DROP VIEW test_hive_view"))
            .hasMessageContaining("Access Denied: Cannot drop view default.test_hive_view");
    // The privileged hdfs user transfers ownership to bob, after which bob can drop the view.
    adminExecutor.executeQuery("ALTER VIEW test_hive_view SET AUTHORIZATION bob");
    bobExecutor.executeQuery("DROP VIEW test_hive_view");
    // Clean up the backing table through Hive.
    onHive().executeQuery("DROP TABLE test_hive_table");
}
Use of io.trino.tempto.query.QueryExecutor in the project trino by trinodb.
From the class TestIcebergSparkCompatibility, the method testTrinoSparkConcurrentInsert.
/**
 * Runs concurrent single-row INSERTs into the same Iceberg table from Trino and Spark
 * and verifies that successfully reported inserts are never lost to commit conflicts.
 *
 * @see TestIcebergInsert#testIcebergConcurrentInsert()
 */
@Test(groups = { ICEBERG, PROFILE_SPECIFIC_TESTS }, timeOut = 60_000)
public void testTrinoSparkConcurrentInsert() throws Exception {
int insertsPerEngine = 7;
String baseTableName = "trino_spark_insert_concurrent_" + randomTableSuffix();
// Same underlying table, addressed through each engine's naming scheme.
String trinoTableName = trinoTableName(baseTableName);
String sparkTableName = sparkTableName(baseTableName);
onTrino().executeQuery("CREATE TABLE " + trinoTableName + "(e varchar, a bigint)");
// One worker thread per engine.
ExecutorService executor = Executors.newFixedThreadPool(2);
try {
// Aligns the two engines at the start of every round so their commits race.
CyclicBarrier barrier = new CyclicBarrier(2);
// Executors captured up front on the test thread before handing them to workers.
QueryExecutor onTrino = onTrino();
QueryExecutor onSpark = onSpark();
// invokeAll blocks until both callables finish; each returns the rows it committed.
List<Row> allInserted = executor.invokeAll(Stream.of(Engine.TRINO, Engine.SPARK).map(engine -> (Callable<List<Row>>) () -> {
List<Row> inserted = new ArrayList<>();
for (int i = 0; i < insertsPerEngine; i++) {
// Wait for the peer engine before each insert round (bounded so a stuck peer fails fast).
barrier.await(20, SECONDS);
String engineName = engine.name().toLowerCase(ENGLISH);
long value = i;
switch(engine) {
case TRINO:
try {
onTrino.executeQuery(format("INSERT INTO %s VALUES ('%s', %d)", trinoTableName, engineName, value));
} catch (QueryExecutionException queryExecutionException) {
// A Trino insert losing the commit race is acceptable: skip recording it and go to the
// next loop iteration
continue;
}
break;
case SPARK:
// No try/catch here: a failed Spark insert is a test failure, not a tolerated conflict.
onSpark.executeQuery(format("INSERT INTO %s VALUES ('%s', %d)", sparkTableName, engineName, value));
break;
default:
throw new UnsupportedOperationException("Unexpected engine: " + engine);
}
// Record only inserts that actually committed.
inserted.add(row(engineName, value));
}
return inserted;
// getDone is safe: invokeAll already waited for completion.
}).collect(toImmutableList())).stream().map(MoreFutures::getDone).flatMap(List::stream).collect(toImmutableList());
// At least one INSERT per round should succeed
Assertions.assertThat(allInserted).hasSizeBetween(insertsPerEngine, insertsPerEngine * 2);
// All Spark inserts should succeed (and not be obliterated)
assertThat(onTrino().executeQuery("SELECT count(*) FROM " + trinoTableName + " WHERE e = 'spark'")).containsOnly(row(insertsPerEngine));
// Table contents must match exactly the inserts that reported success.
assertThat(onTrino().executeQuery("SELECT * FROM " + trinoTableName)).containsOnly(allInserted);
onTrino().executeQuery("DROP TABLE " + trinoTableName);
} finally {
executor.shutdownNow();
}
}
Use of io.trino.tempto.query.QueryExecutor in the project trino by trinodb.
From the class AbstractTestHiveViews, the method testSelectFromHiveViewWithoutDefaultCatalogAndSchema.
@Test(groups = HIVE_VIEWS)
public void testSelectFromHiveViewWithoutDefaultCatalogAndSchema() {
    // Recreate the Hive view from scratch so the test is repeatable.
    onHive().executeQuery("DROP VIEW IF EXISTS no_catalog_schema_view");
    onHive().executeQuery("CREATE VIEW no_catalog_schema_view AS SELECT * FROM nation WHERE n_nationkey = 1");
    // This connection profile sets neither a default catalog nor a default schema.
    QueryExecutor noDefaultsExecutor = connectToTrino("presto_no_default_catalog");
    // An unqualified view name cannot be resolved without a session schema...
    assertQueryFailure(() -> noDefaultsExecutor.executeQuery("SELECT count(*) FROM no_catalog_schema_view"))
            .hasMessageMatching(".*Schema must be specified when session schema is not set.*");
    // ...but the fully qualified name resolves and returns the single matching row.
    assertThat(noDefaultsExecutor.executeQuery("SELECT count(*) FROM hive.default.no_catalog_schema_view"))
            .containsOnly(row(1L));
}
Use of io.trino.tempto.query.QueryExecutor in the project trino by trinodb.
From the class TestHiveViewsLegacy, the method connectToTrino.
@Override
protected QueryExecutor connectToTrino(String catalog) {
    // Same connection as the parent class, but with legacy Hive view translation
    // enabled for the session so all queries run through the legacy code path.
    QueryExecutor legacyViewExecutor = super.connectToTrino(catalog);
    legacyViewExecutor.executeQuery("SET SESSION hive.legacy_hive_view_translation = true");
    return legacyViewExecutor;
}
Use of io.trino.tempto.query.QueryExecutor in the project trino by trinodb.
From the class TestIcebergInsert, the method testIcebergConcurrentInsert.
/**
 * Runs many concurrent single-row INSERTs into one Iceberg table from several
 * threads and checks that every insert reported as successful is present.
 *
 * @see TestIcebergCreateTable#testCreateTable() See TestIcebergCreateTable for a non-concurrent INSERT test coverage.
 * @see TestIcebergSparkCompatibility#testTrinoSparkConcurrentInsert()
 */
@Test(groups = { ICEBERG, STORAGE_FORMATS_DETAILED, HMS_ONLY }, timeOut = 60_000)
public void testIcebergConcurrentInsert() throws Exception {
// Number of concurrently inserting worker threads.
int threads = 3;
int insertsPerThread = 7;
String tableName = "iceberg.default.test_insert_concurrent_" + randomTableSuffix();
onTrino().executeQuery("CREATE TABLE " + tableName + "(a bigint)");
ExecutorService executor = Executors.newFixedThreadPool(threads);
try {
// Aligns all threads at the start of every insert round to maximize commit conflicts.
CyclicBarrier barrier = new CyclicBarrier(threads);
// Executor captured on the test thread before handing it to the workers.
QueryExecutor onTrino = onTrino();
// invokeAll blocks until all callables finish; each returns the values it committed.
List<Long> allInserted = executor.invokeAll(IntStream.range(0, threads).mapToObj(thread -> (Callable<List<Long>>) () -> {
List<Long> inserted = new ArrayList<>();
for (int i = 0; i < insertsPerThread; i++) {
// Bounded wait so a stuck sibling thread fails the test quickly.
barrier.await(20, SECONDS);
// Each thread inserts a disjoint value range, so rows are globally unique.
long value = i + (long) insertsPerThread * thread;
try {
onTrino.executeQuery("INSERT INTO " + tableName + " VALUES " + value);
} catch (QueryExecutionException queryExecutionException) {
// failed to insert — losing a commit race is acceptable; the value is simply not recorded
continue;
}
inserted.add(value);
}
return inserted;
// getDone is safe: invokeAll already waited for completion.
}).collect(toImmutableList())).stream().map(MoreFutures::getDone).flatMap(List::stream).collect(toImmutableList());
// At least one INSERT per round should succeed
Assertions.assertThat(allInserted).hasSizeBetween(insertsPerThread, threads * insertsPerThread);
// Table contents must exactly match the inserts that reported success.
assertThat(onTrino().executeQuery("SELECT * FROM " + tableName)).containsOnly(allInserted.stream().map(QueryAssert.Row::row).toArray(QueryAssert.Row[]::new));
onTrino().executeQuery("DROP TABLE " + tableName);
} finally {
executor.shutdownNow();
}
}
Aggregations