Example usage of org.intellij.lang.annotations.Language in the Presto (prestodb) project — class LocalQueryRunner, method executeInternal:
// Executes the given SQL entirely inside this process: plans the query, builds a single
// testing task context, drives all operators to completion on the calling thread, and
// returns the materialized rows together with the query plan.
// Holds the runner's read lock for the whole execution so exclusive operations
// (writers on the same lock, e.g. shutdown/reconfiguration) cannot run concurrently.
private MaterializedResultWithPlan executeInternal(Session session, @Language("SQL") String sql, WarningCollector warningCollector) {
lock.readLock().lock();
try (Closer closer = Closer.create()) {
// The output column types are only known once the output operator is created, so the
// result builder is created lazily; compareAndSet guarantees it is initialized at most once.
AtomicReference<MaterializedResult.Builder> builder = new AtomicReference<>();
PageConsumerOutputFactory outputFactory = new PageConsumerOutputFactory(types -> {
builder.compareAndSet(null, MaterializedResult.resultBuilder(session, types));
return builder.get()::page;
});
Plan plan = createPlan(session, sql, warningCollector);
// Task context carries the node/session spill and memory limits for this one task.
TaskContext taskContext = TestingTaskContext.builder(notificationExecutor, yieldExecutor, session).setMaxSpillSize(nodeSpillConfig.getMaxSpillPerNode()).setQueryMaxSpillSize(nodeSpillConfig.getQueryMaxSpillPerNode()).setQueryMaxTotalMemory(getQueryMaxTotalMemoryPerNode(session)).setTaskPlan(plan.getRoot()).build();
taskContext.getQueryContext().setVerboseExceededMemoryLimitErrorsEnabled(isVerboseExceededMemoryLimitErrorsEnabled(session));
taskContext.getQueryContext().setHeapDumpOnExceededMemoryLimitEnabled(isHeapDumpOnExceededMemoryLimitEnabled(session));
// Heap-dump file name combines query id and stage id so concurrent queries do not collide.
String heapDumpFilePath = Paths.get(getHeapDumpFileDirectory(session), format("%s_%s.hprof", session.getQueryId().getId(), taskContext.getTaskId().getStageExecutionId().getStageId().getId())).toString();
taskContext.getQueryContext().setHeapDumpFilePath(heapDumpFilePath);
List<Driver> drivers = createDrivers(session, plan, outputFactory, taskContext);
// Register every driver with the closer so all are closed even if execution throws.
drivers.forEach(closer::register);
boolean done = false;
// Round-robin over the drivers until a full pass makes no progress (all finished).
while (!done) {
boolean processed = false;
for (Driver driver : drivers) {
if (alwaysRevokeMemory) {
// Test knob: eagerly request revocation of any revocable memory to exercise spill paths.
driver.getDriverContext().getOperatorContexts().stream().filter(operatorContext -> operatorContext.getOperatorStats().getRevocableMemoryReservation().getValue() > 0).forEach(OperatorContext::requestMemoryRevoking);
}
if (!driver.isFinished()) {
driver.process();
processed = true;
}
}
done = !processed;
}
verify(builder.get() != null, "Output operator was not created");
return new MaterializedResultWithPlan(builder.get().build(), plan);
} catch (IOException e) {
throw new UncheckedIOException(e);
} finally {
lock.readLock().unlock();
}
}
Example usage of org.intellij.lang.annotations.Language in the Presto (prestodb) project — class AccumuloQueryRunner, method copyTable:
// Copies one TPCH table into the target catalog/schema via CTAS ("CREATE TABLE ... AS SELECT"),
// choosing per-table Accumulo index columns, and logs the imported row count and elapsed time.
//
// Tables whose TPCH primary key is composite (lineitem, partsupp) get a synthetic "uuid"
// column prepended so each Accumulo row has a unique row id.
private static void copyTable(QueryRunner queryRunner, String catalog, Session session, String schema, TpchTable<?> table)
{
    QualifiedObjectName source = new QualifiedObjectName(catalog, schema, table.getTableName());
    String target = table.getTableName();
    @Language("SQL") String sql;
    switch (target) {
        case "customer":
            sql = format("CREATE TABLE %s WITH (index_columns = 'mktsegment') AS SELECT * FROM %s", target, source);
            break;
        case "lineitem":
            sql = format("CREATE TABLE %s WITH (index_columns = 'quantity,discount,returnflag,shipdate,receiptdate,shipinstruct,shipmode') AS SELECT cast(uuid() AS varchar) AS uuid, * FROM %s", target, source);
            break;
        case "orders":
            sql = format("CREATE TABLE %s WITH (index_columns = 'orderdate') AS SELECT * FROM %s", target, source);
            break;
        case "part":
            sql = format("CREATE TABLE %s WITH (index_columns = 'brand,type,size,container') AS SELECT * FROM %s", target, source);
            break;
        case "partsupp":
            sql = format("CREATE TABLE %s WITH (index_columns = 'partkey') AS SELECT cast(uuid() AS varchar) AS uuid, * FROM %s", target, source);
            break;
        case "supplier":
            sql = format("CREATE TABLE %s WITH (index_columns = 'name') AS SELECT * FROM %s", target, source);
            break;
        default:
            // Remaining tables (nation, region, ...) are copied without index columns.
            sql = format("CREATE TABLE %s AS SELECT * FROM %s", target, source);
            break;
    }
    // Fix: the original call passed both 'target' and 'sql' to a format string with a
    // single %s placeholder, so the extra 'sql' argument was silently ignored by the
    // formatter. The SQL text is already logged on the next line.
    LOG.info("Running import for %s", target);
    LOG.info("%s", sql);
    long start = System.nanoTime();
    long rows = queryRunner.execute(session, sql).getUpdateCount().getAsLong();
    LOG.info("Imported %s rows for %s in %s", rows, target, nanosSince(start));
}
Example usage of org.intellij.lang.annotations.Language in the Presto (prestodb) project — class TestHiveIntegrationSmokeTest, method testMismatchedBucketing:
/**
 * Verifies the mismatched-bucket-count optimization: joining bucketed tables with
 * differing bucket counts should need one fewer remote exchange when the optimization
 * is on, while producing identical query results either way.
 */
public void testMismatchedBucketing(Session session)
{
    try {
        // Source tables: 16 buckets, 32 buckets, and unbucketed.
        assertUpdate(session, "CREATE TABLE test_mismatch_bucketing16\nWITH (bucket_count = 16, bucketed_by = ARRAY['key16']) AS\nSELECT orderkey key16, comment value16 FROM orders", 15000);
        assertUpdate(session, "CREATE TABLE test_mismatch_bucketing32\nWITH (bucket_count = 32, bucketed_by = ARRAY['key32']) AS\nSELECT orderkey key32, comment value32 FROM orders", 15000);
        assertUpdate(session, "CREATE TABLE test_mismatch_bucketingN AS\nSELECT orderkey keyN, comment valueN FROM orders", 15000);

        Session optimizationOn = Session.builder(session)
                .setSystemProperty(COLOCATED_JOIN, "true")
                .setCatalogSessionProperty(catalog, "optimize_mismatched_bucket_count", "true")
                .build();
        Session optimizationOff = Session.builder(session)
                .setSystemProperty(COLOCATED_JOIN, "true")
                .setCatalogSessionProperty(catalog, "optimize_mismatched_bucket_count", "false")
                .build();

        // Three-way join written into a table with MORE buckets (32) than the 16-bucket driver.
        @Language("SQL") String ctasIntoMoreBuckets = "CREATE TABLE test_mismatch_bucketing_out32\nWITH (bucket_count = 32, bucketed_by = ARRAY['key16'])\nAS\nSELECT key16, value16, key32, value32, keyN, valueN\nFROM\n test_mismatch_bucketing16\nJOIN\n test_mismatch_bucketing32\nON key16=key32\nJOIN\n test_mismatch_bucketingN\nON key16=keyN";
        // Same join written into a table with FEWER buckets (8).
        @Language("SQL") String ctasIntoFewerBuckets = "CREATE TABLE test_mismatch_bucketing_out8\nWITH (bucket_count = 8, bucketed_by = ARRAY['key16'])\nAS\nSELECT key16, value16, key32, value32, keyN, valueN\nFROM\n test_mismatch_bucketing16\nJOIN\n test_mismatch_bucketing32\nON key16=key32\nJOIN\n test_mismatch_bucketingN\nON key16=keyN";

        // Without the optimization: 3 remote exchanges.
        assertUpdate(optimizationOff, ctasIntoMoreBuckets, 15000, assertRemoteExchangesCount(3));
        assertQuery(optimizationOff, "SELECT * FROM test_mismatch_bucketing_out32", "SELECT orderkey, comment, orderkey, comment, orderkey, comment from orders");
        assertUpdate(optimizationOff, "DROP TABLE IF EXISTS test_mismatch_bucketing_out32");

        // With the optimization: one exchange eliminated, same results.
        assertUpdate(optimizationOn, ctasIntoMoreBuckets, 15000, assertRemoteExchangesCount(2));
        assertQuery(optimizationOn, "SELECT * FROM test_mismatch_bucketing_out32", "SELECT orderkey, comment, orderkey, comment, orderkey, comment from orders");
        assertUpdate(optimizationOn, ctasIntoFewerBuckets, 15000, assertRemoteExchangesCount(2));
        assertQuery(optimizationOn, "SELECT * FROM test_mismatch_bucketing_out8", "SELECT orderkey, comment, orderkey, comment, orderkey, comment from orders");
    }
    finally {
        assertUpdate(session, "DROP TABLE IF EXISTS test_mismatch_bucketing16");
        assertUpdate(session, "DROP TABLE IF EXISTS test_mismatch_bucketing32");
        assertUpdate(session, "DROP TABLE IF EXISTS test_mismatch_bucketingN");
        assertUpdate(session, "DROP TABLE IF EXISTS test_mismatch_bucketing_out32");
        assertUpdate(session, "DROP TABLE IF EXISTS test_mismatch_bucketing_out8");
    }
}
Example usage of org.intellij.lang.annotations.Language in the Presto (prestodb) project — class TestHiveIntegrationSmokeTest, method testPartialMergePushdown:
/**
 * Verifies the partial-merge pushdown strategy: pushing partial merges through
 * low-memory operators should reduce the remote exchange count for joins and scans
 * that write into bucketed tables, without changing query results.
 */
public void testPartialMergePushdown(Session session)
{
    try {
        // Source tables: 16 buckets, 32 buckets, and unbucketed.
        assertUpdate("CREATE TABLE test_partial_merge_pushdown_16buckets\nWITH (bucket_count = 16, bucketed_by = ARRAY['key16']) AS\nSELECT orderkey key16, comment value16 FROM orders", 15000);
        assertUpdate("CREATE TABLE test_partial_merge_pushdown_32buckets\nWITH (bucket_count = 32, bucketed_by = ARRAY['key32']) AS\nSELECT orderkey key32, comment value32 FROM orders", 15000);
        assertUpdate("CREATE TABLE test_partial_merge_pushdown_nobucket AS\nSELECT orderkey key_nobucket, comment value_nobucket FROM orders", 15000);

        Session pushdownEnabled = Session.builder(session)
                .setSystemProperty(COLOCATED_JOIN, "true")
                .setSystemProperty(PARTIAL_MERGE_PUSHDOWN_STRATEGY, PUSH_THROUGH_LOW_MEMORY_OPERATORS.name())
                .build();
        Session pushdownDisabled = Session.builder(session)
                .setSystemProperty(COLOCATED_JOIN, "true")
                .setSystemProperty(PARTIAL_MERGE_PUSHDOWN_STRATEGY, PartialMergePushdownStrategy.NONE.name())
                .build();

        //
        // join and write to bucketed table
        // ================================
        @Language("SQL") String joinCtasSameBuckets = "CREATE TABLE test_partial_merge_pushdown_out16\nWITH (bucket_count = 16, bucketed_by = ARRAY['key16'])\nAS\nSELECT key16, value16, key32, value32, key_nobucket, value_nobucket\nFROM\n test_partial_merge_pushdown_16buckets\nJOIN\n test_partial_merge_pushdown_32buckets\nON key16=key32\nJOIN\n test_partial_merge_pushdown_nobucket\nON key16=key_nobucket";
        @Language("SQL") String joinCtasFewerBuckets = "CREATE TABLE test_partial_merge_pushdown_out8\nWITH (bucket_count = 8, bucketed_by = ARRAY['key16'])\nAS\nSELECT key16, value16, key32, value32, key_nobucket, value_nobucket\nFROM\n test_partial_merge_pushdown_16buckets\nJOIN\n test_partial_merge_pushdown_32buckets\nON key16=key32\nJOIN\n test_partial_merge_pushdown_nobucket\nON key16=key_nobucket";

        assertUpdate(pushdownDisabled, joinCtasSameBuckets, 15000, assertRemoteExchangesCount(3));
        assertQuery("SELECT * FROM test_partial_merge_pushdown_out16", "SELECT orderkey, comment, orderkey, comment, orderkey, comment from orders");
        assertUpdate("DROP TABLE IF EXISTS test_partial_merge_pushdown_out16");

        assertUpdate(pushdownEnabled, joinCtasSameBuckets, 15000, assertRemoteExchangesCount(2));
        assertQuery("SELECT * FROM test_partial_merge_pushdown_out16", "SELECT orderkey, comment, orderkey, comment, orderkey, comment from orders");
        assertUpdate("DROP TABLE IF EXISTS test_partial_merge_pushdown_out16");

        assertUpdate(pushdownDisabled, joinCtasFewerBuckets, 15000, assertRemoteExchangesCount(4));
        assertQuery("SELECT * FROM test_partial_merge_pushdown_out8", "SELECT orderkey, comment, orderkey, comment, orderkey, comment from orders");
        assertUpdate("DROP TABLE IF EXISTS test_partial_merge_pushdown_out8");

        // cannot pushdown the partial merge for TableWrite
        assertUpdate(pushdownEnabled, joinCtasFewerBuckets, 15000, assertRemoteExchangesCount(3));
        assertQuery("SELECT * FROM test_partial_merge_pushdown_out8", "SELECT orderkey, comment, orderkey, comment, orderkey, comment from orders");
        assertUpdate("DROP TABLE IF EXISTS test_partial_merge_pushdown_out8");

        //
        // read and write to bucketed table
        // =====================================
        @Language("SQL") String scanCtasFewerBuckets = "CREATE TABLE test_partial_merge_pushdown_out8\nWITH (bucket_count = 8, bucketed_by = ARRAY['key16'])\nAS\nSELECT key16, value16\nFROM\n test_partial_merge_pushdown_16buckets";

        assertUpdate(pushdownDisabled, scanCtasFewerBuckets, 15000, assertRemoteExchangesCount(2));
        assertQuery("SELECT * FROM test_partial_merge_pushdown_out8", "SELECT orderkey, comment from orders");
        assertUpdate("DROP TABLE IF EXISTS test_partial_merge_pushdown_out8");

        assertUpdate(pushdownEnabled, scanCtasFewerBuckets, 15000, assertRemoteExchangesCount(1));
        assertQuery("SELECT * FROM test_partial_merge_pushdown_out8", "SELECT orderkey, comment from orders");
        assertUpdate("DROP TABLE IF EXISTS test_partial_merge_pushdown_out8");
    }
    finally {
        assertUpdate("DROP TABLE IF EXISTS test_partial_merge_pushdown_16buckets");
        assertUpdate("DROP TABLE IF EXISTS test_partial_merge_pushdown_32buckets");
        assertUpdate("DROP TABLE IF EXISTS test_partial_merge_pushdown_nobucket");
        assertUpdate("DROP TABLE IF EXISTS test_partial_merge_pushdown_out16");
        assertUpdate("DROP TABLE IF EXISTS test_partial_merge_pushdown_out8");
    }
}
Example usage of org.intellij.lang.annotations.Language in the Presto (prestodb) project — class TestHiveIntegrationSmokeTest, method testInsertPartitionedTable:
/**
 * Verifies INSERT into a partitioned Hive table for the given storage format:
 * table/partition metadata, the synthetic "$partitions" system table, predicate
 * handling on partition columns, and error reporting for unknown columns.
 */
private void testInsertPartitionedTable(Session session, HiveStorageFormat storageFormat)
{
    @Language("SQL") String createTableSql =
            "CREATE TABLE test_insert_partitioned_table ( ORDER_KEY BIGINT, SHIP_PRIORITY INTEGER, ORDER_STATUS VARCHAR) WITH (format = '"
            + storageFormat
            + "', partitioned_by = ARRAY[ 'SHIP_PRIORITY', 'ORDER_STATUS' ]) ";
    assertUpdate(session, createTableSql);

    // Confirm the declared storage format and partition columns survived table creation
    // (partition column names are lower-cased by Hive).
    TableMetadata tableMetadata = getTableMetadata(catalog, TPCH_SCHEMA, "test_insert_partitioned_table");
    assertEquals(tableMetadata.getMetadata().getProperties().get(STORAGE_FORMAT_PROPERTY), storageFormat);
    assertEquals(tableMetadata.getMetadata().getProperties().get(PARTITIONED_BY_PROPERTY), ImmutableList.of("ship_priority", "order_status"));

    // The "$partitions" system table is empty before any data is inserted.
    String partitionsTableName = "\"test_insert_partitioned_table$partitions\"";
    assertQuery(session, "SELECT * FROM " + partitionsTableName, "SELECT shippriority, orderstatus FROM orders LIMIT 0");

    // Hive will reorder the partition keys, so we must insert into the table assuming the partition keys have been moved to the end
    assertUpdate(session, "INSERT INTO test_insert_partitioned_table SELECT orderkey, shippriority, orderstatus FROM tpch.tiny.orders", "SELECT count(*) from orders");

    // verify the partitions
    List<?> partitionList = getPartitions("test_insert_partitioned_table");
    assertEquals(partitionList.size(), 3);

    assertQuery(session, "SELECT * from test_insert_partitioned_table", "SELECT orderkey, shippriority, orderstatus FROM orders");
    assertQuery(session, "SELECT * FROM " + partitionsTableName, "SELECT DISTINCT shippriority, orderstatus FROM orders");
    assertQuery(session, "SELECT * FROM " + partitionsTableName + " ORDER BY order_status LIMIT 2", "SELECT DISTINCT shippriority, orderstatus FROM orders ORDER BY orderstatus LIMIT 2");
    assertQuery(session, "SELECT * FROM " + partitionsTableName + " WHERE order_status = 'O'", "SELECT DISTINCT shippriority, orderstatus FROM orders WHERE orderstatus = 'O'");

    // Only partition columns are queryable through "$partitions".
    assertQueryFails(session, "SELECT * FROM " + partitionsTableName + " WHERE no_such_column = 1", "line \\S*: Column 'no_such_column' cannot be resolved");
    assertQueryFails(session, "SELECT * FROM " + partitionsTableName + " WHERE orderkey = 1", "line \\S*: Column 'orderkey' cannot be resolved");

    assertUpdate(session, "DROP TABLE test_insert_partitioned_table");
    assertFalse(getQueryRunner().tableExists(session, "test_insert_partitioned_table"));
}
Aggregations