Use of org.intellij.lang.annotations.Language in project presto by prestodb.
From the class TestHiveIntegrationSmokeTest, the method testCreateExternalTable:
@Test
public void testCreateExternalTable() throws Exception {
    File tempDir = createTempDir();
    File dataFile = new File(tempDir, "test.txt");
    Files.write("hello\nworld\n", dataFile, UTF_8);
    @Language("SQL") String createTableSql = format("" +
                    "CREATE TABLE %s.%s.test_create_external (\n" +
                    " \"name\" varchar\n" +
                    ")\n" +
                    "WITH (\n" +
                    " external_location = '%s',\n" +
                    " format = 'TEXTFILE'\n" +
                    ")",
            getSession().getCatalog().get(),
            getSession().getSchema().get(),
            new Path(tempDir.toURI().toASCIIString()).toString());
    assertUpdate(createTableSql);
    MaterializedResult actual = computeActual("SHOW CREATE TABLE test_create_external");
    assertEquals(actual.getOnlyValue(), createTableSql);
    actual = computeActual("SELECT name FROM test_create_external");
    assertEquals(actual.getOnlyColumnAsSet(), ImmutableSet.of("hello", "world"));
    assertUpdate("DROP TABLE test_create_external");
    // file should still exist after drop
    assertFile(dataFile);
    deleteRecursively(tempDir.toPath(), ALLOW_INSECURE);
}
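In all of these snippets, @Language("SQL") is the IntelliJ IDEA annotation from the org.intellij.lang.annotations package: it tells the IDE that the annotated string literal contains SQL, enabling injected syntax highlighting and inspections inside the literal, and it has no effect at runtime. A minimal standalone sketch of the same pattern (the class and query below are illustrative, not taken from the Presto sources):

import org.intellij.lang.annotations.Language;

public class LanguageAnnotationSketch {
    // The annotation is only an IDE hint that the literal is SQL;
    // the string itself is passed along unchanged at runtime.
    @Language("SQL")
    private static final String QUERY = "SELECT name FROM example_table WHERE id = 1";

    public static void main(String[] args) {
        System.out.println(QUERY);
    }
}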
Use of org.intellij.lang.annotations.Language in project presto by prestodb.
From the class TestHiveIntegrationSmokeTest, the method testInsertPartitionedTableOverwriteExistingPartition:
private void testInsertPartitionedTableOverwriteExistingPartition(Session session, HiveStorageFormat storageFormat) {
    String tableName = "test_insert_partitioned_table_overwrite_existing_partition";
    @Language("SQL") String createTable = "" +
            "CREATE TABLE " + tableName + " " +
            "( order_key BIGINT, comment VARCHAR, order_status VARCHAR) " +
            "WITH (format = '" + storageFormat + "', partitioned_by = ARRAY[ 'order_status' ]) ";
    assertUpdate(session, createTable);
    TableMetadata tableMetadata = getTableMetadata(catalog, TPCH_SCHEMA, tableName);
    assertEquals(tableMetadata.getMetadata().getProperties().get(STORAGE_FORMAT_PROPERTY), storageFormat);
    assertEquals(tableMetadata.getMetadata().getProperties().get(PARTITIONED_BY_PROPERTY), ImmutableList.of("order_status"));
    for (int i = 0; i < 3; i++) {
        assertUpdate(session,
                format("INSERT INTO " + tableName + " SELECT orderkey, comment, orderstatus FROM tpch.tiny.orders WHERE orderkey %% 3 = %d", i),
                format("SELECT count(*) from orders where orderkey %% 3 = %d", i));
        // verify the partitions
        List<?> partitions = getPartitions(tableName);
        assertEquals(partitions.size(), 3);
        assertQuery(session, "SELECT * from " + tableName,
                format("SELECT orderkey, comment, orderstatus FROM orders where orderkey %% 3 = %d", i));
    }
    assertUpdate(session, "DROP TABLE " + tableName);
    assertFalse(getQueryRunner().tableExists(session, tableName));
}
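The INSERT and verification statements above are built with String.format, so the percent sign of the modulo operator has to be written as %% while %d is replaced by the loop index. A small self-contained illustration of that escaping (the query text is made up for the example):

import static java.lang.String.format;

public class FormatEscapeSketch {
    public static void main(String[] args) {
        // "%%" renders as a literal '%', "%d" is substituted with the argument.
        String sql = format("SELECT count(*) FROM orders WHERE orderkey %% 3 = %d", 1);
        System.out.println(sql); // SELECT count(*) FROM orders WHERE orderkey % 3 = 1
    }
}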
Use of org.intellij.lang.annotations.Language in project presto by prestodb.
From the class TestHiveIntegrationSmokeTest, the method testInsertPartitionedTableImmutableExistingPartition:
public void testInsertPartitionedTableImmutableExistingPartition(Session session, HiveStorageFormat storageFormat) {
    String tableName = "test_insert_partitioned_table_immutable_existing_partition";
    @Language("SQL") String createTable = "" +
            "CREATE TABLE " + tableName + " " +
            "( order_key BIGINT, comment VARCHAR, order_status VARCHAR) " +
            "WITH (format = '" + storageFormat + "', partitioned_by = ARRAY[ 'order_status' ]) ";
    assertUpdate(session, createTable);
    TableMetadata tableMetadata = getTableMetadata(catalog, TPCH_SCHEMA, tableName);
    assertEquals(tableMetadata.getMetadata().getProperties().get(STORAGE_FORMAT_PROPERTY), storageFormat);
    assertEquals(tableMetadata.getMetadata().getProperties().get(PARTITIONED_BY_PROPERTY), ImmutableList.of("order_status"));
    assertUpdate(session,
            format("INSERT INTO " + tableName + " SELECT orderkey, comment, orderstatus FROM tpch.tiny.orders WHERE orderkey %% 3 = %d", 0),
            format("SELECT count(*) from orders where orderkey %% 3 = %d", 0));
    // verify the partitions
    List<?> partitions = getPartitions(tableName);
    assertEquals(partitions.size(), 3);
    assertQuery(session, "SELECT * from " + tableName,
            format("SELECT orderkey, comment, orderstatus FROM orders where orderkey %% 3 = %d", 0));
    assertQueryFails(session,
            format("INSERT INTO " + tableName + " SELECT orderkey, comment, orderstatus FROM tpch.tiny.orders WHERE orderkey %% 3 = %d", 0),
            ".*Cannot insert into an existing partition of Hive table.*");
    partitions = getPartitions(tableName);
    assertEquals(partitions.size(), 3);
    assertQuery(session, "SELECT * from " + tableName,
            format("SELECT orderkey, comment, orderstatus FROM orders where orderkey %% 3 = %d", 0));
    assertUpdate(session, "DROP TABLE " + tableName);
    assertFalse(getQueryRunner().tableExists(session, tableName));
}
Use of org.intellij.lang.annotations.Language in project presto by prestodb.
From the class TestHiveIntegrationSmokeTest, the method testPartialAggregatePushdownParquet:
@Test
public void testPartialAggregatePushdownParquet() {
    @Language("SQL") String createTable = "" +
            "CREATE TABLE test_parquet_table (" +
            " _boolean BOOLEAN" +
            ", _tinyint TINYINT" +
            ", _smallint SMALLINT" +
            ", _integer INTEGER" +
            ", _bigint BIGINT" +
            ", _real REAL" +
            ", _double DOUBLE" +
            ", _shortdecimal DECIMAL(8,3)" +
            ", _longdecimal DECIMAL(25,2)" +
            ", _string VARCHAR" +
            ", _varchar VARCHAR(10)" +
            ", _singlechar CHAR" +
            ", _char CHAR(10)" +
            ", _varbinary VARBINARY" +
            ", _date DATE" +
            ", _timestamp TIMESTAMP" +
            ")" +
            "WITH (format = 'parquet')";
    Session session = Session.builder(getSession())
            .setCatalogSessionProperty(catalog, "partial_aggregation_pushdown_enabled", "true")
            .setCatalogSessionProperty(catalog, "partial_aggregation_pushdown_for_variable_length_datatypes_enabled", "true")
            .build();
    try {
        assertUpdate(session, createTable);
        TableMetadata tableMetadata = getTableMetadata(catalog, TPCH_SCHEMA, "test_parquet_table");
        assertEquals(tableMetadata.getMetadata().getProperties().get(STORAGE_FORMAT_PROPERTY), HiveStorageFormat.PARQUET);
        assertUpdate(session, "INSERT INTO test_parquet_table VALUES (" +
                "true, cast(1 as tinyint), cast(2 as smallint), 3, 4, 1.2, 2.3, 4.5, 55555555555555.32" +
                ", 'abc', 'def', 'g', 'hij', cast('klm' as varbinary)" +
                ", cast('2020-05-01' as date), cast('2020-06-04 16:55:40.777' as timestamp))", 1);
        assertUpdate(session, "INSERT INTO test_parquet_table VALUES (" +
                "false, cast(10 as tinyint), cast(20 as smallint), 30, 40, 10.25, 25.334, 465.523, 88888888555555.91" +
                ", 'foo', 'bar', 'b', 'baz', cast('qux' as varbinary)" +
                ", cast('2020-06-02' as date), cast('2020-05-01 18:34:23.88' as timestamp))", 1);
        String rowCount = "SELECT 2";
        assertQuery(session, "SELECT COUNT(*) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_boolean) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_tinyint) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_smallint) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_integer) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_bigint) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_real) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_double) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_shortdecimal) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_longdecimal) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_string) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_varchar) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_singlechar) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_char) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_varbinary) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_date) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT COUNT(_timestamp) FROM test_parquet_table", rowCount);
        assertQuery(session, "SELECT MIN(_boolean), MAX(_boolean) FROM test_parquet_table", "select false, true");
        assertQuery(session, "SELECT MIN(_tinyint), MAX(_tinyint) FROM test_parquet_table", "select 1, 10");
        assertQuery(session, "SELECT MIN(_smallint), MAX(_smallint) FROM test_parquet_table", "select 2, 20");
        assertQuery(session, "SELECT MIN(_integer), MAX(_integer) FROM test_parquet_table", "select 3, 30");
        assertQuery(session, "SELECT MIN(_bigint), MAX(_bigint) FROM test_parquet_table", "select 4, 40");
        assertQuery(session, "SELECT MIN(_real), MAX(_real) FROM test_parquet_table", "select 1.2, 10.25");
        assertQuery(session, "SELECT MIN(_double), MAX(_double) FROM test_parquet_table", "select 2.3, 25.334");
        assertQuery(session, "SELECT MIN(_shortdecimal), MAX(_shortdecimal) FROM test_parquet_table", "select 4.5, 465.523");
        assertQuery(session, "SELECT MIN(_longdecimal), MAX(_longdecimal) FROM test_parquet_table", "select 55555555555555.32, 88888888555555.91");
        assertQuery(session, "SELECT MIN(_string), MAX(_string) FROM test_parquet_table", "select 'abc', 'foo'");
        assertQuery(session, "SELECT MIN(_varchar), MAX(_varchar) FROM test_parquet_table", "select 'bar', 'def'");
        assertQuery(session, "SELECT MIN(_singlechar), MAX(_singlechar) FROM test_parquet_table", "select 'b', 'g'");
        assertQuery(session, "SELECT MIN(_char), MAX(_char) FROM test_parquet_table", "select 'baz', 'hij'");
        assertQuery(session, "SELECT MIN(_varbinary), MAX(_varbinary) FROM test_parquet_table", "select X'6b6c6d', X'717578'");
        assertQuery(session, "SELECT MIN(_date), MAX(_date) FROM test_parquet_table", "select cast('2020-05-01' as date), cast('2020-06-02' as date)");
        assertQuery(session, "SELECT MIN(_timestamp), MAX(_timestamp) FROM test_parquet_table", "select cast('2020-05-01 18:34:23.88' as timestamp), cast('2020-06-04 16:55:40.777' as timestamp)");
    } finally {
        assertUpdate(session, "DROP TABLE test_parquet_table");
    }
    assertFalse(getQueryRunner().tableExists(session, "test_parquet_table"));
}
Use of org.intellij.lang.annotations.Language in project presto by prestodb.
From the class TestHiveIntegrationSmokeTest, the method createTableWithEveryType:
@Test
public void createTableWithEveryType() {
    @Language("SQL") String query = "" +
            "CREATE TABLE test_types_table AS " +
            "SELECT" +
            " 'foo' _varchar" +
            ", cast('bar' as varbinary) _varbinary" +
            ", cast(1 as bigint) _bigint" +
            ", 2 _integer" +
            ", CAST('3.14' AS DOUBLE) _double" +
            ", true _boolean" +
            ", DATE '1980-05-07' _date" +
            ", TIMESTAMP '1980-05-07 11:22:33.456' _timestamp" +
            ", CAST('3.14' AS DECIMAL(3,2)) _decimal_short" +
            ", CAST('12345678901234567890.0123456789' AS DECIMAL(30,10)) _decimal_long" +
            ", CAST('bar' AS CHAR(10)) _char";
    assertUpdate(query, 1);
    MaterializedResult results = getQueryRunner().execute(getSession(), "SELECT * FROM test_types_table").toTestTypes();
    assertEquals(results.getRowCount(), 1);
    MaterializedRow row = results.getMaterializedRows().get(0);
    assertEquals(row.getField(0), "foo");
    assertEquals(row.getField(1), "bar".getBytes(UTF_8));
    assertEquals(row.getField(2), 1L);
    assertEquals(row.getField(3), 2);
    assertEquals(row.getField(4), 3.14);
    assertEquals(row.getField(5), true);
    assertEquals(row.getField(6), LocalDate.of(1980, 5, 7));
    assertEquals(row.getField(7), LocalDateTime.of(1980, 5, 7, 11, 22, 33, 456_000_000));
    assertEquals(row.getField(8), new BigDecimal("3.14"));
    assertEquals(row.getField(9), new BigDecimal("12345678901234567890.0123456789"));
    assertEquals(row.getField(10), "bar       ");
assertUpdate("DROP TABLE test_types_table");
assertFalse(getQueryRunner().tableExists(getSession(), "test_types_table"));
}
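Because _char is declared CHAR(10), the value read back is 'bar' right-padded with spaces to the declared length, which is why the expected Java string above carries trailing blanks. A tiny sketch of building such a padded comparison value (the helper below is hypothetical, not part of the test framework):

public class CharPadSketch {
    // Hypothetical helper: right-pads a value with spaces to the declared CHAR length.
    static String padToCharLength(String value, int length) {
        StringBuilder padded = new StringBuilder(value);
        while (padded.length() < length) {
            padded.append(' ');
        }
        return padded.toString();
    }

    public static void main(String[] args) {
        // "bar" padded to CHAR(10) -> "bar" followed by seven spaces
        System.out.println("[" + padToCharLength("bar", 10) + "]");
    }
}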