Use of io.trino.testing.MaterializedResult in project trino by trinodb.
In the class BaseHiveConnectorTest, method assertOneNotNullResult:
private void assertOneNotNullResult(Session session, @Language("SQL") String query) {
    MaterializedResult results = getQueryRunner().execute(session, query).toTestTypes();
    assertEquals(results.getRowCount(), 1);
    assertEquals(results.getMaterializedRows().get(0).getFieldCount(), 1);
    assertNotNull(results.getMaterializedRows().get(0).getField(0));
}
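As a usage note, a helper like this is invoked from other tests in the same class to check that a single-row, single-column query produced a non-null value. The call below is an illustrative sketch only; the table name and hidden column are hypothetical and do not come from this excerpt:

// Hypothetical call site, assuming it runs inside BaseHiveConnectorTest
// against a table named test_table that this excerpt does not create.
assertOneNotNullResult(getSession(), "SELECT \"$path\" FROM test_table LIMIT 1");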
Use of io.trino.testing.MaterializedResult in project trino by trinodb.
In the class BaseHiveConnectorTest, method testSortedWritingTempStaging:
@Test
public void testSortedWritingTempStaging() {
    String tableName = "test_sorted_writing";
    @Language("SQL") String createTableSql = format("" +
            "CREATE TABLE %s " +
            "WITH (" +
            " bucket_count = 7," +
            " bucketed_by = ARRAY['shipmode']," +
            " sorted_by = ARRAY['shipmode']" +
            ") AS " +
            "SELECT * FROM tpch.tiny.lineitem",
            tableName);
    Session session = Session.builder(getSession())
            .setCatalogSessionProperty("hive", "sorted_writing_enabled", "true")
            .setCatalogSessionProperty("hive", "temporary_staging_directory_enabled", "true")
            .setCatalogSessionProperty("hive", "temporary_staging_directory_path", "/tmp/custom/temporary-${USER}")
            .build();
    assertUpdate(session, createTableSql, 60175L);
    MaterializedResult expected = computeActual("SELECT * FROM tpch.tiny.lineitem");
    MaterializedResult actual = computeActual("SELECT * FROM " + tableName);
    assertEqualsIgnoreOrder(actual.getMaterializedRows(), expected.getMaterializedRows());
    assertUpdate("DROP TABLE " + tableName);
}
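A possible follow-on check, sketched here as an assumption rather than part of the original test, would confirm that the bucketing and sorting properties survived the CTAS; it would have to be placed inside the method above, before the DROP TABLE statement:

// Hedged sketch: inspect SHOW CREATE TABLE output for the sorted_by property.
// getOnlyValue() returns the single cell of a one-row, one-column result.
MaterializedResult showCreate = computeActual("SHOW CREATE TABLE " + tableName);
String ddl = (String) showCreate.getOnlyValue();
assertTrue(ddl.contains("sorted_by = ARRAY['shipmode']"));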
Use of io.trino.testing.MaterializedResult in project trino by trinodb.
In the class BaseHiveConnectorTest, method testCreateAvroTableWithSchemaUrl:
@Test
public void testCreateAvroTableWithSchemaUrl() throws Exception {
    String tableName = "test_create_avro_table_with_schema_url";
    File schemaFile = createAvroSchemaFile();
    String createTableSql = getAvroCreateTableSql(tableName, schemaFile.getAbsolutePath());
    String expectedShowCreateTable = getAvroCreateTableSql(tableName, schemaFile.toURI().toString());
    assertUpdate(createTableSql);
    try {
        MaterializedResult actual = computeActual("SHOW CREATE TABLE " + tableName);
        assertEquals(actual.getOnlyValue(), expectedShowCreateTable);
    }
    finally {
        assertUpdate("DROP TABLE " + tableName);
        verify(schemaFile.delete(), "cannot delete temporary file: %s", schemaFile);
    }
}
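The createAvroSchemaFile() and getAvroCreateTableSql() helpers are not part of this excerpt. The sketch below shows one plausible shape for the SQL-building helper, assuming the Hive connector's avro_schema_url table property and a pair of placeholder columns; it is not the verbatim implementation from the project:

// Hedged sketch of the helper used above, not the real implementation.
private String getAvroCreateTableSql(String tableName, String schemaLocation) {
    return format("CREATE TABLE %s.%s.%s (\n" +
            "   dummy_col varchar,\n" +
            "   another_dummy_col varchar\n" +
            ")\n" +
            "WITH (\n" +
            "   avro_schema_url = '%s',\n" +
            "   format = 'AVRO'\n" +
            ")",
            getSession().getCatalog().orElseThrow(),
            getSession().getSchema().orElseThrow(),
            tableName,
            schemaLocation);
}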
Use of io.trino.testing.MaterializedResult in project trino by trinodb.
In the class BaseHiveConnectorTest, method testShowColumnsPartitionKey:
@Test
public void testShowColumnsPartitionKey() {
    assertUpdate("" +
            "CREATE TABLE test_show_columns_partition_key\n" +
            "(grape bigint, orange bigint, pear varchar(65535), mango integer, lychee smallint, kiwi tinyint, apple varchar, pineapple varchar(65535))\n" +
            "WITH (partitioned_by = ARRAY['apple', 'pineapple'])");
    MaterializedResult actual = computeActual("SHOW COLUMNS FROM test_show_columns_partition_key");
    Type unboundedVarchar = canonicalizeType(VARCHAR);
    MaterializedResult expected = resultBuilder(getSession(), unboundedVarchar, unboundedVarchar, unboundedVarchar, unboundedVarchar)
            .row("grape", canonicalizeType(BIGINT).toString(), "", "")
            .row("orange", canonicalizeType(BIGINT).toString(), "", "")
            .row("pear", canonicalizeType(createVarcharType(65535)).toString(), "", "")
            .row("mango", canonicalizeType(INTEGER).toString(), "", "")
            .row("lychee", canonicalizeType(SMALLINT).toString(), "", "")
            .row("kiwi", canonicalizeType(TINYINT).toString(), "", "")
            .row("apple", canonicalizeType(VARCHAR).toString(), "partition key", "")
            .row("pineapple", canonicalizeType(createVarcharType(65535)).toString(), "partition key", "")
            .build();
    assertEquals(actual, expected);
}
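Since DESCRIBE is defined in Trino as a synonym for SHOW COLUMNS, the same expected result could be reused for a DESCRIBE-based assertion. This is a hedged variation that would sit inside the test above, not something present in the source:

// Hedged variation: DESCRIBE yields the same four-column result as SHOW COLUMNS.
assertEquals(computeActual("DESCRIBE test_show_columns_partition_key"), expected);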
Use of io.trino.testing.MaterializedResult in project trino by trinodb.
In the class BaseHiveConnectorTest, method testFileSizeHiddenColumn:
@Test
public void testFileSizeHiddenColumn() {
    @Language("SQL") String createTable = "CREATE TABLE test_file_size " +
            "WITH (" +
            "partitioned_by = ARRAY['col1']" +
            ") AS " +
            "SELECT * FROM (VALUES " +
            "(0, 0), (3, 0), (6, 0), " +
            "(1, 1), (4, 1), (7, 1), " +
            "(2, 2), (5, 2) " +
            " ) t(col0, col1) ";
    assertUpdate(createTable, 8);
    assertTrue(getQueryRunner().tableExists(getSession(), "test_file_size"));

    TableMetadata tableMetadata = getTableMetadata(catalog, TPCH_SCHEMA, "test_file_size");
    List<String> columnNames = ImmutableList.of("col0", "col1", PATH_COLUMN_NAME, FILE_SIZE_COLUMN_NAME, FILE_MODIFIED_TIME_COLUMN_NAME, PARTITION_COLUMN_NAME);
    List<ColumnMetadata> columnMetadatas = tableMetadata.getColumns();
    assertEquals(columnMetadatas.size(), columnNames.size());
    for (int i = 0; i < columnMetadatas.size(); i++) {
        ColumnMetadata columnMetadata = columnMetadatas.get(i);
        assertEquals(columnMetadata.getName(), columnNames.get(i));
        if (columnMetadata.getName().equals(FILE_SIZE_COLUMN_NAME)) {
            assertTrue(columnMetadata.isHidden());
        }
    }
    assertEquals(getPartitions("test_file_size").size(), 3);

    MaterializedResult results = computeActual(format("SELECT *, \"%s\" FROM test_file_size", FILE_SIZE_COLUMN_NAME));
    Map<Integer, Long> fileSizeMap = new HashMap<>();
    for (int i = 0; i < results.getRowCount(); i++) {
        MaterializedRow row = results.getMaterializedRows().get(i);
        int col0 = (int) row.getField(0);
        int col1 = (int) row.getField(1);
        long fileSize = (Long) row.getField(2);
        assertTrue(fileSize > 0);
        assertEquals(col0 % 3, col1);
        if (fileSizeMap.containsKey(col1)) {
            assertEquals(fileSizeMap.get(col1).longValue(), fileSize);
        }
        else {
            fileSizeMap.put(col1, fileSize);
        }
    }
    assertEquals(fileSizeMap.size(), 3);
    assertUpdate("DROP TABLE test_file_size");
}
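The same hidden-column pattern applies to the other constants in the column-name list. The sketch below is an assumption, not part of the original test, and would need to run inside the method above before the DROP TABLE; it checks that every row reports a backing file path:

// Hedged sketch: PATH_COLUMN_NAME resolves to the "$path" hidden column.
MaterializedResult paths = computeActual(
        format("SELECT \"%s\" FROM test_file_size", PATH_COLUMN_NAME));
for (MaterializedRow row : paths.getMaterializedRows()) {
    assertNotNull(row.getField(0)); // every row is attributed to a file on storage
}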