Example 1 with TemporaryHiveTable

use of io.trino.tests.product.hive.util.TemporaryHiveTable in project trino by trinodb.

the class TestHiveTransactionalTable method testCtasAcidTable.

@Test(groups = HIVE_TRANSACTIONAL, dataProvider = "testCreateAcidTableDataProvider")
public void testCtasAcidTable(boolean isPartitioned, BucketingType bucketingType) {
    if (getHiveVersionMajor() < 3) {
        throw new SkipException("Hive transactional tables are supported with Hive version 3 or above");
    }
    try (TemporaryHiveTable table = TemporaryHiveTable.temporaryHiveTable(format("ctas_transactional_%s", randomTableSuffix()))) {
        String tableName = table.getName();
        onTrino().executeQuery("CREATE TABLE " + tableName + " " + trinoTableProperties(ACID, isPartitioned, bucketingType) + " AS SELECT * FROM (VALUES (21, 1, 1), (22, 1, 2), (23, 2, 2)) t(col, fcol, partcol)");
        // can we query from Trino
        assertThat(onTrino().executeQuery("SELECT col, fcol FROM " + tableName + " WHERE partcol = 2 ORDER BY col")).containsOnly(row(22, 1), row(23, 2));
        // can we query from Hive
        assertThat(onHive().executeQuery("SELECT col, fcol FROM " + tableName + " WHERE partcol = 2 ORDER BY col")).containsOnly(row(22, 1), row(23, 2));
    }
}
Also used : TemporaryHiveTable(io.trino.tests.product.hive.util.TemporaryHiveTable) SkipException(org.testng.SkipException) Test(org.testng.annotations.Test)
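
Every example on this page opens the table in a try-with-resources block, so the drop happens even if an assertion throws first. The TemporaryHiveTable source is not part of this listing; the snippet below is only a minimal sketch of that AutoCloseable pattern, assuming the same static onHive() executor used in the tests above, and is not the actual Trino implementation.

import static io.trino.tests.product.utils.QueryExecutors.onHive;

// Sketch only: illustrates the try-with-resources contract the examples rely on,
// not the real io.trino.tests.product.hive.util.TemporaryHiveTable.
public class TemporaryHiveTableSketch implements AutoCloseable {
    private final String name;

    private TemporaryHiveTableSketch(String name) {
        this.name = name;
    }

    public static TemporaryHiveTableSketch temporaryHiveTable(String name) {
        return new TemporaryHiveTableSketch(name);
    }

    public String getName() {
        return name;
    }

    @Override
    public void close() {
        // Dropping on close keeps the Hive schema clean even when the test body fails midway.
        onHive().executeQuery("DROP TABLE IF EXISTS " + name);
    }
}

Tying the drop to close() rather than to a separate @AfterMethod hook means cleanup always targets the table that was actually created, which is why each example can safely generate a fresh random name.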

Example 2 with TemporaryHiveTable

use of io.trino.tests.product.hive.util.TemporaryHiveTable in project trino by trinodb.

the class TestHiveBucketedTables method testBucketingWithUnsupportedDataTypes.

@Test(dataProvider = "testBucketingWithUnsupportedDataTypesDataProvider")
@Flaky(issue = ERROR_COMMITTING_WRITE_TO_HIVE_ISSUE, match = ERROR_COMMITTING_WRITE_TO_HIVE_MATCH)
public void testBucketingWithUnsupportedDataTypes(BucketingType bucketingType, String columnToBeBucketed) {
    try (TemporaryHiveTable table = temporaryHiveTable("table_with_unsupported_bucketing_types_" + randomTableSuffix())) {
        String tableName = table.getName();
        onHive().executeQuery(format("CREATE TABLE %s (" + "n_integer       INT," + "n_decimal       DECIMAL(9, 2)," + "n_timestamp     TIMESTAMP," + "n_char          CHAR(10)," + "n_binary        BINARY," + "n_union         UNIONTYPE<INT,STRING>," + "n_struct        STRUCT<field1:INT,field2:STRING>) " + "CLUSTERED BY (%s) INTO 2 BUCKETS " + "STORED AS ORC " + "%s", tableName, columnToBeBucketed, hiveTableProperties(bucketingType)));
        QueryResult showCreateTableResult = onTrino().executeQuery("SHOW CREATE TABLE " + tableName);
        assertThat(showCreateTableResult).hasRowsCount(1);
        Assertions.assertThat((String) getOnlyElement(getOnlyElement(showCreateTableResult.rows())))
                .matches(Pattern.compile(format("\\QCREATE TABLE hive.default.%s (\n" +
                        "   n_integer integer,\n" +
                        "   n_decimal decimal(9, 2),\n" +
                        "   n_timestamp timestamp(3),\n" +
                        "   n_char char(10),\n" +
                        "   n_binary varbinary,\n" +
                        "   n_union ROW(tag tinyint, field0 integer, field1 varchar),\n" +
                        "   n_struct ROW(field1 integer, field2 varchar)\n" +
                        ")\n" +
                        "WITH (\\E(?s:.*)" +
                        "bucket_count = 2,\n(?s:.*)" +
                        "bucketed_by = ARRAY\\['%s'\\],\n(?s:.*)" +
                        "bucketing_version = %s,(?s:.*)",
                        tableName, Pattern.quote(columnToBeBucketed), getExpectedBucketVersion(bucketingType))));
        populateRowToHiveTable(
                tableName,
                ImmutableList.<String>builder()
                        .add("1")
                        .add("CAST(1 AS DECIMAL(9, 2))")
                        .add("CAST('2015-01-01T00:01:00.15' AS TIMESTAMP)")
                        .add("'char value'")
                        .add("unhex('00010203')")
                        .add("create_union(0, 1, 'union value')")
                        .add("named_struct('field1', 1, 'field2', 'Field2')")
                        .build(),
                Optional.empty());
        assertThat(onTrino().executeQuery(format("SELECT * FROM %s", tableName))).hasRowsCount(1);
        assertQueryFailure(() -> onTrino().executeQuery("SELECT \"$bucket\" FROM " + tableName)).hasMessageMatching("Query failed \\(#\\w+\\):\\Q line 1:8: Column '$bucket' cannot be resolved");
        assertQueryFailure(() -> onTrino().executeQuery(format("INSERT INTO %s(n_integer) VALUES (1)", tableName))).hasMessageMatching("Query failed \\(#\\w+\\): Cannot write to a table bucketed on an unsupported type");
        String newTableName = "new_" + tableName;
        assertQueryFailure(() -> onTrino().executeQuery(format("CREATE TABLE %s (LIKE %s INCLUDING PROPERTIES)", newTableName, tableName))).hasMessageMatching("Query failed \\(#\\w+\\): Cannot create a table bucketed on an unsupported type");
        assertQueryFailure(() -> onTrino().executeQuery(format("CREATE TABLE %s (" + "n_integer       integer," + "n_decimal       decimal(9, 2)," + "n_timestamp     timestamp(3)," + "n_char          char(10)," + "n_binary        varbinary," + "n_union         ROW(tag tinyint, field0 integer, field1 varchar)," + "n_struct        ROW(field1 integer, field2 varchar)) " + "WITH (" + "   bucketed_by = ARRAY['%s']," + "   bucket_count = 2" + ")", newTableName, columnToBeBucketed))).hasMessageMatching("Query failed \\(#\\w+\\): Cannot create a table bucketed on an unsupported type");
        assertQueryFailure(() -> onTrino().executeQuery(format("CREATE TABLE %s WITH (%s) AS SELECT * FROM %s", newTableName, bucketingType.getTrinoTableProperties(columnToBeBucketed, 2).stream().collect(joining(",")), tableName))).hasMessageMatching("Query failed \\(#\\w+\\): Cannot create a table bucketed on an unsupported type");
    }
}
Also used : QueryResult(io.trino.tempto.query.QueryResult) TemporaryHiveTable(io.trino.tests.product.hive.util.TemporaryHiveTable) Test(org.testng.annotations.Test) Flaky(io.trino.testng.services.Flaky)
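
The SHOW CREATE TABLE assertion above mixes literal DDL with wildcards: the fixed prefix is wrapped in \Q...\E, the dynamic column name goes through Pattern.quote, and (?s:.*) skips the property lines that are not being checked. A small self-contained illustration of the same technique, using made-up strings rather than Trino output:

import java.util.regex.Pattern;

public class QuotedRegexExample {
    public static void main(String[] args) {
        String column = "n_union"; // dynamic piece that may contain regex metacharacters
        // \Q...\E makes the DDL prefix literal; (?s:.*) matches arbitrary text including newlines.
        Pattern pattern = Pattern.compile(
                "\\QCREATE TABLE hive.default.example (\\E(?s:.*)" +
                        "bucketed_by = ARRAY\\['" + Pattern.quote(column) + "'\\],(?s:.*)");
        String shownDdl = "CREATE TABLE hive.default.example (\n" +
                "   n_union ROW(tag tinyint)\n" +
                ")\n" +
                "WITH (\n" +
                "   bucketed_by = ARRAY['n_union'],\n" +
                "   bucket_count = 2\n" +
                ")";
        System.out.println(pattern.matcher(shownDdl).matches()); // prints: true
    }
}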

Example 3 with TemporaryHiveTable

use of io.trino.tests.product.hive.util.TemporaryHiveTable in project trino by trinodb.

the class TestHiveTransactionalTable method testReadInsertOnly.

private void testReadInsertOnly(boolean isPartitioned, BucketingType bucketingType, String hiveTableFormatDefinition) {
    if (getHiveVersionMajor() < 3) {
        throw new SkipException("Hive transactional tables are supported with Hive version 3 or above");
    }
    try (TemporaryHiveTable table = TemporaryHiveTable.temporaryHiveTable(tableName("insert_only", isPartitioned, bucketingType))) {
        String tableName = table.getName();
        onHive().executeQuery("CREATE TABLE " + tableName + " (col INT) " + (isPartitioned ? "PARTITIONED BY (part_col INT) " : "") + bucketingType.getHiveClustering("col", 4) + " " + hiveTableFormatDefinition + " " + hiveTableProperties(INSERT_ONLY, bucketingType));
        String hivePartitionString = isPartitioned ? " PARTITION (part_col=2) " : "";
        String predicate = isPartitioned ? " WHERE part_col = 2 " : "";
        onHive().executeQuery("INSERT OVERWRITE TABLE " + tableName + hivePartitionString + " SELECT 1");
        String selectFromOnePartitionsSql = "SELECT col FROM " + tableName + predicate + " ORDER BY COL";
        assertThat(onTrino().executeQuery(selectFromOnePartitionsSql)).containsOnly(row(1));
        onHive().executeQuery("INSERT INTO TABLE " + tableName + hivePartitionString + " SELECT 2");
        assertThat(onTrino().executeQuery(selectFromOnePartitionsSql)).containsExactlyInOrder(row(1), row(2));
        assertThat(onTrino().executeQuery("SELECT col FROM " + tableName + " WHERE col=2")).containsExactlyInOrder(row(2));
        // test minor compacted data read
        compactTableAndWait(MINOR, tableName, hivePartitionString, new Duration(6, MINUTES));
        assertThat(onTrino().executeQuery(selectFromOnePartitionsSql)).containsExactlyInOrder(row(1), row(2));
        assertThat(onTrino().executeQuery("SELECT col FROM " + tableName + " WHERE col=2")).containsExactlyInOrder(row(2));
        onHive().executeQuery("INSERT OVERWRITE TABLE " + tableName + hivePartitionString + " SELECT 3");
        assertThat(onTrino().executeQuery(selectFromOnePartitionsSql)).containsOnly(row(3));
        if (getHiveVersionMajor() >= 4) {
            // Major compaction on insert only table does not work prior to Hive 4:
            // https://issues.apache.org/jira/browse/HIVE-21280
            // test major compaction
            onHive().executeQuery("INSERT INTO TABLE " + tableName + hivePartitionString + " SELECT 4");
            compactTableAndWait(MAJOR, tableName, hivePartitionString, new Duration(6, MINUTES));
            assertThat(onTrino().executeQuery(selectFromOnePartitionsSql)).containsOnly(row(3), row(4));
        }
    }
}
Also used : TemporaryHiveTable(io.trino.tests.product.hive.util.TemporaryHiveTable) Duration(io.airlift.units.Duration) SkipException(org.testng.SkipException)
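
hiveTableProperties(INSERT_ONLY, bucketingType) is a private helper of TestHiveTransactionalTable and its body is not shown on this page. For the CREATE TABLE above to produce an insert-only transactional table it has to emit a TBLPROPERTIES clause along the following lines; the helper signature here is an assumption, only the property names are standard Hive:

// Sketch, not the actual helper: Hive marks full ACID tables with 'transactional'='true',
// while insert-only transactional tables additionally set 'transactional_properties'.
private static String hiveTablePropertiesSketch(boolean insertOnly) {
    if (insertOnly) {
        return "TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')";
    }
    return "TBLPROPERTIES ('transactional'='true')";
}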

Example 4 with TemporaryHiveTable

use of io.trino.tests.product.hive.util.TemporaryHiveTable in project trino by trinodb.

the class TestHiveTransactionalTable method testFailAcidBeforeHive3.

@Test(groups = HIVE_TRANSACTIONAL)
public void testFailAcidBeforeHive3() {
    if (getHiveVersionMajor() >= 3) {
        throw new SkipException("This tests behavior of ACID table before Hive 3 ");
    }
    try (TemporaryHiveTable table = TemporaryHiveTable.temporaryHiveTable("test_fail_acid_before_hive3_" + randomTableSuffix())) {
        String tableName = table.getName();
        onHive().executeQuery("" + "CREATE TABLE " + tableName + "(a bigint) " + "CLUSTERED BY(a) INTO 4 BUCKETS " + "STORED AS ORC " + "TBLPROPERTIES ('transactional'='true')");
        assertQueryFailure(() -> onTrino().executeQuery("SELECT * FROM " + tableName)).hasMessageContaining("Failed to open transaction. Transactional tables support requires Hive metastore version at least 3.0");
    }
}
Also used : TemporaryHiveTable(io.trino.tests.product.hive.util.TemporaryHiveTable) SkipException(org.testng.SkipException) Test(org.testng.annotations.Test)

Example 5 with TemporaryHiveTable

use of io.trino.tests.product.hive.util.TemporaryHiveTable in project trino by trinodb.

the class TestHiveTransactionalTable method doTestReadFullAcid.

private void doTestReadFullAcid(boolean isPartitioned, BucketingType bucketingType) {
    if (getHiveVersionMajor() < 3) {
        throw new SkipException("Hive transactional tables are supported with Hive version 3 or above");
    }
    try (TemporaryHiveTable table = TemporaryHiveTable.temporaryHiveTable(tableName("read_full_acid", isPartitioned, bucketingType))) {
        String tableName = table.getName();
        onHive().executeQuery("CREATE TABLE " + tableName + " (col INT, fcol INT) " + (isPartitioned ? "PARTITIONED BY (part_col INT) " : "") + bucketingType.getHiveClustering("fcol", 4) + " " + "STORED AS ORC " + hiveTableProperties(ACID, bucketingType));
        String hivePartitionString = isPartitioned ? " PARTITION (part_col=2) " : "";
        onHive().executeQuery("INSERT OVERWRITE TABLE " + tableName + hivePartitionString + " VALUES (21, 1)");
        String selectFromOnePartitionsSql = "SELECT col, fcol FROM " + tableName + " ORDER BY col";
        assertThat(onTrino().executeQuery(selectFromOnePartitionsSql)).containsOnly(row(21, 1));
        onHive().executeQuery("INSERT INTO TABLE " + tableName + hivePartitionString + " VALUES (22, 2)");
        assertThat(onTrino().executeQuery(selectFromOnePartitionsSql)).containsExactlyInOrder(row(21, 1), row(22, 2));
        // test filtering
        assertThat(onTrino().executeQuery("SELECT col, fcol FROM " + tableName + " WHERE fcol = 1 ORDER BY col")).containsOnly(row(21, 1));
        onHive().executeQuery("INSERT INTO TABLE " + tableName + hivePartitionString + " VALUES (24, 4)");
        onHive().executeQuery("DELETE FROM " + tableName + " where fcol=4");
        // test filtering
        assertThat(onTrino().executeQuery("SELECT col, fcol FROM " + tableName + " WHERE fcol = 1 ORDER BY col")).containsOnly(row(21, 1));
        // test minor compacted data read
        onHive().executeQuery("INSERT INTO TABLE " + tableName + hivePartitionString + " VALUES (20, 3)");
        assertThat(onTrino().executeQuery("SELECT col, fcol FROM " + tableName + " WHERE col=20")).containsExactlyInOrder(row(20, 3));
        compactTableAndWait(MINOR, tableName, hivePartitionString, new Duration(6, MINUTES));
        assertThat(onTrino().executeQuery(selectFromOnePartitionsSql)).containsExactlyInOrder(row(20, 3), row(21, 1), row(22, 2));
        // delete a row
        onHive().executeQuery("DELETE FROM " + tableName + " WHERE fcol=2");
        assertThat(onTrino().executeQuery(selectFromOnePartitionsSql)).containsExactlyInOrder(row(20, 3), row(21, 1));
        assertThat(onTrino().executeQuery("SELECT col, fcol FROM " + tableName + " WHERE col=20")).containsExactlyInOrder(row(20, 3));
        // update the existing row
        String predicate = "fcol = 1" + (isPartitioned ? " AND part_col = 2 " : "");
        onHive().executeQuery("UPDATE " + tableName + " SET col = 23 WHERE " + predicate);
        assertThat(onTrino().executeQuery(selectFromOnePartitionsSql)).containsExactlyInOrder(row(20, 3), row(23, 1));
        assertThat(onTrino().executeQuery("SELECT col, fcol FROM " + tableName + " WHERE col=20")).containsExactlyInOrder(row(20, 3));
        // test major compaction
        compactTableAndWait(MAJOR, tableName, hivePartitionString, new Duration(6, MINUTES));
        assertThat(onTrino().executeQuery(selectFromOnePartitionsSql)).containsExactlyInOrder(row(20, 3), row(23, 1));
    }
}
Also used : TemporaryHiveTable(io.trino.tests.product.hive.util.TemporaryHiveTable) Duration(io.airlift.units.Duration) SkipException(org.testng.SkipException)
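
compactTableAndWait(MINOR, ...) and compactTableAndWait(MAJOR, ...) are also helpers of the test class whose bodies are not shown here. A rough sketch of the idea, assuming the helper issues an ALTER TABLE ... COMPACT statement and then polls SHOW COMPACTIONS until the request is no longer queued or running (exact state strings and result columns vary between Hive versions, and the same onHive() executor is assumed):

// Sketch only, not the Trino helper: request a compaction and poll Hive until it is no longer pending.
private static void compactAndWaitSketch(String compactionType, String tableName, String partitionSpec, java.time.Duration timeout)
        throws InterruptedException {
    onHive().executeQuery("ALTER TABLE " + tableName + partitionSpec + " COMPACT '" + compactionType + "'");
    long deadline = System.nanoTime() + timeout.toNanos();
    while (System.nanoTime() < deadline) {
        // SHOW COMPACTIONS returns one row per request; match on the table name and a non-terminal state.
        boolean pending = onHive().executeQuery("SHOW COMPACTIONS").rows().stream()
                .filter(row -> row.toString().contains(tableName))
                .anyMatch(row -> row.toString().contains("initiated") || row.toString().contains("working"));
        if (!pending) {
            return;
        }
        Thread.sleep(5_000);
    }
    throw new IllegalStateException("Compaction did not finish within " + timeout);
}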

Aggregations

TemporaryHiveTable (io.trino.tests.product.hive.util.TemporaryHiveTable): 7 usages
SkipException (org.testng.SkipException): 4 usages
Test (org.testng.annotations.Test): 3 usages
Duration (io.airlift.units.Duration): 2 usages
QueryExecutionException (io.trino.tempto.query.QueryExecutionException): 1 usage
QueryResult (io.trino.tempto.query.QueryResult): 1 usage
Flaky (io.trino.testng.services.Flaky): 1 usage
SQLException (java.sql.SQLException): 1 usage