Example 1 with Flaky

Use of io.trino.testng.services.Flaky in project trino by trinodb.

The class TestHivePartitionsTable, method testShowPartitionsFromHiveTableWithTooManyPartitions.

@Test(groups = HIVE_PARTITIONING)
@Flaky(issue = ERROR_COMMITTING_WRITE_TO_HIVE_ISSUE, match = ERROR_COMMITTING_WRITE_TO_HIVE_MATCH)
public void testShowPartitionsFromHiveTableWithTooManyPartitions() {
    String tableName = tablesState.get(PARTITIONED_TABLE_WITH_VARIABLE_PARTITIONS).getNameInDatabase();
    String partitionsTable = "\"" + tableName + "$partitions\"";
    createPartitions(tableName, TOO_MANY_PARTITIONS);
    // Verify we created enough partitions for the test to be meaningful
    assertThatThrownBy(() -> onTrino().executeQuery("SELECT * FROM " + tableName)).hasMessageMatching(".*: Query over table '\\S+' can potentially read more than \\d+ partitions");
    QueryResult partitionListResult;
    partitionListResult = onTrino().executeQuery(format("SELECT * FROM %s WHERE part_col < 7", partitionsTable));
    assertThat(partitionListResult).containsExactlyInOrder(row(0), row(1), row(2), row(3), row(4), row(5), row(6));
    assertColumnNames(partitionListResult, "part_col");
    partitionListResult = onTrino().executeQuery(format("SELECT a.part_col FROM (SELECT * FROM %s WHERE part_col = 1) a, (SELECT * FROM %s WHERE part_col = 1) b WHERE a.col = b.col", tableName, tableName));
    assertThat(partitionListResult).containsExactlyInOrder(row(1));
    partitionListResult = onTrino().executeQuery(format("SELECT * FROM %s WHERE part_col < -10", partitionsTable));
    assertThat(partitionListResult).hasNoRows();
    partitionListResult = onTrino().executeQuery(format("SELECT * FROM %s ORDER BY part_col LIMIT 7", partitionsTable));
    assertThat(partitionListResult).containsExactlyInOrder(row(0), row(1), row(2), row(3), row(4), row(5), row(6));
}
Also used : QueryResult(io.trino.tempto.query.QueryResult) Test(org.testng.annotations.Test) ProductTest(io.trino.tempto.ProductTest) Flaky(io.trino.testng.services.Flaky)
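
In every example on this page, the annotation carries two attributes: an issue URL and a regex matched against the failure. For context, here is a minimal sketch of what such an annotation's declaration looks like, inferred from the call sites on this page rather than copied from Trino's source:

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Sketch only: the real declaration lives in io.trino.testng.services.Flaky.
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.METHOD)
public @interface Flaky {
    // URL of the GitHub issue tracking the known flakiness
    String issue();

    // Regex tested against the failure message/stacktrace; only matching
    // failures are treated as occurrences of the known issue
    String match();
}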

Example 2 with Flaky

Use of io.trino.testng.services.Flaky in project trino by trinodb.

The class TestMemoryConnectorTest, method testCustomMetricsScanFilter.

@Test
// TODO (https://github.com/trinodb/trino/issues/8691) fix the test
@Flaky(issue = "https://github.com/trinodb/trino/issues/8691", match = "ComparisonFailure: expected:<LongCount\\{total=\\[\\d+]}> but was:<(LongCount\\{total=\\[\\d+]}|null)>")
public void testCustomMetricsScanFilter() {
    Metrics metrics = collectCustomMetrics("SELECT partkey FROM part WHERE partkey % 1000 > 0");
    assertThat(metrics.getMetrics().get("rows")).isEqualTo(new LongCount(PART_COUNT));
    assertThat(metrics.getMetrics().get("started")).isEqualTo(metrics.getMetrics().get("finished"));
    assertThat(((Count<?>) metrics.getMetrics().get("finished")).getTotal()).isGreaterThan(0);
}
Also used : Metrics(io.trino.spi.metrics.Metrics) LongCount(io.trino.plugin.base.metrics.LongCount) Count(io.trino.spi.metrics.Count) Test(org.testng.annotations.Test) BaseConnectorTest(io.trino.testing.BaseConnectorTest) Flaky(io.trino.testng.services.Flaky)
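
An annotation like this only has an effect if the test framework consults it. With TestNG, that is typically done through a retry analyzer that re-runs a failed test only when the failure matches the declared pattern. The following is an illustrative sketch built on TestNG's public IRetryAnalyzer API, not Trino's actual retry analyzer:

import java.util.regex.Pattern;
import org.testng.IRetryAnalyzer;
import org.testng.ITestResult;

// Illustrative retry analyzer: retries a failure at most MAX_RETRIES times,
// and only when @Flaky is present and its 'match' regex fits the failure.
public class FlakyRetrySketch implements IRetryAnalyzer {
    private static final int MAX_RETRIES = 2; // hypothetical limit
    private int attempts;

    @Override
    public boolean retry(ITestResult result) {
        Flaky flaky = result.getMethod()
                .getConstructorOrMethod()
                .getMethod()
                .getAnnotation(Flaky.class);
        if (flaky == null || attempts >= MAX_RETRIES) {
            return false;
        }
        Throwable failure = result.getThrowable();
        String message = failure == null ? "" : String.valueOf(failure);
        // Retry only when the failure matches the declared "known flaky" pattern
        if (Pattern.compile(flaky.match()).matcher(message).find()) {
            attempts++;
            return true;
        }
        return false;
    }
}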

Example 3 with Flaky

Use of io.trino.testng.services.Flaky in project trino by trinodb.

The class TestSyncPartitionMetadata, method testConflictingMixedCasePartitionNames.

@Test(groups = { HIVE_PARTITIONING, SMOKE })
@Flaky(issue = ERROR_COMMITTING_WRITE_TO_HIVE_ISSUE, match = ERROR_COMMITTING_WRITE_TO_HIVE_MATCH)
public void testConflictingMixedCasePartitionNames() {
    String tableName = "test_sync_partition_mixed_case";
    prepare(hdfsClient, hdfsDataSourceWriter, tableName);
    HiveDataSource dataSource = createResourceDataSource(tableName, "io/trino/tests/product/hive/data/single_int_column/data.orc");
    // this conflicts with a partition that already exists in the metastore
    hdfsDataSourceWriter.ensureDataOnHdfs(tableLocation(tableName) + "/COL_X=a/cOl_y=1", dataSource);
    assertThatThrownBy(() -> onTrino().executeQuery("CALL system.sync_partition_metadata('default', '" + tableName + "', 'ADD', false)")).hasMessageContaining(format("One or more partitions already exist for table 'default.%s'", tableName));
    assertPartitions(tableName, row("a", "1"), row("b", "2"));
}
Also used : HiveDataSource(io.trino.tempto.fulfillment.table.hive.HiveDataSource) ProductTest(io.trino.tempto.ProductTest) Test(org.testng.annotations.Test) Flaky(io.trino.testng.services.Flaky)
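
Several of these examples reference the shared constants ERROR_COMMITTING_WRITE_TO_HIVE_ISSUE and ERROR_COMMITTING_WRITE_TO_HIVE_MATCH rather than inline literals, so one known Hive commit failure can be tracked from a single place. Their real values live in Trino's product-test sources; the values below are placeholders for illustration only:

// Hypothetical definition; the actual issue URL and regex differ.
public final class HiveFlakiness {
    public static final String ERROR_COMMITTING_WRITE_TO_HIVE_ISSUE =
            "https://github.com/trinodb/trino/issues/XXXX"; // placeholder
    public static final String ERROR_COMMITTING_WRITE_TO_HIVE_MATCH =
            "Error committing write to Hive"; // placeholder regex

    private HiveFlakiness() {}
}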

Example 4 with Flaky

Use of io.trino.testng.services.Flaky in project trino by trinodb.

The class TestHiveBucketedTables, method testBucketingWithUnsupportedDataTypes.

@Test(dataProvider = "testBucketingWithUnsupportedDataTypesDataProvider")
@Flaky(issue = ERROR_COMMITTING_WRITE_TO_HIVE_ISSUE, match = ERROR_COMMITTING_WRITE_TO_HIVE_MATCH)
public void testBucketingWithUnsupportedDataTypes(BucketingType bucketingType, String columnToBeBucketed) {
    try (TemporaryHiveTable table = temporaryHiveTable("table_with_unsupported_bucketing_types_" + randomTableSuffix())) {
        String tableName = table.getName();
        onHive().executeQuery(format("CREATE TABLE %s (" + "n_integer       INT," + "n_decimal       DECIMAL(9, 2)," + "n_timestamp     TIMESTAMP," + "n_char          CHAR(10)," + "n_binary        BINARY," + "n_union         UNIONTYPE<INT,STRING>," + "n_struct        STRUCT<field1:INT,field2:STRING>) " + "CLUSTERED BY (%s) INTO 2 BUCKETS " + "STORED AS ORC " + "%s", tableName, columnToBeBucketed, hiveTableProperties(bucketingType)));
        QueryResult showCreateTableResult = onTrino().executeQuery("SHOW CREATE TABLE " + tableName);
        assertThat(showCreateTableResult).hasRowsCount(1);
        Assertions.assertThat((String) getOnlyElement(getOnlyElement(showCreateTableResult.rows())))
                .matches(Pattern.compile(format("\\QCREATE TABLE hive.default.%s (\n" +
                        "   n_integer integer,\n" +
                        "   n_decimal decimal(9, 2),\n" +
                        "   n_timestamp timestamp(3),\n" +
                        "   n_char char(10),\n" +
                        "   n_binary varbinary,\n" +
                        "   n_union ROW(tag tinyint, field0 integer, field1 varchar),\n" +
                        "   n_struct ROW(field1 integer, field2 varchar)\n" +
                        ")\n" +
                        "WITH (\\E(?s:.*)" +
                        "bucket_count = 2,\n(?s:.*)" +
                        "bucketed_by = ARRAY\\['%s'\\],\n(?s:.*)" +
                        "bucketing_version = %s,(?s:.*)",
                        tableName, Pattern.quote(columnToBeBucketed), getExpectedBucketVersion(bucketingType))));
        populateRowToHiveTable(
                tableName,
                ImmutableList.<String>builder()
                        .add("1")
                        .add("CAST(1 AS DECIMAL(9, 2))")
                        .add("CAST('2015-01-01T00:01:00.15' AS TIMESTAMP)")
                        .add("'char value'")
                        .add("unhex('00010203')")
                        .add("create_union(0, 1, 'union value')")
                        .add("named_struct('field1', 1, 'field2', 'Field2')")
                        .build(),
                Optional.empty());
        assertThat(onTrino().executeQuery(format("SELECT * FROM %s", tableName))).hasRowsCount(1);
        assertQueryFailure(() -> onTrino().executeQuery("SELECT \"$bucket\" FROM " + tableName)).hasMessageMatching("Query failed \\(#\\w+\\):\\Q line 1:8: Column '$bucket' cannot be resolved");
        assertQueryFailure(() -> onTrino().executeQuery(format("INSERT INTO %s(n_integer) VALUES (1)", tableName))).hasMessageMatching("Query failed \\(#\\w+\\): Cannot write to a table bucketed on an unsupported type");
        String newTableName = "new_" + tableName;
        assertQueryFailure(() -> onTrino().executeQuery(format("CREATE TABLE %s (LIKE %s INCLUDING PROPERTIES)", newTableName, tableName))).hasMessageMatching("Query failed \\(#\\w+\\): Cannot create a table bucketed on an unsupported type");
        assertQueryFailure(() -> onTrino().executeQuery(format("CREATE TABLE %s (" +
                "n_integer       integer," +
                "n_decimal       decimal(9, 2)," +
                "n_timestamp     timestamp(3)," +
                "n_char          char(10)," +
                "n_binary        varbinary," +
                "n_union         ROW(tag tinyint, field0 integer, field1 varchar)," +
                "n_struct        ROW(field1 integer, field2 varchar)) " +
                "WITH (" +
                "   bucketed_by = ARRAY['%s']," +
                "   bucket_count = 2" +
                ")", newTableName, columnToBeBucketed)))
                .hasMessageMatching("Query failed \\(#\\w+\\): Cannot create a table bucketed on an unsupported type");
        assertQueryFailure(() -> onTrino().executeQuery(format(
                "CREATE TABLE %s WITH (%s) AS SELECT * FROM %s",
                newTableName,
                bucketingType.getTrinoTableProperties(columnToBeBucketed, 2).stream().collect(joining(",")),
                tableName)))
                .hasMessageMatching("Query failed \\(#\\w+\\): Cannot create a table bucketed on an unsupported type");
    }
}
Also used : QueryResult(io.trino.tempto.query.QueryResult) TemporaryHiveTable(io.trino.tests.product.hive.util.TemporaryHiveTable) Test(org.testng.annotations.Test) Flaky(io.trino.testng.services.Flaky)
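
The SHOW CREATE TABLE assertion above combines two regex idioms: \Q...\E quotes a span literally (here also produced programmatically via Pattern.quote), and (?s:.*) enables DOTALL inside a group so .* can cross newlines. A minimal standalone demonstration:

import java.util.regex.Pattern;

public class RegexIdiomDemo {
    public static void main(String[] args) {
        // \Q...\E treats the span literally, so "(9, 2)" is not a regex group.
        System.out.println("decimal(9, 2)".matches("\\Qdecimal(9, 2)\\E")); // true

        // (?s:...) turns on DOTALL within the group, letting .* span newlines.
        System.out.println(Pattern.compile("WITH \\((?s:.*)bucket_count = 2")
                .matcher("WITH (\n   bucket_count = 2,\n)")
                .find()); // true
    }
}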

Example 5 with Flaky

Use of io.trino.testng.services.Flaky in project trino by trinodb.

The class TestHiveTransactionalTable, method testFilesForAbortedTransactionsIgnored.

@Test(groups = HIVE_TRANSACTIONAL)
@Flaky(issue = "https://github.com/trinodb/trino/issues/5463", match = "Expected row count to be <4>, but was <6>")
public void testFilesForAbortedTransactionsIgnored() throws Exception {
    if (getHiveVersionMajor() < 3) {
        throw new SkipException("Hive transactional tables are supported with Hive version 3 or above");
    }
    String tableName = "test_aborted_transaction_table";
    onHive().executeQuery("" + "CREATE TABLE " + tableName + " (col INT) " + "STORED AS ORC " + "TBLPROPERTIES ('transactional'='true')");
    ThriftHiveMetastoreClient client = testHiveMetastoreClientFactory.createMetastoreClient();
    try {
        String selectFromOnePartitionsSql = "SELECT col FROM " + tableName + " ORDER BY COL";
        // Create `delta-A` file
        onHive().executeQuery("INSERT INTO TABLE " + tableName + " VALUES (1),(2)");
        QueryResult onePartitionQueryResult = onTrino().executeQuery(selectFromOnePartitionsSql);
        assertThat(onePartitionQueryResult).containsExactlyInOrder(row(1), row(2));
        String tableLocation = getTablePath(tableName);
        // Insert data to create a valid delta, which creates `delta-B`
        onHive().executeQuery("INSERT INTO TABLE " + tableName + " SELECT 3");
        // Simulate aborted transaction in Hive which has left behind a write directory and file (`delta-C` i.e `delta_0000003_0000003_0000`)
        long transaction = client.openTransaction("test");
        client.allocateTableWriteIds("default", tableName, Collections.singletonList(transaction)).get(0).getWriteId();
        client.abortTransaction(transaction);
        String deltaA = tableLocation + "/delta_0000001_0000001_0000";
        String deltaB = tableLocation + "/delta_0000002_0000002_0000";
        String deltaC = tableLocation + "/delta_0000003_0000003_0000";
        // Delete original `delta-B`, `delta-C`
        hdfsDeleteAll(deltaB);
        hdfsDeleteAll(deltaC);
        // Copy content of `delta-A` to `delta-B`
        hdfsCopyAll(deltaA, deltaB);
        // Verify that data from delta-A and delta-B is visible
        onePartitionQueryResult = onTrino().executeQuery(selectFromOnePartitionsSql);
        assertThat(onePartitionQueryResult).containsOnly(row(1), row(1), row(2), row(2));
        // Copy content of `delta-A` to `delta-C` (which is an aborted transaction)
        hdfsCopyAll(deltaA, deltaC);
        // Verify that delta, corresponding to aborted transaction, is not getting read
        onePartitionQueryResult = onTrino().executeQuery(selectFromOnePartitionsSql);
        assertThat(onePartitionQueryResult).containsOnly(row(1), row(1), row(2), row(2));
    } finally {
        client.close();
        onHive().executeQuery("DROP TABLE " + tableName);
    }
}
Also used : QueryResult(io.trino.tempto.query.QueryResult) ThriftHiveMetastoreClient(io.trino.plugin.hive.metastore.thrift.ThriftHiveMetastoreClient) SkipException(org.testng.SkipException) Test(org.testng.annotations.Test) Flaky(io.trino.testng.services.Flaky)
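
The hard-coded delta paths in this test follow Hive's ACID naming scheme, delta_<minWriteId>_<maxWriteId>_<statementId>, with write IDs zero-padded to seven digits and the statement ID to four. A small illustrative helper (not part of the test) that reproduces the names used above:

// Illustrative helper reproducing Hive's ACID delta directory naming.
public final class DeltaNames {
    static String deltaDir(long minWriteId, long maxWriteId, int statementId) {
        return String.format("delta_%07d_%07d_%04d", minWriteId, maxWriteId, statementId);
    }

    public static void main(String[] args) {
        System.out.println(deltaDir(1, 1, 0)); // delta_0000001_0000001_0000
        System.out.println(deltaDir(3, 3, 0)); // delta_0000003_0000003_0000
    }
}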

Aggregations

Flaky (io.trino.testng.services.Flaky): 8
Test (org.testng.annotations.Test): 8
ProductTest (io.trino.tempto.ProductTest): 4
QueryResult (io.trino.tempto.query.QueryResult): 4
LongCount (io.trino.plugin.base.metrics.LongCount): 2
Count (io.trino.spi.metrics.Count): 2
Metrics (io.trino.spi.metrics.Metrics): 2
HiveDataSource (io.trino.tempto.fulfillment.table.hive.HiveDataSource): 2
BaseConnectorTest (io.trino.testing.BaseConnectorTest): 2
ThriftHiveMetastoreClient (io.trino.plugin.hive.metastore.thrift.ThriftHiveMetastoreClient): 1
TemporaryHiveTable (io.trino.tests.product.hive.util.TemporaryHiveTable): 1
SkipException (org.testng.SkipException): 1