Example 11 with MaterializedResult

Use of io.trino.testing.MaterializedResult in project trino by trinodb.

From class AbstractTestHive, method doTestTransactionDeleteInsert.

protected void doTestTransactionDeleteInsert(HiveStorageFormat storageFormat, boolean allowInsertExisting, List<TransactionDeleteInsertTestCase> testCases) throws Exception {
    // There are 4 types of operations on a partition: add, drop, alter (drop then add), insert existing.
    // There are 12 partitions in this test, 3 for each type.
    // 3 is chosen to verify that cleanups, commit aborts, rollbacks are always as complete as possible regardless of failure.
    MaterializedResult beforeData = MaterializedResult.resultBuilder(SESSION, BIGINT, createUnboundedVarcharType(), createUnboundedVarcharType())
            .row(110L, "a", "alter1")
            .row(120L, "a", "insert1")
            .row(140L, "a", "drop1")
            .row(210L, "b", "drop2")
            .row(310L, "c", "alter2")
            .row(320L, "c", "alter3")
            .row(510L, "e", "drop3")
            .row(610L, "f", "insert2")
            .row(620L, "f", "insert3")
            .build();
    Domain domainToDrop = Domain.create(
            ValueSet.of(
                    createUnboundedVarcharType(),
                    utf8Slice("alter1"), utf8Slice("alter2"), utf8Slice("alter3"),
                    utf8Slice("drop1"), utf8Slice("drop2"), utf8Slice("drop3")),
            false);
    List<MaterializedRow> extraRowsForInsertExisting = ImmutableList.of();
    if (allowInsertExisting) {
        extraRowsForInsertExisting = MaterializedResult.resultBuilder(SESSION, BIGINT, createUnboundedVarcharType(), createUnboundedVarcharType())
                .row(121L, "a", "insert1")
                .row(611L, "f", "insert2")
                .row(621L, "f", "insert3")
                .build()
                .getMaterializedRows();
    }
    MaterializedResult insertData = MaterializedResult.resultBuilder(SESSION, BIGINT, createUnboundedVarcharType(), createUnboundedVarcharType())
            .row(111L, "a", "alter1")
            .row(131L, "a", "add1")
            .row(221L, "b", "add2")
            .row(311L, "c", "alter2")
            .row(321L, "c", "alter3")
            .row(411L, "d", "add3")
            .rows(extraRowsForInsertExisting)
            .build();
    MaterializedResult afterData = MaterializedResult.resultBuilder(SESSION, BIGINT, createUnboundedVarcharType(), createUnboundedVarcharType())
            .row(120L, "a", "insert1")
            .row(610L, "f", "insert2")
            .row(620L, "f", "insert3")
            .rows(insertData.getMaterializedRows())
            .build();
    for (TransactionDeleteInsertTestCase testCase : testCases) {
        SchemaTableName temporaryDeleteInsert = temporaryTable("delete_insert");
        try {
            createEmptyTable(
                    temporaryDeleteInsert,
                    storageFormat,
                    ImmutableList.of(new Column("col1", HIVE_LONG, Optional.empty())),
                    ImmutableList.of(new Column("pk1", HIVE_STRING, Optional.empty()), new Column("pk2", HIVE_STRING, Optional.empty())));
            insertData(temporaryDeleteInsert, beforeData);
            try {
                doTestTransactionDeleteInsert(
                        storageFormat,
                        temporaryDeleteInsert,
                        domainToDrop,
                        insertData,
                        testCase.isExpectCommitedData() ? afterData : beforeData,
                        testCase.getTag(),
                        testCase.isExpectQuerySucceed(),
                        testCase.getConflictTrigger());
            } catch (AssertionError e) {
                throw new AssertionError(format("Test case: %s", testCase), e);
            }
        } finally {
            dropTable(temporaryDeleteInsert);
        }
    }
}
Also used: HiveColumnHandle.createBaseColumn (io.trino.plugin.hive.HiveColumnHandle.createBaseColumn), Column (io.trino.plugin.hive.metastore.Column), ViewColumn (io.trino.spi.connector.ConnectorViewDefinition.ViewColumn), SortingColumn (io.trino.plugin.hive.metastore.SortingColumn), MaterializedResult (io.trino.testing.MaterializedResult), Domain (io.trino.spi.predicate.Domain), TupleDomain (io.trino.spi.predicate.TupleDomain), CatalogSchemaTableName (io.trino.spi.connector.CatalogSchemaTableName), SchemaTableName (io.trino.spi.connector.SchemaTableName), MaterializedRow (io.trino.testing.MaterializedRow)
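
The example above leans on two MaterializedResult idioms: resultBuilder(session, types...) to assemble an expected data set, and getMaterializedRows() to splice rows from one result into another via rows(...). Below is a minimal, self-contained sketch of that round trip; it assumes TestingConnectorSession.SESSION as the session, and the class name is hypothetical, not part of AbstractTestHive.

import static io.trino.spi.type.BigintType.BIGINT;
import static io.trino.spi.type.VarcharType.createUnboundedVarcharType;

import io.trino.testing.MaterializedResult;
import io.trino.testing.MaterializedRow;
import io.trino.testing.TestingConnectorSession;

import java.util.List;

public class ResultBuilderSketch {
    public static void main(String[] args) {
        // Assemble expected rows with the same (BIGINT, VARCHAR, VARCHAR) layout
        // as the delete/insert test above.
        MaterializedResult expected = MaterializedResult.resultBuilder(
                        TestingConnectorSession.SESSION,
                        BIGINT, createUnboundedVarcharType(), createUnboundedVarcharType())
                .row(110L, "a", "alter1")
                .row(120L, "a", "insert1")
                .build();

        // Rows extracted from one result can seed another builder via rows(...),
        // which is how extraRowsForInsertExisting is folded into insertData above.
        List<MaterializedRow> rows = expected.getMaterializedRows();
        MaterializedResult combined = MaterializedResult.resultBuilder(TestingConnectorSession.SESSION, expected.getTypes())
                .rows(rows)
                .row(130L, "a", "add1")
                .build();

        System.out.println(combined.getRowCount()); // 3
    }
}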

Example 12 with MaterializedResult

Use of io.trino.testing.MaterializedResult in project trino by trinodb.

From class AbstractTestHive, method assertGetRecords.

protected void assertGetRecords(HiveStorageFormat hiveStorageFormat, ConnectorTableMetadata tableMetadata, HiveSplit hiveSplit, ConnectorPageSource pageSource, List<? extends ColumnHandle> columnHandles) throws IOException {
    try {
        MaterializedResult result = materializeSourceDataStream(newSession(), pageSource, getTypes(columnHandles));
        assertPageSourceType(pageSource, hiveStorageFormat);
        ImmutableMap<String, Integer> columnIndex = indexColumns(tableMetadata);
        long rowNumber = 0;
        long completedBytes = 0;
        for (MaterializedRow row : result) {
            try {
                assertValueTypes(row, tableMetadata.getColumns());
            } catch (RuntimeException e) {
                throw new RuntimeException("row " + rowNumber, e);
            }
            rowNumber++;
            Integer index;
            Object value;
            // STRING
            index = columnIndex.get("t_string");
            value = row.getField(index);
            if (rowNumber % 19 == 0) {
                assertNull(value);
            } else if (rowNumber % 19 == 1) {
                assertEquals(value, "");
            } else {
                assertEquals(value, "test");
            }
            // NUMBERS
            assertEquals(row.getField(columnIndex.get("t_tinyint")), (byte) (1 + rowNumber));
            assertEquals(row.getField(columnIndex.get("t_smallint")), (short) (2 + rowNumber));
            assertEquals(row.getField(columnIndex.get("t_int")), (int) (3 + rowNumber));
            index = columnIndex.get("t_bigint");
            if ((rowNumber % 13) == 0) {
                assertNull(row.getField(index));
            } else {
                assertEquals(row.getField(index), 4 + rowNumber);
            }
            assertEquals((Float) row.getField(columnIndex.get("t_float")), 5.1f + rowNumber, 0.001);
            assertEquals(row.getField(columnIndex.get("t_double")), 6.2 + rowNumber);
            // BOOLEAN
            index = columnIndex.get("t_boolean");
            if ((rowNumber % 3) == 2) {
                assertNull(row.getField(index));
            } else {
                assertEquals(row.getField(index), (rowNumber % 3) != 0);
            }
            // TIMESTAMP
            index = columnIndex.get("t_timestamp");
            if (index != null) {
                if ((rowNumber % 17) == 0) {
                    assertNull(row.getField(index));
                } else {
                    SqlTimestamp expected = sqlTimestampOf(3, 2011, 5, 6, 7, 8, 9, 123);
                    assertEquals(row.getField(index), expected);
                }
            }
            // BINARY
            index = columnIndex.get("t_binary");
            if (index != null) {
                if ((rowNumber % 23) == 0) {
                    assertNull(row.getField(index));
                } else {
                    assertEquals(row.getField(index), new SqlVarbinary("test binary".getBytes(UTF_8)));
                }
            }
            // DATE
            index = columnIndex.get("t_date");
            if (index != null) {
                if ((rowNumber % 37) == 0) {
                    assertNull(row.getField(index));
                } else {
                    SqlDate expected = new SqlDate(toIntExact(MILLISECONDS.toDays(new DateTime(2013, 8, 9, 0, 0, 0, UTC).getMillis())));
                    assertEquals(row.getField(index), expected);
                }
            }
            // VARCHAR(50)
            index = columnIndex.get("t_varchar");
            if (index != null) {
                value = row.getField(index);
                if (rowNumber % 39 == 0) {
                    assertNull(value);
                } else if (rowNumber % 39 == 1) {
                    // RCBINARY reads empty VARCHAR as null
                    if (hiveStorageFormat == RCBINARY) {
                        assertNull(value);
                    } else {
                        assertEquals(value, "");
                    }
                } else {
                    assertEquals(value, "test varchar");
                }
            }
            // CHAR(25)
            index = columnIndex.get("t_char");
            if (index != null) {
                value = row.getField(index);
                if ((rowNumber % 41) == 0) {
                    assertNull(value);
                } else {
                    assertEquals(value, (rowNumber % 41) == 1 ? "                         " : "test char                ");
                }
            }
            // MAP<STRING, STRING>
            index = columnIndex.get("t_map");
            if (index != null) {
                if ((rowNumber % 27) == 0) {
                    assertNull(row.getField(index));
                } else {
                    assertEquals(row.getField(index), ImmutableMap.of("test key", "test value"));
                }
            }
            // ARRAY<STRING>
            index = columnIndex.get("t_array_string");
            if (index != null) {
                if ((rowNumber % 29) == 0) {
                    assertNull(row.getField(index));
                } else {
                    assertEquals(row.getField(index), ImmutableList.of("abc", "xyz", "data"));
                }
            }
            // ARRAY<TIMESTAMP>
            index = columnIndex.get("t_array_timestamp");
            if (index != null) {
                if ((rowNumber % 43) == 0) {
                    assertNull(row.getField(index));
                } else {
                    SqlTimestamp expected = sqlTimestampOf(3, LocalDateTime.of(2011, 5, 6, 7, 8, 9, 123_000_000));
                    assertEquals(row.getField(index), ImmutableList.of(expected));
                }
            }
            // ARRAY<STRUCT<s_string: STRING, s_double:DOUBLE>>
            index = columnIndex.get("t_array_struct");
            if (index != null) {
                if ((rowNumber % 31) == 0) {
                    assertNull(row.getField(index));
                } else {
                    List<Object> expected1 = ImmutableList.of("test abc", 0.1);
                    List<Object> expected2 = ImmutableList.of("test xyz", 0.2);
                    assertEquals(row.getField(index), ImmutableList.of(expected1, expected2));
                }
            }
            // STRUCT<s_string: STRING, s_double:DOUBLE>
            index = columnIndex.get("t_struct");
            if (index != null) {
                if ((rowNumber % 31) == 0) {
                    assertNull(row.getField(index));
                } else {
                    assertTrue(row.getField(index) instanceof List);
                    List<?> values = (List<?>) row.getField(index);
                    assertEquals(values.size(), 2);
                    assertEquals(values.get(0), "test abc");
                    assertEquals(values.get(1), 0.1);
                }
            }
            // MAP<INT, ARRAY<STRUCT<s_string: STRING, s_double:DOUBLE>>>
            index = columnIndex.get("t_complex");
            if (index != null) {
                if ((rowNumber % 33) == 0) {
                    assertNull(row.getField(index));
                } else {
                    List<Object> expected1 = ImmutableList.of("test abc", 0.1);
                    List<Object> expected2 = ImmutableList.of("test xyz", 0.2);
                    assertEquals(row.getField(index), ImmutableMap.of(1, ImmutableList.of(expected1, expected2)));
                }
            }
            // NEW COLUMN
            assertNull(row.getField(columnIndex.get("new_column")));
            long newCompletedBytes = pageSource.getCompletedBytes();
            assertTrue(newCompletedBytes >= completedBytes);
            // some formats (e.g., Parquet) over-read the data by a bit
            assertLessThanOrEqual(newCompletedBytes, hiveSplit.getLength() + (100 * 1024));
            completedBytes = newCompletedBytes;
        }
        assertLessThanOrEqual(completedBytes, hiveSplit.getLength() + (100 * 1024));
        assertEquals(rowNumber, 100);
    } finally {
        pageSource.close();
    }
}
Also used: SqlVarbinary (io.trino.spi.type.SqlVarbinary), SqlTimestamp (io.trino.spi.type.SqlTimestamp), LocalDateTime (java.time.LocalDateTime), DateTime (org.joda.time.DateTime), SqlDate (io.trino.spi.type.SqlDate), Lists.newArrayList (com.google.common.collect.Lists.newArrayList), ImmutableList (com.google.common.collect.ImmutableList), Collectors.toList (java.util.stream.Collectors.toList), ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList), List (java.util.List), MaterializedResult (io.trino.testing.MaterializedResult), MaterializedRow (io.trino.testing.MaterializedRow)
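
assertGetRecords drives its checks through MaterializedResult's Iterable<MaterializedRow> view plus MaterializedRow.getField(int). A stripped-down sketch of that iteration pattern follows, keeping the test's every-19th-row null cadence; checkStringColumn and its columnIndex parameter are hypothetical stand-ins for the test's indexColumns map.

import io.trino.testing.MaterializedResult;
import io.trino.testing.MaterializedRow;

import java.util.Map;

import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertNull;

public final class RowIterationSketch {
    private RowIterationSketch() {}

    // Hypothetical helper: walk a result and verify one VARCHAR column,
    // mirroring the "every n-th row is null" cadence used in assertGetRecords.
    static void checkStringColumn(MaterializedResult result, Map<String, Integer> columnIndex) {
        long rowNumber = 0;
        for (MaterializedRow row : result) { // MaterializedResult is Iterable<MaterializedRow>
            rowNumber++;
            Object value = row.getField(columnIndex.get("t_string"));
            if (rowNumber % 19 == 0) {
                assertNull(value);
            }
            else {
                assertEquals(value, rowNumber % 19 == 1 ? "" : "test");
            }
        }
    }
}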

Example 13 with MaterializedResult

Use of io.trino.testing.MaterializedResult in project trino by trinodb.

From class AbstractTestHive, method doInsert.

private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName) throws Exception {
    // creating the table
    doCreateEmptyTable(tableName, storageFormat, CREATE_TABLE_COLUMNS);
    MaterializedResult.Builder resultBuilder = MaterializedResult.resultBuilder(SESSION, CREATE_TABLE_DATA.getTypes());
    for (int i = 0; i < 3; i++) {
        insertData(tableName, CREATE_TABLE_DATA);
        try (Transaction transaction = newTransaction()) {
            ConnectorSession session = newSession();
            ConnectorMetadata metadata = transaction.getMetadata();
            metadata.beginQuery(session);
            // load the new table
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
            // verify the metadata
            ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, getTableHandle(metadata, tableName));
            assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), CREATE_TABLE_COLUMNS);
            // verify the data
            resultBuilder.rows(CREATE_TABLE_DATA.getMaterializedRows());
            MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
            assertEqualsIgnoreOrder(result.getMaterializedRows(), resultBuilder.build().getMaterializedRows());
            // statistics
            HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(transaction, tableName);
            assertEquals(tableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * (i + 1));
            assertEquals(tableStatistics.getFileCount().getAsLong(), i + 1L);
            assertGreaterThan(tableStatistics.getInMemoryDataSizeInBytes().getAsLong(), 0L);
            assertGreaterThan(tableStatistics.getOnDiskDataSizeInBytes().getAsLong(), 0L);
        }
    }
    // test rollback
    Set<String> existingFiles;
    try (Transaction transaction = newTransaction()) {
        existingFiles = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName());
        assertFalse(existingFiles.isEmpty());
    }
    Path stagingPathRoot;
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        // "stage" insert data
        ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES);
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle);
        sink.appendPage(CREATE_TABLE_DATA.toPage());
        sink.appendPage(CREATE_TABLE_DATA.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
        // statistics, visible from within transaction
        HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(transaction, tableName);
        assertEquals(tableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 5L);
        try (Transaction otherTransaction = newTransaction()) {
            // statistics, not visible from outside transaction
            HiveBasicStatistics otherTableStatistics = getBasicStatisticsForTable(otherTransaction, tableName);
            assertEquals(otherTableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 3L);
        }
        // verify we did not modify the table directory
        assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles);
        // verify all temp files start with the unique prefix
        stagingPathRoot = getStagingPathRoot(insertTableHandle);
        HdfsContext context = new HdfsContext(session);
        Set<String> tempFiles = listAllDataFiles(context, stagingPathRoot);
        assertFalse(tempFiles.isEmpty());
        for (String filePath : tempFiles) {
            assertThat(new Path(filePath).getName()).startsWith(session.getQueryId());
        }
        // rollback insert
        transaction.rollback();
    }
    // verify temp directory is empty
    HdfsContext context = new HdfsContext(newSession());
    assertTrue(listAllDataFiles(context, stagingPathRoot).isEmpty());
    // verify the data is unchanged
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertEqualsIgnoreOrder(result.getMaterializedRows(), resultBuilder.build().getMaterializedRows());
        // verify we did not modify the table directory
        assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles);
    }
    // verify statistics unchanged
    try (Transaction transaction = newTransaction()) {
        HiveBasicStatistics statistics = getBasicStatisticsForTable(transaction, tableName);
        assertEquals(statistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 3L);
        assertEquals(statistics.getFileCount().getAsLong(), 3L);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), HiveSessionProperties.getTemporaryStagingDirectoryPath (io.trino.plugin.hive.HiveSessionProperties.getTemporaryStagingDirectoryPath), HiveColumnHandle.bucketColumnHandle (io.trino.plugin.hive.HiveColumnHandle.bucketColumnHandle), ColumnHandle (io.trino.spi.connector.ColumnHandle), ConnectorInsertTableHandle (io.trino.spi.connector.ConnectorInsertTableHandle), Constraint (io.trino.spi.connector.Constraint), ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle), Slices.utf8Slice (io.airlift.slice.Slices.utf8Slice), Slice (io.airlift.slice.Slice), ConnectorSession (io.trino.spi.connector.ConnectorSession), TestingConnectorSession (io.trino.testing.TestingConnectorSession), ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata), HdfsContext (io.trino.plugin.hive.HdfsEnvironment.HdfsContext), MaterializedResult (io.trino.testing.MaterializedResult), ConnectorPageSink (io.trino.spi.connector.ConnectorPageSink), ConnectorTableMetadata (io.trino.spi.connector.ConnectorTableMetadata)
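
doInsert reuses one MaterializedResult.Builder across the three insert iterations, appending another copy of the expected rows each time and calling build() repeatedly, while assertEqualsIgnoreOrder absorbs the arbitrary row order a table scan produces. A condensed sketch of that accumulate-and-compare loop; verifyRepeatedInserts and readBack are hypothetical names, the session comes from TestingConnectorSession, and assertEqualsIgnoreOrder is assumed to come from io.trino.testing.QueryAssertions as in the test class.

import io.trino.testing.MaterializedResult;

import java.util.function.IntFunction;

import static io.trino.testing.QueryAssertions.assertEqualsIgnoreOrder;
import static io.trino.testing.TestingConnectorSession.SESSION;

public final class AccumulateAndCompareSketch {
    private AccumulateAndCompareSketch() {}

    // data: the rows inserted on every iteration;
    // readBack: whatever the connector returns after the i-th insert (supplied by the caller).
    static void verifyRepeatedInserts(MaterializedResult data, IntFunction<MaterializedResult> readBack) {
        MaterializedResult.Builder expected = MaterializedResult.resultBuilder(SESSION, data.getTypes());
        for (int i = 0; i < 3; i++) {
            // after the i-th insert the table holds (i + 1) copies of data
            expected.rows(data.getMaterializedRows());
            MaterializedResult actual = readBack.apply(i);
            // build() may be called repeatedly as the builder accumulates rows
            assertEqualsIgnoreOrder(actual.getMaterializedRows(), expected.build().getMaterializedRows());
        }
    }
}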

Example 14 with MaterializedResult

Use of io.trino.testing.MaterializedResult in project trino by trinodb.

From class AbstractTestHive, method doCreateTable.

protected void doCreateTable(SchemaTableName tableName, HiveStorageFormat storageFormat) throws Exception {
    String queryId;
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        queryId = session.getQueryId();
        // begin creating the table
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, CREATE_TABLE_COLUMNS, createTableProperties(storageFormat));
        ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty(), NO_RETRIES);
        // write the data
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, outputHandle);
        sink.appendPage(CREATE_TABLE_DATA.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        // verify all new files start with the unique prefix
        HdfsContext context = new HdfsContext(session);
        for (String filePath : listAllDataFiles(context, getStagingPathRoot(outputHandle))) {
            assertThat(new Path(filePath).getName()).startsWith(session.getQueryId());
        }
        // commit the table
        metadata.finishCreateTable(session, outputHandle, fragments, ImmutableList.of());
        transaction.commit();
    }
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        // load the new table
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        // verify the metadata
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, getTableHandle(metadata, tableName));
        assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), CREATE_TABLE_COLUMNS);
        // verify the data
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEqualsIgnoreOrder(result.getMaterializedRows(), CREATE_TABLE_DATA.getMaterializedRows());
        // verify the node version and query ID in table
        Table table = getMetastoreClient().getTable(tableName.getSchemaName(), tableName.getTableName()).get();
        assertEquals(table.getParameters().get(PRESTO_VERSION_NAME), TEST_SERVER_VERSION);
        assertEquals(table.getParameters().get(PRESTO_QUERY_ID_NAME), queryId);
        // verify basic statistics
        HiveBasicStatistics statistics = getBasicStatisticsForTable(transaction, tableName);
        assertEquals(statistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount());
        assertEquals(statistics.getFileCount().getAsLong(), 1L);
        assertGreaterThan(statistics.getInMemoryDataSizeInBytes().getAsLong(), 0L);
        assertGreaterThan(statistics.getOnDiskDataSizeInBytes().getAsLong(), 0L);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), HiveSessionProperties.getTemporaryStagingDirectoryPath (io.trino.plugin.hive.HiveSessionProperties.getTemporaryStagingDirectoryPath), HiveColumnHandle.bucketColumnHandle (io.trino.plugin.hive.HiveColumnHandle.bucketColumnHandle), ColumnHandle (io.trino.spi.connector.ColumnHandle), Table (io.trino.plugin.hive.metastore.Table), ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle), ConnectorOutputTableHandle (io.trino.spi.connector.ConnectorOutputTableHandle), Slices.utf8Slice (io.airlift.slice.Slices.utf8Slice), Slice (io.airlift.slice.Slice), ConnectorSession (io.trino.spi.connector.ConnectorSession), TestingConnectorSession (io.trino.testing.TestingConnectorSession), ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata), HdfsContext (io.trino.plugin.hive.HdfsEnvironment.HdfsContext), ConnectorPageSink (io.trino.spi.connector.ConnectorPageSink), MaterializedResult (io.trino.testing.MaterializedResult), ConnectorTableMetadata (io.trino.spi.connector.ConnectorTableMetadata)
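
Going the other direction, a MaterializedResult can also serve as test input: toPage() converts its rows into a Page, which is how CREATE_TABLE_DATA is appended to the page sink above. A minimal round-trip sketch (class name hypothetical, TestingConnectorSession.SESSION assumed):

import io.trino.spi.Page;
import io.trino.testing.MaterializedResult;
import io.trino.testing.TestingConnectorSession;

import static io.trino.spi.type.BigintType.BIGINT;

public class ToPageSketch {
    public static void main(String[] args) {
        MaterializedResult data = MaterializedResult.resultBuilder(TestingConnectorSession.SESSION, BIGINT)
                .row(1L)
                .row(2L)
                .row(3L)
                .build();

        // toPage() produces the Page that ConnectorPageSink.appendPage(...) consumes
        Page page = data.toPage();
        System.out.println(page.getPositionCount() == data.getRowCount()); // true
    }
}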

Example 15 with MaterializedResult

Use of io.trino.testing.MaterializedResult in project trino by trinodb.

From class AbstractTestHive, method assertEmptyFile.

private void assertEmptyFile(HiveStorageFormat format) throws Exception {
    SchemaTableName tableName = temporaryTable("empty_file");
    try {
        List<Column> columns = ImmutableList.of(new Column("test", HIVE_STRING, Optional.empty()));
        createEmptyTable(tableName, format, columns, ImmutableList.of());
        try (Transaction transaction = newTransaction()) {
            ConnectorSession session = newSession();
            ConnectorMetadata metadata = transaction.getMetadata();
            metadata.beginQuery(session);
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
            Table table = transaction.getMetastore().getTable(tableName.getSchemaName(), tableName.getTableName()).orElseThrow(AssertionError::new);
            // verify directory is empty
            HdfsContext context = new HdfsContext(session);
            Path location = new Path(table.getStorage().getLocation());
            assertTrue(listDirectory(context, location).isEmpty());
            // read table with empty directory
            readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.of(0), Optional.of(ORC));
            // create empty file
            FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, location);
            assertTrue(fileSystem.createNewFile(new Path(location, "empty-file")));
            assertEquals(listDirectory(context, location), ImmutableList.of("empty-file"));
            // read table with empty file
            MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.of(0), Optional.empty());
            assertEquals(result.getRowCount(), 0);
        }
    } finally {
        dropTable(tableName);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), HiveSessionProperties.getTemporaryStagingDirectoryPath (io.trino.plugin.hive.HiveSessionProperties.getTemporaryStagingDirectoryPath), HiveColumnHandle.bucketColumnHandle (io.trino.plugin.hive.HiveColumnHandle.bucketColumnHandle), ColumnHandle (io.trino.spi.connector.ColumnHandle), Table (io.trino.plugin.hive.metastore.Table), CatalogSchemaTableName (io.trino.spi.connector.CatalogSchemaTableName), SchemaTableName (io.trino.spi.connector.SchemaTableName), ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle), HiveColumnHandle.createBaseColumn (io.trino.plugin.hive.HiveColumnHandle.createBaseColumn), Column (io.trino.plugin.hive.metastore.Column), ViewColumn (io.trino.spi.connector.ConnectorViewDefinition.ViewColumn), SortingColumn (io.trino.plugin.hive.metastore.SortingColumn), FileSystem (org.apache.hadoop.fs.FileSystem), ConnectorSession (io.trino.spi.connector.ConnectorSession), TestingConnectorSession (io.trino.testing.TestingConnectorSession), ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata), HdfsContext (io.trino.plugin.hive.HdfsEnvironment.HdfsContext), MaterializedResult (io.trino.testing.MaterializedResult)
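
The empty-file test only needs the row count, which MaterializedResult exposes directly through getRowCount() and getMaterializedRows(). A trivial sketch of asserting an empty result (class name hypothetical, TestingConnectorSession.SESSION assumed):

import io.trino.testing.MaterializedResult;
import io.trino.testing.TestingConnectorSession;

import static io.trino.spi.type.VarcharType.createUnboundedVarcharType;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertTrue;

public class EmptyResultSketch {
    public static void main(String[] args) {
        // A builder with no row(...) calls yields an empty result,
        // analogous to scanning the table that contains only "empty-file".
        MaterializedResult empty = MaterializedResult.resultBuilder(TestingConnectorSession.SESSION, createUnboundedVarcharType())
                .build();

        assertEquals(empty.getRowCount(), 0);
        assertTrue(empty.getMaterializedRows().isEmpty());
    }
}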

Aggregations

MaterializedResult (io.trino.testing.MaterializedResult): 328
Test (org.testng.annotations.Test): 248
Page (io.trino.spi.Page): 103
BaseConnectorTest (io.trino.testing.BaseConnectorTest): 64
MaterializedRow (io.trino.testing.MaterializedRow): 63
Type (io.trino.spi.type.Type): 61
RowPagesBuilder (io.trino.RowPagesBuilder): 57
PlanNodeId (io.trino.sql.planner.plan.PlanNodeId): 51
OperatorAssertion.toMaterializedResult (io.trino.operator.OperatorAssertion.toMaterializedResult): 41
TaskContext (io.trino.operator.TaskContext): 37
TestingTaskContext (io.trino.testing.TestingTaskContext): 37
OperatorFactory (io.trino.operator.OperatorFactory): 31
Language (org.intellij.lang.annotations.Language): 31
ConnectorSession (io.trino.spi.connector.ConnectorSession): 27
Session (io.trino.Session): 24
ColumnHandle (io.trino.spi.connector.ColumnHandle): 24
ConnectorMetadata (io.trino.spi.connector.ConnectorMetadata): 24
ConnectorTableHandle (io.trino.spi.connector.ConnectorTableHandle): 23
ImmutableList (com.google.common.collect.ImmutableList): 22
Constraint (io.trino.spi.connector.Constraint): 20