Use of io.trino.plugin.hive.orc.OrcReaderConfig in project trino by trinodb.
From the class TestHiveFileFormats, method testOrcUseColumnNames.
@Test(dataProvider = "rowCount")
public void testOrcUseColumnNames(int rowCount)
        throws Exception
{
    ConnectorSession session = getHiveSession(new HiveConfig(), new OrcReaderConfig().setUseColumnNames(true));
    // Hive binary writers are broken for timestamps
    List<TestColumn> testColumns = TEST_COLUMNS.stream()
            .filter(TestHiveFileFormats::withoutTimestamps)
            .collect(toImmutableList());
    // Reading the columns in reverse order only succeeds when the reader
    // matches columns by name rather than by ordinal position
    assertThatFileFormat(ORC)
            .withWriteColumns(testColumns)
            .withRowsCount(rowCount)
            .withReadColumns(Lists.reverse(testColumns))
            .withSession(session)
            .isReadableByPageSource(new OrcPageSourceFactory(new OrcReaderOptions(), HDFS_ENVIRONMENT, STATS, UTC));
}
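The withoutTimestamps filter referenced above is a private helper defined elsewhere in TestHiveFileFormats. The following is a self-contained, hypothetical illustration of that filtering step only; the TestColumn record and the substring check here are assumptions for the sketch, not the real implementation:

import java.util.List;
import static java.util.stream.Collectors.toList;

public class TimestampFilterSketch
{
    // Stand-in for the real TestColumn type used by TestHiveFileFormats
    record TestColumn(String name, String hiveType) {}

    // Hypothetical version of the filter: drop timestamp-bearing columns,
    // since the Hive binary writers used to produce test data mishandle
    // timestamp values
    static boolean withoutTimestamps(TestColumn column)
    {
        return !column.hiveType().contains("timestamp");
    }

    public static void main(String[] args)
    {
        List<TestColumn> columns = List.of(
                new TestColumn("t_string", "string"),
                new TestColumn("t_timestamp", "timestamp"));
        List<TestColumn> filtered = columns.stream()
                .filter(TimestampFilterSketch::withoutTimestamps)
                .collect(toList());
        System.out.println(filtered); // only t_string survives
    }
}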
From the class TestHiveFileFormats, method testORCProjectedColumns.
@Test(dataProvider = "rowCount")
public void testORCProjectedColumns(int rowCount)
        throws Exception
{
    List<TestColumn> supportedColumns = TEST_COLUMNS;
    List<TestColumn> regularColumns = getRegularColumns(supportedColumns);
    List<TestColumn> partitionColumns = getPartitionColumns(supportedColumns);

    // Create projected columns for all supported regular columns
    ImmutableList.Builder<TestColumn> writeColumnsBuilder = ImmutableList.builder();
    ImmutableList.Builder<TestColumn> readColumnsBuilder = ImmutableList.builder();
    generateProjectedColumns(regularColumns, writeColumnsBuilder, readColumnsBuilder);

    List<TestColumn> writeColumns = writeColumnsBuilder.addAll(partitionColumns).build();
    List<TestColumn> readColumns = readColumnsBuilder.addAll(partitionColumns).build();

    // Reader configured to match columns by name
    ConnectorSession session = getHiveSession(new HiveConfig(), new OrcReaderConfig().setUseColumnNames(true));
    assertThatFileFormat(ORC)
            .withWriteColumns(writeColumns)
            .withReadColumns(readColumns)
            .withRowsCount(rowCount)
            .withSession(session)
            .isReadableByPageSource(new OrcPageSourceFactory(new OrcReaderOptions(), HDFS_ENVIRONMENT, STATS, UTC));

    // Default reader: columns matched by ordinal position
    assertThatFileFormat(ORC)
            .withWriteColumns(writeColumns)
            .withReadColumns(readColumns)
            .withRowsCount(rowCount)
            .isReadableByPageSource(new OrcPageSourceFactory(new OrcReaderOptions(), HDFS_ENVIRONMENT, STATS, UTC));
}
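Both tests enable name-based column matching by handing getHiveSession an OrcReaderConfig, and the second assertion in testORCProjectedColumns repeats the check without the session override to cover the default position-based mapping. As a minimal standalone sketch of the config bean itself (the isUseColumnNames() accessor follows the usual airlift-style getter convention and is an assumption here, not confirmed by the examples above):

import io.trino.plugin.hive.orc.OrcReaderConfig;

public class OrcReaderConfigSketch
{
    public static void main(String[] args)
    {
        // The fluent setter returns the config itself, as seen in the tests above
        OrcReaderConfig config = new OrcReaderConfig()
                .setUseColumnNames(true); // resolve ORC columns by name instead of ordinal

        // Assumed airlift-style getter paired with setUseColumnNames
        System.out.println("use column names: " + config.isUseColumnNames());
    }
}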