Example 1 with OrcWriterConfig

Use of io.trino.plugin.hive.orc.OrcWriterConfig in project trino by trinodb.

From the class TestHiveFileFormats, method testOrcOptimizedWriter.

@Test(dataProvider = "validRowAndFileSizePadding")
public void testOrcOptimizedWriter(int rowCount, long fileSizePadding) throws Exception {
    HiveSessionProperties hiveSessionProperties = new HiveSessionProperties(
            new HiveConfig(),
            new OrcReaderConfig(),
            new OrcWriterConfig().setValidationPercentage(100.0),
            new ParquetReaderConfig(),
            new ParquetWriterConfig());
    ConnectorSession session = TestingConnectorSession.builder()
            .setPropertyMetadata(hiveSessionProperties.getSessionProperties())
            .build();
    // A Trino page cannot contain a map with null keys, so a page-based writer cannot write null keys
    List<TestColumn> testColumns = TEST_COLUMNS.stream()
            .filter(TestHiveFileFormats::withoutNullMapKeyTests)
            .collect(toList());
    assertThatFileFormat(ORC)
            .withColumns(testColumns)
            .withRowsCount(rowCount)
            .withSession(session)
            .withFileSizePadding(fileSizePadding)
            .withFileWriterFactory(new OrcFileWriterFactory(HDFS_ENVIRONMENT, TESTING_TYPE_MANAGER, new NodeVersion("test"), STATS, new OrcWriterOptions()))
            .isReadableByRecordCursor(createGenericHiveRecordCursorProvider(HDFS_ENVIRONMENT))
            .isReadableByPageSource(new OrcPageSourceFactory(new OrcReaderOptions(), HDFS_ENVIRONMENT, STATS, UTC));
}
Also used: ParquetWriterConfig (io.trino.plugin.hive.parquet.ParquetWriterConfig), OrcWriterConfig (io.trino.plugin.hive.orc.OrcWriterConfig), OrcPageSourceFactory (io.trino.plugin.hive.orc.OrcPageSourceFactory), OrcFileWriterFactory (io.trino.plugin.hive.orc.OrcFileWriterFactory), OrcWriterOptions (io.trino.orc.OrcWriterOptions), OrcReaderConfig (io.trino.plugin.hive.orc.OrcReaderConfig), OrcReaderOptions (io.trino.orc.OrcReaderOptions), ConnectorSession (io.trino.spi.connector.ConnectorSession), TestingConnectorSession (io.trino.testing.TestingConnectorSession), ParquetReaderConfig (io.trino.plugin.hive.parquet.ParquetReaderConfig), Test (org.testng.annotations.Test)
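
All four examples share the same setup: build HiveSessionProperties from the five format config objects, override the knob under test, and hand the resulting property metadata to TestingConnectorSession. A minimal sketch of that pattern as a helper; the helper name is hypothetical, but every constructor and builder call below appears verbatim in the examples on this page:

// Hypothetical helper illustrating the session-building pattern shared by
// Examples 1, 2, and 4; only the OrcWriterConfig under test varies.
private static ConnectorSession sessionWith(OrcWriterConfig orcWriterConfig) {
    HiveSessionProperties hiveSessionProperties = new HiveSessionProperties(
            new HiveConfig(),
            new OrcReaderConfig(),
            orcWriterConfig,
            new ParquetReaderConfig(),
            new ParquetWriterConfig());
    return TestingConnectorSession.builder()
            .setPropertyMetadata(hiveSessionProperties.getSessionProperties())
            .build();
}

// Usage, mirroring Example 1: validate 100% of written ORC files.
ConnectorSession session = sessionWith(new OrcWriterConfig().setValidationPercentage(100.0));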

Example 2 with OrcWriterConfig

Use of io.trino.plugin.hive.orc.OrcWriterConfig in project trino by trinodb.

From the class TestOrcPageSourceMemoryTracking, method testMaxReadBytes.

@Test(dataProvider = "rowCount")
public void testMaxReadBytes(int rowCount) throws Exception {
    int maxReadBytes = 1_000;
    HiveSessionProperties hiveSessionProperties = new HiveSessionProperties(
            new HiveConfig(),
            new OrcReaderConfig().setMaxBlockSize(DataSize.ofBytes(maxReadBytes)),
            new OrcWriterConfig(),
            new ParquetReaderConfig(),
            new ParquetWriterConfig());
    ConnectorSession session = TestingConnectorSession.builder()
            .setPropertyMetadata(hiveSessionProperties.getSessionProperties())
            .build();
    FileFormatDataSourceStats stats = new FileFormatDataSourceStats();
    // Build a table where every row gets larger, so we can verify that the reader shrinks its "batchSize"
    int numColumns = 5;
    int step = 250;
    ImmutableList.Builder<TestColumn> columnBuilder = ImmutableList.<TestColumn>builder()
            .add(new TestColumn("p_empty_string", javaStringObjectInspector, () -> "", true));
    GrowingTestColumn[] dataColumns = new GrowingTestColumn[numColumns];
    for (int i = 0; i < numColumns; i++) {
        dataColumns[i] = new GrowingTestColumn(
                "p_string" + "_" + i,
                javaStringObjectInspector,
                () -> Long.toHexString(random.nextLong()),
                false,
                step * (i + 1));
        columnBuilder.add(dataColumns[i]);
    }
    List<TestColumn> testColumns = columnBuilder.build();
    File tempFile = File.createTempFile("trino_test_orc_page_source_max_read_bytes", "orc");
    // delete the placeholder so the writer can create the file itself
    tempFile.delete();
    TestPreparer testPreparer = new TestPreparer(tempFile.getAbsolutePath(), testColumns, rowCount, rowCount);
    ConnectorPageSource pageSource = testPreparer.newPageSource(stats, session);
    try {
        int positionCount = 0;
        while (true) {
            Page page = pageSource.getNextPage();
            if (pageSource.isFinished()) {
                break;
            }
            assertNotNull(page);
            page = page.getLoadedPage();
            positionCount += page.getPositionCount();
            // ignore the first MAX_BATCH_SIZE rows given the sizes are set when loading the blocks
            if (positionCount > MAX_BATCH_SIZE) {
                // either the page is bounded by maxReadBytes or it holds one single large row;
                // an error margin of MAX_BATCH_SIZE / step is needed because the row sizes keep growing
                assertTrue(page.getSizeInBytes() < maxReadBytes * (MAX_BATCH_SIZE / step)
                        || page.getPositionCount() == 1);
            }
        }
        // verify the stats are correctly recorded
        Distribution distribution = stats.getMaxCombinedBytesPerRow().getAllTime();
        assertEquals((int) distribution.getCount(), 1);
        // the block is a VariableWidthBlock, whose valueIsNull and offsets arrays add overhead per position
        assertEquals(
                (int) distribution.getMax(),
                Arrays.stream(dataColumns).mapToInt(GrowingTestColumn::getMaxSize).sum()
                        + (Integer.BYTES + Byte.BYTES) * numColumns);
        pageSource.close();
    } finally {
        tempFile.delete();
    }
}
Also used: ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList), ImmutableList (com.google.common.collect.ImmutableList), ParquetWriterConfig (io.trino.plugin.hive.parquet.ParquetWriterConfig), OrcWriterConfig (io.trino.plugin.hive.orc.OrcWriterConfig), Page (io.trino.spi.Page), ConnectorPageSource (io.trino.spi.connector.ConnectorPageSource), OrcReaderConfig (io.trino.plugin.hive.orc.OrcReaderConfig), Distribution (io.airlift.stats.Distribution), ConnectorSession (io.trino.spi.connector.ConnectorSession), TestingConnectorSession (io.trino.testing.TestingConnectorSession), SequenceFile (org.apache.hadoop.io.SequenceFile), File (java.io.File), OrcFile (org.apache.hadoop.hive.ql.io.orc.OrcFile), ParquetReaderConfig (io.trino.plugin.hive.parquet.ParquetReaderConfig), Test (org.testng.annotations.Test)
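
The assertion inside the read loop packs the whole invariant into one expression. A sketch of the same check as a standalone predicate, with MAX_BATCH_SIZE left as a parameter because its value is defined by the ORC reader, not by this test; the method name is ours:

// Sketch: a page either stays under the byte budget, scaled by an error
// margin of maxBatchSize / step (row sizes grow by `step` bytes per row,
// so the last batch can overshoot by that factor), or it is a single row
// too large to split any further.
static boolean withinReadBudget(Page page, int maxReadBytes, int maxBatchSize, int step) {
    long sizeBound = (long) maxReadBytes * (maxBatchSize / step);
    return page.getSizeInBytes() < sizeBound || page.getPositionCount() == 1;
}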

Example 3 with OrcWriterConfig

Use of io.trino.plugin.hive.orc.OrcWriterConfig in project trino by trinodb.

From the class TestOrcWriterConfig, method testExplicitPropertyMappings.

@Test
public void testExplicitPropertyMappings() {
    Map<String, String> properties = ImmutableMap.<String, String>builder()
            .put("hive.orc.writer.stripe-min-size", "13MB")
            .put("hive.orc.writer.stripe-max-size", "27MB")
            .put("hive.orc.writer.stripe-max-rows", "44")
            .put("hive.orc.writer.row-group-max-rows", "11")
            .put("hive.orc.writer.dictionary-max-memory", "13MB")
            .put("hive.orc.writer.string-statistics-limit", "17MB")
            .put("hive.orc.writer.max-compression-buffer-size", "19MB")
            .put("hive.orc.default-bloom-filter-fpp", "0.96")
            .put("hive.orc.writer.writer-identification", "LEGACY_HIVE_COMPATIBLE")
            .put("hive.orc.writer.validation-percentage", "0.16")
            .put("hive.orc.writer.validation-mode", "DETAILED")
            .buildOrThrow();
    OrcWriterConfig expected = new OrcWriterConfig()
            .setStripeMinSize(DataSize.of(13, MEGABYTE))
            .setStripeMaxSize(DataSize.of(27, MEGABYTE))
            .setStripeMaxRowCount(44)
            .setRowGroupMaxRowCount(11)
            .setDictionaryMaxMemory(DataSize.of(13, MEGABYTE))
            .setStringStatisticsLimit(DataSize.of(17, MEGABYTE))
            .setMaxCompressionBufferSize(DataSize.of(19, MEGABYTE))
            .setDefaultBloomFilterFpp(0.96)
            .setWriterIdentification(WriterIdentification.LEGACY_HIVE_COMPATIBLE)
            .setValidationPercentage(0.16)
            .setValidationMode(OrcWriteValidationMode.DETAILED);
    assertFullMapping(properties, expected);
}
Also used: OrcWriterConfig (io.trino.plugin.hive.orc.OrcWriterConfig), Test (org.testng.annotations.Test)
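
assertFullMapping passes only if every property key round-trips through a matching setter on the config class. In airlift-style config classes such as OrcWriterConfig, that link is declared by annotating the setter with @Config; a sketch of what the validation-percentage mapping plausibly looks like (the property key is taken from the test above, the field and method body are assumed; consult the real io.trino.plugin.hive.orc.OrcWriterConfig for the actual code):

import io.airlift.configuration.Config;
import io.airlift.configuration.ConfigDescription;

// Assumed shape of the setter that assertFullMapping exercises.
private double validationPercentage;

@Config("hive.orc.writer.validation-percentage")
@ConfigDescription("Percentage of written ORC files to re-read and validate")
public OrcWriterConfig setValidationPercentage(double validationPercentage) {
    this.validationPercentage = validationPercentage;
    return this;
}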

Example 4 with OrcWriterConfig

Use of io.trino.plugin.hive.orc.OrcWriterConfig in project trino by trinodb.

From the class ParquetTester, method assertMaxReadBytes.

void assertMaxReadBytes(
        List<ObjectInspector> objectInspectors,
        Iterable<?>[] writeValues,
        Iterable<?>[] readValues,
        List<String> columnNames,
        List<Type> columnTypes,
        Optional<MessageType> parquetSchema,
        DataSize maxReadBlockSize)
        throws Exception {
    CompressionCodecName compressionCodecName = UNCOMPRESSED;
    HiveSessionProperties hiveSessionProperties = new HiveSessionProperties(
            new HiveConfig()
                    .setHiveStorageFormat(HiveStorageFormat.PARQUET)
                    .setUseParquetColumnNames(false),
            new OrcReaderConfig(),
            new OrcWriterConfig(),
            new ParquetReaderConfig().setMaxReadBlockSize(maxReadBlockSize),
            new ParquetWriterConfig());
    ConnectorSession session = TestingConnectorSession.builder()
            .setPropertyMetadata(hiveSessionProperties.getSessionProperties())
            .build();
    try (TempFile tempFile = new TempFile("test", "parquet")) {
        JobConf jobConf = new JobConf();
        jobConf.setEnum(COMPRESSION, compressionCodecName);
        jobConf.setBoolean(ENABLE_DICTIONARY, true);
        jobConf.setEnum(WRITER_VERSION, PARQUET_1_0);
        writeParquetColumn(
                jobConf,
                tempFile.getFile(),
                compressionCodecName,
                createTableProperties(columnNames, objectInspectors),
                getStandardStructObjectInspector(columnNames, objectInspectors),
                getIterators(writeValues),
                parquetSchema,
                false);
        Iterator<?>[] expectedValues = getIterators(readValues);
        try (ConnectorPageSource pageSource = fileFormat.createFileFormatReader(
                session, HDFS_ENVIRONMENT, tempFile.getFile(), columnNames, columnTypes)) {
            assertPageSource(columnTypes, expectedValues, pageSource, Optional.of(getParquetMaxReadBlockSize(session).toBytes()));
            assertFalse(stream(expectedValues).allMatch(Iterator::hasNext));
        }
    }
}
Also used: OrcWriterConfig (io.trino.plugin.hive.orc.OrcWriterConfig), ConnectorPageSource (io.trino.spi.connector.ConnectorPageSource), HiveSessionProperties (io.trino.plugin.hive.HiveSessionProperties), HiveConfig (io.trino.plugin.hive.HiveConfig), OrcReaderConfig (io.trino.plugin.hive.orc.OrcReaderConfig), CompressionCodecName (org.apache.parquet.hadoop.metadata.CompressionCodecName), AbstractIterator (com.google.common.collect.AbstractIterator), Iterator (java.util.Iterator), ConnectorSession (io.trino.spi.connector.ConnectorSession), TestingConnectorSession (io.trino.testing.TestingConnectorSession), JobConf (org.apache.hadoop.mapred.JobConf)
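
The maxReadBlockSize set on ParquetReaderConfig surfaces as a session property, which is why the code above can read it back with getParquetMaxReadBlockSize. A minimal sketch of that round trip, assuming the static accessor is imported from HiveSessionProperties as in the example:

// Build a session with a custom Parquet read budget, then read it back.
DataSize maxReadBlockSize = DataSize.ofBytes(16_384);
HiveSessionProperties hiveSessionProperties = new HiveSessionProperties(
        new HiveConfig(),
        new OrcReaderConfig(),
        new OrcWriterConfig(),
        new ParquetReaderConfig().setMaxReadBlockSize(maxReadBlockSize),
        new ParquetWriterConfig());
ConnectorSession session = TestingConnectorSession.builder()
        .setPropertyMetadata(hiveSessionProperties.getSessionProperties())
        .build();
// Same accessor used in assertPageSource above.
assertEquals(getParquetMaxReadBlockSize(session), maxReadBlockSize);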

Aggregations

OrcWriterConfig (io.trino.plugin.hive.orc.OrcWriterConfig): 4 uses
OrcReaderConfig (io.trino.plugin.hive.orc.OrcReaderConfig): 3 uses
ConnectorSession (io.trino.spi.connector.ConnectorSession): 3 uses
TestingConnectorSession (io.trino.testing.TestingConnectorSession): 3 uses
Test (org.testng.annotations.Test): 3 uses
ParquetReaderConfig (io.trino.plugin.hive.parquet.ParquetReaderConfig): 2 uses
ParquetWriterConfig (io.trino.plugin.hive.parquet.ParquetWriterConfig): 2 uses
ConnectorPageSource (io.trino.spi.connector.ConnectorPageSource): 2 uses
AbstractIterator (com.google.common.collect.AbstractIterator): 1 use
ImmutableList (com.google.common.collect.ImmutableList): 1 use
ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList): 1 use
Distribution (io.airlift.stats.Distribution): 1 use
OrcReaderOptions (io.trino.orc.OrcReaderOptions): 1 use
OrcWriterOptions (io.trino.orc.OrcWriterOptions): 1 use
HiveConfig (io.trino.plugin.hive.HiveConfig): 1 use
HiveSessionProperties (io.trino.plugin.hive.HiveSessionProperties): 1 use
OrcFileWriterFactory (io.trino.plugin.hive.orc.OrcFileWriterFactory): 1 use
OrcPageSourceFactory (io.trino.plugin.hive.orc.OrcPageSourceFactory): 1 use
Page (io.trino.spi.Page): 1 use
File (java.io.File): 1 use