
Example 1 with OrcInputStream

Use of io.prestosql.orc.stream.OrcInputStream in project hetu-core by openlookeng.

Class OrcFileTail, method readFrom.

public static OrcFileTail readFrom(OrcDataSource orcDataSource, Optional<OrcWriteValidation> writeValidation) throws IOException {
    OrcFileTail orcFileTail = new OrcFileTail();
    // 
    // Read the file tail:
    // 
    // variable: Footer
    // variable: Metadata
    // variable: PostScript - contains length of footer and metadata
    // 1 byte: postScriptSize
    // figure out the size of the file using the option or filesystem
    long size = orcDataSource.getSize();
    if (size <= PostScript.MAGIC.length()) {
        throw new OrcCorruptionException(orcDataSource.getId(), "Invalid file size %s", size);
    }
    // Read the tail of the file
    int expectedBufferSize = toIntExact(min(size, EXPECTED_FOOTER_SIZE));
    Slice buffer = orcDataSource.readFully(size - expectedBufferSize, expectedBufferSize);
    // get length of PostScript - last byte of the file
    int postScriptSize = buffer.getUnsignedByte(buffer.length() - SIZE_OF_BYTE);
    if (postScriptSize >= buffer.length()) {
        throw new OrcCorruptionException(orcDataSource.getId(), "Invalid postscript length %s", postScriptSize);
    }
    MetadataReader metadataReader = new ExceptionWrappingMetadataReader(orcDataSource.getId(), new OrcMetadataReader());
    // decode the post script
    try {
        orcFileTail.postScript = metadataReader.readPostScript(buffer.slice(buffer.length() - SIZE_OF_BYTE - postScriptSize, postScriptSize).getInput());
    } catch (OrcCorruptionException e) {
        // check if this is an ORC file and not an RCFile or something else
        if (!isValidHeaderMagic(orcDataSource)) {
            throw new OrcCorruptionException(orcDataSource.getId(), "Not an ORC file");
        }
        throw e;
    }
    // verify this is a supported version
    checkOrcVersion(orcDataSource, orcFileTail.postScript.getVersion());
    validateWrite(validation -> validation.getVersion().equals(orcFileTail.postScript.getVersion()), writeValidation, orcDataSource, "Unexpected version");
    int bufferSize = toIntExact(orcFileTail.postScript.getCompressionBlockSize());
    // check compression codec is supported
    CompressionKind compressionKind = orcFileTail.postScript.getCompression();
    orcFileTail.decompressor = OrcDecompressor.createOrcDecompressor(orcDataSource.getId(), compressionKind, bufferSize);
    validateWrite(validation -> validation.getCompression() == compressionKind, writeValidation, orcDataSource, "Unexpected compression");
    PostScript.HiveWriterVersion hiveWriterVersion = orcFileTail.postScript.getHiveWriterVersion();
    int footerSize = toIntExact(orcFileTail.postScript.getFooterLength());
    int metadataSize = toIntExact(orcFileTail.postScript.getMetadataLength());
    // check if extra bytes need to be read
    Slice completeFooterSlice;
    int completeFooterSize = footerSize + metadataSize + postScriptSize + SIZE_OF_BYTE;
    if (completeFooterSize > buffer.length()) {
        // initial read was not large enough, so just read again with the correct size
        completeFooterSlice = orcDataSource.readFully(size - completeFooterSize, completeFooterSize);
    } else {
        // footer is already in the bytes in buffer, just adjust position, length
        completeFooterSlice = buffer.slice(buffer.length() - completeFooterSize, completeFooterSize);
    }
    // read metadata
    Slice metadataSlice = completeFooterSlice.slice(0, metadataSize);
    try (InputStream metadataInputStream = new OrcInputStream(OrcChunkLoader.create(orcDataSource.getId(), metadataSlice, orcFileTail.decompressor, newSimpleAggregatedMemoryContext()))) {
        orcFileTail.metadata = metadataReader.readMetadata(hiveWriterVersion, metadataInputStream);
    }
    // read footer
    Slice footerSlice = completeFooterSlice.slice(metadataSize, footerSize);
    try (InputStream footerInputStream = new OrcInputStream(OrcChunkLoader.create(orcDataSource.getId(), footerSlice, orcFileTail.decompressor, newSimpleAggregatedMemoryContext()))) {
        orcFileTail.footer = metadataReader.readFooter(hiveWriterVersion, footerInputStream);
    }
    if (orcFileTail.footer.getTypes().size() == 0) {
        throw new OrcCorruptionException(orcDataSource.getId(), "File has no columns");
    }
    validateWrite(validation -> validation.getColumnNames().equals(orcFileTail.footer.getTypes().get(new OrcColumnId(0)).getFieldNames()), writeValidation, orcDataSource, "Unexpected column names");
    validateWrite(validation -> validation.getRowGroupMaxRowCount() == orcFileTail.footer.getRowsInRowGroup(), writeValidation, orcDataSource, "Unexpected rows in group");
    if (writeValidation.isPresent()) {
        writeValidation.get().validateMetadata(orcDataSource.getId(), orcFileTail.footer.getUserMetadata());
        writeValidation.get().validateFileStatistics(orcDataSource.getId(), orcFileTail.footer.getFileStats());
        writeValidation.get().validateStripeStatistics(orcDataSource.getId(), orcFileTail.footer.getStripes(), orcFileTail.metadata.getStripeStatsList());
    }
    return orcFileTail;
}
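For orientation, here is the tail layout that readFrom is decoding. A minimal standalone sketch (not hetu-core code) of the same offset arithmetic; the method and the printed ranges are ours, and in the real reader the sizes come from the PostScript fields:

// The last completeFooterSize bytes of an ORC file are laid out as:
//   [ metadata | footer | postscript | 1-byte postscript length ]
static void describeTail(int metadataSize, int footerSize, int postScriptSize) {
    // +1 accounts for the trailing length byte (SIZE_OF_BYTE above)
    int completeFooterSize = metadataSize + footerSize + postScriptSize + 1;
    System.out.printf("tail bytes to read: %d%n", completeFooterSize);
    // These are exactly the ranges cut for metadataSlice and footerSlice:
    System.out.printf("metadata slice: [0, %d)%n", metadataSize);
    System.out.printf("footer slice: [%d, %d)%n", metadataSize, metadataSize + footerSize);
}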
Also used: OrcColumnId (io.prestosql.orc.metadata.OrcColumnId), CompressionKind (io.prestosql.orc.metadata.CompressionKind), OrcInputStream (io.prestosql.orc.stream.OrcInputStream), InputStream (java.io.InputStream), OrcMetadataReader (io.prestosql.orc.metadata.OrcMetadataReader), ExceptionWrappingMetadataReader (io.prestosql.orc.metadata.ExceptionWrappingMetadataReader), MetadataReader (io.prestosql.orc.metadata.MetadataReader), PostScript (io.prestosql.orc.metadata.PostScript), Slice (io.airlift.slice.Slice)

Example 2 with OrcInputStream

Use of io.prestosql.orc.stream.OrcInputStream in project hetu-core by openlookeng.

Class StripeReader, method readColumnIndexes.

private Map<StreamId, List<RowGroupIndex>> readColumnIndexes(Map<StreamId, Stream> streams, Map<StreamId, OrcChunkLoader> streamsData, Map<OrcColumnId, List<HashableBloomFilter>> bloomFilterIndexes, StripeInformation stripe) throws IOException {
    ImmutableMap.Builder<StreamId, List<RowGroupIndex>> columnIndexes = ImmutableMap.builder();
    for (Entry<StreamId, Stream> entry : streams.entrySet()) {
        Stream stream = entry.getValue();
        if (stream.getStreamKind() == ROW_INDEX) {
            OrcInputStream inputStream = new OrcInputStream(streamsData.get(entry.getKey()));
            List<HashableBloomFilter> bloomFilters = bloomFilterIndexes.get(entry.getKey().getColumnId());
            List<RowGroupIndex> rowGroupIndexes;
            if (orcCacheProperties.isRowIndexCacheEnabled()) {
                OrcRowIndexCacheKey indexCacheKey = new OrcRowIndexCacheKey();
                indexCacheKey.setOrcDataSourceId(new OrcDataSourceIdWithTimeStamp(orcDataSource.getId(), orcDataSource.getLastModifiedTime()));
                indexCacheKey.setStripeOffset(stripe.getOffset());
                indexCacheKey.setStreamId(entry.getKey());
                try {
                    rowGroupIndexes = orcCacheStore.getRowIndexCache().get(indexCacheKey, () -> metadataReader.readRowIndexes(hiveWriterVersion, inputStream));
                } catch (UncheckedExecutionException | ExecutionException executionException) {
                    handleCacheLoadException(executionException);
                    log.debug(executionException.getCause(), "Error while caching row group indexes. Falling back to default flow");
                    rowGroupIndexes = metadataReader.readRowIndexes(hiveWriterVersion, inputStream);
                }
            } else {
                rowGroupIndexes = metadataReader.readRowIndexes(hiveWriterVersion, inputStream);
            }
            if (bloomFilters != null && !bloomFilters.isEmpty()) {
                ImmutableList.Builder<RowGroupIndex> newRowGroupIndexes = ImmutableList.builder();
                for (int i = 0; i < rowGroupIndexes.size(); i++) {
                    RowGroupIndex rowGroupIndex = rowGroupIndexes.get(i);
                    ColumnStatistics columnStatistics = rowGroupIndex.getColumnStatistics().withBloomFilter(bloomFilters.get(i));
                    newRowGroupIndexes.add(new RowGroupIndex(rowGroupIndex.getPositions(), columnStatistics));
                }
                rowGroupIndexes = newRowGroupIndexes.build();
            }
            columnIndexes.put(entry.getKey(), rowGroupIndexes);
        }
    }
    return columnIndexes.build();
}
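The caching here follows a standard Guava pattern: Cache.get(key, loader) returns the cached value or runs the loader on a miss, and a loader failure surfaces as ExecutionException (or UncheckedExecutionException when the cause is unchecked), at which point the code falls back to an uncached read. A minimal self-contained sketch of that pattern; the key and value types and the expensiveRead helper are placeholders, not hetu-core APIs:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.util.concurrent.UncheckedExecutionException;
import java.util.concurrent.ExecutionException;

class CacheFallbackSketch {
    private final Cache<String, byte[]> cache = CacheBuilder.newBuilder()
            .maximumSize(1_000)
            .build();

    byte[] readThroughCache(String key) {
        try {
            // Runs expensiveRead() only on a cache miss.
            return cache.get(key, this::expensiveRead);
        } catch (UncheckedExecutionException | ExecutionException e) {
            // Loader failed: fall back to an uncached read, mirroring
            // the handleCacheLoadException(...) path in the example.
            return expensiveRead();
        }
    }

    private byte[] expensiveRead() {
        return new byte[0]; // placeholder for the real metadata read
    }
}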
Also used: ColumnStatistics (io.prestosql.orc.metadata.statistics.ColumnStatistics), OrcInputStream (io.prestosql.orc.stream.OrcInputStream), UncheckedExecutionException (com.google.common.util.concurrent.UncheckedExecutionException), ImmutableList (com.google.common.collect.ImmutableList), HashableBloomFilter (io.prestosql.orc.metadata.statistics.HashableBloomFilter), ImmutableMap (com.google.common.collect.ImmutableMap), ImmutableMap.toImmutableMap (com.google.common.collect.ImmutableMap.toImmutableMap), Checkpoints.getDictionaryStreamCheckpoint (io.prestosql.orc.checkpoint.Checkpoints.getDictionaryStreamCheckpoint), StreamCheckpoint (io.prestosql.orc.checkpoint.StreamCheckpoint), RowGroupIndex (io.prestosql.orc.metadata.RowGroupIndex), List (java.util.List), ArrayList (java.util.ArrayList), ValueInputStream (io.prestosql.orc.stream.ValueInputStream), Stream (io.prestosql.orc.metadata.Stream), InputStream (java.io.InputStream), ExecutionException (java.util.concurrent.ExecutionException)

Example 3 with OrcInputStream

Use of io.prestosql.orc.stream.OrcInputStream in project hetu-core by openlookeng.

Class StripeReader, method readStripeFooter.

private StripeFooter readStripeFooter(StripeInformation stripe, AggregatedMemoryContext systemMemoryUsage) throws IOException {
    long offset = stripe.getOffset() + stripe.getIndexLength() + stripe.getDataLength();
    int tailLength = toIntExact(stripe.getFooterLength());
    // read the footer
    Slice tailBuffer = orcDataSource.readFully(offset, tailLength);
    try (InputStream inputStream = new OrcInputStream(OrcChunkLoader.create(orcDataSource.getId(), tailBuffer, decompressor, systemMemoryUsage))) {
        return metadataReader.readStripeFooter(types, inputStream, legacyFileTimeZone);
    }
}
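The offset computation reflects the on-disk stripe layout: index streams first, then data streams, then the stripe footer. A tiny standalone sketch of the same arithmetic; the method name and example values are illustrative:

// Stripe layout: [ index | data | footer ]
static long stripeFooterOffset(long stripeOffset, long indexLength, long dataLength) {
    return stripeOffset + indexLength + dataLength;
}

// e.g. stripeFooterOffset(3, 1_000, 250_000) == 251_003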
Also used: OrcInputStream (io.prestosql.orc.stream.OrcInputStream), Slice (io.airlift.slice.Slice), ValueInputStream (io.prestosql.orc.stream.ValueInputStream), InputStream (java.io.InputStream), Checkpoints.getDictionaryStreamCheckpoint (io.prestosql.orc.checkpoint.Checkpoints.getDictionaryStreamCheckpoint), StreamCheckpoint (io.prestosql.orc.checkpoint.StreamCheckpoint)

Example 4 with OrcInputStream

Use of io.prestosql.orc.stream.OrcInputStream in project hetu-core by openlookeng.

Class StripeReader, method readBloomFilterIndexes.

private Map<OrcColumnId, List<HashableBloomFilter>> readBloomFilterIndexes(Map<StreamId, Stream> streams, Map<StreamId, OrcChunkLoader> streamsData, StripeInformation stripe) throws IOException {
    HashMap<OrcColumnId, List<HashableBloomFilter>> bloomFilters = new HashMap<>();
    for (Entry<StreamId, Stream> entry : streams.entrySet()) {
        Stream stream = entry.getValue();
        if (stream.getStreamKind() == BLOOM_FILTER_UTF8) {
            OrcInputStream inputStream = new OrcInputStream(streamsData.get(entry.getKey()));
            if (orcCacheProperties.isBloomFilterCacheEnabled()) {
                OrcBloomFilterCacheKey bloomFilterCacheKey = new OrcBloomFilterCacheKey();
                bloomFilterCacheKey.setOrcDataSourceId(new OrcDataSourceIdWithTimeStamp(orcDataSource.getId(), orcDataSource.getLastModifiedTime()));
                bloomFilterCacheKey.setStripeOffset(stripe.getOffset());
                bloomFilterCacheKey.setStreamId(entry.getKey());
                try {
                    bloomFilters.put(stream.getColumnId(), orcCacheStore.getBloomFiltersCache().get(bloomFilterCacheKey, () -> metadataReader.readBloomFilterIndexes(inputStream)));
                } catch (UncheckedExecutionException | ExecutionException executionException) {
                    handleCacheLoadException(executionException);
                    log.debug(executionException.getCause(), "Error while caching bloom filters. Falling back to default flow");
                    bloomFilters.put(stream.getColumnId(), metadataReader.readBloomFilterIndexes(inputStream));
                }
            } else {
                bloomFilters.put(stream.getColumnId(), metadataReader.readBloomFilterIndexes(inputStream));
            }
        }
    }
    for (Entry<StreamId, Stream> entry : streams.entrySet()) {
        Stream stream = entry.getValue();
        if (stream.getStreamKind() == BLOOM_FILTER && !bloomFilters.containsKey(stream.getColumnId())) {
            OrcInputStream inputStream = new OrcInputStream(streamsData.get(entry.getKey()));
            if (orcCacheProperties.isBloomFilterCacheEnabled()) {
                OrcBloomFilterCacheKey bloomFilterCacheKey = new OrcBloomFilterCacheKey();
                bloomFilterCacheKey.setOrcDataSourceId(new OrcDataSourceIdWithTimeStamp(orcDataSource.getId(), orcDataSource.getLastModifiedTime()));
                bloomFilterCacheKey.setStripeOffset(stripe.getOffset());
                bloomFilterCacheKey.setStreamId(entry.getKey());
                try {
                    bloomFilters.put(entry.getKey().getColumnId(), orcCacheStore.getBloomFiltersCache().get(bloomFilterCacheKey, () -> metadataReader.readBloomFilterIndexes(inputStream)));
                } catch (UncheckedExecutionException | ExecutionException executionException) {
                    handleCacheLoadException(executionException);
                    log.debug(executionException.getCause(), "Error while caching bloom filters. Falling back to default flow");
                    bloomFilters.put(entry.getKey().getColumnId(), metadataReader.readBloomFilterIndexes(inputStream));
                }
            } else {
                bloomFilters.put(entry.getKey().getColumnId(), metadataReader.readBloomFilterIndexes(inputStream));
            }
        }
    }
    return ImmutableMap.copyOf(bloomFilters);
}
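The two loops implement a precedence rule: BLOOM_FILTER_UTF8 streams are decoded first, and legacy BLOOM_FILTER streams only fill in columns that still lack a filter (the containsKey guard). A condensed sketch of that rule over plain types; Kind, StreamInfo, and the string placeholders are stand-ins for the ORC stream kinds and decoded filters:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

class BloomFilterPrecedenceSketch {
    enum Kind { BLOOM_FILTER, BLOOM_FILTER_UTF8 }

    static final class StreamInfo {
        final int column;
        final Kind kind;
        StreamInfo(int column, Kind kind) { this.column = column; this.kind = kind; }
    }

    static Map<Integer, String> collect(List<StreamInfo> streams) {
        Map<Integer, String> filters = new HashMap<>();
        // Pass 1: prefer the UTF8 encoding.
        for (StreamInfo s : streams) {
            if (s.kind == Kind.BLOOM_FILTER_UTF8) {
                filters.put(s.column, "utf8-filter");
            }
        }
        // Pass 2: legacy filters only where no UTF8 filter exists.
        for (StreamInfo s : streams) {
            if (s.kind == Kind.BLOOM_FILTER && !filters.containsKey(s.column)) {
                filters.put(s.column, "legacy-filter");
            }
        }
        return filters;
    }
}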
Also used: OrcColumnId (io.prestosql.orc.metadata.OrcColumnId), OrcInputStream (io.prestosql.orc.stream.OrcInputStream), UncheckedExecutionException (com.google.common.util.concurrent.UncheckedExecutionException), HashMap (java.util.HashMap), List (java.util.List), ArrayList (java.util.ArrayList), ImmutableList (com.google.common.collect.ImmutableList), ValueInputStream (io.prestosql.orc.stream.ValueInputStream), Stream (io.prestosql.orc.metadata.Stream), InputStream (java.io.InputStream), ExecutionException (java.util.concurrent.ExecutionException)

Example 5 with OrcInputStream

Use of io.prestosql.orc.stream.OrcInputStream in project hetu-core by openlookeng.

Class StripeReader, method readStripe.

public Stripe readStripe(StripeInformation stripe, AggregatedMemoryContext systemMemoryUsage) throws IOException {
    // read the stripe footer
    OrcStripeFooterCacheKey cacheKey = new OrcStripeFooterCacheKey();
    cacheKey.setOrcDataSourceId(new OrcDataSourceIdWithTimeStamp(orcDataSource.getId(), orcDataSource.getLastModifiedTime()));
    cacheKey.setStripeOffset(stripe.getOffset());
    StripeFooter stripeFooter;
    if (orcCacheProperties.isStripeFooterCacheEnabled()) {
        try {
            stripeFooter = orcCacheStore.getStripeFooterCache().get(cacheKey, () -> this.readStripeFooter(stripe, systemMemoryUsage));
        } catch (UncheckedExecutionException | ExecutionException executionException) {
            handleCacheLoadException(executionException);
            log.debug(executionException.getCause(), "Error while caching ORC stripe footer. Falling back to default flow");
            stripeFooter = readStripeFooter(stripe, systemMemoryUsage);
        }
    } else {
        stripeFooter = readStripeFooter(stripe, systemMemoryUsage);
    }
    ColumnMetadata<ColumnEncoding> columnEncodings = stripeFooter.getColumnEncodings();
    if (writeValidation.isPresent()) {
        writeValidation.get().validateTimeZone(orcDataSource.getId(), stripeFooter.getTimeZone());
    }
    ZoneId fileTimeZone = stripeFooter.getTimeZone();
    // get streams for selected columns
    Map<StreamId, Stream> streams = new HashMap<>();
    for (Stream stream : stripeFooter.getStreams()) {
        if (includedOrcColumnIds.contains(stream.getColumnId()) && isSupportedStreamType(stream, types.get(stream.getColumnId()).getOrcTypeKind())) {
            streams.put(new StreamId(stream), stream);
        }
    }
    // handle stripes with more than one row group
    boolean invalidCheckPoint = false;
    if (stripe.getNumberOfRows() > rowsInRowGroup) {
        // determine ranges of the stripe to read
        Map<StreamId, DiskRange> diskRanges = getDiskRanges(stripeFooter.getStreams());
        diskRanges = Maps.filterKeys(diskRanges, Predicates.in(streams.keySet()));
        // read the file regions
        Map<StreamId, OrcChunkLoader> streamsData = readDiskRanges(stripe.getOffset(), diskRanges, systemMemoryUsage);
        // read the bloom filter for each column
        Map<OrcColumnId, List<HashableBloomFilter>> bloomFilterIndexes = readBloomFilterIndexes(streams, streamsData, stripe);
        // read the row index for each column
        Map<StreamId, List<RowGroupIndex>> columnIndexes = readColumnIndexes(streams, streamsData, bloomFilterIndexes, stripe);
        if (writeValidation.isPresent()) {
            writeValidation.get().validateRowGroupStatistics(orcDataSource.getId(), stripe.getOffset(), columnIndexes);
        }
        // select the row groups matching the tuple domain
        Set<Integer> selectedRowGroups = selectRowGroups(stripe, columnIndexes);
        // if all row groups are skipped, return null
        if (selectedRowGroups.isEmpty()) {
            // set accounted memory usage to zero
            systemMemoryUsage.close();
            return null;
        }
        // value streams
        Map<StreamId, ValueInputStream<?>> valueStreams = createValueStreams(streams, streamsData, columnEncodings);
        // build the dictionary streams
        InputStreamSources dictionaryStreamSources = createDictionaryStreamSources(streams, valueStreams, columnEncodings);
        // build the row groups
        try {
            List<RowGroup> rowGroups = createRowGroups(stripe.getNumberOfRows(), streams, valueStreams, columnIndexes, selectedRowGroups, columnEncodings);
            return new Stripe(stripe.getNumberOfRows(), fileTimeZone, columnEncodings, rowGroups, dictionaryStreamSources);
        } catch (InvalidCheckpointException e) {
            // The ORC file contains a corrupt checkpoint stream; treat the stripe as a single row group.
            invalidCheckPoint = true;
        }
    }
    // stripe only has one row group
    ImmutableMap.Builder<StreamId, DiskRange> diskRangesBuilder = ImmutableMap.builder();
    for (Entry<StreamId, DiskRange> entry : getDiskRanges(stripeFooter.getStreams()).entrySet()) {
        StreamId streamId = entry.getKey();
        if (streams.containsKey(streamId)) {
            diskRangesBuilder.put(entry);
        }
    }
    ImmutableMap<StreamId, DiskRange> diskRanges = diskRangesBuilder.build();
    // read the file regions
    Map<StreamId, OrcChunkLoader> streamsData = readDiskRanges(stripe.getOffset(), diskRanges, systemMemoryUsage);
    long minAverageRowBytes = 0;
    for (Entry<StreamId, Stream> entry : streams.entrySet()) {
        if (entry.getKey().getStreamKind() == ROW_INDEX) {
            List<RowGroupIndex> rowGroupIndexes;
            if (orcCacheProperties.isRowIndexCacheEnabled()) {
                OrcRowIndexCacheKey indexCacheKey = new OrcRowIndexCacheKey();
                indexCacheKey.setOrcDataSourceId(new OrcDataSourceIdWithTimeStamp(orcDataSource.getId(), orcDataSource.getLastModifiedTime()));
                indexCacheKey.setStripeOffset(stripe.getOffset());
                indexCacheKey.setStreamId(entry.getKey());
                try {
                    rowGroupIndexes = orcCacheStore.getRowIndexCache().get(indexCacheKey, () -> metadataReader.readRowIndexes(hiveWriterVersion, new OrcInputStream(streamsData.get(entry.getKey()))));
                } catch (UncheckedExecutionException | ExecutionException executionException) {
                    handleCacheLoadException(executionException);
                    log.debug(executionException.getCause(), "Error while caching row group indexes. Falling back to default flow");
                    rowGroupIndexes = metadataReader.readRowIndexes(hiveWriterVersion, new OrcInputStream(streamsData.get(entry.getKey())));
                }
            } else {
                rowGroupIndexes = metadataReader.readRowIndexes(hiveWriterVersion, new OrcInputStream(streamsData.get(entry.getKey())));
            }
            checkState(rowGroupIndexes.size() == 1 || invalidCheckPoint, "expect a single row group or an invalid check point");
            long totalBytes = 0;
            long totalRows = 0;
            for (RowGroupIndex rowGroupIndex : rowGroupIndexes) {
                ColumnStatistics columnStatistics = rowGroupIndex.getColumnStatistics();
                if (columnStatistics.hasMinAverageValueSizeInBytes()) {
                    totalBytes += columnStatistics.getMinAverageValueSizeInBytes() * columnStatistics.getNumberOfValues();
                    totalRows += columnStatistics.getNumberOfValues();
                }
            }
            if (totalRows > 0) {
                minAverageRowBytes += totalBytes / totalRows;
            }
        }
    }
    // value streams
    Map<StreamId, ValueInputStream<?>> valueStreams = createValueStreams(streams, streamsData, columnEncodings);
    // build the dictionary streams
    InputStreamSources dictionaryStreamSources = createDictionaryStreamSources(streams, valueStreams, columnEncodings);
    // build the row group
    ImmutableMap.Builder<StreamId, InputStreamSource<?>> builder = ImmutableMap.builder();
    for (Entry<StreamId, ValueInputStream<?>> entry : valueStreams.entrySet()) {
        builder.put(entry.getKey(), new ValueInputStreamSource<>(entry.getValue()));
    }
    RowGroup rowGroup = new RowGroup(0, 0, stripe.getNumberOfRows(), minAverageRowBytes, new InputStreamSources(builder.build()));
    return new Stripe(stripe.getNumberOfRows(), fileTimeZone, columnEncodings, ImmutableList.of(rowGroup), dictionaryStreamSources);
}
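One detail worth isolating is the minAverageRowBytes estimate: for each ROW_INDEX stream, the loop sums averageValueSize * valueCount over the row groups, divides by the total value count, and adds that per-column average to the running total. A standalone sketch of the arithmetic, with hypothetical array inputs in place of ColumnStatistics:

// Per-column average value width, as accumulated in readStripe above.
static long averageValueBytes(long[] avgValueSizes, long[] valueCounts) {
    long totalBytes = 0;
    long totalRows = 0;
    for (int i = 0; i < avgValueSizes.length; i++) {
        totalBytes += avgValueSizes[i] * valueCounts[i];
        totalRows += valueCounts[i];
    }
    return totalRows > 0 ? totalBytes / totalRows : 0;
}

// minAverageRowBytes is the sum of averageValueBytes(...) over all
// ROW_INDEX streams, i.e. an estimated per-row byte cost across columns.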
Also used: ValueInputStream (io.prestosql.orc.stream.ValueInputStream), OrcColumnId (io.prestosql.orc.metadata.OrcColumnId), HashMap (java.util.HashMap), InvalidCheckpointException (io.prestosql.orc.checkpoint.InvalidCheckpointException), ValueInputStreamSource (io.prestosql.orc.stream.ValueInputStreamSource), InputStreamSource (io.prestosql.orc.stream.InputStreamSource), OrcInputStream (io.prestosql.orc.stream.OrcInputStream), Stream (io.prestosql.orc.metadata.Stream), InputStream (java.io.InputStream), List (java.util.List), ArrayList (java.util.ArrayList), ImmutableList (com.google.common.collect.ImmutableList), UncheckedExecutionException (com.google.common.util.concurrent.UncheckedExecutionException), ExecutionException (java.util.concurrent.ExecutionException), ColumnStatistics (io.prestosql.orc.metadata.statistics.ColumnStatistics), ZoneId (java.time.ZoneId), OrcChunkLoader (io.prestosql.orc.stream.OrcChunkLoader), ImmutableMap (com.google.common.collect.ImmutableMap), ImmutableMap.toImmutableMap (com.google.common.collect.ImmutableMap.toImmutableMap), ColumnEncoding (io.prestosql.orc.metadata.ColumnEncoding), InputStreamSources (io.prestosql.orc.stream.InputStreamSources), StripeFooter (io.prestosql.orc.metadata.StripeFooter), RowGroupIndex (io.prestosql.orc.metadata.RowGroupIndex)

Aggregations

OrcInputStream (io.prestosql.orc.stream.OrcInputStream): 6 uses
InputStream (java.io.InputStream): 6 uses
Stream (io.prestosql.orc.metadata.Stream): 4 uses
ValueInputStream (io.prestosql.orc.stream.ValueInputStream): 4 uses
ImmutableList (com.google.common.collect.ImmutableList): 3 uses
UncheckedExecutionException (com.google.common.util.concurrent.UncheckedExecutionException): 3 uses
Slice (io.airlift.slice.Slice): 3 uses
OrcColumnId (io.prestosql.orc.metadata.OrcColumnId): 3 uses
ArrayList (java.util.ArrayList): 3 uses
List (java.util.List): 3 uses
ExecutionException (java.util.concurrent.ExecutionException): 3 uses
ImmutableMap (com.google.common.collect.ImmutableMap): 2 uses
ImmutableMap.toImmutableMap (com.google.common.collect.ImmutableMap.toImmutableMap): 2 uses
Checkpoints.getDictionaryStreamCheckpoint (io.prestosql.orc.checkpoint.Checkpoints.getDictionaryStreamCheckpoint): 2 uses
StreamCheckpoint (io.prestosql.orc.checkpoint.StreamCheckpoint): 2 uses
OrcMetadataReader (io.prestosql.orc.metadata.OrcMetadataReader): 2 uses
RowGroupIndex (io.prestosql.orc.metadata.RowGroupIndex): 2 uses
StripeFooter (io.prestosql.orc.metadata.StripeFooter): 2 uses
ColumnStatistics (io.prestosql.orc.metadata.statistics.ColumnStatistics): 2 uses
HashMap (java.util.HashMap): 2 uses