Search in sources :

Example 1 with DataPage

Use of org.apache.parquet.column.page.DataPage in the project parquet-mr by apache.

From the class TestParquetFileWriter, method validateContains.

/**
 * Asserts that the next data page for the given column path contains the
 * expected number of values and exactly the expected raw bytes.
 */
private void validateContains(MessageType schema, PageReadStore pages, String[] path, int values, BytesInput bytes) throws IOException {
    PageReader columnPages = pages.getPageReader(schema.getColumnDescription(path));
    DataPage firstPage = columnPages.readPage();
    assertEquals(values, firstPage.getValueCount());
    // Pages written by this test are V1 pages; compare the raw page payload.
    byte[] expected = bytes.toByteArray();
    byte[] actual = ((DataPageV1) firstPage).getBytes().toByteArray();
    assertArrayEquals(expected, actual);
}
Also used : DataPage(org.apache.parquet.column.page.DataPage) PageReader(org.apache.parquet.column.page.PageReader) DataPageV1(org.apache.parquet.column.page.DataPageV1)

Example 2 with DataPage

Use of org.apache.parquet.column.page.DataPage in the project parquet-mr by apache.

From the class ShowPagesCommand, method run.

/**
 * Prints dictionary-page and data-page details for each selected column of
 * the target Parquet file, grouped by column.
 *
 * @return 0 on success
 * @throws IOException if the file cannot be opened or read
 */
@Override
@SuppressWarnings("unchecked")
public int run() throws IOException {
    Preconditions.checkArgument(targets != null && targets.size() >= 1, "A Parquet file is required.");
    Preconditions.checkArgument(targets.size() == 1, "Cannot process multiple Parquet files.");
    String source = targets.get(0);
    // try-with-resources: the reader holds an open stream on the file and was
    // previously never closed (resource leak).
    try (ParquetFileReader reader = ParquetFileReader.open(getConf(), qualifiedPath(source))) {
        MessageType schema = reader.getFileMetaData().getSchema();
        Map<ColumnDescriptor, PrimitiveType> columns = Maps.newLinkedHashMap();
        if (this.columns == null || this.columns.isEmpty()) {
            // no columns requested: show every leaf column in the schema
            for (ColumnDescriptor descriptor : schema.getColumns()) {
                columns.put(descriptor, primitive(schema, descriptor.getPath()));
            }
        } else {
            for (String column : this.columns) {
                columns.put(descriptor(column, schema), primitive(column, schema));
            }
        }
        CompressionCodecName codec = reader.getRowGroups().get(0).getColumns().get(0).getCodec();
        // accumulate formatted lines to print by column, in first-seen order
        Map<String, List<String>> formatted = Maps.newLinkedHashMap();
        PageFormatter formatter = new PageFormatter();
        PageReadStore pageStore;
        int rowGroupNum = 0;
        while ((pageStore = reader.readNextRowGroup()) != null) {
            for (ColumnDescriptor descriptor : columns.keySet()) {
                List<String> lines = formatted.computeIfAbsent(
                    columnName(descriptor), key -> Lists.newArrayList());
                formatter.setContext(rowGroupNum, columns.get(descriptor), codec);
                PageReader pages = pageStore.getPageReader(descriptor);
                DictionaryPage dict = pages.readDictionaryPage();
                if (dict != null) {
                    lines.add(formatter.format(dict));
                }
                DataPage page;
                while ((page = pages.readPage()) != null) {
                    lines.add(formatter.format(page));
                }
            }
            rowGroupNum += 1;
        }
        // TODO: Show total column size and overall size per value in the column summary line
        for (String columnName : formatted.keySet()) {
            console.info(String.format("\nColumn: %s\n%s", columnName, StringUtils.leftPad("", 80, '-')));
            console.info(formatter.getHeader());
            for (String line : formatted.get(columnName)) {
                console.info(line);
            }
            console.info("");
        }
        return 0;
    }
}
Also used : DataPage(org.apache.parquet.column.page.DataPage) ParquetFileReader(org.apache.parquet.hadoop.ParquetFileReader) ColumnDescriptor(org.apache.parquet.column.ColumnDescriptor) PageReader(org.apache.parquet.column.page.PageReader) Util.minMaxAsString(org.apache.parquet.cli.Util.minMaxAsString) Util.encodingAsString(org.apache.parquet.cli.Util.encodingAsString) CompressionCodecName(org.apache.parquet.hadoop.metadata.CompressionCodecName) PageReadStore(org.apache.parquet.column.page.PageReadStore) PrimitiveType(org.apache.parquet.schema.PrimitiveType) List(java.util.List) MessageType(org.apache.parquet.schema.MessageType) DictionaryPage(org.apache.parquet.column.page.DictionaryPage)

Example 3 with DataPage

Use of org.apache.parquet.column.page.DataPage in the project parquet-mr by apache.

From the class DumpCommand, method dump.

/**
 * Dumps per-page metadata (encodings, statistics, sizes, and value counts)
 * for one column of the given row group to {@code out}.
 */
public static void dump(final PrettyPrintWriter out, PageReadStore store, ColumnDescriptor column) throws IOException {
    PageReader pageReader = store.getPageReader(column);
    // column header: dotted path, total values, max repetition/definition levels
    out.format("%s TV=%d RL=%d DL=%d", Joiner.on('.').skipNulls().join(column.getPath()),
        pageReader.getTotalValueCount(), column.getMaxRepetitionLevel(), column.getMaxDefinitionLevel());
    DictionaryPage dictionary = pageReader.readDictionaryPage();
    if (dictionary != null) {
        out.format(" DS:%d", dictionary.getDictionarySize());
        out.format(" DE:%s", dictionary.getEncoding());
    }
    out.println();
    out.rule('-');
    long pageIndex = 0;
    DataPage current = pageReader.readPage();
    while (current != null) {
        out.format("page %d:", pageIndex);
        // dispatch on the page version to print version-specific encodings
        current.accept(new Visitor<Void>() {

            @Override
            public Void visit(DataPageV1 v1) {
                out.format(" DLE:%s", v1.getDlEncoding());
                out.format(" RLE:%s", v1.getRlEncoding());
                out.format(" VLE:%s", v1.getValueEncoding());
                printStatistics(out, v1.getStatistics());
                return null;
            }

            @Override
            public Void visit(DataPageV2 v2) {
                // V2 pages always encode levels with RLE
                out.format(" DLE:RLE");
                out.format(" RLE:RLE");
                out.format(" VLE:%s", v2.getDataEncoding());
                printStatistics(out, v2.getStatistics());
                return null;
            }
        });
        out.format(" SZ:%d", current.getUncompressedSize());
        out.format(" VC:%d", current.getValueCount());
        out.println();
        current = pageReader.readPage();
        pageIndex++;
    }
}

/** Prints a page's statistics summary, or a "none" marker when absent. */
private static void printStatistics(PrettyPrintWriter out, Statistics<?> statistics) {
    if (statistics != null) {
        out.format(" ST:[%s]", statistics);
    } else {
        out.format(" ST:[none]");
    }
}
Also used : DataPage(org.apache.parquet.column.page.DataPage) PageReader(org.apache.parquet.column.page.PageReader) DataPageV2(org.apache.parquet.column.page.DataPageV2) DataPageV1(org.apache.parquet.column.page.DataPageV1) Statistics(org.apache.parquet.column.statistics.Statistics) DictionaryPage(org.apache.parquet.column.page.DictionaryPage)

Example 4 with DataPage

Use of org.apache.parquet.column.page.DataPage in the project flink by apache.

From the class AbstractColumnReader, method readToVector.

/**
 * Reads {@code readNumber} values from this column reader into {@code vector},
 * pulling new pages from the page reader as each page is exhausted.
 *
 * @param readNumber number of values to read
 * @param vector destination vector; dictionary ids are reserved on it when a
 *     dictionary is present
 * @throws IOException if a page cannot be read
 */
@Override
public final void readToVector(int readNumber, VECTOR vector) throws IOException {
    int rowId = 0;
    WritableIntVector dictionaryIds = null;
    if (dictionary != null) {
        dictionaryIds = vector.reserveDictionaryIds(readNumber);
    }
    while (readNumber > 0) {
        // Compute the number of values we want to read in this page.
        int leftInPage = (int) (endOfPageValueCount - valuesRead);
        if (leftInPage == 0) {
            DataPage page = pageReader.readPage();
            // Previously a null page fell through to page.getClass() and threw a
            // bare NPE; fail with a clear diagnostic instead.
            if (page == null) {
                throw new RuntimeException(
                        "No more pages to read, but " + readNumber + " values remain to be read");
            }
            if (page instanceof DataPageV1) {
                readPageV1((DataPageV1) page);
            } else if (page instanceof DataPageV2) {
                readPageV2((DataPageV2) page);
            } else {
                throw new RuntimeException("Unsupported page type: " + page.getClass());
            }
            leftInPage = (int) (endOfPageValueCount - valuesRead);
        }
        int num = Math.min(readNumber, leftInPage);
        if (isCurrentPageDictionaryEncoded) {
            // Read and decode dictionary ids.
            runLenDecoder.readDictionaryIds(num, dictionaryIds, vector, rowId, maxDefLevel, this.dictionaryIdsDecoder);
            if (vector.hasDictionary() || (rowId == 0 && supportLazyDecode())) {
                // Column vector supports lazy decoding of dictionary values so just set the
                // dictionary.
                // We can't do this if rowId != 0 AND the column doesn't have a dictionary (i.e.
                // some
                // non-dictionary encoded values have already been added).
                vector.setDictionary(new ParquetDictionary(dictionary));
            } else {
                readBatchFromDictionaryIds(rowId, num, vector, dictionaryIds);
            }
        } else {
            if (vector.hasDictionary() && rowId != 0) {
                // This batch already has dictionary encoded values but this new page is not.
                // The batch
                // does not support a mix of dictionary and not so we will decode the
                // dictionary.
                readBatchFromDictionaryIds(0, rowId, vector, vector.getDictionaryIds());
            }
            vector.setDictionary(null);
            readBatch(rowId, num, vector);
        }
        valuesRead += num;
        rowId += num;
        readNumber -= num;
    }
}
Also used : DataPage(org.apache.parquet.column.page.DataPage) DataPageV2(org.apache.parquet.column.page.DataPageV2) DataPageV1(org.apache.parquet.column.page.DataPageV1) ParquetDictionary(org.apache.flink.formats.parquet.vector.ParquetDictionary) WritableIntVector(org.apache.flink.table.data.columnar.vector.writable.WritableIntVector)

Example 5 with DataPage

Use of org.apache.parquet.column.page.DataPage in the project hive by apache.

From the class VectorizedPrimitiveColumnReader, method readPage.

/**
 * Reads the next data page from the page reader and dispatches it to the
 * version-specific decoder (V1 or V2) via the page's visitor API.
 */
private void readPage() throws IOException {
    DataPage next = pageReader.readPage();
    // The visitor double-dispatches on the concrete page version.
    next.accept(new DataPage.Visitor<Void>() {

        @Override
        public Void visit(DataPageV1 v1) {
            readPageV1(v1);
            return null;
        }

        @Override
        public Void visit(DataPageV2 v2) {
            readPageV2(v2);
            return null;
        }
    });
}
Also used : DataPage(org.apache.parquet.column.page.DataPage) DataPageV2(org.apache.parquet.column.page.DataPageV2) DataPageV1(org.apache.parquet.column.page.DataPageV1)

Aggregations

DataPage (org.apache.parquet.column.page.DataPage)11 DataPageV1 (org.apache.parquet.column.page.DataPageV1)6 DataPageV2 (org.apache.parquet.column.page.DataPageV2)5 ColumnDescriptor (org.apache.parquet.column.ColumnDescriptor)4 PageReader (org.apache.parquet.column.page.PageReader)4 MessageType (org.apache.parquet.schema.MessageType)3 Test (org.junit.Test)3 ColumnReader (org.apache.parquet.column.ColumnReader)2 DictionaryPage (org.apache.parquet.column.page.DictionaryPage)2 MemPageReader (org.apache.parquet.column.page.mem.MemPageReader)2 MemPageWriter (org.apache.parquet.column.page.mem.MemPageWriter)2 List (java.util.List)1 ParquetDictionary (org.apache.flink.formats.parquet.vector.ParquetDictionary)1 WritableIntVector (org.apache.flink.table.data.columnar.vector.writable.WritableIntVector)1 Util.encodingAsString (org.apache.parquet.cli.Util.encodingAsString)1 Util.minMaxAsString (org.apache.parquet.cli.Util.minMaxAsString)1 PageReadStore (org.apache.parquet.column.page.PageReadStore)1 PageWriter (org.apache.parquet.column.page.PageWriter)1 MemPageStore (org.apache.parquet.column.page.mem.MemPageStore)1 LongStatistics (org.apache.parquet.column.statistics.LongStatistics)1