Example 6 with RowGroup

Use of org.apache.parquet.format.RowGroup in project parquet-mr by apache.

From the class ParquetMetadataConverter, the method filterFileMetaDataByMidpoint:

// Visible for testing
static FileMetaData filterFileMetaDataByMidpoint(FileMetaData metaData, RangeMetadataFilter filter) {
    List<RowGroup> rowGroups = metaData.getRow_groups();
    List<RowGroup> newRowGroups = new ArrayList<RowGroup>();
    for (RowGroup rowGroup : rowGroups) {
        // A row group starts at the offset of its first column chunk and spans
        // the total compressed size of all of its column chunks.
        long totalSize = 0;
        long startIndex = getOffset(rowGroup.getColumns().get(0));
        for (ColumnChunk col : rowGroup.getColumns()) {
            totalSize += col.getMeta_data().getTotal_compressed_size();
        }
        // Keep the row group only if its midpoint falls inside the requested byte
        // range, so that each row group is assigned to exactly one range.
        long midPoint = startIndex + totalSize / 2;
        if (filter.contains(midPoint)) {
            newRowGroups.add(rowGroup);
        }
    }
    metaData.setRow_groups(newRowGroups);
    return metaData;
}
Also used: RowGroup(org.apache.parquet.format.RowGroup) ArrayList(java.util.ArrayList) ColumnChunk(org.apache.parquet.format.ColumnChunk)
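
The midpoint rule exists so that when a file is read as several byte-range splits, each row group is claimed by exactly one split even if it straddles a boundary. A minimal sketch of that invariant, using a hypothetical Split class in place of parquet-mr's RangeMetadataFilter (which keeps offsets in a half-open range, as used above):

// Hypothetical Split type standing in for RangeMetadataFilter; not a parquet-mr class.
class Split {
    final long start;
    final long end; // exclusive

    Split(long start, long end) {
        this.start = start;
        this.end = end;
    }

    // Half-open containment, mirroring the contains check used above.
    boolean contains(long offset) {
        return start <= offset && offset < end;
    }
}

// A row group spanning [rgStart, rgStart + rgSize) belongs to whichever split holds its midpoint.
static boolean ownsRowGroup(Split split, long rgStart, long rgSize) {
    return split.contains(rgStart + rgSize / 2);
}

Because the midpoint is a single offset, splits that partition the file's byte range also partition the row groups: no group is read twice, and none is skipped.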

Example 7 with RowGroup

Use of org.apache.parquet.format.RowGroup in project parquet-mr by apache.

From the class TestParquetMetadataConverter, the method verifyMD:

private void verifyMD(FileMetaData md, long... offsets) {
    // Expect exactly one row group per expected offset, in order.
    assertEquals(offsets.length, md.getRow_groups().size());
    for (int i = 0; i < offsets.length; i++) {
        long offset = offsets[i];
        RowGroup rowGroup = md.getRow_groups().get(i);
        assertEquals(offset, getOffset(rowGroup));
    }
}
Also used: RowGroup(org.apache.parquet.format.RowGroup) ParquetMetadataConverter.filterFileMetaDataByMidpoint(org.apache.parquet.format.converter.ParquetMetadataConverter.filterFileMetaDataByMidpoint)
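
A sketch of how verifyMD pairs with filterFileMetaDataByMidpoint from Example 6. The metadata(...) call here is an assumed test helper that builds a FileMetaData whose row groups have the given compressed sizes laid out back to back from offset 0; the real test class uses a similar helper.

// Hypothetical usage; metadata(...) is an assumed helper, and RangeMetadataFilter's
// (start, end) constructor is assumed visible to the test package.
@Test
public void testFilterByMidpoint() {
    // Three 100-byte row groups at offsets 0, 100, 200; midpoints at 50, 150, 250.
    // The filter mutates its argument, so fresh metadata is built for each call.
    verifyMD(filterFileMetaDataByMidpoint(metadata(100, 100, 100), new RangeMetadataFilter(0, 151)), 0, 100);
    verifyMD(filterFileMetaDataByMidpoint(metadata(100, 100, 100), new RangeMetadataFilter(151, 300)), 200);
}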

Example 8 with RowGroup

Use of org.apache.parquet.format.RowGroup in project presto by prestodb.

From the class ParquetWriter, the method updateRowGroups:

private void updateRowGroups(List<ColumnMetaData> columnMetaData) {
    // TODO Avoid writing empty row group
    // A row group's size is the sum of its column chunks' compressed sizes.
    long totalBytes = columnMetaData.stream().mapToLong(ColumnMetaData::getTotal_compressed_size).sum();
    ImmutableList<org.apache.parquet.format.ColumnChunk> columnChunks = columnMetaData.stream().map(ParquetWriter::toColumnChunk).collect(toImmutableList());
    rowGroupBuilder.add(new RowGroup(columnChunks, totalBytes, rows));
}
Also used: RowGroup(org.apache.parquet.format.RowGroup)
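
Presto's toColumnChunk is not shown above; a rough sketch of such a conversion, using the thrift-generated constructors from parquet-format (required fields only; every field value below is a placeholder, not what ParquetWriter actually computes):

import java.util.Collections;
import org.apache.parquet.format.ColumnChunk;
import org.apache.parquet.format.ColumnMetaData;
import org.apache.parquet.format.CompressionCodec;
import org.apache.parquet.format.Encoding;
import org.apache.parquet.format.Type;

// Hypothetical sketch; the real ParquetWriter::toColumnChunk derives these values
// from the pages it has written.
static ColumnChunk toColumnChunkSketch() {
    // The thrift constructor takes the required fields in declaration order.
    ColumnMetaData metaData = new ColumnMetaData(
            Type.INT64,                                 // physical type
            Collections.singletonList(Encoding.PLAIN),  // encodings used in the chunk
            Collections.singletonList("id"),            // path in the schema
            CompressionCodec.UNCOMPRESSED,              // codec
            1000L,                                      // num_values
            4096L,                                      // total_uncompressed_size
            4096L,                                      // total_compressed_size
            4L);                                        // data_page_offset
    ColumnChunk columnChunk = new ColumnChunk(4L);      // file_offset of the chunk
    columnChunk.setMeta_data(metaData);
    return columnChunk;
}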

Example 9 with RowGroup

Use of org.apache.parquet.format.RowGroup in project presto by prestodb.

From the class MetadataReader, the method readFooter:

public static ParquetFileMetadata readFooter(ParquetDataSource parquetDataSource, long fileSize) throws IOException {
    // Parquet File Layout:
    // 
    // MAGIC
    // variable: Data
    // variable: Metadata
    // 4 bytes: MetadataLength
    // MAGIC
    validateParquet(fileSize >= MAGIC.length() + POST_SCRIPT_SIZE, "%s is not a valid Parquet File", parquetDataSource.getId());
    // EXPECTED_FOOTER_SIZE is an int, so this will never fail
    byte[] buffer = new byte[toIntExact(min(fileSize, EXPECTED_FOOTER_SIZE))];
    parquetDataSource.readFully(fileSize - buffer.length, buffer);
    Slice tailSlice = wrappedBuffer(buffer);
    Slice magic = tailSlice.slice(tailSlice.length() - MAGIC.length(), MAGIC.length());
    if (!MAGIC.equals(magic)) {
        throw new ParquetCorruptionException(format("Not valid Parquet file: %s expected magic number: %s got: %s", parquetDataSource.getId(), Arrays.toString(MAGIC.getBytes()), Arrays.toString(magic.getBytes())));
    }
    // The postscript stores the footer length as a 4-byte little-endian int just before the magic.
    int metadataLength = tailSlice.getInt(tailSlice.length() - POST_SCRIPT_SIZE);
    int completeFooterSize = metadataLength + POST_SCRIPT_SIZE;
    long metadataFileOffset = fileSize - completeFooterSize;
    validateParquet(metadataFileOffset >= MAGIC.length() && metadataFileOffset + POST_SCRIPT_SIZE < fileSize, "Corrupted Parquet file: %s metadata index: %s out of range", parquetDataSource.getId(), metadataFileOffset);
    // Ensure the slice covers the entire metadata range
    if (tailSlice.length() < completeFooterSize) {
        byte[] footerBuffer = new byte[completeFooterSize];
        parquetDataSource.readFully(metadataFileOffset, footerBuffer, 0, footerBuffer.length - tailSlice.length());
        // Copy the previous slice contents into the new buffer
        tailSlice.getBytes(0, footerBuffer, footerBuffer.length - tailSlice.length(), tailSlice.length());
        tailSlice = wrappedBuffer(footerBuffer, 0, footerBuffer.length);
    }
    FileMetaData fileMetaData = readFileMetaData(tailSlice.slice(tailSlice.length() - completeFooterSize, metadataLength).getInput());
    List<SchemaElement> schema = fileMetaData.getSchema();
    validateParquet(!schema.isEmpty(), "Empty Parquet schema in file: %s", parquetDataSource.getId());
    MessageType messageType = readParquetSchema(schema);
    List<BlockMetaData> blocks = new ArrayList<>();
    List<RowGroup> rowGroups = fileMetaData.getRow_groups();
    if (rowGroups != null) {
        // Translate each thrift RowGroup into a parquet-mr BlockMetaData for the engine.
        for (RowGroup rowGroup : rowGroups) {
            BlockMetaData blockMetaData = new BlockMetaData();
            blockMetaData.setRowCount(rowGroup.getNum_rows());
            blockMetaData.setTotalByteSize(rowGroup.getTotal_byte_size());
            List<ColumnChunk> columns = rowGroup.getColumns();
            validateParquet(!columns.isEmpty(), "No columns in row group: %s", rowGroup);
            String filePath = columns.get(0).getFile_path();
            for (ColumnChunk columnChunk : columns) {
                validateParquet((filePath == null && columnChunk.getFile_path() == null) || (filePath != null && filePath.equals(columnChunk.getFile_path())), "all column chunks of the same row group must be in the same file");
                ColumnMetaData metaData = columnChunk.meta_data;
                String[] path = metaData.path_in_schema.stream().map(value -> value.toLowerCase(Locale.ENGLISH)).toArray(String[]::new);
                ColumnPath columnPath = ColumnPath.get(path);
                PrimitiveType primitiveType = messageType.getType(columnPath.toArray()).asPrimitiveType();
                PrimitiveTypeName primitiveTypeName = primitiveType.getPrimitiveTypeName();
                ColumnChunkMetaData column = ColumnChunkMetaData.get(columnPath, primitiveType, CompressionCodecName.fromParquet(metaData.codec), PARQUET_METADATA_CONVERTER.convertEncodingStats(metaData.encoding_stats), readEncodings(metaData.encodings), readStats(metaData.statistics, primitiveTypeName), metaData.data_page_offset, metaData.dictionary_page_offset, metaData.num_values, metaData.total_compressed_size, metaData.total_uncompressed_size);
                column.setColumnIndexReference(toColumnIndexReference(columnChunk));
                column.setOffsetIndexReference(toOffsetIndexReference(columnChunk));
                blockMetaData.addColumn(column);
            }
            blockMetaData.setPath(filePath);
            blocks.add(blockMetaData);
        }
    }
    Map<String, String> keyValueMetaData = new HashMap<>();
    List<KeyValue> keyValueList = fileMetaData.getKey_value_metadata();
    if (keyValueList != null) {
        for (KeyValue keyValue : keyValueList) {
            keyValueMetaData.put(keyValue.key, keyValue.value);
        }
    }
    ParquetMetadata parquetMetadata = new ParquetMetadata(new org.apache.parquet.hadoop.metadata.FileMetaData(messageType, keyValueMetaData, fileMetaData.getCreated_by()), blocks);
    return new ParquetFileMetadata(parquetMetadata, toIntExact(metadataLength));
}
Also used: PrimitiveType(org.apache.parquet.schema.PrimitiveType) ColumnPath(org.apache.parquet.hadoop.metadata.ColumnPath) Arrays(java.util.Arrays) Slice(io.airlift.slice.Slice) Util.readFileMetaData(org.apache.parquet.format.Util.readFileMetaData) ConvertedType(org.apache.parquet.format.ConvertedType) Repetition(org.apache.parquet.schema.Type.Repetition) HashMap(java.util.HashMap) FileMetaData(org.apache.parquet.format.FileMetaData) ParquetMetadataConverter(org.apache.parquet.format.converter.ParquetMetadataConverter) ArrayList(java.util.ArrayList) ParquetCorruptionException(com.facebook.presto.parquet.ParquetCorruptionException) HashSet(java.util.HashSet) Slices.wrappedBuffer(io.airlift.slice.Slices.wrappedBuffer) KeyValue(org.apache.parquet.format.KeyValue) Locale(java.util.Locale) SchemaElement(org.apache.parquet.format.SchemaElement) Map(java.util.Map) Type(org.apache.parquet.format.Type) IndexReference(org.apache.parquet.internal.hadoop.metadata.IndexReference) PrimitiveTypeName(org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName) Math.toIntExact(java.lang.Math.toIntExact) OriginalType(org.apache.parquet.schema.OriginalType) ParquetDataSource(com.facebook.presto.parquet.ParquetDataSource) Types(org.apache.parquet.schema.Types) Iterator(java.util.Iterator) Encoding(org.apache.parquet.format.Encoding) Set(java.util.Set) Statistics(org.apache.parquet.format.Statistics) IOException(java.io.IOException) Math.min(java.lang.Math.min) ColumnChunkMetaData(org.apache.parquet.hadoop.metadata.ColumnChunkMetaData) String.format(java.lang.String.format) ColumnChunk(org.apache.parquet.format.ColumnChunk) ColumnMetaData(org.apache.parquet.format.ColumnMetaData) US_ASCII(java.nio.charset.StandardCharsets.US_ASCII) MessageType(org.apache.parquet.schema.MessageType) List(java.util.List) RowGroup(org.apache.parquet.format.RowGroup) BlockMetaData(org.apache.parquet.hadoop.metadata.BlockMetaData) CompressionCodecName(org.apache.parquet.hadoop.metadata.CompressionCodecName) ParquetMetadata(org.apache.parquet.hadoop.metadata.ParquetMetadata) Collections(java.util.Collections) ParquetValidationUtils.validateParquet(com.facebook.presto.parquet.ParquetValidationUtils.validateParquet)
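
The file layout that readFooter relies on can be demonstrated with plain JDK I/O. A minimal sketch, assuming a local file path, that reads the 8-byte postscript (4-byte little-endian footer length plus the 4-byte magic "PAR1") and reports where the metadata starts:

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;

// Minimal sketch of the postscript parsing that readFooter performs.
static long findMetadataOffset(String path) throws IOException {
    try (RandomAccessFile file = new RandomAccessFile(path, "r")) {
        long fileSize = file.length();
        // Postscript: 4-byte little-endian metadata length, then the magic "PAR1".
        byte[] postScript = new byte[8];
        file.seek(fileSize - postScript.length);
        file.readFully(postScript);
        String magic = new String(postScript, 4, 4, StandardCharsets.US_ASCII);
        if (!"PAR1".equals(magic)) {
            throw new IOException("Not a Parquet file: bad magic " + magic);
        }
        int metadataLength = ByteBuffer.wrap(postScript, 0, 4).order(ByteOrder.LITTLE_ENDIAN).getInt();
        // The metadata immediately precedes the postscript.
        return fileSize - postScript.length - metadataLength;
    }
}

Unlike this sketch, readFooter speculatively reads the last EXPECTED_FOOTER_SIZE bytes in one request and only issues a second read when the footer turns out to be larger, which saves a round trip against remote storage.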

Aggregations

RowGroup (org.apache.parquet.format.RowGroup): 9
ArrayList (java.util.ArrayList): 7
ColumnChunk (org.apache.parquet.format.ColumnChunk): 5
ColumnMetaData (org.apache.parquet.format.ColumnMetaData): 4
FileMetaData (org.apache.parquet.format.FileMetaData): 3
BlockMetaData (org.apache.parquet.hadoop.metadata.BlockMetaData): 3
ColumnChunkMetaData (org.apache.parquet.hadoop.metadata.ColumnChunkMetaData): 3
HashMap (java.util.HashMap): 2
KeyValue (org.apache.parquet.format.KeyValue): 2
SchemaElement (org.apache.parquet.format.SchemaElement): 2
Util.readFileMetaData (org.apache.parquet.format.Util.readFileMetaData): 2
ColumnPath (org.apache.parquet.hadoop.metadata.ColumnPath): 2
ParquetMetadata (org.apache.parquet.hadoop.metadata.ParquetMetadata): 2
MessageType (org.apache.parquet.schema.MessageType): 2
ParquetCorruptionException (com.facebook.presto.parquet.ParquetCorruptionException): 1
ParquetDataSource (com.facebook.presto.parquet.ParquetDataSource): 1
ParquetValidationUtils.validateParquet (com.facebook.presto.parquet.ParquetValidationUtils.validateParquet): 1
Slice (io.airlift.slice.Slice): 1
Slices.wrappedBuffer (io.airlift.slice.Slices.wrappedBuffer): 1
IOException (java.io.IOException): 1