Example 1 with KeyValue

Use of org.apache.parquet.format.KeyValue in project parquet-mr by apache.

From the class ParquetMetadataConverter, method fromParquetMetadata:

public ParquetMetadata fromParquetMetadata(FileMetaData parquetMetadata) throws IOException {
    MessageType messageType = fromParquetSchema(parquetMetadata.getSchema(), parquetMetadata.getColumn_orders());
    List<BlockMetaData> blocks = new ArrayList<BlockMetaData>();
    List<RowGroup> row_groups = parquetMetadata.getRow_groups();
    if (row_groups != null) {
        for (RowGroup rowGroup : row_groups) {
            BlockMetaData blockMetaData = new BlockMetaData();
            blockMetaData.setRowCount(rowGroup.getNum_rows());
            blockMetaData.setTotalByteSize(rowGroup.getTotal_byte_size());
            List<ColumnChunk> columns = rowGroup.getColumns();
            String filePath = columns.get(0).getFile_path();
            for (ColumnChunk columnChunk : columns) {
                if ((filePath == null && columnChunk.getFile_path() != null) || (filePath != null && !filePath.equals(columnChunk.getFile_path()))) {
                    throw new ParquetDecodingException("all column chunks of the same row group must be in the same file for now");
                }
                ColumnMetaData metaData = columnChunk.meta_data;
                ColumnPath path = getPath(metaData);
                ColumnChunkMetaData column = ColumnChunkMetaData.get(
                    path,
                    messageType.getType(path.toArray()).asPrimitiveType(),
                    fromFormatCodec(metaData.codec),
                    convertEncodingStats(metaData.getEncoding_stats()),
                    fromFormatEncodings(metaData.encodings),
                    fromParquetStatistics(parquetMetadata.getCreated_by(), metaData.statistics, messageType.getType(path.toArray()).asPrimitiveType()),
                    metaData.data_page_offset,
                    metaData.dictionary_page_offset,
                    metaData.num_values,
                    metaData.total_compressed_size,
                    metaData.total_uncompressed_size);
                // TODO
                // index_page_offset
                // key_value_metadata
                blockMetaData.addColumn(column);
            }
            blockMetaData.setPath(filePath);
            blocks.add(blockMetaData);
        }
    }
    Map<String, String> keyValueMetaData = new HashMap<String, String>();
    List<KeyValue> key_value_metadata = parquetMetadata.getKey_value_metadata();
    if (key_value_metadata != null) {
        for (KeyValue keyValue : key_value_metadata) {
            keyValueMetaData.put(keyValue.key, keyValue.value);
        }
    }
    return new ParquetMetadata(new org.apache.parquet.hadoop.metadata.FileMetaData(messageType, keyValueMetaData, parquetMetadata.getCreated_by()), blocks);
}
Also used: BlockMetaData (org.apache.parquet.hadoop.metadata.BlockMetaData), ParquetDecodingException (org.apache.parquet.io.ParquetDecodingException), KeyValue (org.apache.parquet.format.KeyValue), ColumnChunkMetaData (org.apache.parquet.hadoop.metadata.ColumnChunkMetaData), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), ParquetMetadata (org.apache.parquet.hadoop.metadata.ParquetMetadata), RowGroup (org.apache.parquet.format.RowGroup), ArrayList (java.util.ArrayList), ColumnPath (org.apache.parquet.hadoop.metadata.ColumnPath), ColumnChunk (org.apache.parquet.format.ColumnChunk), ColumnMetaData (org.apache.parquet.format.ColumnMetaData), MessageType (org.apache.parquet.schema.MessageType)
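
For reuse outside the converter, the key/value extraction loop above can be factored into a standalone helper. This is a minimal sketch, assuming only the Thrift-generated accessors already used in the example; KeyValueMetadataUtil is a hypothetical name, not a parquet-mr class. Note that getKey_value_metadata() returns null when the footer carries no application metadata, which the helper handles the same way the loop does.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.parquet.format.FileMetaData;
import org.apache.parquet.format.KeyValue;

public final class KeyValueMetadataUtil {
    // Converts the Thrift KeyValue list into a plain Map. On duplicate keys the
    // later entry wins, matching the put() loop in fromParquetMetadata above.
    public static Map<String, String> toMap(FileMetaData footer) {
        Map<String, String> result = new HashMap<>();
        List<KeyValue> entries = footer.getKey_value_metadata();
        if (entries != null) {
            for (KeyValue entry : entries) {
                result.put(entry.key, entry.value);
            }
        }
        return result;
    }

    private KeyValueMetadataUtil() {
    }
}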

Example 2 with KeyValue

Use of org.apache.parquet.format.KeyValue in project parquet-mr by apache.

From the class ParquetMetadataConverter, method fromParquetMetadata (the variant that supports encrypted footers and columns):

public ParquetMetadata fromParquetMetadata(FileMetaData parquetMetadata, InternalFileDecryptor fileDecryptor, boolean encryptedFooter) throws IOException {
    MessageType messageType = fromParquetSchema(parquetMetadata.getSchema(), parquetMetadata.getColumn_orders());
    List<BlockMetaData> blocks = new ArrayList<BlockMetaData>();
    List<RowGroup> row_groups = parquetMetadata.getRow_groups();
    if (row_groups != null) {
        for (RowGroup rowGroup : row_groups) {
            BlockMetaData blockMetaData = new BlockMetaData();
            blockMetaData.setRowCount(rowGroup.getNum_rows());
            blockMetaData.setTotalByteSize(rowGroup.getTotal_byte_size());
            // not set in legacy files
            if (rowGroup.isSetOrdinal()) {
                blockMetaData.setOrdinal(rowGroup.getOrdinal());
            }
            List<ColumnChunk> columns = rowGroup.getColumns();
            String filePath = columns.get(0).getFile_path();
            int columnOrdinal = -1;
            for (ColumnChunk columnChunk : columns) {
                columnOrdinal++;
                if ((filePath == null && columnChunk.getFile_path() != null) || (filePath != null && !filePath.equals(columnChunk.getFile_path()))) {
                    throw new ParquetDecodingException("all column chunks of the same row group must be in the same file for now");
                }
                ColumnMetaData metaData = columnChunk.meta_data;
                ColumnCryptoMetaData cryptoMetaData = columnChunk.getCrypto_metadata();
                ColumnChunkMetaData column = null;
                ColumnPath columnPath = null;
                boolean encryptedMetadata = false;
                if (null == cryptoMetaData) {
                    // Plaintext column
                    columnPath = getPath(metaData);
                    if (null != fileDecryptor && !fileDecryptor.plaintextFile()) {
                        // mark this column as plaintext in encrypted file decryptor
                        fileDecryptor.setColumnCryptoMetadata(columnPath, false, false, (byte[]) null, columnOrdinal);
                    }
                } else {
                    // Encrypted column
                    boolean encryptedWithFooterKey = cryptoMetaData.isSetENCRYPTION_WITH_FOOTER_KEY();
                    if (encryptedWithFooterKey) {
                        // Column encrypted with footer key
                        if (!encryptedFooter) {
                            throw new ParquetCryptoRuntimeException("Column encrypted with footer key in file with plaintext footer");
                        }
                        if (null == metaData) {
                            throw new ParquetCryptoRuntimeException("ColumnMetaData not set in Encryption with Footer key");
                        }
                        if (null == fileDecryptor) {
                            throw new ParquetCryptoRuntimeException("Column encrypted with footer key: No keys available");
                        }
                        columnPath = getPath(metaData);
                        fileDecryptor.setColumnCryptoMetadata(columnPath, true, true, (byte[]) null, columnOrdinal);
                    } else {
                        // Column encrypted with column key
                        // setColumnCryptoMetadata triggers KMS interaction, hence delayed until this column is projected
                        encryptedMetadata = true;
                    }
                }
                String createdBy = parquetMetadata.getCreated_by();
                if (!encryptedMetadata) {
                    // unencrypted column, or encrypted with footer key
                    column = buildColumnChunkMetaData(metaData, columnPath, messageType.getType(columnPath.toArray()).asPrimitiveType(), createdBy);
                    column.setRowGroupOrdinal(rowGroup.getOrdinal());
                    if (metaData.isSetBloom_filter_offset()) {
                        column.setBloomFilterOffset(metaData.getBloom_filter_offset());
                    }
                } else {
                    // column encrypted with column key
                    // Metadata will be decrypted later, if this column is accessed
                    EncryptionWithColumnKey columnKeyStruct = cryptoMetaData.getENCRYPTION_WITH_COLUMN_KEY();
                    List<String> pathList = columnKeyStruct.getPath_in_schema();
                    byte[] columnKeyMetadata = columnKeyStruct.getKey_metadata();
                    columnPath = ColumnPath.get(pathList.toArray(new String[pathList.size()]));
                    byte[] encryptedMetadataBuffer = columnChunk.getEncrypted_column_metadata();
                    column = ColumnChunkMetaData.getWithEncryptedMetadata(
                        this,
                        columnPath,
                        messageType.getType(columnPath.toArray()).asPrimitiveType(),
                        encryptedMetadataBuffer,
                        columnKeyMetadata,
                        fileDecryptor,
                        rowGroup.getOrdinal(),
                        columnOrdinal,
                        createdBy);
                }
                column.setColumnIndexReference(toColumnIndexReference(columnChunk));
                column.setOffsetIndexReference(toOffsetIndexReference(columnChunk));
                // TODO
                // index_page_offset
                // key_value_metadata
                blockMetaData.addColumn(column);
            }
            blockMetaData.setPath(filePath);
            blocks.add(blockMetaData);
        }
    }
    Map<String, String> keyValueMetaData = new HashMap<String, String>();
    List<KeyValue> key_value_metadata = parquetMetadata.getKey_value_metadata();
    if (key_value_metadata != null) {
        for (KeyValue keyValue : key_value_metadata) {
            keyValueMetaData.put(keyValue.key, keyValue.value);
        }
    }
    return new ParquetMetadata(new org.apache.parquet.hadoop.metadata.FileMetaData(messageType, keyValueMetaData, parquetMetadata.getCreated_by(), fileDecryptor), blocks);
}
Also used: BlockMetaData (org.apache.parquet.hadoop.metadata.BlockMetaData), ParquetDecodingException (org.apache.parquet.io.ParquetDecodingException), KeyValue (org.apache.parquet.format.KeyValue), ColumnChunkMetaData (org.apache.parquet.hadoop.metadata.ColumnChunkMetaData), ParquetCryptoRuntimeException (org.apache.parquet.crypto.ParquetCryptoRuntimeException), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), ParquetMetadata (org.apache.parquet.hadoop.metadata.ParquetMetadata), RowGroup (org.apache.parquet.format.RowGroup), ArrayList (java.util.ArrayList), ColumnChunk (org.apache.parquet.format.ColumnChunk), Util.writeColumnMetaData (org.apache.parquet.format.Util.writeColumnMetaData), ColumnMetaData (org.apache.parquet.format.ColumnMetaData), MessageType (org.apache.parquet.schema.MessageType), ColumnCryptoMetaData (org.apache.parquet.format.ColumnCryptoMetaData), EncryptionWithColumnKey (org.apache.parquet.format.EncryptionWithColumnKey), ColumnPath (org.apache.parquet.hadoop.metadata.ColumnPath)
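
The branching above distinguishes three cases: plaintext columns, columns encrypted with the footer key (whose metadata is readable immediately), and columns encrypted with per-column keys (whose metadata is decrypted only when the column is projected, since setColumnCryptoMetadata triggers KMS interaction). A minimal sketch isolating just that classification, using only the Thrift accessors shown in the example; ColumnCryptoCase and its Kind enum are illustrative names, not parquet-mr API.

import org.apache.parquet.format.ColumnChunk;
import org.apache.parquet.format.ColumnCryptoMetaData;

public final class ColumnCryptoCase {
    public enum Kind {
        PLAINTEXT,   // no crypto_metadata: readable without keys
        FOOTER_KEY,  // metadata readable once the footer key is available
        COLUMN_KEY   // metadata stays encrypted until the column is projected
    }

    public static Kind classify(ColumnChunk chunk) {
        ColumnCryptoMetaData crypto = chunk.getCrypto_metadata();
        if (crypto == null) {
            return Kind.PLAINTEXT;
        }
        return crypto.isSetENCRYPTION_WITH_FOOTER_KEY() ? Kind.FOOTER_KEY : Kind.COLUMN_KEY;
    }

    private ColumnCryptoCase() {
    }
}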

Example 3 with KeyValue

Use of org.apache.parquet.format.KeyValue in project presto by prestodb.

From the class MetadataReader, method readFooter:

public static ParquetFileMetadata readFooter(ParquetDataSource parquetDataSource, long fileSize) throws IOException {
    // Parquet File Layout:
    // 
    // MAGIC
    // variable: Data
    // variable: Metadata
    // 4 bytes: MetadataLength
    // MAGIC
    validateParquet(fileSize >= MAGIC.length() + POST_SCRIPT_SIZE, "%s is not a valid Parquet File", parquetDataSource.getId());
    // EXPECTED_FOOTER_SIZE is an int, so this will never fail
    byte[] buffer = new byte[toIntExact(min(fileSize, EXPECTED_FOOTER_SIZE))];
    parquetDataSource.readFully(fileSize - buffer.length, buffer);
    Slice tailSlice = wrappedBuffer(buffer);
    Slice magic = tailSlice.slice(tailSlice.length() - MAGIC.length(), MAGIC.length());
    if (!MAGIC.equals(magic)) {
        throw new ParquetCorruptionException(format("Not valid Parquet file: %s expected magic number: %s got: %s", parquetDataSource.getId(), Arrays.toString(MAGIC.getBytes()), Arrays.toString(magic.getBytes())));
    }
    int metadataLength = tailSlice.getInt(tailSlice.length() - POST_SCRIPT_SIZE);
    int completeFooterSize = metadataLength + POST_SCRIPT_SIZE;
    long metadataFileOffset = fileSize - completeFooterSize;
    validateParquet(metadataFileOffset >= MAGIC.length() && metadataFileOffset + POST_SCRIPT_SIZE < fileSize, "Corrupted Parquet file: %s metadata index: %s out of range", parquetDataSource.getId(), metadataFileOffset);
    // Ensure the slice covers the entire metadata range
    if (tailSlice.length() < completeFooterSize) {
        byte[] footerBuffer = new byte[completeFooterSize];
        parquetDataSource.readFully(metadataFileOffset, footerBuffer, 0, footerBuffer.length - tailSlice.length());
        // Copy the previous slice contents into the new buffer
        tailSlice.getBytes(0, footerBuffer, footerBuffer.length - tailSlice.length(), tailSlice.length());
        tailSlice = wrappedBuffer(footerBuffer, 0, footerBuffer.length);
    }
    FileMetaData fileMetaData = readFileMetaData(tailSlice.slice(tailSlice.length() - completeFooterSize, metadataLength).getInput());
    List<SchemaElement> schema = fileMetaData.getSchema();
    validateParquet(!schema.isEmpty(), "Empty Parquet schema in file: %s", parquetDataSource.getId());
    MessageType messageType = readParquetSchema(schema);
    List<BlockMetaData> blocks = new ArrayList<>();
    List<RowGroup> rowGroups = fileMetaData.getRow_groups();
    if (rowGroups != null) {
        for (RowGroup rowGroup : rowGroups) {
            BlockMetaData blockMetaData = new BlockMetaData();
            blockMetaData.setRowCount(rowGroup.getNum_rows());
            blockMetaData.setTotalByteSize(rowGroup.getTotal_byte_size());
            List<ColumnChunk> columns = rowGroup.getColumns();
            validateParquet(!columns.isEmpty(), "No columns in row group: %s", rowGroup);
            String filePath = columns.get(0).getFile_path();
            for (ColumnChunk columnChunk : columns) {
                validateParquet((filePath == null && columnChunk.getFile_path() == null) || (filePath != null && filePath.equals(columnChunk.getFile_path())), "all column chunks of the same row group must be in the same file");
                ColumnMetaData metaData = columnChunk.meta_data;
                String[] path = metaData.path_in_schema.stream().map(value -> value.toLowerCase(Locale.ENGLISH)).toArray(String[]::new);
                ColumnPath columnPath = ColumnPath.get(path);
                PrimitiveType primitiveType = messageType.getType(columnPath.toArray()).asPrimitiveType();
                PrimitiveTypeName primitiveTypeName = primitiveType.getPrimitiveTypeName();
                ColumnChunkMetaData column = ColumnChunkMetaData.get(
                    columnPath,
                    primitiveType,
                    CompressionCodecName.fromParquet(metaData.codec),
                    PARQUET_METADATA_CONVERTER.convertEncodingStats(metaData.encoding_stats),
                    readEncodings(metaData.encodings),
                    readStats(metaData.statistics, primitiveTypeName),
                    metaData.data_page_offset,
                    metaData.dictionary_page_offset,
                    metaData.num_values,
                    metaData.total_compressed_size,
                    metaData.total_uncompressed_size);
                column.setColumnIndexReference(toColumnIndexReference(columnChunk));
                column.setOffsetIndexReference(toOffsetIndexReference(columnChunk));
                blockMetaData.addColumn(column);
            }
            blockMetaData.setPath(filePath);
            blocks.add(blockMetaData);
        }
    }
    Map<String, String> keyValueMetaData = new HashMap<>();
    List<KeyValue> keyValueList = fileMetaData.getKey_value_metadata();
    if (keyValueList != null) {
        for (KeyValue keyValue : keyValueList) {
            keyValueMetaData.put(keyValue.key, keyValue.value);
        }
    }
    ParquetMetadata parquetMetadata = new ParquetMetadata(new org.apache.parquet.hadoop.metadata.FileMetaData(messageType, keyValueMetaData, fileMetaData.getCreated_by()), blocks);
    return new ParquetFileMetadata(parquetMetadata, toIntExact(metadataLength));
}
Also used: PrimitiveType (org.apache.parquet.schema.PrimitiveType), ColumnPath (org.apache.parquet.hadoop.metadata.ColumnPath), Arrays (java.util.Arrays), Slice (io.airlift.slice.Slice), Util.readFileMetaData (org.apache.parquet.format.Util.readFileMetaData), ConvertedType (org.apache.parquet.format.ConvertedType), Repetition (org.apache.parquet.schema.Type.Repetition), HashMap (java.util.HashMap), FileMetaData (org.apache.parquet.format.FileMetaData), ParquetMetadataConverter (org.apache.parquet.format.converter.ParquetMetadataConverter), ArrayList (java.util.ArrayList), ParquetCorruptionException (com.facebook.presto.parquet.ParquetCorruptionException), HashSet (java.util.HashSet), Slices.wrappedBuffer (io.airlift.slice.Slices.wrappedBuffer), KeyValue (org.apache.parquet.format.KeyValue), Locale (java.util.Locale), SchemaElement (org.apache.parquet.format.SchemaElement), Map (java.util.Map), Type (org.apache.parquet.format.Type), IndexReference (org.apache.parquet.internal.hadoop.metadata.IndexReference), PrimitiveTypeName (org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName), Math.toIntExact (java.lang.Math.toIntExact), OriginalType (org.apache.parquet.schema.OriginalType), ParquetDataSource (com.facebook.presto.parquet.ParquetDataSource), Types (org.apache.parquet.schema.Types), Iterator (java.util.Iterator), Encoding (org.apache.parquet.format.Encoding), Set (java.util.Set), Statistics (org.apache.parquet.format.Statistics), IOException (java.io.IOException), Math.min (java.lang.Math.min), ColumnChunkMetaData (org.apache.parquet.hadoop.metadata.ColumnChunkMetaData), String.format (java.lang.String.format), ColumnChunk (org.apache.parquet.format.ColumnChunk), ColumnMetaData (org.apache.parquet.format.ColumnMetaData), US_ASCII (java.nio.charset.StandardCharsets.US_ASCII), MessageType (org.apache.parquet.schema.MessageType), List (java.util.List), RowGroup (org.apache.parquet.format.RowGroup), BlockMetaData (org.apache.parquet.hadoop.metadata.BlockMetaData), CompressionCodecName (org.apache.parquet.hadoop.metadata.CompressionCodecName), ParquetMetadata (org.apache.parquet.hadoop.metadata.ParquetMetadata), Collections (java.util.Collections), ParquetValidationUtils.validateParquet (com.facebook.presto.parquet.ParquetValidationUtils.validateParquet)
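
The offset arithmetic in readFooter follows from the layout comment at the top of the method: a Parquet file ends with the serialized metadata, a 4-byte metadata length, and the 4-byte "PAR1" magic, so the postscript is 8 bytes. A minimal sketch of that arithmetic; FooterLayout is an illustrative class, not part of Presto, and the constants mirror what the code above implies.

public final class FooterLayout {
    static final int MAGIC_LENGTH = 4;                    // the trailing "PAR1" marker
    static final int POST_SCRIPT_SIZE = 4 + MAGIC_LENGTH; // 4-byte metadata length + magic

    // Absolute offset where the serialized FileMetaData begins, given the file
    // size and the length read from the postscript.
    public static long metadataOffset(long fileSize, int metadataLength) {
        return fileSize - ((long) metadataLength + POST_SCRIPT_SIZE);
    }

    private FooterLayout() {
    }
}

For example, a 1,000,000-byte file whose postscript reports a metadata length of 1,200 has its metadata starting at offset 1,000,000 - (1,200 + 8) = 998,792.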

Example 4 with KeyValue

Use of org.apache.parquet.format.KeyValue in project parquet-mr by apache.

From the class ParquetMetadataConverter, method addKeyValue:

private static void addKeyValue(FileMetaData fileMetaData, String key, String value) {
    KeyValue keyValue = new KeyValue(key);
    keyValue.value = value;
    fileMetaData.addToKey_value_metadata(keyValue);
}
Also used: KeyValue (org.apache.parquet.format.KeyValue)
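
A short usage sketch of the same pattern from calling code. AddKeyValueExample and the key name are illustrative; only the Thrift-generated no-arg constructor, the KeyValue constructor, the public value field, and addToKey_value_metadata, all shown above, are assumed.

import org.apache.parquet.format.FileMetaData;
import org.apache.parquet.format.KeyValue;

public class AddKeyValueExample {
    public static void main(String[] args) {
        // Thrift-generated classes expose a no-arg constructor; the schema and
        // other required footer fields are omitted here for brevity.
        FileMetaData footer = new FileMetaData();
        // KeyValue's constructor takes the required key; value is an optional field.
        KeyValue entry = new KeyValue("writer.note"); // "writer.note" is an illustrative key
        entry.value = "example";
        footer.addToKey_value_metadata(entry);
        System.out.println(footer.getKey_value_metadata()); // prints the accumulated entries
    }
}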

Aggregations (usage counts across the examples above)

KeyValue (org.apache.parquet.format.KeyValue): 4
ArrayList (java.util.ArrayList): 3
HashMap (java.util.HashMap): 3
ColumnChunk (org.apache.parquet.format.ColumnChunk): 3
ColumnMetaData (org.apache.parquet.format.ColumnMetaData): 3
RowGroup (org.apache.parquet.format.RowGroup): 3
BlockMetaData (org.apache.parquet.hadoop.metadata.BlockMetaData): 3
ColumnChunkMetaData (org.apache.parquet.hadoop.metadata.ColumnChunkMetaData): 3
ColumnPath (org.apache.parquet.hadoop.metadata.ColumnPath): 3
ParquetMetadata (org.apache.parquet.hadoop.metadata.ParquetMetadata): 3
MessageType (org.apache.parquet.schema.MessageType): 3
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2
ParquetDecodingException (org.apache.parquet.io.ParquetDecodingException): 2
ParquetCorruptionException (com.facebook.presto.parquet.ParquetCorruptionException): 1
ParquetDataSource (com.facebook.presto.parquet.ParquetDataSource): 1
ParquetValidationUtils.validateParquet (com.facebook.presto.parquet.ParquetValidationUtils.validateParquet): 1
Slice (io.airlift.slice.Slice): 1
Slices.wrappedBuffer (io.airlift.slice.Slices.wrappedBuffer): 1
IOException (java.io.IOException): 1
Math.min (java.lang.Math.min): 1