Example use of org.apache.hadoop.hbase.io.compress.CanReinit in the Apache HBase project:
the prepareDecoding method of the HFileBlockDefaultDecodingContext class.
/**
 * Decodes a block's on-disk payload into {@code blockBufferWithoutHeader}, peeling off
 * (in order) any encryption layer and any compression layer recorded in {@code fileContext}.
 *
 * @param onDiskSizeWithoutHeader size in bytes of the on-disk (possibly encrypted and/or
 *          compressed) payload, excluding the block header; adjusted below to exclude the
 *          IV prefix once it has been consumed
 * @param uncompressedSizeWithoutHeader expected size in bytes of the fully decoded payload
 * @param blockBufferWithoutHeader destination buffer that receives the decoded bytes
 * @param onDiskBlock source buffer holding the raw on-disk payload
 * @throws IOException if reading, decrypting, or decompressing the block fails
 */
@Override
public void prepareDecoding(int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader, ByteBuff blockBufferWithoutHeader, ByteBuff onDiskBlock) throws IOException {
// Base stream over the raw on-disk bytes. Decryption and/or decompression streams are
// stacked on top of dataInputStream below as the file context requires.
final ByteBuffInputStream byteBuffInputStream = new ByteBuffInputStream(onDiskBlock);
InputStream dataInputStream = new DataInputStream(byteBuffInputStream);
try {
Encryption.Context cryptoContext = fileContext.getEncryptionContext();
if (cryptoContext != Encryption.Context.NONE) {
Cipher cipher = cryptoContext.getCipher();
Decryptor decryptor = cipher.getDecryptor();
decryptor.setKey(cryptoContext.getKey());
// Encrypted block format:
// +--------------------------+
// | byte iv length |
// +--------------------------+
// | iv data ... |
// +--------------------------+
// | encrypted block data ... |
// +--------------------------+
int ivLength = dataInputStream.read();
// All encrypted blocks have a nonzero IV length. An IV length of zero means the
// encoding context had 0 bytes of plaintext to encode, so there is nothing to
// decrypt and the base stream is used as-is.
if (ivLength > 0) {
byte[] iv = new byte[ivLength];
IOUtils.readFully(dataInputStream, iv);
decryptor.setIv(iv);
// Reset before creating the stream so the decryptor picks up the key/IV just set.
decryptor.reset();
dataInputStream = decryptor.createDecryptionStream(dataInputStream);
}
// The 1-byte IV-length prefix and the IV itself have been consumed; the remaining
// on-disk payload is that much smaller. (Note: if read() hit EOF and returned -1,
// this arithmetic would be off — presumably callers never pass an empty block;
// TODO confirm.)
onDiskSizeWithoutHeader -= Bytes.SIZEOF_BYTE + ivLength;
}
Compression.Algorithm compression = fileContext.getCompression();
if (compression != Compression.Algorithm.NONE) {
Decompressor decompressor = null;
try {
decompressor = compression.getDecompressor();
// Some algorithms don't return decompressors and accept null as a valid parameter
// for same when creating decompression streams. We can ignore these cases wrt reinit.
if (decompressor instanceof CanReinit) {
((CanReinit) decompressor).reinit(conf);
}
// NOTE(review): the third argument looks like a downstream buffer size, with 0
// meaning no extra buffering — verify against Compression.Algorithm's API.
try (InputStream is = compression.createDecompressionStream(dataInputStream, decompressor, 0)) {
BlockIOUtils.readFullyWithHeapBuffer(is, blockBufferWithoutHeader, uncompressedSizeWithoutHeader);
}
} finally {
// Pooled decompressors must always be returned, even if decompression throws.
if (decompressor != null) {
compression.returnDecompressor(decompressor);
}
}
} else {
// Uncompressed: copy exactly the remaining (post-IV) on-disk bytes straight through.
BlockIOUtils.readFullyWithHeapBuffer(dataInputStream, blockBufferWithoutHeader, onDiskSizeWithoutHeader);
}
} finally {
// Close both the base stream and the (possibly wrapped) outer stream; if no
// decryption stream was layered on, dataInputStream wraps byteBuffInputStream and
// the double close is presumably a harmless no-op — TODO confirm.
byteBuffInputStream.close();
dataInputStream.close();
}
}
Aggregations