
Example 1 with Compression

Use of org.apache.hadoop.hbase.io.compress.Compression in project hbase by apache.

Class HFileBlockDefaultDecodingContext, method prepareDecoding:

@Override
public void prepareDecoding(int onDiskSizeWithoutHeader, int uncompressedSizeWithoutHeader, ByteBuff blockBufferWithoutHeader, ByteBuff onDiskBlock) throws IOException {
    final ByteBuffInputStream byteBuffInputStream = new ByteBuffInputStream(onDiskBlock);
    InputStream dataInputStream = new DataInputStream(byteBuffInputStream);
    try {
        Encryption.Context cryptoContext = fileContext.getEncryptionContext();
        if (cryptoContext != Encryption.Context.NONE) {
            Cipher cipher = cryptoContext.getCipher();
            Decryptor decryptor = cipher.getDecryptor();
            decryptor.setKey(cryptoContext.getKey());
            // Encrypted block format:
            // +--------------------------+
            // | byte iv length           |
            // +--------------------------+
            // | iv data ...              |
            // +--------------------------+
            // | encrypted block data ... |
            // +--------------------------+
            int ivLength = dataInputStream.read();
            if (ivLength > 0) {
                byte[] iv = new byte[ivLength];
                IOUtils.readFully(dataInputStream, iv);
                decryptor.setIv(iv);
                // All encrypted blocks will have a nonzero IV length. If we see an IV
                // length of zero, this means the encoding context had 0 bytes of
                // plaintext to encode.
                decryptor.reset();
                dataInputStream = decryptor.createDecryptionStream(dataInputStream);
            }
            onDiskSizeWithoutHeader -= Bytes.SIZEOF_BYTE + ivLength;
        }
        Compression.Algorithm compression = fileContext.getCompression();
        if (compression != Compression.Algorithm.NONE) {
            Decompressor decompressor = null;
            try {
                decompressor = compression.getDecompressor();
                // Some decompressors can be reinitialized with the current configuration;
                // reinit those that support it (CanReinit).
                if (decompressor instanceof CanReinit) {
                    ((CanReinit) decompressor).reinit(conf);
                }
                try (InputStream is = compression.createDecompressionStream(dataInputStream, decompressor, 0)) {
                    BlockIOUtils.readFullyWithHeapBuffer(is, blockBufferWithoutHeader, uncompressedSizeWithoutHeader);
                }
            } finally {
                if (decompressor != null) {
                    compression.returnDecompressor(decompressor);
                }
            }
        } else {
            BlockIOUtils.readFullyWithHeapBuffer(dataInputStream, blockBufferWithoutHeader, onDiskSizeWithoutHeader);
        }
    } finally {
        byteBuffInputStream.close();
        dataInputStream.close();
    }
}
Also used: Compression(org.apache.hadoop.hbase.io.compress.Compression) Decryptor(org.apache.hadoop.hbase.io.crypto.Decryptor) Decompressor(org.apache.hadoop.io.compress.Decompressor) CanReinit(org.apache.hadoop.hbase.io.compress.CanReinit) DataInputStream(java.io.DataInputStream) ByteBuffInputStream(org.apache.hadoop.hbase.io.ByteBuffInputStream) InputStream(java.io.InputStream) Encryption(org.apache.hadoop.hbase.io.crypto.Encryption) Cipher(org.apache.hadoop.hbase.io.crypto.Cipher)
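
The pattern to note here is the pooled codec lifecycle: borrow a Decompressor from the Compression.Algorithm, wrap the input stream with createDecompressionStream, and return the instance in a finally block. Below is a minimal sketch of that round trip in isolation; it is not taken from the HBase sources, the helper name roundTrip is a placeholder, and it assumes the compressor-side methods of Compression.Algorithm (getCompressor, createCompressionStream, returnCompressor), which mirror the decompressor-side calls shown above.

private static byte[] roundTrip(Compression.Algorithm algo, byte[] plain) throws IOException {
    // Compress: borrow a Compressor from the algorithm's pool, wrap the sink stream,
    // and always return the Compressor when done.
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    Compressor compressor = algo.getCompressor();
    try (OutputStream os = algo.createCompressionStream(compressed, compressor, 0)) {
        os.write(plain);
    } finally {
        algo.returnCompressor(compressor);
    }
    // Decompress: the same borrow/wrap/return discipline used by prepareDecoding above.
    Decompressor decompressor = algo.getDecompressor();
    try (InputStream is = algo.createDecompressionStream(
            new ByteArrayInputStream(compressed.toByteArray()), decompressor, 0)) {
        ByteArrayOutputStream restored = new ByteArrayOutputStream();
        byte[] buf = new byte[4096];
        for (int n; (n = is.read(buf)) != -1; ) {
            restored.write(buf, 0, n);
        }
        return restored.toByteArray();
    } finally {
        algo.returnDecompressor(decompressor);
    }
}

With Compression.Algorithm.GZ the returned bytes should match the input; with Algorithm.NONE the stream wrappers pass the data through unchanged.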

Example 2 with Compression

Use of org.apache.hadoop.hbase.io.compress.Compression in project hbase by apache.

Class PerformanceEvaluation, method doMultipleClients:

/**
 * Run all clients in this vm each to its own thread.
 * @param cmd Command to run
 * @throws IOException if creating a connection fails
 */
private void doMultipleClients(final Class<? extends Test> cmd) throws IOException {
    final List<Thread> threads = new ArrayList<>(this.N);
    final long[] timings = new long[this.N];
    final int perClientRows = R / N;
    final TableName tableName = this.tableName;
    final DataBlockEncoding encoding = this.blockEncoding;
    final boolean flushCommits = this.flushCommits;
    final Compression.Algorithm compression = this.compression;
    final boolean writeToWal = this.writeToWAL;
    final int preSplitRegions = this.presplitRegions;
    final boolean useTags = this.useTags;
    final int numTags = this.noOfTags;
    final Connection connection = ConnectionFactory.createConnection(getConf());
    for (int i = 0; i < this.N; i++) {
        final int index = i;
        Thread t = new Thread("TestClient-" + i) {

            @Override
            public void run() {
                super.run();
                PerformanceEvaluation pe = new PerformanceEvaluation(getConf());
                pe.tableName = tableName;
                pe.blockEncoding = encoding;
                pe.flushCommits = flushCommits;
                pe.compression = compression;
                pe.writeToWAL = writeToWal;
                pe.presplitRegions = preSplitRegions;
                pe.N = N;
                pe.connection = connection;
                pe.useTags = useTags;
                pe.noOfTags = numTags;
                try {
                    long elapsedTime = pe.runOneClient(cmd, index * perClientRows, perClientRows, R, flushCommits, writeToWAL, useTags, noOfTags, connection, msg -> LOG.info("client-" + getName() + " " + msg));
                    timings[index] = elapsedTime;
                    LOG.info("Finished " + getName() + " in " + elapsedTime + "ms writing " + perClientRows + " rows");
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            }
        };
        threads.add(t);
    }
    for (Thread t : threads) {
        t.start();
    }
    for (Thread t : threads) {
        while (t.isAlive()) {
            try {
                t.join();
            } catch (InterruptedException e) {
                LOG.debug("Interrupted, continuing: " + e.toString());
            }
        }
    }
    final String test = cmd.getSimpleName();
    LOG.info("[" + test + "] Summary of timings (ms): " + Arrays.toString(timings));
    Arrays.sort(timings);
    long total = 0;
    for (int i = 0; i < this.N; i++) {
        total += timings[i];
    }
    LOG.info("[" + test + "]" + "\tMin: " + timings[0] + "ms" + "\tMax: " + timings[this.N - 1] + "ms" + "\tAvg: " + (total / this.N) + "ms");
}
Also used: DataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding) Compression(org.apache.hadoop.hbase.io.compress.Compression) ArrayList(java.util.ArrayList) Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) TableName(org.apache.hadoop.hbase.TableName)
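
Stripped of the HBase specifics, doMultipleClients is the usual hand-rolled fan-out benchmark: partition R rows evenly across N threads, start them, join them, then sort and summarize the per-thread timings. A minimal sketch of just that skeleton in plain Java (runClient is a placeholder for whatever work a client performs, not an HBase API):

static void runClients(int clients, int totalRows, java.util.function.LongUnaryOperator runClient)
        throws InterruptedException {
    final int rowsPerClient = totalRows / clients;
    final long[] timings = new long[clients];
    List<Thread> threads = new ArrayList<>(clients);
    for (int i = 0; i < clients; i++) {
        final int index = i;
        threads.add(new Thread(() -> {
            // Each client works on its own disjoint slice of the key space and records its elapsed time.
            timings[index] = runClient.applyAsLong((long) index * rowsPerClient);
        }, "TestClient-" + i));
    }
    for (Thread t : threads) {
        t.start();
    }
    for (Thread t : threads) {
        t.join();
    }
    Arrays.sort(timings);
    long total = Arrays.stream(timings).sum();
    System.out.println("Min: " + timings[0] + "ms Max: " + timings[clients - 1]
        + "ms Avg: " + (total / clients) + "ms");
}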

Example 3 with Compression

Use of org.apache.hadoop.hbase.io.compress.Compression in project hbase by apache.

Class TestChecksum, method testChecksumCorruptionInternals:

protected void testChecksumCorruptionInternals(boolean useTags) throws IOException {
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
        for (boolean pread : new boolean[] { false, true }) {
            LOG.info("testChecksumCorruption: Compression algorithm: " + algo + ", pread=" + pread);
            Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" + algo);
            FSDataOutputStream os = fs.create(path);
            HFileContext meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true).withIncludesTags(useTags).withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM).build();
            HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta);
            long totalSize = 0;
            for (int blockId = 0; blockId < 2; ++blockId) {
                DataOutputStream dos = hbw.startWriting(BlockType.DATA);
                for (int i = 0; i < 1234; ++i) dos.writeInt(i);
                hbw.writeHeaderAndData(os);
                totalSize += hbw.getOnDiskSizeWithHeader();
            }
            os.close();
            // Use hbase checksums.
            assertEquals(true, hfs.useHBaseChecksum());
            // Do a read that purposely introduces checksum verification failures.
            FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
            meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true).withIncludesTags(useTags).withHBaseCheckSum(true).build();
            ReaderContext context = new ReaderContextBuilder().withInputStreamWrapper(is).withFileSize(totalSize).withFileSystem(fs).withFilePath(path).build();
            HFileBlock.FSReader hbr = new CorruptedFSReaderImpl(context, meta, TEST_UTIL.getConfiguration());
            HFileBlock b = hbr.readBlockData(0, -1, pread, false, true);
            b.sanityCheck();
            assertEquals(4936, b.getUncompressedSizeWithoutHeader());
            assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
            // Read the data back from the hfile, excluding header and checksum.
            ByteBuff bb = b.unpack(meta, hbr).getBufferWithoutHeader();
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(bb.array(), bb.arrayOffset(), bb.limit()));
            // assert that we encountered hbase checksum verification failures
            // but still used hdfs checksums and read data successfully.
            assertEquals(1, HFile.getAndResetChecksumFailuresCount());
            validateData(in);
            // A checksum failure switches the reader over to HDFS checksums for a limited
            // number of subsequent reads. Verify that those reads see no further HBase
            // checksum failures.
            for (int i = 0; i < HFileBlock.CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD + 1; i++) {
                b = hbr.readBlockData(0, -1, pread, false, true);
                assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff);
                assertEquals(0, HFile.getAndResetChecksumFailuresCount());
            }
            // The next read should have hbase checksum verification re-enabled;
            // we verify this by asserting that there was an hbase checksum failure.
            b = hbr.readBlockData(0, -1, pread, false, true);
            assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff);
            assertEquals(1, HFile.getAndResetChecksumFailuresCount());
            // Since the above encountered a checksum failure, we switch
            // back to not checking hbase checksums.
            b = hbr.readBlockData(0, -1, pread, false, true);
            assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff);
            assertEquals(0, HFile.getAndResetChecksumFailuresCount());
            is.close();
            // Now, use a completely new reader. Switch off hbase checksums in
            // the configuration. In this case, we should not detect
            // any retries within hbase.
            Configuration conf = TEST_UTIL.getConfiguration();
            HFileSystem newfs = new HFileSystem(conf, false);
            assertEquals(false, newfs.useHBaseChecksum());
            is = new FSDataInputStreamWrapper(newfs, path);
            context = new ReaderContextBuilder().withInputStreamWrapper(is).withFileSize(totalSize).withFileSystem(newfs).withFilePath(path).build();
            hbr = new CorruptedFSReaderImpl(context, meta, conf);
            b = hbr.readBlockData(0, -1, pread, false, true);
            is.close();
            b.sanityCheck();
            b = b.unpack(meta, hbr);
            assertEquals(4936, b.getUncompressedSizeWithoutHeader());
            assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
            // Read the data back from the hfile, excluding header and checksum.
            bb = b.getBufferWithoutHeader();
            in = new DataInputStream(new ByteArrayInputStream(bb.array(), bb.arrayOffset(), bb.limit()));
            // assert that we did not encounter hbase checksum verification failures
            // but still used hdfs checksums and read data successfully.
            assertEquals(0, HFile.getAndResetChecksumFailuresCount());
            validateData(in);
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) Compression(org.apache.hadoop.hbase.io.compress.Compression) Configuration(org.apache.hadoop.conf.Configuration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DataOutputStream(java.io.DataOutputStream) DataInputStream(java.io.DataInputStream) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) ByteArrayInputStream(java.io.ByteArrayInputStream) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) FSDataInputStreamWrapper(org.apache.hadoop.hbase.io.FSDataInputStreamWrapper) MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) ByteBuff(org.apache.hadoop.hbase.nio.ByteBuff) HFileSystem(org.apache.hadoop.hbase.fs.HFileSystem)
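
The switch between HBase-level and HDFS-level checksum verification in this test goes through the HFileSystem constructor and useHBaseChecksum(), both already visible above. A minimal sketch of that toggle on its own (the configuration is whatever the test environment provides):

static void checksumToggle() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // The boolean argument selects whether HBase verifies its own block checksums (true)
    // or relies solely on HDFS checksums (false).
    HFileSystem hbaseChecksums = new HFileSystem(conf, true);
    HFileSystem hdfsChecksumsOnly = new HFileSystem(conf, false);
    assertTrue(hbaseChecksums.useHBaseChecksum());
    assertFalse(hdfsChecksumsOnly.useHBaseChecksum());
}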

Example 4 with Compression

Use of org.apache.hadoop.hbase.io.compress.Compression in project hbase by apache.

Class AbstractProtobufLogWriter, method initializeCompressionContext:

private boolean initializeCompressionContext(Configuration conf, Path path) throws IOException {
    boolean doCompress = conf.getBoolean(HConstants.ENABLE_WAL_COMPRESSION, false);
    if (doCompress) {
        try {
            final boolean useTagCompression = conf.getBoolean(CompressionContext.ENABLE_WAL_TAGS_COMPRESSION, true);
            final boolean useValueCompression = conf.getBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, false);
            final Compression.Algorithm valueCompressionType = useValueCompression ? CompressionContext.getValueCompressionAlgorithm(conf) : Compression.Algorithm.NONE;
            if (LOG.isTraceEnabled()) {
                LOG.trace("Initializing compression context for {}: isRecoveredEdits={}" + ", hasTagCompression={}, hasValueCompression={}, valueCompressionType={}", path, CommonFSUtils.isRecoveredEdits(path), useTagCompression, useValueCompression, valueCompressionType);
            }
            this.compressionContext = new CompressionContext(LRUDictionary.class, CommonFSUtils.isRecoveredEdits(path), useTagCompression, useValueCompression, valueCompressionType);
        } catch (Exception e) {
            throw new IOException("Failed to initiate CompressionContext", e);
        }
    }
    return doCompress;
}
Also used: Compression(org.apache.hadoop.hbase.io.compress.Compression) LRUDictionary(org.apache.hadoop.hbase.io.util.LRUDictionary) IOException(java.io.IOException) StreamLacksCapabilityException(org.apache.hadoop.hbase.util.CommonFSUtils.StreamLacksCapabilityException)
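
A minimal sketch of the configuration keys this method consults, with illustrative values; the value compression algorithm itself is resolved separately by CompressionContext.getValueCompressionAlgorithm(conf), as shown above:

Configuration conf = HBaseConfiguration.create();
// Master switch for WAL compression.
conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
// Tag compression defaults to true once WAL compression is enabled.
conf.setBoolean(CompressionContext.ENABLE_WAL_TAGS_COMPRESSION, true);
// Value compression is off by default; enabling it makes the writer pick a
// Compression.Algorithm via CompressionContext.getValueCompressionAlgorithm(conf).
conf.setBoolean(CompressionContext.ENABLE_WAL_VALUE_COMPRESSION, true);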

Example 5 with Compression

Use of org.apache.hadoop.hbase.io.compress.Compression in project hbase by apache.

Class TestHFileEncryption, method testHFileEncryption:

@Test
public void testHFileEncryption() throws Exception {
    // Create 1000 random test KVs
    RedundantKVGenerator generator = new RedundantKVGenerator();
    List<KeyValue> testKvs = generator.generateTestKeyValues(1000);
    // Iterate through data block encoding and compression combinations
    Configuration conf = TEST_UTIL.getConfiguration();
    CacheConfig cacheConf = new CacheConfig(conf);
    for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
        for (Compression.Algorithm compression : HBaseCommonTestingUtil.COMPRESSION_ALGORITHMS) {
            HFileContext fileContext = new HFileContextBuilder().withBlockSize(4096) // small blocks
                .withEncryptionContext(cryptoContext).withCompression(compression)
                .withDataBlockEncoding(encoding).build();
            // write a new test HFile
            LOG.info("Writing with " + fileContext);
            Path path = new Path(TEST_UTIL.getDataTestDir(), HBaseCommonTestingUtil.getRandomUUID().toString() + ".hfile");
            FSDataOutputStream out = fs.create(path);
            HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf).withOutputStream(out).withFileContext(fileContext).create();
            try {
                for (KeyValue kv : testKvs) {
                    writer.append(kv);
                }
            } finally {
                writer.close();
                out.close();
            }
            // read it back in
            LOG.info("Reading with " + fileContext);
            int i = 0;
            HFileScanner scanner = null;
            HFile.Reader reader = HFile.createReader(fs, path, cacheConf, true, conf);
            try {
                FixedFileTrailer trailer = reader.getTrailer();
                assertNotNull(trailer.getEncryptionKey());
                scanner = reader.getScanner(conf, false, false);
                assertTrue("Initial seekTo failed", scanner.seekTo());
                do {
                    Cell kv = scanner.getCell();
                    assertTrue("Read back an unexpected or invalid KV", testKvs.contains(KeyValueUtil.ensureKeyValue(kv)));
                    i++;
                } while (scanner.next());
            } finally {
                reader.close();
                scanner.close();
            }
            assertEquals("Did not read back as many KVs as written", i, testKvs.size());
            // Test random seeks with pread
            LOG.info("Random seeking with " + fileContext);
            reader = HFile.createReader(fs, path, cacheConf, true, conf);
            try {
                scanner = reader.getScanner(conf, false, true);
                assertTrue("Initial seekTo failed", scanner.seekTo());
                for (i = 0; i < 100; i++) {
                    KeyValue kv = testKvs.get(RNG.nextInt(testKvs.size()));
                    assertEquals("Unable to find KV as expected: " + kv, 0, scanner.seekTo(kv));
                }
            } finally {
                scanner.close();
                reader.close();
            }
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) DataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding) Compression(org.apache.hadoop.hbase.io.compress.Compression) KeyValue(org.apache.hadoop.hbase.KeyValue) RedundantKVGenerator(org.apache.hadoop.hbase.util.RedundantKVGenerator) Configuration(org.apache.hadoop.conf.Configuration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
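
The nested loops above exercise every encoding and compression combination with encryption; the underlying write-then-read round trip is easier to follow in isolation. The following is a minimal sketch under stated assumptions: an existing FileSystem, Configuration, and target Path, GZ compression only, no encryption context, and only writer/reader calls that appear in the example above.

private static void writeAndReadBack(FileSystem fs, Configuration conf, Path path) throws IOException {
    CacheConfig cacheConf = new CacheConfig(conf);
    HFileContext ctx = new HFileContextBuilder()
        .withBlockSize(4096)
        .withCompression(Compression.Algorithm.GZ)
        .build();
    // Write a single KV into a new HFile.
    FSDataOutputStream out = fs.create(path);
    HFile.Writer writer =
        HFile.getWriterFactory(conf, cacheConf).withOutputStream(out).withFileContext(ctx).create();
    try {
        writer.append(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"), Bytes.toBytes("q"),
            Bytes.toBytes("value1")));
    } finally {
        writer.close();
        out.close();
    }
    // Read it back and check the value survived the compressed round trip.
    HFile.Reader reader = HFile.createReader(fs, path, cacheConf, true, conf);
    try {
        HFileScanner scanner = reader.getScanner(conf, false, false);
        assertTrue("Initial seekTo failed", scanner.seekTo());
        assertEquals("value1", Bytes.toString(CellUtil.cloneValue(scanner.getCell())));
    } finally {
        reader.close();
    }
}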

Aggregations

Compression (org.apache.hadoop.hbase.io.compress.Compression) - 16 usages
Path (org.apache.hadoop.fs.Path) - 9 usages
ArrayList (java.util.ArrayList) - 7 usages
Algorithm (org.apache.hadoop.hbase.io.compress.Compression.Algorithm) - 7 usages
Configuration (org.apache.hadoop.conf.Configuration) - 6 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream) - 6 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) - 6 usages
FSDataInputStreamWrapper (org.apache.hadoop.hbase.io.FSDataInputStreamWrapper) - 6 usages
DataBlockEncoding (org.apache.hadoop.hbase.io.encoding.DataBlockEncoding) - 5 usages
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration) - 4 usages
SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff) - 4 usages
DataOutputStream (java.io.DataOutputStream) - 3 usages
IOException (java.io.IOException) - 3 usages
ColumnFamilyDescriptor (org.apache.hadoop.hbase.client.ColumnFamilyDescriptor) - 3 usages
ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff) - 3 usages
MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff) - 3 usages
DataInputStream (java.io.DataInputStream) - 2 usages
ByteBuffer (java.nio.ByteBuffer) - 2 usages
Random (java.util.Random) - 2 usages
Cell (org.apache.hadoop.hbase.Cell) - 2 usages