
Example 11 with Algorithm

use of org.apache.hadoop.hbase.io.compress.Compression.Algorithm in project hbase by apache.

the class HBaseTestingUtil method getSupportedCompressionAlgorithms.

/**
 * Get supported compression algorithms.
 * @return supported compression algorithms.
 */
public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
    for (String algoName : allAlgos) {
        try {
            Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
            algo.getCompressor();
            supportedAlgos.add(algo);
        } catch (Throwable t) {
            // this algo is not available
        }
    }
    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
}
Also used : Compression(org.apache.hadoop.hbase.io.compress.Compression) Algorithm(org.apache.hadoop.hbase.io.compress.Compression.Algorithm) ArrayList(java.util.ArrayList) SplitAlgorithm(org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm)
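
The probe above only verifies that a compressor can be obtained. A quick way to exercise a discovered algorithm end to end is to round-trip a byte array through its compression and decompression streams. The sketch below is illustrative only: the helper name roundTrip is made up, and it assumes HBaseTestingUtil (from the HBase testing module, as in the example above) is on the classpath.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;

public class CompressionRoundTrip {

    // Compress and decompress "input" with the given algorithm, returning the restored bytes.
    static byte[] roundTrip(Compression.Algorithm algo, byte[] input) throws IOException {
        Compressor compressor = algo.getCompressor();
        ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        try (OutputStream out = algo.createCompressionStream(compressed, compressor, 0)) {
            out.write(input);
        } finally {
            algo.returnCompressor(compressor);
        }
        Decompressor decompressor = algo.getDecompressor();
        try (InputStream in = algo.createDecompressionStream(
                new ByteArrayInputStream(compressed.toByteArray()), decompressor, 0)) {
            return in.readAllBytes();
        } finally {
            algo.returnDecompressor(decompressor);
        }
    }

    public static void main(String[] args) throws IOException {
        byte[] data = "hello hbase compression".getBytes(StandardCharsets.UTF_8);
        for (Compression.Algorithm algo : HBaseTestingUtil.getSupportedCompressionAlgorithms()) {
            System.out.println(algo + " round-trip ok=" + Arrays.equals(data, roundTrip(algo, data)));
        }
    }
}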

Example 12 with Algorithm

use of org.apache.hadoop.hbase.io.compress.Compression.Algorithm in project hbase by apache.

the class TestHFileBlock method testReaderV2Internals.

protected void testReaderV2Internals() throws IOException {
    final Configuration conf = TEST_UTIL.getConfiguration();
    if (includesTag) {
        conf.setInt("hfile.format.version", 3);
    }
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
        for (boolean pread : new boolean[] { false, true }) {
            LOG.info("testReaderV2: Compression algorithm: " + algo + ", pread=" + pread);
            Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" + algo);
            FSDataOutputStream os = fs.create(path);
            HFileContext meta = new HFileContextBuilder()
                .withCompression(algo)
                .withIncludesMvcc(includesMemstoreTS)
                .withIncludesTags(includesTag)
                .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                .build();
            HFileBlock.Writer hbw = new HFileBlock.Writer(conf, null, meta);
            long totalSize = 0;
            for (int blockId = 0; blockId < 2; ++blockId) {
                DataOutputStream dos = hbw.startWriting(BlockType.DATA);
                for (int i = 0; i < 1234; ++i) dos.writeInt(i);
                hbw.writeHeaderAndData(os);
                totalSize += hbw.getOnDiskSizeWithHeader();
            }
            os.close();
            FSDataInputStream is = fs.open(path);
            meta = new HFileContextBuilder()
                .withHBaseCheckSum(true)
                .withIncludesMvcc(includesMemstoreTS)
                .withIncludesTags(includesTag)
                .withCompression(algo)
                .build();
            ReaderContext context = new ReaderContextBuilder()
                .withInputStreamWrapper(new FSDataInputStreamWrapper(is))
                .withFileSize(totalSize)
                .withFilePath(path)
                .withFileSystem(fs)
                .build();
            HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc, TEST_UTIL.getConfiguration());
            HFileBlock b = hbr.readBlockData(0, -1, pread, false, true);
            is.close();
            assertEquals(0, HFile.getAndResetChecksumFailuresCount());
            b.sanityCheck();
            assertEquals(4936, b.getUncompressedSizeWithoutHeader());
            assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
            HFileBlock expected = b;
            if (algo == GZ) {
                is = fs.open(path);
                ReaderContext readerContext = new ReaderContextBuilder()
                    .withInputStreamWrapper(new FSDataInputStreamWrapper(is))
                    .withFileSize(totalSize)
                    .withFilePath(path)
                    .withFileSystem(fs)
                    .build();
                hbr = new HFileBlock.FSReaderImpl(readerContext, meta, alloc, TEST_UTIL.getConfiguration());
                b = hbr.readBlockData(0, 2173 + HConstants.HFILEBLOCK_HEADER_SIZE + b.totalChecksumBytes(), pread, false, true);
                assertEquals(expected, b);
                int wrongCompressedSize = 2172;
                try {
                    hbr.readBlockData(0, wrongCompressedSize + HConstants.HFILEBLOCK_HEADER_SIZE, pread, false, true);
                    fail("Exception expected");
                } catch (IOException ex) {
                    String expectedPrefix = "Passed in onDiskSizeWithHeader=";
                    assertTrue("Invalid exception message: '" + ex.getMessage() + "'.\nMessage is expected to start with: '" + expectedPrefix + "'", ex.getMessage().startsWith(expectedPrefix));
                }
                assertRelease(b);
                is.close();
            }
            assertRelease(expected);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Compression(org.apache.hadoop.hbase.io.compress.Compression) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) DataOutputStream(java.io.DataOutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) Algorithm(org.apache.hadoop.hbase.io.compress.Compression.Algorithm) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataInputStreamWrapper(org.apache.hadoop.hbase.io.FSDataInputStreamWrapper)
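
Stripped of the GZ-specific size assertions, the write/read pairing in the test reduces to the sequence below. This is a condensed sketch under the same assumptions as the test (fs, conf, alloc and TEST_UTIL already initialized, algo a supported Compression.Algorithm, and the code living in the org.apache.hadoop.hbase.io.hfile test package); the MVCC/tags builder options are omitted for brevity, and passing -1 as the on-disk size lets the reader take it from the block header.

// Write one compressed DATA block.
HFileContext writeMeta = new HFileContextBuilder()
    .withCompression(algo)
    .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
    .build();
Path path = new Path(TEST_UTIL.getDataTestDir(), "single_block_" + algo);
FSDataOutputStream os = fs.create(path);
HFileBlock.Writer writer = new HFileBlock.Writer(conf, null, writeMeta);
DataOutputStream dos = writer.startWriting(BlockType.DATA);
for (int i = 0; i < 1234; ++i) {
    dos.writeInt(i);
}
writer.writeHeaderAndData(os);
long totalSize = writer.getOnDiskSizeWithHeader();
os.close();

// Read it back with a reader-side context that declares HBase checksums.
HFileContext readMeta = new HFileContextBuilder()
    .withHBaseCheckSum(true)
    .withCompression(algo)
    .build();
FSDataInputStream is = fs.open(path);
ReaderContext context = new ReaderContextBuilder()
    .withInputStreamWrapper(new FSDataInputStreamWrapper(is))
    .withFileSize(totalSize)
    .withFilePath(path)
    .withFileSystem(fs)
    .build();
HFileBlock.FSReader reader = new HFileBlock.FSReaderImpl(context, readMeta, alloc, conf);
HFileBlock block = reader.readBlockData(0, -1, true, false, true);  // -1: size taken from the header
block.sanityCheck();
block.release();  // blocks backed by a pooled allocator must be released, as the test does via assertRelease
is.close();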

Example 13 with Algorithm

use of org.apache.hadoop.hbase.io.compress.Compression.Algorithm in project hbase by apache.

the class HBaseTestingUtility method getSupportedCompressionAlgorithms.

/**
 * Get supported compression algorithms.
 * @return supported compression algorithms.
 */
public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
    String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
    List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
    for (String algoName : allAlgos) {
        try {
            Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
            algo.getCompressor();
            supportedAlgos.add(algo);
        } catch (Throwable t) {
            // this algo is not available
        }
    }
    return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
}
Also used : Compression(org.apache.hadoop.hbase.io.compress.Compression) Algorithm(org.apache.hadoop.hbase.io.compress.Compression.Algorithm) ArrayList(java.util.ArrayList) SplitAlgorithm(org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithm)
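
The examples above discover and exercise algorithms inside tests; in application code the most common place a Compression.Algorithm ends up is a column family descriptor. The snippet below is illustrative only and uses the standard HBase 2.x client builder API rather than anything from the examples above; the table name "demo" and family "cf" are placeholders.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.util.Bytes;

public class CompressedFamilyExample {

    // Build a table descriptor whose single column family compresses its store files with "algo".
    public static TableDescriptor describe(Compression.Algorithm algo) {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setCompressionType(algo)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(cf)
            .build();
    }
}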

Aggregations

Algorithm (org.apache.hadoop.hbase.io.compress.Compression.Algorithm) 13
Map (java.util.Map) 6
TreeMap (java.util.TreeMap) 6
Configuration (org.apache.hadoop.conf.Configuration) 6
Path (org.apache.hadoop.fs.Path) 5
Compression (org.apache.hadoop.hbase.io.compress.Compression) 5
ArrayList (java.util.ArrayList) 4
FileSystem (org.apache.hadoop.fs.FileSystem) 4
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration) 4
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext) 4
HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) 4
BloomType (org.apache.hadoop.hbase.regionserver.BloomType) 4
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream) 3
FSDataInputStreamWrapper (org.apache.hadoop.hbase.io.FSDataInputStreamWrapper) 3
DataBlockEncoding (org.apache.hadoop.hbase.io.encoding.DataBlockEncoding) 3
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig) 3
StoreFileWriter (org.apache.hadoop.hbase.regionserver.StoreFileWriter) 3
IOException (java.io.IOException) 2
ByteBuffer (java.nio.ByteBuffer) 2
HashMap (java.util.HashMap) 2