Use of org.apache.hadoop.hbase.io.compress.Compression.Algorithm in project hbase by apache.
The class HBaseTestingUtil, method getSupportedCompressionAlgorithms.
/**
* Get supported compression algorithms.
* @return supported compression algorithms.
*/
public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
  String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
  List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
  for (String algoName : allAlgos) {
    try {
      Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
      algo.getCompressor();
      supportedAlgos.add(algo);
    } catch (Throwable t) {
      // this algo is not available
    }
  }
  return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
}
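The method probes each codec by name and only keeps the ones whose compressor can actually be instantiated in the current JVM, so callers get an array that is safe to iterate without further native-library checks. A minimal sketch of a caller, assuming the usual test scaffolding (the LOG field and the test body are placeholders, not part of the original snippet):

// Hypothetical caller: parameterize a test over every codec that is really available here.
for (Compression.Algorithm algo : HBaseTestingUtil.getSupportedCompressionAlgorithms()) {
  LOG.info("Running round trip with compression: " + algo);
  // ... write and read back data with `algo` here ...
}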
Use of org.apache.hadoop.hbase.io.compress.Compression.Algorithm in project hbase by apache.
The class TestHFileBlock, method testReaderV2Internals.
protected void testReaderV2Internals() throws IOException {
  final Configuration conf = TEST_UTIL.getConfiguration();
  if (includesTag) {
    conf.setInt("hfile.format.version", 3);
  }
  for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
    for (boolean pread : new boolean[] { false, true }) {
      LOG.info("testReaderV2: Compression algorithm: " + algo + ", pread=" + pread);
      Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" + algo);
      FSDataOutputStream os = fs.create(path);
      HFileContext meta = new HFileContextBuilder()
        .withCompression(algo)
        .withIncludesMvcc(includesMemstoreTS)
        .withIncludesTags(includesTag)
        .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
        .build();
      HFileBlock.Writer hbw = new HFileBlock.Writer(conf, null, meta);
      long totalSize = 0;
      for (int blockId = 0; blockId < 2; ++blockId) {
        DataOutputStream dos = hbw.startWriting(BlockType.DATA);
        for (int i = 0; i < 1234; ++i) {
          dos.writeInt(i);
        }
        hbw.writeHeaderAndData(os);
        totalSize += hbw.getOnDiskSizeWithHeader();
      }
      os.close();

      FSDataInputStream is = fs.open(path);
      meta = new HFileContextBuilder()
        .withHBaseCheckSum(true)
        .withIncludesMvcc(includesMemstoreTS)
        .withIncludesTags(includesTag)
        .withCompression(algo)
        .build();
      ReaderContext context = new ReaderContextBuilder()
        .withInputStreamWrapper(new FSDataInputStreamWrapper(is))
        .withFileSize(totalSize)
        .withFilePath(path)
        .withFileSystem(fs)
        .build();
      HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc, TEST_UTIL.getConfiguration());
      HFileBlock b = hbr.readBlockData(0, -1, pread, false, true);
      is.close();
      assertEquals(0, HFile.getAndResetChecksumFailuresCount());

      b.sanityCheck();
      // 1234 ints * 4 bytes = 4936 bytes of uncompressed payload per block.
      assertEquals(4936, b.getUncompressedSizeWithoutHeader());
      assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
      HFileBlock expected = b;
      if (algo == GZ) {
        is = fs.open(path);
        ReaderContext readerContext = new ReaderContextBuilder()
          .withInputStreamWrapper(new FSDataInputStreamWrapper(is))
          .withFileSize(totalSize)
          .withFilePath(path)
          .withFileSystem(fs)
          .build();
        hbr = new HFileBlock.FSReaderImpl(readerContext, meta, alloc, TEST_UTIL.getConfiguration());
        b = hbr.readBlockData(0, 2173 + HConstants.HFILEBLOCK_HEADER_SIZE + b.totalChecksumBytes(), pread, false, true);
        assertEquals(expected, b);
        int wrongCompressedSize = 2172;
        try {
          hbr.readBlockData(0, wrongCompressedSize + HConstants.HFILEBLOCK_HEADER_SIZE, pread, false, true);
          fail("Exception expected");
        } catch (IOException ex) {
          String expectedPrefix = "Passed in onDiskSizeWithHeader=";
          assertTrue("Invalid exception message: '" + ex.getMessage()
            + "'.\nMessage is expected to start with: '" + expectedPrefix + "'",
            ex.getMessage().startsWith(expectedPrefix));
        }
        assertRelease(b);
        is.close();
      }
      assertRelease(expected);
    }
  }
}
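The test first reads a block with an on-disk size of -1, which tells the reader to discover the size from the block header itself; the explicit-size read and the deliberately wrong wrongCompressedSize read then exercise the size-validation path. A hedged sketch of the scanning pattern this enables, reusing the hbr, totalSize and pread names from the snippet above purely for illustration:

// Illustrative only: walk every block in the file by letting the reader discover
// each block's size (-1), then advance by the on-disk size it reports.
long offset = 0;
while (offset < totalSize) {
  HFileBlock blk = hbr.readBlockData(offset, -1, pread, false, true);
  offset += blk.getOnDiskSizeWithHeader();
  blk.release();
}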
Use of org.apache.hadoop.hbase.io.compress.Compression.Algorithm in project hbase by apache.
The class HBaseTestingUtility, method getSupportedCompressionAlgorithms.
/**
* Get supported compression algorithms.
* @return supported compression algorithms.
*/
public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
  String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
  List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
  for (String algoName : allAlgos) {
    try {
      Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
      algo.getCompressor();
      supportedAlgos.add(algo);
    } catch (Throwable t) {
      // this algo is not available
    }
  }
  return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
}