Use of org.apache.hadoop.io.compress.Compressor in the Apache hadoop project:
class TestZlibCompressorDecompressor, method testZlibCompressorDecompressorWithCompressionLevels.
@Test
public void testZlibCompressorDecompressorWithCompressionLevels() {
  Configuration conf = new Configuration();
  // Request an explicit zlib compression level; the value must match a
  // ZlibCompressor.CompressionLevel enum constant name ("FOUR" here).
  conf.set("zlib.compress.level", "FOUR");
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    byte[] rawData;
    int tryNumber = 5;
    int byteSize = 10 * 1024;
    Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
    rawData = generate(byteSize);
    try {
      // Several round trips exercise compressor/decompressor reuse.
      for (int i = 0; i < tryNumber; i++) {
        compressDecompressZlib(rawData, (ZlibCompressor) zlibCompressor,
            (ZlibDecompressor) zlibDecompressor);
      }
      // Re-initializing from the same configuration must not throw.
      zlibCompressor.reinit(conf);
    } catch (Exception ex) {
      // Fixed: the message previously named a different test method
      // ("testZlibCompressorDecompressorWithConfiguration"), which made
      // failure reports point at the wrong test.
      fail("testZlibCompressorDecompressorWithCompressionLevels ex error " + ex);
    }
  } else {
    // Native zlib was expected but is not loaded; fail with a clear message.
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
Use of org.apache.hadoop.io.compress.Compressor in the Apache hbase project:
class ChangeCompressionAction, method perform.
@Override
public void perform() throws Exception {
  HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
  HColumnDescriptor[] families = tableDescriptor.getColumnFamilies();
  if (families == null || families.length == 0) {
    return;
  }
  // Candidate compression algorithms. Unsupported ones are filtered out
  // below, because modifyTable() does not throw a remote exception, so an
  // unsupported choice would fail silently. The same algorithm is applied
  // to all column families since support varies per environment.
  Algorithm[] candidates = Algorithm.values();
  Algorithm chosen = null;
  while (chosen == null) {
    Algorithm candidate = candidates[random.nextInt(candidates.length)];
    try {
      // Probe support by obtaining a Compressor for the candidate.
      Compressor probe = candidate.getCompressor();
      // call returnCompressor() to release the Compressor
      candidate.returnCompressor(probe);
      chosen = candidate;
    } catch (Throwable t) {
      LOG.info("Performing action: Changing compression algorithms to " + candidate + " is not supported, pick another one");
    }
  }
  LOG.debug("Performing action: Changing compression algorithms on " + tableName.getNameAsString() + " to " + chosen);
  for (HColumnDescriptor family : families) {
    // Randomly toggle between compaction-time and regular compression.
    if (random.nextBoolean()) {
      family.setCompactionCompressionType(chosen);
    } else {
      family.setCompressionType(chosen);
    }
  }
  // Don't try the modify if we're stopping
  if (context.isStopping()) {
    return;
  }
  admin.modifyTable(tableName, tableDescriptor);
}
Use of org.apache.hadoop.io.compress.Compressor in the Apache hbase project:
class CompressionTest, method testCompression.
/**
 * Verifies that the given compression algorithm is usable in this JVM by
 * acquiring and releasing a Compressor once. Results are memoized per
 * algorithm so each one is probed at most a single time.
 *
 * @throws DoNotRetryIOException if the algorithm failed the probe, now or
 *           in a previous call
 */
public static void testCompression(Compression.Algorithm algo) throws IOException {
  Boolean cached = compressionTestResults[algo.ordinal()];
  if (cached != null) {
    if (cached) {
      // already passed test, dont do it again.
      return;
    }
    // Probe failed earlier; repeating it will not help.
    throw new DoNotRetryIOException("Compression algorithm '" + algo.getName() + "'" + " previously failed test.");
  }
  try {
    Compressor c = algo.getCompressor();
    algo.returnCompressor(c);
    compressionTestResults[algo.ordinal()] = true; // record the pass
  } catch (Throwable t) {
    compressionTestResults[algo.ordinal()] = false; // record the failure
    throw new DoNotRetryIOException(t);
  }
}
Use of org.apache.hadoop.io.compress.Compressor in the Apache hbase project:
class CellBlockBuilder, method encodeCellsTo.
/**
 * Writes every cell from {@code cellScanner} to {@code os} using the given
 * codec, optionally wrapping the stream in {@code compressor}'s output stream.
 * The (possibly wrapped) stream is always closed on exit, and any pooled
 * Compressor is always returned to the CodecPool.
 *
 * @throws DoNotRetryIOException if the destination buffer overflows
 *           (BufferOverflowException / IndexOutOfBoundsException)
 */
private void encodeCellsTo(OutputStream os, CellScanner cellScanner, Codec codec, CompressionCodec compressor) throws IOException {
  Compressor poolCompressor = null;
  try {
    if (compressor != null) {
      if (compressor instanceof Configurable) {
        // Hadoop codecs may require the configuration before use.
        ((Configurable) compressor).setConf(this.conf);
      }
      poolCompressor = CodecPool.getCompressor(compressor);
      os = compressor.createOutputStream(os, poolCompressor);
    }
    Codec.Encoder encoder = codec.getEncoder(os);
    while (cellScanner.advance()) {
      encoder.write(cellScanner.current());
    }
    encoder.flush();
  } catch (BufferOverflowException | IndexOutOfBoundsException e) {
    // A fixed-size destination buffer was exceeded; retrying cannot succeed.
    throw new DoNotRetryIOException(e);
  } finally {
    // Fixed: previously, if os.close() threw, returnCompressor() was
    // skipped and the pooled Compressor leaked. The nested finally
    // guarantees the compressor goes back to the pool either way.
    try {
      os.close();
    } finally {
      if (poolCompressor != null) {
        CodecPool.returnCompressor(poolCompressor);
      }
    }
  }
}
Use of org.apache.hadoop.io.compress.Compressor in the Apache asterixdb project:
class CodecPool, method getCompressor.
/**
 * Obtain a {@link Compressor} for the given {@link CompressionCodec},
 * reusing a pooled instance when one is available and creating a fresh
 * one otherwise.
 *
 * @param codec
 *          the <code>CompressionCodec</code> for which to get the
 *          <code>Compressor</code>
 * @return <code>Compressor</code> for the given <code>CompressionCodec</code>
 *         from the pool or a new one
 */
public static Compressor getCompressor(CompressionCodec codec) {
  Compressor pooled = borrow(COMPRESSOR_POOL, codec.getCompressorType());
  if (pooled != null) {
    LOG.debug("Got recycled compressor");
    return pooled;
  }
  Compressor fresh = codec.createCompressor();
  LOG.info("Got brand-new compressor");
  return fresh;
}
Aggregations