Use of org.apache.hadoop.hbase.io.ByteBufferOutputStream in project hbase by apache.
Class CellBlockBuilder, method decompress.
private ByteBuffer decompress(CompressionCodec compressor, InputStream cellBlockStream,
    int osInitialSize) throws IOException {
  // GZIPCodec fails w/ NPE if no configuration.
  if (compressor instanceof Configurable) {
    ((Configurable) compressor).setConf(this.conf);
  }
  Decompressor poolDecompressor = CodecPool.getDecompressor(compressor);
  CompressionInputStream cis = compressor.createInputStream(cellBlockStream, poolDecompressor);
  ByteBufferOutputStream bbos;
  try {
    // TODO: This is ugly. The buffer will be resized on us if we guess wrong.
    // TODO: Reuse buffers.
    bbos = new ByteBufferOutputStream(osInitialSize);
    IOUtils.copy(cis, bbos);
    bbos.close();
    return bbos.getByteBuffer();
  } finally {
    CodecPool.returnDecompressor(poolDecompressor);
  }
}
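Below is a minimal, self-contained sketch (not HBase code) of the same pattern: compress a payload with a pooled Compressor, then decompress it back through a ByteBufferOutputStream exactly as decompress() does above. The codec choice (GzipCodec), the payload, and the initial buffer size are illustrative assumptions.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.GzipCodec;

public class CellBlockRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    CompressionCodec codec = new GzipCodec();
    // GzipCodec needs a Configuration, mirroring the Configurable check above.
    ((Configurable) codec).setConf(conf);
    byte[] payload = "example cell block bytes".getBytes(StandardCharsets.UTF_8);

    // Compress with a pooled Compressor.
    Compressor compressor = CodecPool.getCompressor(codec);
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (CompressionOutputStream cos = codec.createOutputStream(compressed, compressor)) {
      cos.write(payload);
    } finally {
      CodecPool.returnCompressor(compressor);
    }

    // Decompress following the same shape as CellBlockBuilder.decompress().
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    try {
      ByteBufferOutputStream bbos = new ByteBufferOutputStream(payload.length);
      IOUtils.copy(
        codec.createInputStream(new ByteArrayInputStream(compressed.toByteArray()), decompressor),
        bbos);
      bbos.close();
      ByteBuffer result = bbos.getByteBuffer();
      System.out.println("decompressed " + result.remaining() + " bytes");
    } finally {
      CodecPool.returnDecompressor(decompressor);
    }
  }
}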
Use of org.apache.hadoop.hbase.io.ByteBufferOutputStream in project hbase by apache.
Class LzmaCompressor, method compress.
@Override
public int compress(byte[] b, int off, int len) throws IOException {
  // If we have previously compressed our input and still have some bytes
  // remaining, provide them to the caller.
  if (outBuf.hasRemaining()) {
    int remaining = outBuf.remaining(), n = Math.min(remaining, len);
    outBuf.get(b, off, n);
    LOG.trace("compress: {} bytes from outBuf", n);
    return n;
  }
  // We don't actually begin compression until our caller calls finish().
  if (finish) {
    if (inBuf.position() > 0) {
      inBuf.flip();
      int uncompressed = inBuf.remaining();
      // If we don't have enough capacity in our currently allocated output buffer,
      // allocate a new one which does.
      int needed = maxCompressedLength(uncompressed);
      // Can we compress directly into the provided array?
      ByteBuffer writeBuffer;
      boolean direct = false;
      if (len <= needed) {
        writeBuffer = ByteBuffer.wrap(b, off, len);
        direct = true;
      } else {
        if (outBuf.capacity() < needed) {
          needed = CompressionUtil.roundInt2(needed);
          LOG.trace("compress: resize outBuf {}", needed);
          outBuf = ByteBuffer.allocate(needed);
        } else {
          outBuf.clear();
        }
        writeBuffer = outBuf;
      }
      int oldPos = writeBuffer.position();
      // LZMA is slow enough that inefficiencies here may not matter.
      try (ByteBufferOutputStream lowerOut = new ByteBufferOutputStream(writeBuffer) {
        @Override
        protected void checkSizeAndGrow(int extra) {
          // The superclass grows the buffer when it runs out of space; we
          // do not want that behavior here.
          long capacityNeeded = curBuf.position() + (long) extra;
          if (capacityNeeded > curBuf.limit()) {
            throw new BufferOverflowException();
          }
        }
      }) {
        try (LZMAOutputStream out =
            new LZMAOutputStream(lowerOut, lzOptions, uncompressed, ARRAY_CACHE)) {
          out.write(inBuf.array(), inBuf.arrayOffset(), uncompressed);
        }
      }
      int written = writeBuffer.position() - oldPos;
      bytesWritten += written;
      inBuf.clear();
      LOG.trace("compress: compressed {} -> {}", uncompressed, written);
      finished = true;
      outBuf.flip();
      if (!direct) {
        int n = Math.min(written, len);
        outBuf.get(b, off, n);
        LOG.trace("compress: {} bytes", n);
        return n;
      } else {
        LOG.trace("compress: {} bytes direct", written);
        return written;
      }
    } else {
      finished = true;
    }
  }
  LOG.trace("No output");
  return 0;
}
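The interesting part of compress() is the anonymous ByteBufferOutputStream subclass that refuses to grow its buffer, which is what lets the compressor write straight into a caller-supplied array without silent reallocation. The following stand-alone sketch (assumed demo code, not part of LzmaCompressor) isolates that trick; the destination size and write lengths are arbitrary.

import java.nio.BufferOverflowException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.io.ByteBufferOutputStream;

public class FixedCapacityOutputSketch {
  public static void main(String[] args) throws Exception {
    byte[] dest = new byte[8]; // deliberately small destination
    ByteBuffer wrapped = ByteBuffer.wrap(dest);
    try (ByteBufferOutputStream out = new ByteBufferOutputStream(wrapped) {
      @Override
      protected void checkSizeAndGrow(int extra) {
        // The base class would allocate a larger buffer here; refuse instead.
        long capacityNeeded = curBuf.position() + (long) extra;
        if (capacityNeeded > curBuf.limit()) {
          throw new BufferOverflowException();
        }
      }
    }) {
      out.write(new byte[4]);  // fits within the wrapped array
      out.write(new byte[16]); // exceeds the wrapped array's capacity
    } catch (BufferOverflowException expected) {
      System.out.println("overflow signalled instead of silent reallocation");
    }
  }
}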
Use of org.apache.hadoop.hbase.io.ByteBufferOutputStream in project hbase by apache.
Class ServerRpcConnection, method responseConnectionHeader.
/**
 * Send the response for the connection header.
 */
private void responseConnectionHeader(RPCProtos.ConnectionHeaderResponse.Builder chrBuilder)
    throws FatalConnectionException {
  // Respond with the connection header only if Crypto AES is enabled.
  if (!chrBuilder.hasCryptoCipherMeta()) {
    return;
  }
  try {
    byte[] connectionHeaderResBytes = chrBuilder.build().toByteArray();
    // Encrypt the Crypto AES cipher meta data with the SASL server and send it to the client.
    byte[] unwrapped = new byte[connectionHeaderResBytes.length + 4];
    Bytes.putBytes(unwrapped, 0, Bytes.toBytes(connectionHeaderResBytes.length), 0, 4);
    Bytes.putBytes(unwrapped, 4, connectionHeaderResBytes, 0, connectionHeaderResBytes.length);
    byte[] wrapped = saslServer.wrap(unwrapped, 0, unwrapped.length);
    BufferChain bc;
    try (ByteBufferOutputStream response = new ByteBufferOutputStream(wrapped.length + 4);
        DataOutputStream out = new DataOutputStream(response)) {
      out.writeInt(wrapped.length);
      out.write(wrapped);
      bc = new BufferChain(response.getByteBuffer());
    }
    doRespond(() -> bc);
  } catch (IOException ex) {
    throw new UnsupportedCryptoException(ex.getMessage(), ex);
  }
}
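For context, here is a hedged sketch of how a client might consume the frame built above: a 4-byte big-endian length followed by the SASL-wrapped payload, and, after unwrapping, another 4-byte length followed by the serialized ConnectionHeaderResponse. This reader is an assumption derived from the writer code, not the actual HBase client implementation; the saslClient parameter is a placeholder.

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import javax.security.sasl.SaslClient;

public class ConnectionHeaderResponseReaderSketch {
  static byte[] readResponse(InputStream in, SaslClient saslClient) throws IOException {
    DataInputStream dis = new DataInputStream(in);
    // Outer frame: the length written by out.writeInt(wrapped.length) above.
    int wrappedLen = dis.readInt();
    byte[] wrapped = new byte[wrappedLen];
    dis.readFully(wrapped);
    byte[] unwrapped = saslClient.unwrap(wrapped, 0, wrapped.length);
    // Inner frame: the 4-byte big-endian length prepended via Bytes.putBytes() above.
    int protoLen = ((unwrapped[0] & 0xff) << 24) | ((unwrapped[1] & 0xff) << 16)
        | ((unwrapped[2] & 0xff) << 8) | (unwrapped[3] & 0xff);
    // The remaining bytes are the serialized ConnectionHeaderResponse.
    return Arrays.copyOfRange(unwrapped, 4, 4 + protoLen);
  }
}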
Use of org.apache.hadoop.hbase.io.ByteBufferOutputStream in project hbase by apache.
Class ServerRpcConnection, method doRawSaslReply.
/**
 * Raw SASL messages are not protobuf encoded.
 */
protected final void doRawSaslReply(SaslStatus status, Writable rv, String errorClass,
    String error) throws IOException {
  BufferChain bc;
  // SASL messages are usually in the ballpark of 100-200 bytes; that's why the
  // initial capacity is 256.
  try (ByteBufferOutputStream saslResponse = new ByteBufferOutputStream(256);
      DataOutputStream out = new DataOutputStream(saslResponse)) {
    // Write the status, then either the payload or the error details.
    out.writeInt(status.state);
    if (status == SaslStatus.SUCCESS) {
      rv.write(out);
    } else {
      WritableUtils.writeString(out, errorClass);
      WritableUtils.writeString(out, error);
    }
    bc = new BufferChain(saslResponse.getByteBuffer());
  }
  doRespond(() -> bc);
}
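The reply written here is a 4-byte status followed by either the Writable payload (on success) or two WritableUtils strings (error class, error message). The reader-side sketch below is an assumption for illustration, not the actual HBase client code; SUCCESS_STATE and the use of BytesWritable for the token are placeholders.

import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.WritableUtils;

public class RawSaslReplyReaderSketch {
  private static final int SUCCESS_STATE = 0; // assumption: stands in for SaslStatus.SUCCESS.state

  static byte[] readReply(DataInputStream in) throws IOException {
    int state = in.readInt();
    if (state == SUCCESS_STATE) {
      // Mirrors rv.write(out) above; a BytesWritable token is an assumed payload type.
      BytesWritable token = new BytesWritable();
      token.readFields(in);
      return token.copyBytes();
    }
    // Error path: the two strings written with WritableUtils.writeString() above.
    String errorClass = WritableUtils.readString(in);
    String error = WritableUtils.readString(in);
    throw new IOException(errorClass + ": " + error);
  }
}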