Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class RpcServer, method allocateByteBuffToReadInto.
/**
 * This is extracted to a static method for better unit testing. We try to get buffer(s) from
 * the pool as much as possible.
 *
 * @param pool The ByteBufferPool to use
 * @param minSizeForPoolUse Only requests of at least this size are served from the pool; any
 *          smaller need gets an on-heap ByteBuffer.
 * @param reqLen Number of bytes in the request
*/
@VisibleForTesting
static Pair<ByteBuff, CallCleanup> allocateByteBuffToReadInto(ByteBufferPool pool, int minSizeForPoolUse, int reqLen) {
  ByteBuff resultBuf;
  List<ByteBuffer> bbs = new ArrayList<>((reqLen / pool.getBufferSize()) + 1);
  int remain = reqLen;
  ByteBuffer buf = null;
  while (remain >= minSizeForPoolUse && (buf = pool.getBuffer()) != null) {
    bbs.add(buf);
    remain -= pool.getBufferSize();
  }
  ByteBuffer[] bufsFromPool = null;
  if (bbs.size() > 0) {
    bufsFromPool = new ByteBuffer[bbs.size()];
    bbs.toArray(bufsFromPool);
  }
  if (remain > 0) {
    bbs.add(ByteBuffer.allocate(remain));
  }
  if (bbs.size() > 1) {
    ByteBuffer[] items = new ByteBuffer[bbs.size()];
    bbs.toArray(items);
    resultBuf = new MultiByteBuff(items);
  } else {
    // We are backed by a single BB
    resultBuf = new SingleByteBuff(bbs.get(0));
  }
  resultBuf.limit(reqLen);
  if (bufsFromPool != null) {
    final ByteBuffer[] bufsFromPoolFinal = bufsFromPool;
    return new Pair<>(resultBuf, () -> {
      // Return all the BBs to the pool
      for (int i = 0; i < bufsFromPoolFinal.length; i++) {
        pool.putbackBuffer(bufsFromPoolFinal[i]);
      }
    });
  }
  return new Pair<>(resultBuf, null);
}
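For context, here is a minimal sketch of how a caller might use this helper and its CallCleanup. The pool sizes and request length are illustrative, and the method is package-private, so assume same-package access; the real call site is RpcServer's request-reading path.

// Sketch only: a 64 KB-buffer pool and the 1/6th threshold used in these examples.
ByteBufferPool pool = new ByteBufferPool(64 * 1024, 64);
int minSizeForPoolUse = 64 * 1024 / 6;
Pair<ByteBuff, RpcServer.CallCleanup> pair =
    RpcServer.allocateByteBuffToReadInto(pool, minSizeForPoolUse, 200 * 1024);
ByteBuff data = pair.getFirst();
try {
  // ... read the request bytes into 'data' ...
} finally {
  if (pair.getSecond() != null) {
    pair.getSecond().run(); // hand every pooled ByteBuffer back to the pool
  }
}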
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class TestByteBuffAllocator, method testReferenceCount.
@Test
public void testReferenceCount() {
  int bufSize = 64;
  ByteBuffAllocator alloc = new ByteBuffAllocator(true, 2, bufSize, 3);
  ByteBuff buf1 = alloc.allocate(bufSize * 2);
  assertFalse(buf1.hasArray());
  // The next one will be allocated from the heap
  ByteBuff buf2 = alloc.allocateOneBuffer();
  assertTrue(buf2.hasArray());
  // Duplicate buf2; when the duplicate is released, buf2 is released too (SingleByteBuff)
  ByteBuff dup2 = buf2.duplicate();
  dup2.release();
  assertEquals(0, buf2.refCnt());
  assertEquals(0, dup2.refCnt());
  assertEquals(0, alloc.getFreeBufferCount());
  assertException(dup2::position);
  assertException(buf2::position);
  // Duplicate buf1; when dup1 is released, buf1 is released too (MultiByteBuff)
  ByteBuff dup1 = buf1.duplicate();
  dup1.release();
  assertEquals(0, buf1.refCnt());
  assertEquals(0, dup1.refCnt());
  assertEquals(2, alloc.getFreeBufferCount());
  assertException(dup1::position);
  assertException(buf1::position);
  // Slice buf3; when slice3 is released, buf3 is released too (SingleByteBuff)
  ByteBuff buf3 = alloc.allocateOneBuffer();
  assertFalse(buf3.hasArray());
  ByteBuff slice3 = buf3.slice();
  slice3.release();
  assertEquals(0, buf3.refCnt());
  assertEquals(0, slice3.refCnt());
  assertEquals(2, alloc.getFreeBufferCount());
  // Slice buf4; when slice4 is released, buf4 is released too (MultiByteBuff)
  ByteBuff buf4 = alloc.allocate(bufSize * 2);
  assertFalse(buf4.hasArray());
  ByteBuff slice4 = buf4.slice();
  slice4.release();
  assertEquals(0, buf4.refCnt());
  assertEquals(0, slice4.refCnt());
  assertEquals(2, alloc.getFreeBufferCount());
  // Test multiple references to the same ByteBuff (SingleByteBuff)
  ByteBuff buf5 = alloc.allocateOneBuffer();
  ByteBuff slice5 = buf5.duplicate().duplicate().duplicate().slice().slice();
  slice5.release();
  assertEquals(0, buf5.refCnt());
  assertEquals(0, slice5.refCnt());
  assertEquals(2, alloc.getFreeBufferCount());
  assertException(slice5::position);
  assertException(buf5::position);
  // Test multiple references to the same ByteBuff (SingleByteBuff)
  ByteBuff buf6 = alloc.allocate(bufSize >> 2);
  ByteBuff slice6 = buf6.duplicate().duplicate().duplicate().slice().slice();
  slice6.release();
  assertEquals(0, buf6.refCnt());
  assertEquals(0, slice6.refCnt());
  assertEquals(2, alloc.getFreeBufferCount());
  // Test retaining the parent SingleByteBuff (duplicate)
  ByteBuff parent = alloc.allocateOneBuffer();
  ByteBuff child = parent.duplicate();
  child.retain();
  parent.release();
  assertEquals(1, child.refCnt());
  assertEquals(1, parent.refCnt());
  assertEquals(1, alloc.getFreeBufferCount());
  parent.release();
  assertEquals(0, child.refCnt());
  assertEquals(0, parent.refCnt());
  assertEquals(2, alloc.getFreeBufferCount());
  // Test retaining the parent MultiByteBuff (duplicate)
  parent = alloc.allocate(bufSize << 1);
  child = parent.duplicate();
  child.retain();
  parent.release();
  assertEquals(1, child.refCnt());
  assertEquals(1, parent.refCnt());
  assertEquals(0, alloc.getFreeBufferCount());
  parent.release();
  assertEquals(0, child.refCnt());
  assertEquals(0, parent.refCnt());
  assertEquals(2, alloc.getFreeBufferCount());
  // Test retaining the parent SingleByteBuff (slice)
  parent = alloc.allocateOneBuffer();
  child = parent.slice();
  child.retain();
  parent.release();
  assertEquals(1, child.refCnt());
  assertEquals(1, parent.refCnt());
  assertEquals(1, alloc.getFreeBufferCount());
  parent.release();
  assertEquals(0, child.refCnt());
  assertEquals(0, parent.refCnt());
  assertEquals(2, alloc.getFreeBufferCount());
  // Test retaining the parent MultiByteBuff (slice)
  parent = alloc.allocate(bufSize << 1);
  child = parent.slice();
  child.retain();
  parent.release();
  assertEquals(1, child.refCnt());
  assertEquals(1, parent.refCnt());
  assertEquals(0, alloc.getFreeBufferCount());
  parent.release();
  assertEquals(0, child.refCnt());
  assertEquals(0, parent.refCnt());
  assertEquals(2, alloc.getFreeBufferCount());
}
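The contract these assertions exercise: duplicate() and slice() views share the parent's reference count rather than getting their own, so each extra logical owner must retain() explicitly, and the backing buffer returns to the pool only when the shared count reaches zero. A minimal sketch of that usage, with the same allocator parameters as the test above:

ByteBuffAllocator alloc = new ByteBuffAllocator(true, 2, 64, 3);
ByteBuff buf = alloc.allocateOneBuffer(); // refCnt == 1
ByteBuff view = buf.duplicate();          // shares the same refCnt, still 1
view.retain();                            // refCnt == 2: one owner each for buf and view
view.release();                           // refCnt == 1; memory still valid through buf
buf.release();                            // refCnt == 0; buffer returns to the pool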
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class TestByteBuffAllocator, method testAllocateByteBuffToReadInto.
@Test
public void testAllocateByteBuffToReadInto() {
  int maxBuffersInPool = 10;
  int bufSize = 6 * 1024;
  ByteBuffAllocator alloc = new ByteBuffAllocator(true, maxBuffersInPool, bufSize, bufSize / 6);
  assertEquals(0, alloc.getUsedBufferCount());
  ByteBuff buff = alloc.allocate(10 * bufSize);
  assertEquals(61440, alloc.getPoolAllocationBytes()); // 10 pooled buffers * 6144 bytes
  assertEquals(0, alloc.getHeapAllocationBytes());
  assertEquals(10, alloc.getUsedBufferCount());
  buff.release();
  // When the request size is less than 1/6th of the pool buffer size, we should use an
  // on-demand created on-heap buffer
  buff = alloc.allocate(200);
  assertTrue(buff.hasArray());
  assertEquals(maxBuffersInPool, alloc.getFreeBufferCount());
  assertEquals(maxBuffersInPool, alloc.getTotalBufferCount());
  assertEquals(61440, alloc.getPoolAllocationBytes());
  assertEquals(200, alloc.getHeapAllocationBytes());
  assertEquals(10, alloc.getUsedBufferCount());
  buff.release();
  // When the request size is > 1/6th of the pool buffer size, allocate from the pool.
  buff = alloc.allocate(1024);
  assertFalse(buff.hasArray());
  assertEquals(maxBuffersInPool - 1, alloc.getFreeBufferCount());
  assertEquals(67584, alloc.getPoolAllocationBytes()); // 61440 + one more 6144-byte buffer
  assertEquals(200, alloc.getHeapAllocationBytes());
  assertEquals(10, alloc.getUsedBufferCount());
  // ByteBuff Recycler#free should put the BB back into the pool.
  buff.release();
  assertEquals(maxBuffersInPool, alloc.getFreeBufferCount());
  // Request size > pool buffer size
  buff = alloc.allocate(7 * 1024);
  assertFalse(buff.hasArray());
  assertTrue(buff instanceof MultiByteBuff);
  ByteBuffer[] bbs = buff.nioByteBuffers();
  assertEquals(2, bbs.length);
  assertTrue(bbs[0].isDirect());
  assertTrue(bbs[1].isDirect());
  assertEquals(6 * 1024, bbs[0].limit());
  assertEquals(1024, bbs[1].limit());
  assertEquals(maxBuffersInPool - 2, alloc.getFreeBufferCount());
  assertEquals(79872, alloc.getPoolAllocationBytes()); // 67584 + two 6144-byte buffers
  assertEquals(200, alloc.getHeapAllocationBytes());
  assertEquals(10, alloc.getUsedBufferCount());
  buff.release();
  assertEquals(maxBuffersInPool, alloc.getFreeBufferCount());
  buff = alloc.allocate(6 * 1024 + 200);
  assertFalse(buff.hasArray());
  assertTrue(buff instanceof MultiByteBuff);
  bbs = buff.nioByteBuffers();
  assertEquals(2, bbs.length);
  assertTrue(bbs[0].isDirect());
  assertFalse(bbs[1].isDirect());
  assertEquals(6 * 1024, bbs[0].limit());
  assertEquals(200, bbs[1].limit());
  assertEquals(maxBuffersInPool - 1, alloc.getFreeBufferCount());
  assertEquals(86016, alloc.getPoolAllocationBytes()); // 79872 + 6144; the 200-byte tail is on heap
  assertEquals(400, alloc.getHeapAllocationBytes());
  assertEquals(10, alloc.getUsedBufferCount());
  buff.release();
  assertEquals(maxBuffersInPool, alloc.getFreeBufferCount());
  alloc.allocate(bufSize * (maxBuffersInPool - 1));
  assertEquals(141312, alloc.getPoolAllocationBytes()); // 86016 + nine 6144-byte buffers
  assertEquals(400, alloc.getHeapAllocationBytes());
  assertEquals(10, alloc.getUsedBufferCount());
  buff = alloc.allocate(20 * 1024);
  assertFalse(buff.hasArray());
  assertTrue(buff instanceof MultiByteBuff);
  bbs = buff.nioByteBuffers();
  assertEquals(2, bbs.length);
  assertTrue(bbs[0].isDirect());
  assertFalse(bbs[1].isDirect());
  assertEquals(6 * 1024, bbs[0].limit());
  assertEquals(14 * 1024, bbs[1].limit());
  assertEquals(0, alloc.getFreeBufferCount());
  assertEquals(147456, alloc.getPoolAllocationBytes()); // 141312 + the last free 6144-byte buffer
  assertEquals(14736, alloc.getHeapAllocationBytes()); // 400 + 14 * 1024
  assertEquals(10, alloc.getUsedBufferCount());
  buff.release();
  assertEquals(1, alloc.getFreeBufferCount());
  alloc.allocateOneBuffer();
  assertEquals(153600, alloc.getPoolAllocationBytes()); // 147456 + 6144
  assertEquals(14736, alloc.getHeapAllocationBytes());
  assertEquals(10, alloc.getUsedBufferCount());
  // Pool exhausted: even a request above the buffer size falls back to one heap buffer.
  buff = alloc.allocate(7 * 1024);
  assertTrue(buff.hasArray());
  assertTrue(buff instanceof SingleByteBuff);
  assertEquals(7 * 1024, buff.nioByteBuffers()[0].limit());
  assertEquals(153600, alloc.getPoolAllocationBytes());
  assertEquals(21904, alloc.getHeapAllocationBytes()); // 14736 + 7 * 1024
  assertEquals(10, alloc.getUsedBufferCount());
  buff.release();
}
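A quick sanity check on the accounting asserted above: getPoolAllocationBytes only ever grows in whole 6144-byte buffers, while getHeapAllocationBytes grows by the exact remainder sizes. The arithmetic behind the first few magic numbers, written out as a snippet:

int bufSize = 6 * 1024;               // 6144
assert 10 * bufSize == 61440;         // allocate(10 * bufSize): ten pooled buffers
assert 61440 + bufSize == 67584;      // allocate(1024): one pooled buffer
assert 67584 + 2 * bufSize == 79872;  // allocate(7 * 1024): two pooled buffers
assert 79872 + bufSize == 86016;      // allocate(6 * 1024 + 200): pool + 200 bytes on heap
assert 400 + 14 * 1024 == 14736;      // heap bytes after two 200-byte fills and one 14 KB fill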
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class ServerRpcConnection, method processUnwrappedData.
private void processUnwrappedData(byte[] inBuf) throws IOException, InterruptedException {
  ReadableByteChannel ch = Channels.newChannel(new ByteArrayInputStream(inBuf));
  // Read all RPCs contained in the inBuf, even partial ones
  while (true) {
    int count;
    if (unwrappedDataLengthBuffer.remaining() > 0) {
      count = this.rpcServer.channelRead(ch, unwrappedDataLengthBuffer);
      if (count <= 0 || unwrappedDataLengthBuffer.remaining() > 0) {
        return;
      }
    }
    if (unwrappedData == null) {
      unwrappedDataLengthBuffer.flip();
      int unwrappedDataLength = unwrappedDataLengthBuffer.getInt();
      if (unwrappedDataLength == RpcClient.PING_CALL_ID) {
        if (RpcServer.LOG.isDebugEnabled()) {
          RpcServer.LOG.debug("Received ping message");
        }
        unwrappedDataLengthBuffer.clear();
        // ping message
        continue;
      }
      unwrappedData = ByteBuffer.allocate(unwrappedDataLength);
    }
    count = this.rpcServer.channelRead(ch, unwrappedData);
    if (count <= 0 || unwrappedData.remaining() > 0) {
      return;
    }
    if (unwrappedData.remaining() == 0) {
      unwrappedDataLengthBuffer.clear();
      unwrappedData.flip();
      processOneRpc(new SingleByteBuff(unwrappedData));
      unwrappedData = null;
    }
  }
}
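The loop implements plain 4-byte length-prefixed framing: read a big-endian int, then that many payload bytes, then hand the payload to processOneRpc as a SingleByteBuff. Unlike the sketch below, the HBase version keeps its two buffers in fields so a short read can simply return and resume on the next call. A self-contained illustration of the framing itself (names are ours, not HBase API):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

// Illustrative only: read one length-prefixed frame, or null if the channel is exhausted.
static ByteBuffer readOneFrame(ReadableByteChannel ch) throws IOException {
  ByteBuffer len = ByteBuffer.allocate(4);
  while (len.hasRemaining()) {
    if (ch.read(len) < 0) return null;     // EOF before the length was complete
  }
  len.flip();
  ByteBuffer payload = ByteBuffer.allocate(len.getInt()); // big-endian by default
  while (payload.hasRemaining()) {
    if (ch.read(payload) < 0) return null; // EOF before the payload was complete
  }
  payload.flip();
  return payload;
}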
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class NettyServerRpcConnection, method process.
void process(final ByteBuf buf) throws IOException, InterruptedException {
  if (connectionHeaderRead) {
    this.callCleanup = buf::release;
    process(new SingleByteBuff(buf.nioBuffer()));
  } else {
    ByteBuffer connectionHeader = ByteBuffer.allocate(buf.readableBytes());
    buf.readBytes(connectionHeader);
    buf.release();
    process(connectionHeader);
  }
}
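The branch taken after the connection header is read is about ownership: buf.nioBuffer() is a zero-copy view over the Netty ByteBuf's memory, so the ByteBuf cannot be released until the RPC built on that view is fully processed; storing buf::release as the callCleanup defers exactly that. A hedged sketch of the same pattern (handle and consume are hypothetical; ByteBuf, nioBuffer, and release are real Netty API):

import io.netty.buffer.ByteBuf;
import java.nio.ByteBuffer;

// Illustrative pattern: expose the ByteBuf's memory without copying, and release
// the backing memory only once the consumer is done with the view.
void handle(ByteBuf buf) {
  ByteBuffer nio = buf.nioBuffer(); // zero-copy view over the same memory
  Runnable cleanup = buf::release;  // deferred release of the backing ByteBuf
  try {
    consume(nio);
  } finally {
    cleanup.run();
  }
}

void consume(ByteBuffer data) {
  // ... process the request bytes ...
}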