use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.
the class TestHFile method readNumMetablocks.
private void readNumMetablocks(Reader reader, int n) throws IOException {
  for (int i = 0; i < n; i++) {
    ByteBuff actual = reader.getMetaBlock("HFileMeta" + i, false).getBufferWithoutHeader();
    ByteBuffer expected = ByteBuffer.wrap(("something to test" + i).getBytes());
    assertEquals("failed to match metadata", Bytes.toStringBinary(expected),
      Bytes.toStringBinary(actual.array(), actual.arrayOffset() + actual.position(),
        actual.capacity()));
  }
}
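Note that the array()/arrayOffset() access above only works for heap-backed buffers. A minimal sketch of a hypothetical helper (not part of the HBase tests) that extracts a ByteBuff's readable bytes either way, using only accessors that already appear in these snippets:

// Hypothetical helper, not in HBase: copy the readable bytes of a ByteBuff.
static byte[] readableBytes(ByteBuff buff) {
  int len = buff.limit() - buff.position();
  byte[] out = new byte[len];
  if (buff.hasArray()) {
    // Heap-backed: copy straight out of the underlying array.
    System.arraycopy(buff.array(), buff.arrayOffset() + buff.position(), out, 0, len);
  } else {
    // Direct (or multi-segment) buffer: fall back to absolute gets,
    // which leave the buffer's position untouched.
    for (int i = 0; i < len; i++) {
      out[i] = buff.get(buff.position() + i);
    }
  }
  return out;
}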
use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.
the class TestRpcServer method testAllocateByteBuffToReadInto.
@Test
public void testAllocateByteBuffToReadInto() throws Exception {
  int maxBuffersInPool = 10;
  ByteBufferPool pool = new ByteBufferPool(6 * 1024, maxBuffersInPool);
  initPoolWithAllBuffers(pool, maxBuffersInPool);
  ByteBuff buff = null;
  Pair<ByteBuff, CallCleanup> pair;
  // When the request size is less than 1/6th of the pool buffer size, an
  // on-demand-created on-heap buffer should be used.
  pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 200);
  buff = pair.getFirst();
  assertTrue(buff.hasArray());
  assertEquals(maxBuffersInPool, pool.getQueueSize());
  assertNull(pair.getSecond());
  // When the request size is more than 1/6th of the pool buffer size, a direct
  // buffer is taken from the pool.
  pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 1024);
  buff = pair.getFirst();
  assertFalse(buff.hasArray());
  assertEquals(maxBuffersInPool - 1, pool.getQueueSize());
  assertNotNull(pair.getSecond());
  // CallCleanup#run should put the buffer back into the pool.
  pair.getSecond().run();
  assertEquals(maxBuffersInPool, pool.getQueueSize());
  // Request size > pool buffer size: two pooled direct buffers are combined.
  pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 7 * 1024);
  buff = pair.getFirst();
  assertFalse(buff.hasArray());
  assertTrue(buff instanceof MultiByteBuff);
  ByteBuffer[] bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
  assertEquals(2, bbs.length);
  assertTrue(bbs[0].isDirect());
  assertTrue(bbs[1].isDirect());
  assertEquals(6 * 1024, bbs[0].limit());
  assertEquals(1024, bbs[1].limit());
  assertEquals(maxBuffersInPool - 2, pool.getQueueSize());
  assertNotNull(pair.getSecond());
  // CallCleanup#run should put the buffers back into the pool.
  pair.getSecond().run();
  assertEquals(maxBuffersInPool, pool.getQueueSize());
  // Request size > pool buffer size, but the 200-byte remainder is below the
  // reservoir threshold, so the tail buffer is created on heap.
  pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 6 * 1024 + 200);
  buff = pair.getFirst();
  assertFalse(buff.hasArray());
  assertTrue(buff instanceof MultiByteBuff);
  bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
  assertEquals(2, bbs.length);
  assertTrue(bbs[0].isDirect());
  assertFalse(bbs[1].isDirect());
  assertEquals(6 * 1024, bbs[0].limit());
  assertEquals(200, bbs[1].limit());
  assertEquals(maxBuffersInPool - 1, pool.getQueueSize());
  assertNotNull(pair.getSecond());
  // CallCleanup#run should put the buffer back into the pool.
  pair.getSecond().run();
  assertEquals(maxBuffersInPool, pool.getQueueSize());
  // Drain the pool so that only one pooled buffer remains.
  ByteBuffer[] buffers = new ByteBuffer[maxBuffersInPool - 1];
  for (int i = 0; i < maxBuffersInPool - 1; i++) {
    buffers[i] = pool.getBuffer();
  }
  // Only one 6 KB pooled buffer is left for a 20 KB request; the remaining
  // 14 KB come from an on-heap buffer.
  pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 20 * 1024);
  buff = pair.getFirst();
  assertFalse(buff.hasArray());
  assertTrue(buff instanceof MultiByteBuff);
  bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
  assertEquals(2, bbs.length);
  assertTrue(bbs[0].isDirect());
  assertFalse(bbs[1].isDirect());
  assertEquals(6 * 1024, bbs[0].limit());
  assertEquals(14 * 1024, bbs[1].limit());
  assertEquals(0, pool.getQueueSize());
  assertNotNull(pair.getSecond());
  // CallCleanup#run should put the pooled buffer back.
  pair.getSecond().run();
  assertEquals(1, pool.getQueueSize());
  // Empty the pool entirely; with no pooled buffers available, the whole
  // request is served by a single on-heap buffer.
  pool.getBuffer();
  pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 7 * 1024);
  buff = pair.getFirst();
  assertTrue(buff.hasArray());
  assertTrue(buff instanceof SingleByteBuff);
  assertEquals(7 * 1024, ((SingleByteBuff) buff).getEnclosingByteBuffer().limit());
  assertNull(pair.getSecond());
}
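The initPoolWithAllBuffers helper invoked at the top of the test is not shown in this aggregation. A plausible sketch, assuming ByteBufferPool's getBuffer()/putbackBuffer() API: since the pool creates its buffers lazily, each buffer is drawn once and returned so that getQueueSize() starts at maxBuffersInPool:

// Sketch of the helper referenced above (assumes ByteBufferPool#putbackBuffer).
private static void initPoolWithAllBuffers(ByteBufferPool pool, int maxBuffersInPool) {
  ByteBuffer[] buffers = new ByteBuffer[maxBuffersInPool];
  // Buffers are created lazily on first use; take them all out once...
  for (int i = 0; i < maxBuffersInPool; i++) {
    buffers[i] = pool.getBuffer();
  }
  // ...then return them so the pool's queue is fully populated.
  for (ByteBuffer b : buffers) {
    pool.putbackBuffer(b);
  }
}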
use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.
the class TestByteBufferIOEngine method testByteBufferIOEngineWithMBB.
@Test
public void testByteBufferIOEngineWithMBB() throws Exception {
  int capacity = 32 * 1024 * 1024; // 32 MB
  int testNum = 100;
  int maxBlockSize = 64 * 1024;
  ByteBufferIOEngine ioEngine = new ByteBufferIOEngine(capacity, false);
  int testOffsetAtStartNum = testNum / 10;
  int testOffsetAtEndNum = testNum / 10;
  for (int i = 0; i < testNum; i++) {
    byte val = (byte) (Math.random() * 255);
    int blockSize = (int) (Math.random() * maxBlockSize);
    if (blockSize == 0) {
      blockSize = 1;
    }
    byte[] byteArray = new byte[blockSize];
    for (int j = 0; j < byteArray.length; ++j) {
      byteArray[j] = val;
    }
    ByteBuffer srcBuffer = ByteBuffer.wrap(byteArray);
    int offset = 0;
    if (testOffsetAtStartNum > 0) {
      // Exercise writes at the very start of the engine's capacity.
      testOffsetAtStartNum--;
      offset = 0;
    } else if (testOffsetAtEndNum > 0) {
      // Exercise writes flush against the end of the engine's capacity.
      testOffsetAtEndNum--;
      offset = capacity - blockSize;
    } else {
      offset = (int) (Math.random() * (capacity - maxBlockSize));
    }
    ioEngine.write(srcBuffer, offset);
    BufferGrabbingDeserializer deserializer = new BufferGrabbingDeserializer();
    ioEngine.read(offset, blockSize, deserializer);
    ByteBuff dstBuffer = deserializer.buf;
    for (int j = 0; j < byteArray.length; ++j) {
      assertTrue(srcBuffer.get(j) == dstBuffer.get(j));
    }
  }
  assert testOffsetAtStartNum == 0;
  assert testOffsetAtEndNum == 0;
}
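BufferGrabbingDeserializer is also referenced without its definition; both IOEngine tests only need it to capture the ByteBuff that ioEngine.read hands to the deserializer. A sketch under that assumption (the exact CacheableDeserializer signatures varied across HBase versions):

// Sketch: a deserializer that just grabs the ByteBuff passed in by the engine.
static class BufferGrabbingDeserializer implements CacheableDeserializer<Cacheable> {
  ByteBuff buf;

  @Override
  public Cacheable deserialize(ByteBuff b) throws IOException {
    return null; // unused by these tests
  }

  @Override
  public Cacheable deserialize(ByteBuff b, boolean reuse, MemoryType memType)
      throws IOException {
    this.buf = b; // keep a reference so the test can inspect the read-back bytes
    return null;
  }

  @Override
  public int getDeserialiserIdentifier() {
    return 0;
  }
}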
use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.
the class TestByteBufferIOEngine method testByteBufferIOEngine.
@Test
public void testByteBufferIOEngine() throws Exception {
  int capacity = 32 * 1024 * 1024; // 32 MB
  int testNum = 100;
  int maxBlockSize = 64 * 1024;
  ByteBufferIOEngine ioEngine = new ByteBufferIOEngine(capacity, false);
  int testOffsetAtStartNum = testNum / 10;
  int testOffsetAtEndNum = testNum / 10;
  for (int i = 0; i < testNum; i++) {
    byte val = (byte) (Math.random() * 255);
    int blockSize = (int) (Math.random() * maxBlockSize);
    if (blockSize == 0) {
      blockSize = 1;
    }
    byte[] byteArray = new byte[blockSize];
    for (int j = 0; j < byteArray.length; ++j) {
      byteArray[j] = val;
    }
    ByteBuffer srcBuffer = ByteBuffer.wrap(byteArray);
    int offset = 0;
    if (testOffsetAtStartNum > 0) {
      testOffsetAtStartNum--;
      offset = 0;
    } else if (testOffsetAtEndNum > 0) {
      testOffsetAtEndNum--;
      offset = capacity - blockSize;
    } else {
      offset = (int) (Math.random() * (capacity - maxBlockSize));
    }
    ioEngine.write(srcBuffer, offset);
    BufferGrabbingDeserializer deserializer = new BufferGrabbingDeserializer();
    ioEngine.read(offset, blockSize, deserializer);
    ByteBuff dstBuffer = deserializer.buf;
    for (int j = 0; j < byteArray.length; ++j) {
      assertTrue(byteArray[j] == dstBuffer.get(j));
    }
  }
  assert testOffsetAtStartNum == 0;
  assert testOffsetAtEndNum == 0;
}
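Why can a read come back as a multi-segment buffer at all? The engine spreads its capacity across an array of fixed-size ByteBuffers, so a block written across a chunk boundary is read back spanning two of them. A rough, runnable illustration of the arithmetic (the 4 MB chunk size is an assumption for illustration, not read from the engine):

// Illustration only (hypothetical values): how a block straddles two chunks.
public class ChunkBoundaryExample {
  public static void main(String[] args) {
    int capacity = 32 * 1024 * 1024;  // same capacity as the tests above
    int chunkSize = 4 * 1024 * 1024;  // assumed chunk size, illustration only
    System.out.println("backing buffers: " + (capacity / chunkSize));
    int offset = chunkSize - 100;     // a block starting 100 bytes before a boundary
    int blockSize = 200;
    int firstChunk = offset / chunkSize;                  // 0
    int lastChunk = (offset + blockSize - 1) / chunkSize; // 1
    System.out.println("block spans chunks " + firstChunk + " to " + lastChunk);
  }
}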
use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.
the class RowIndexSeekerV1 method getRow.
private ByteBuffer getRow(int index) {
  int offset = rowOffsets.getIntAfterPosition(index * Bytes.SIZEOF_INT);
  ByteBuff block = currentBuffer.duplicate();
  // Skip the 4-byte key length and 4-byte value length that precede the key.
  block.position(offset + Bytes.SIZEOF_LONG);
  short rowLen = block.getShort();
  block.asSubByteBuffer(block.position(), rowLen, tmpPair);
  ByteBuffer row = tmpPair.getFirst();
  // Mark the row bytes via the [position, limit) window of the shared buffer.
  row.position(tmpPair.getSecond()).limit(tmpPair.getSecond() + rowLen);
  return row;
}
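The returned ByteBuffer shares its backing storage with the block; only its [position, limit) window marks the row. A minimal sketch of a hypothetical caller (not HBase code) that materializes the row without disturbing the shared buffer:

// Hypothetical helper: copy the bytes marked by the [position, limit) window.
static byte[] copyRow(ByteBuffer row) {
  byte[] rowBytes = new byte[row.remaining()];
  // Read through a read-only duplicate so the shared buffer's
  // position and limit stay intact for other readers.
  row.asReadOnlyBuffer().get(rowBytes);
  return rowBytes;
}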