
Example 16 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

From the class TestHFile, method readNumMetablocks.

private void readNumMetablocks(Reader reader, int n) throws IOException {
    for (int i = 0; i < n; i++) {
        // Read back meta block "HFileMeta<i>" and compare its contents with
        // what the writer stored ("something to test<i>").
        ByteBuff actual = reader.getMetaBlock("HFileMeta" + i, false).getBufferWithoutHeader();
        ByteBuffer expected = ByteBuffer.wrap(("something to test" + i).getBytes());
        assertEquals("failed to match metadata", Bytes.toStringBinary(expected),
            Bytes.toStringBinary(actual.array(), actual.arrayOffset() + actual.position(), actual.capacity()));
    }
    }
}
Also used: ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff), ByteBuffer (java.nio.ByteBuffer)
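
The assertion above reaches into the ByteBuff's backing array, which only works for heap-backed buffers (buff.hasArray() == true). A small helper sketch of the same extraction; this is hypothetical, not part of the HBase API:

static byte[] readableBytes(ByteBuff buff) {
    // Hypothetical helper, not HBase API: copy the readable bytes of a
    // heap-backed ByteBuff into a byte[], mirroring how the assertion above
    // indexes into array()/arrayOffset()/position().
    byte[] out = new byte[buff.remaining()];
    System.arraycopy(buff.array(), buff.arrayOffset() + buff.position(), out, 0, out.length);
    return out;
}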

Example 17 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

From the class TestRpcServer, method testAllocateByteBuffToReadInto.

@Test
public void testAllocateByteBuffToReadInto() throws Exception {
    int maxBuffersInPool = 10;
    ByteBufferPool pool = new ByteBufferPool(6 * 1024, maxBuffersInPool);
    initPoolWithAllBuffers(pool, maxBuffersInPool);
    ByteBuff buff = null;
    Pair<ByteBuff, CallCleanup> pair;
    // When the request size is less than 1/6th of the pool buffer size, an
    // on-heap buffer should be created on demand.
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 200);
    buff = pair.getFirst();
    assertTrue(buff.hasArray());
    assertEquals(maxBuffersInPool, pool.getQueueSize());
    assertNull(pair.getSecond());
    // When the request size is > 1/6th of the pool buffer size, a direct buffer from the pool should be used.
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 1024);
    buff = pair.getFirst();
    assertFalse(buff.hasArray());
    assertEquals(maxBuffersInPool - 1, pool.getQueueSize());
    assertNotNull(pair.getSecond());
    // CallCleanup#run should put back the BB to pool.
    pair.getSecond().run();
    assertEquals(maxBuffersInPool, pool.getQueueSize());
    // Request size > pool buffer size: expect a MultiByteBuff over two pooled direct buffers.
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 7 * 1024);
    buff = pair.getFirst();
    assertFalse(buff.hasArray());
    assertTrue(buff instanceof MultiByteBuff);
    ByteBuffer[] bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
    assertEquals(2, bbs.length);
    assertTrue(bbs[0].isDirect());
    assertTrue(bbs[1].isDirect());
    assertEquals(6 * 1024, bbs[0].limit());
    assertEquals(1024, bbs[1].limit());
    assertEquals(maxBuffersInPool - 2, pool.getQueueSize());
    assertNotNull(pair.getSecond());
    // CallCleanup#run should put back the BB to pool.
    pair.getSecond().run();
    assertEquals(maxBuffersInPool, pool.getQueueSize());
    // Request size > pool buffer size, but the remainder (200 bytes) is below the
    // 1/6th threshold, so the tail should be an on-heap buffer.
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 6 * 1024 + 200);
    buff = pair.getFirst();
    assertFalse(buff.hasArray());
    assertTrue(buff instanceof MultiByteBuff);
    bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
    assertEquals(2, bbs.length);
    assertTrue(bbs[0].isDirect());
    assertFalse(bbs[1].isDirect());
    assertEquals(6 * 1024, bbs[0].limit());
    assertEquals(200, bbs[1].limit());
    assertEquals(maxBuffersInPool - 1, pool.getQueueSize());
    assertNotNull(pair.getSecond());
    // CallCleanup#run should put back the BB to pool.
    pair.getSecond().run();
    assertEquals(maxBuffersInPool, pool.getQueueSize());
    // Drain the pool down to a single remaining buffer.
    ByteBuffer[] buffers = new ByteBuffer[maxBuffersInPool - 1];
    for (int i = 0; i < maxBuffersInPool - 1; i++) {
        buffers[i] = pool.getBuffer();
    }
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 20 * 1024);
    buff = pair.getFirst();
    assertFalse(buff.hasArray());
    assertTrue(buff instanceof MultiByteBuff);
    bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
    assertEquals(2, bbs.length);
    assertTrue(bbs[0].isDirect());
    assertFalse(bbs[1].isDirect());
    assertEquals(6 * 1024, bbs[0].limit());
    assertEquals(14 * 1024, bbs[1].limit());
    assertEquals(0, pool.getQueueSize());
    assertNotNull(pair.getSecond());
    // CallCleanup#run should put back the BB to pool.
    pair.getSecond().run();
    assertEquals(1, pool.getQueueSize());
    // Take the last buffer too; with the pool empty, the next allocation must
    // fall back to a single on-heap buffer.
    pool.getBuffer();
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 7 * 1024);
    buff = pair.getFirst();
    assertTrue(buff.hasArray());
    assertTrue(buff instanceof SingleByteBuff);
    assertEquals(7 * 1024, ((SingleByteBuff) buff).getEnclosingByteBuffer().limit());
    assertNull(pair.getSecond());
}
Also used: ByteBufferPool (org.apache.hadoop.hbase.io.ByteBufferPool), CallCleanup (org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup), SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff), MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff), ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
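
Taken together, the assertions pin down the allocation policy: a request below 1/6th of the pool buffer size (or one arriving when the pool is empty) gets an on-heap buffer, larger requests are served from pooled direct buffers, and a remainder below the threshold spills into an on-heap tail. A minimal standalone sketch of that policy (an illustrative model, not the actual RpcServer implementation; uses java.nio.ByteBuffer and java.util.ArrayList, with allocateDirect standing in for pool.getBuffer()):

static ByteBuffer[] allocateSketch(int bufSize, int buffersAvailable, int reqLen) {
    // Threshold exercised by the test: 1/6th of the pool buffer size.
    int minSizeForReservoirUse = bufSize / 6;
    if (reqLen < minSizeForReservoirUse || buffersAvailable == 0) {
        // Small request, or nothing left in the pool: one on-heap buffer.
        return new ByteBuffer[] { ByteBuffer.allocate(reqLen) };
    }
    List<ByteBuffer> parts = new ArrayList<>();
    int remaining = reqLen;
    while (remaining >= minSizeForReservoirUse && buffersAvailable > 0) {
        ByteBuffer direct = ByteBuffer.allocateDirect(bufSize);
        direct.limit(Math.min(bufSize, remaining));
        remaining -= direct.limit();
        buffersAvailable--;
        parts.add(direct);
    }
    if (remaining > 0) {
        // Leftover below the threshold (or the pool ran dry): on-heap tail.
        parts.add(ByteBuffer.allocate(remaining));
    }
    return parts.toArray(new ByteBuffer[0]);
}

Fed the same inputs as the test (bufSize = 6 * 1024, ten buffers), this reproduces each asserted shape; for instance, allocateSketch(6 * 1024, 10, 7 * 1024) yields two direct buffers with limits 6 * 1024 and 1024.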

Example 18 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

From the class TestByteBufferIOEngine, method testByteBufferIOEngineWithMBB.

@Test
public void testByteBufferIOEngineWithMBB() throws Exception {
    // Write randomly sized blocks (each filled with a single random byte) at the
    // start, at the end, and at random offsets of the engine, then read each
    // block back and verify its contents.
    // 32 MB engine capacity
    int capacity = 32 * 1024 * 1024;
    int testNum = 100;
    int maxBlockSize = 64 * 1024;
    ByteBufferIOEngine ioEngine = new ByteBufferIOEngine(capacity, false);
    int testOffsetAtStartNum = testNum / 10;
    int testOffsetAtEndNum = testNum / 10;
    for (int i = 0; i < testNum; i++) {
        byte val = (byte) (Math.random() * 255);
        int blockSize = (int) (Math.random() * maxBlockSize);
        if (blockSize == 0) {
            blockSize = 1;
        }
        byte[] byteArray = new byte[blockSize];
        for (int j = 0; j < byteArray.length; ++j) {
            byteArray[j] = val;
        }
        ByteBuffer srcBuffer = ByteBuffer.wrap(byteArray);
        int offset = 0;
        if (testOffsetAtStartNum > 0) {
            testOffsetAtStartNum--;
            offset = 0;
        } else if (testOffsetAtEndNum > 0) {
            testOffsetAtEndNum--;
            offset = capacity - blockSize;
        } else {
            offset = (int) (Math.random() * (capacity - maxBlockSize));
        }
        ioEngine.write(srcBuffer, offset);
        BufferGrabbingDeserializer deserializer = new BufferGrabbingDeserializer();
        ioEngine.read(offset, blockSize, deserializer);
        ByteBuff dstBuffer = deserializer.buf;
        for (int j = 0; j < byteArray.length; ++j) {
            assertTrue(srcBuffer.get(j) == dstBuffer.get(j));
        }
    }
    assertEquals(0, testOffsetAtStartNum);
    assertEquals(0, testOffsetAtEndNum);
}
Also used: ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
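
Stripped of the randomized sizes and offsets, the round trip above reduces to a handful of calls. A minimal usage sketch, using only calls that appear in the test and assuming the same BufferGrabbingDeserializer helper from TestByteBufferIOEngine is in scope:

int capacity = 4 * 1024;
// Same (capacity, false) construction as the tests above.
ByteBufferIOEngine engine = new ByteBufferIOEngine(capacity, false);
byte[] payload = "hello".getBytes();
// Write at offset 0, then read the same range back through the deserializer.
engine.write(ByteBuffer.wrap(payload), 0);
BufferGrabbingDeserializer deserializer = new BufferGrabbingDeserializer();
engine.read(0, payload.length, deserializer);
ByteBuff roundTripped = deserializer.buf;
for (int j = 0; j < payload.length; j++) {
    assertTrue(payload[j] == roundTripped.get(j));
}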

Example 19 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

From the class TestByteBufferIOEngine, method testByteBufferIOEngine.

@Test
public void testByteBufferIOEngine() throws Exception {
    // Same write/read round trip as Example 18; the verification loop compares
    // against the original byte[] rather than the wrapping ByteBuffer.
    // 32 MB engine capacity
    int capacity = 32 * 1024 * 1024;
    int testNum = 100;
    int maxBlockSize = 64 * 1024;
    ByteBufferIOEngine ioEngine = new ByteBufferIOEngine(capacity, false);
    int testOffsetAtStartNum = testNum / 10;
    int testOffsetAtEndNum = testNum / 10;
    for (int i = 0; i < testNum; i++) {
        byte val = (byte) (Math.random() * 255);
        int blockSize = (int) (Math.random() * maxBlockSize);
        if (blockSize == 0) {
            blockSize = 1;
        }
        byte[] byteArray = new byte[blockSize];
        for (int j = 0; j < byteArray.length; ++j) {
            byteArray[j] = val;
        }
        ByteBuffer srcBuffer = ByteBuffer.wrap(byteArray);
        int offset = 0;
        if (testOffsetAtStartNum > 0) {
            testOffsetAtStartNum--;
            offset = 0;
        } else if (testOffsetAtEndNum > 0) {
            testOffsetAtEndNum--;
            offset = capacity - blockSize;
        } else {
            offset = (int) (Math.random() * (capacity - maxBlockSize));
        }
        ioEngine.write(srcBuffer, offset);
        BufferGrabbingDeserializer deserializer = new BufferGrabbingDeserializer();
        ioEngine.read(offset, blockSize, deserializer);
        ByteBuff dstBuffer = deserializer.buf;
        for (int j = 0; j < byteArray.length; ++j) {
            assertTrue(byteArray[j] == dstBuffer.get(j));
        }
    }
    assertEquals(0, testOffsetAtStartNum);
    assertEquals(0, testOffsetAtEndNum);
}
Also used: ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)

Example 20 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

From the class RowIndexSeekerV1, method getRow.

private ByteBuffer getRow(int index) {
    // The row index stores one int offset per entry.
    int offset = rowOffsets.getIntAfterPosition(index * Bytes.SIZEOF_INT);
    ByteBuff block = currentBuffer.duplicate();
    // Skip the key-length and value-length ints (together SIZEOF_LONG bytes)
    // to land on the key, which starts with a 2-byte row length.
    block.position(offset + Bytes.SIZEOF_LONG);
    short rowLen = block.getShort();
    // Expose the row bytes as a ByteBuffer view without copying.
    block.asSubByteBuffer(block.position(), rowLen, tmpPair);
    ByteBuffer row = tmpPair.getFirst();
    row.position(tmpPair.getSecond()).limit(tmpPair.getSecond() + rowLen);
    return row;
}
Also used: ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff), ByteBuffer (java.nio.ByteBuffer)
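
The arithmetic above assumes the KeyValue layout: each cell begins with two 4-byte length fields (key length, value length), and the key itself starts with a 2-byte row length. A standalone sketch of the same decoding over a plain java.nio.ByteBuffer, under that layout assumption:

static ByteBuffer rowAt(ByteBuffer block, int cellOffset) {
    // Layout assumed: [key length: int][value length: int][row length: short][row bytes]...
    ByteBuffer dup = block.duplicate();
    // Skip the two length ints (8 bytes, the Bytes.SIZEOF_LONG skip above).
    dup.position(cellOffset + 2 * Integer.BYTES);
    short rowLen = dup.getShort();
    // View over the row bytes, no copying.
    ByteBuffer row = dup.slice();
    row.limit(rowLen);
    return row;
}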

Aggregations

ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff): 23
MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff): 9
ByteBuffer (java.nio.ByteBuffer): 8
SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff): 8
Test (org.junit.Test): 8
ArrayList (java.util.ArrayList): 7
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 5
Path (org.apache.hadoop.fs.Path): 5
DataInputStream (java.io.DataInputStream): 3
Random (java.util.Random): 3
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 3
Compression (org.apache.hadoop.hbase.io.compress.Compression): 3
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 2
ByteArrayInputStream (java.io.ByteArrayInputStream): 2
DataOutputStream (java.io.DataOutputStream): 2
File (java.io.File): 2
IOException (java.io.IOException): 2
Cell (org.apache.hadoop.hbase.Cell): 2
KeyValue (org.apache.hadoop.hbase.KeyValue): 2
FSDataInputStreamWrapper (org.apache.hadoop.hbase.io.FSDataInputStreamWrapper): 2