Search in sources:

Example 1 with MultiByteBuff

Use of org.apache.hadoop.hbase.nio.MultiByteBuff in project hbase by apache.

From the class RpcServer, the method allocateByteBuffToReadInto:

/**
   * This is extracted to a static method for better unit testing. We try to get buffer(s) from the
   * pool as much as possible.
   *
   * @param pool The ByteBufferPool to use
   * @param minSizeForPoolUse Only for buffer sizes above this do we try to use the pool; any
   *          buffer need below this size gets an on-heap ByteBuffer.
   * @param reqLen Count of bytes in the request
   */
@VisibleForTesting
static Pair<ByteBuff, CallCleanup> allocateByteBuffToReadInto(ByteBufferPool pool, int minSizeForPoolUse, int reqLen) {
    ByteBuff resultBuf;
    List<ByteBuffer> bbs = new ArrayList<>((reqLen / pool.getBufferSize()) + 1);
    int remain = reqLen;
    ByteBuffer buf = null;
    while (remain >= minSizeForPoolUse && (buf = pool.getBuffer()) != null) {
        bbs.add(buf);
        remain -= pool.getBufferSize();
    }
    ByteBuffer[] bufsFromPool = null;
    if (bbs.size() > 0) {
        bufsFromPool = new ByteBuffer[bbs.size()];
        bbs.toArray(bufsFromPool);
    }
    if (remain > 0) {
        bbs.add(ByteBuffer.allocate(remain));
    }
    if (bbs.size() > 1) {
        ByteBuffer[] items = new ByteBuffer[bbs.size()];
        bbs.toArray(items);
        resultBuf = new MultiByteBuff(items);
    } else {
        // We are backed by a single BB
        resultBuf = new SingleByteBuff(bbs.get(0));
    }
    resultBuf.limit(reqLen);
    if (bufsFromPool != null) {
        final ByteBuffer[] bufsFromPoolFinal = bufsFromPool;
        return new Pair<>(resultBuf, () -> {
            // Return all the BBs to the pool
            for (int i = 0; i < bufsFromPoolFinal.length; i++) {
                pool.putbackBuffer(bufsFromPoolFinal[i]);
            }
        });
    }
    return new Pair<>(resultBuf, null);
}
Also used: ArrayList(java.util.ArrayList) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) ByteBuff(org.apache.hadoop.hbase.nio.ByteBuff) ByteBuffer(java.nio.ByteBuffer) Pair(org.apache.hadoop.hbase.util.Pair) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
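
A minimal usage sketch of this method (not from the indexed sources; the pool size and request length are illustrative, and it assumes access to the package-private helpers the tests below use):

static void demoAllocateByteBuffToReadInto() {
    ByteBufferPool pool = new ByteBufferPool(6 * 1024, 10);
    Pair<ByteBuff, CallCleanup> pair = RpcServer.allocateByteBuffToReadInto(pool,
        RpcServer.getMinSizeForReservoirUse(pool), 7 * 1024);
    ByteBuff buff = pair.getFirst();
    try {
        // ... read the 7 KB request payload from the channel into buff ...
    } finally {
        if (pair.getSecond() != null) {
            // Non-null only when pooled buffers were handed out; running the
            // cleanup returns them to the pool.
            pair.getSecond().run();
        }
    }
}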

Example 2 with MultiByteBuff

Use of org.apache.hadoop.hbase.nio.MultiByteBuff in project hbase by apache.

From the class TestHFileBlock, the method testBlockHeapSizeInternals:

protected void testBlockHeapSizeInternals() {
    if (ClassSize.is32BitJVM()) {
        assertEquals(64, HFileBlock.MULTI_BYTE_BUFFER_HEAP_SIZE);
    } else {
        assertEquals(72, HFileBlock.MULTI_BYTE_BUFFER_HEAP_SIZE);
    }
    for (int size : new int[] { 100, 256, 12345 }) {
        byte[] byteArr = new byte[HConstants.HFILEBLOCK_HEADER_SIZE + size];
        ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
        HFileContext meta = new HFileContextBuilder()
            .withIncludesMvcc(includesMemstoreTS)
            .withIncludesTags(includesTag)
            .withHBaseCheckSum(false)
            .withCompression(Algorithm.NONE)
            .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
            .withChecksumType(ChecksumType.NULL)
            .build();
        HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, buf, HFileBlock.FILL_HEADER, -1, 0, -1, meta);
        long byteBufferExpectedSize = ClassSize.align(ClassSize.estimateBase(new MultiByteBuff(buf).getClass(), true) + HConstants.HFILEBLOCK_HEADER_SIZE + size);
        long hfileMetaSize = ClassSize.align(ClassSize.estimateBase(HFileContext.class, true));
        long hfileBlockExpectedSize = ClassSize.align(ClassSize.estimateBase(HFileBlock.class, true));
        long expected = hfileBlockExpectedSize + byteBufferExpectedSize + hfileMetaSize;
        assertEquals("Block data size: " + size + ", byte buffer expected " + "size: " + byteBufferExpectedSize + ", HFileBlock class expected " + "size: " + hfileBlockExpectedSize + ";", expected, block.heapSize());
    }
}
Also used: MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) ByteBuffer(java.nio.ByteBuffer)
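
A note on the arithmetic: ClassSize.align rounds an estimate up to the JVM's 8-byte object alignment, which is why every expected size above passes through ClassSize.align(...). A small sketch (the input values are illustrative):

static void demoAlign() {
    // 70 is not a multiple of 8, so it rounds up to 72; values already on an
    // 8-byte boundary are unchanged. This mirrors the 64 vs. 72
    // MULTI_BYTE_BUFFER_HEAP_SIZE constants asserted above.
    assertEquals(72, ClassSize.align(70));
    assertEquals(64, ClassSize.align(64));
}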

Example 3 with MultiByteBuff

Use of org.apache.hadoop.hbase.nio.MultiByteBuff in project hbase by apache.

From the class TestByteBuffUtils, the method testCopyAndCompare:

@Test
public void testCopyAndCompare() throws Exception {
    ByteBuffer bb1 = ByteBuffer.allocate(50);
    ByteBuffer bb2 = ByteBuffer.allocate(50);
    MultiByteBuff src = new MultiByteBuff(bb1, bb2);
    for (int i = 0; i < 7; i++) {
        src.putLong(8L);
    }
    src.put((byte) 1);
    src.put((byte) 1);
    ByteBuffer bb3 = ByteBuffer.allocate(50);
    ByteBuffer bb4 = ByteBuffer.allocate(50);
    MultiByteBuff mbbDst = new MultiByteBuff(bb3, bb4);
    // Copy from MBB to MBB
    mbbDst.put(0, src, 0, 100);
    int compareTo = ByteBuff.compareTo(src, 0, 100, mbbDst, 0, 100);
    assertTrue(compareTo == 0);
    // Copy from MBB to SBB
    bb3 = ByteBuffer.allocate(100);
    SingleByteBuff sbbDst = new SingleByteBuff(bb3);
    src.rewind();
    sbbDst.put(0, src, 0, 100);
    compareTo = ByteBuff.compareTo(src, 0, 100, sbbDst, 0, 100);
    assertTrue(compareTo == 0);
    // Copy from SBB to SBB
    bb3 = ByteBuffer.allocate(100);
    SingleByteBuff sbb = new SingleByteBuff(bb3);
    for (int i = 0; i < 7; i++) {
        sbb.putLong(8L);
    }
    sbb.put((byte) 1);
    sbb.put((byte) 1);
    bb4 = ByteBuffer.allocate(100);
    sbbDst = new SingleByteBuff(bb4);
    sbbDst.put(0, sbb, 0, 100);
    compareTo = ByteBuff.compareTo(sbb, 0, 100, sbbDst, 0, 100);
    assertTrue(compareTo == 0);
    // Copy from SBB to MBB
    sbb.rewind();
    mbbDst = new MultiByteBuff(bb3, bb4);
    mbbDst.rewind();
    mbbDst.put(0, sbb, 0, 100);
    compareTo = ByteBuff.compareTo(sbb, 0, 100, mbbDst, 0, 100);
    assertTrue(compareTo == 0);
}
Also used: SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
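
What these copies exercise is that a MultiByteBuff presents several ByteBuffers as one logical buffer, so primitive reads and writes can straddle the seam between backing buffers (in the test above, the seventh long spans the two 50-byte buffers). A minimal sketch, with illustrative buffer sizes:

static void demoSeamCrossing() {
    ByteBuffer left = ByteBuffer.allocate(5);
    ByteBuffer right = ByteBuffer.allocate(5);
    MultiByteBuff mbb = new MultiByteBuff(left, right);
    // The 8 bytes of the long straddle the boundary between the two buffers.
    mbb.putLong(42L);
    mbb.rewind();
    assertEquals(42L, mbb.getLong());
}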

Example 4 with MultiByteBuff

Use of org.apache.hadoop.hbase.nio.MultiByteBuff in project hbase by apache.

From the class TestRpcServer, the method testAllocateByteBuffToReadInto:

@Test
public void testAllocateByteBuffToReadInto() throws Exception {
    int maxBuffersInPool = 10;
    ByteBufferPool pool = new ByteBufferPool(6 * 1024, maxBuffersInPool);
    initPoolWithAllBuffers(pool, maxBuffersInPool);
    ByteBuff buff = null;
    Pair<ByteBuff, CallCleanup> pair;
    // When the request size is less than 1/6th of the pool buffer size, we should get an
    // on-demand-created on-heap buffer.
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 200);
    buff = pair.getFirst();
    assertTrue(buff.hasArray());
    assertEquals(maxBuffersInPool, pool.getQueueSize());
    assertNull(pair.getSecond());
    // When the request size is > 1/6th of the pool buffer size.
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 1024);
    buff = pair.getFirst();
    assertFalse(buff.hasArray());
    assertEquals(maxBuffersInPool - 1, pool.getQueueSize());
    assertNotNull(pair.getSecond());
    // CallCleanup#run should put back the BB to pool.
    pair.getSecond().run();
    assertEquals(maxBuffersInPool, pool.getQueueSize());
    // Request size> pool buffer size
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 7 * 1024);
    buff = pair.getFirst();
    assertFalse(buff.hasArray());
    assertTrue(buff instanceof MultiByteBuff);
    ByteBuffer[] bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
    assertEquals(2, bbs.length);
    assertTrue(bbs[0].isDirect());
    assertTrue(bbs[1].isDirect());
    assertEquals(6 * 1024, bbs[0].limit());
    assertEquals(1024, bbs[1].limit());
    assertEquals(maxBuffersInPool - 2, pool.getQueueSize());
    assertNotNull(pair.getSecond());
    // CallCleanup#run should put back the BB to pool.
    pair.getSecond().run();
    assertEquals(maxBuffersInPool, pool.getQueueSize());
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 6 * 1024 + 200);
    buff = pair.getFirst();
    assertFalse(buff.hasArray());
    assertTrue(buff instanceof MultiByteBuff);
    bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
    assertEquals(2, bbs.length);
    assertTrue(bbs[0].isDirect());
    assertFalse(bbs[1].isDirect());
    assertEquals(6 * 1024, bbs[0].limit());
    assertEquals(200, bbs[1].limit());
    assertEquals(maxBuffersInPool - 1, pool.getQueueSize());
    assertNotNull(pair.getSecond());
    // CallCleanup#run should put back the BB to pool.
    pair.getSecond().run();
    assertEquals(maxBuffersInPool, pool.getQueueSize());
    ByteBuffer[] buffers = new ByteBuffer[maxBuffersInPool - 1];
    for (int i = 0; i < maxBuffersInPool - 1; i++) {
        buffers[i] = pool.getBuffer();
    }
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 20 * 1024);
    buff = pair.getFirst();
    assertFalse(buff.hasArray());
    assertTrue(buff instanceof MultiByteBuff);
    bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
    assertEquals(2, bbs.length);
    assertTrue(bbs[0].isDirect());
    assertFalse(bbs[1].isDirect());
    assertEquals(6 * 1024, bbs[0].limit());
    assertEquals(14 * 1024, bbs[1].limit());
    assertEquals(0, pool.getQueueSize());
    assertNotNull(pair.getSecond());
    // CallCleanup#run should put back the BB to pool.
    pair.getSecond().run();
    assertEquals(1, pool.getQueueSize());
    pool.getBuffer();
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 7 * 1024);
    buff = pair.getFirst();
    assertTrue(buff.hasArray());
    assertTrue(buff instanceof SingleByteBuff);
    assertEquals(7 * 1024, ((SingleByteBuff) buff).getEnclosingByteBuffer().limit());
    assertNull(pair.getSecond());
}
Also used: ByteBufferPool(org.apache.hadoop.hbase.io.ByteBufferPool) CallCleanup(org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) ByteBuff(org.apache.hadoop.hbase.nio.ByteBuff) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
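
The pool behavior this test leans on, shown in isolation (a sketch built only from the calls the examples above already use; the capacity here is illustrative):

static void demoPool() {
    ByteBufferPool pool = new ByteBufferPool(6 * 1024, 2);
    ByteBuffer a = pool.getBuffer();
    ByteBuffer b = pool.getBuffer();
    // Once the pool is exhausted, getBuffer() returns null rather than blocking;
    // the while loop in allocateByteBuffToReadInto (Example 1) relies on this to
    // fall back to an on-heap ByteBuffer for the remainder of the request.
    pool.putbackBuffer(a);
    pool.putbackBuffer(b);
}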

Example 5 with MultiByteBuff

Use of org.apache.hadoop.hbase.nio.MultiByteBuff in project hbase by apache.

From the class TestHFileBlockIndex, the method testSecondaryIndexBinarySearch:

@Test
public void testSecondaryIndexBinarySearch() throws IOException {
    int numTotalKeys = 99;
    // Ensure no one made this even.
    assertTrue(numTotalKeys % 2 == 1);
    // We only add odd-index keys into the array that we will binary-search.
    int numSearchedKeys = (numTotalKeys - 1) / 2;
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    dos.writeInt(numSearchedKeys);
    int curAllEntriesSize = 0;
    int numEntriesAdded = 0;
    // Only odd-index elements of this array are used to keep the secondary
    // index entries of the corresponding keys.
    int[] secondaryIndexEntries = new int[numTotalKeys];
    for (int i = 0; i < numTotalKeys; ++i) {
        byte[] k = RandomKeyValueUtil.randomOrderedKey(rand, i * 2);
        KeyValue cell = new KeyValue(k, Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("val"));
        //KeyValue cell = new KeyValue.KeyOnlyKeyValue(k, 0, k.length);
        keys.add(cell.getKey());
        String msgPrefix = "Key #" + i + " (" + Bytes.toStringBinary(k) + "): ";
        StringBuilder padding = new StringBuilder();
        while (msgPrefix.length() + padding.length() < 70) padding.append(' ');
        msgPrefix += padding;
        if (i % 2 == 1) {
            dos.writeInt(curAllEntriesSize);
            secondaryIndexEntries[i] = curAllEntriesSize;
            LOG.info(msgPrefix + "secondary index entry #" + ((i - 1) / 2) + ", offset " + curAllEntriesSize);
            curAllEntriesSize += cell.getKey().length + HFileBlockIndex.SECONDARY_INDEX_ENTRY_OVERHEAD;
            ++numEntriesAdded;
        } else {
            secondaryIndexEntries[i] = -1;
            LOG.info(msgPrefix + "not in the searched array");
        }
    }
    // Make sure the keys are increasing.
    for (int i = 0; i < keys.size() - 1; ++i) assertTrue(CellComparator.COMPARATOR.compare(new KeyValue.KeyOnlyKeyValue(keys.get(i), 0, keys.get(i).length), new KeyValue.KeyOnlyKeyValue(keys.get(i + 1), 0, keys.get(i + 1).length)) < 0);
    dos.writeInt(curAllEntriesSize);
    assertEquals(numSearchedKeys, numEntriesAdded);
    int secondaryIndexOffset = dos.size();
    assertEquals(Bytes.SIZEOF_INT * (numSearchedKeys + 2), secondaryIndexOffset);
    for (int i = 1; i <= numTotalKeys - 1; i += 2) {
        assertEquals(dos.size(), secondaryIndexOffset + secondaryIndexEntries[i]);
        long dummyFileOffset = getDummyFileOffset(i);
        int dummyOnDiskSize = getDummyOnDiskSize(i);
        LOG.debug("Storing file offset=" + dummyFileOffset + " and onDiskSize=" + dummyOnDiskSize + " at offset " + dos.size());
        dos.writeLong(dummyFileOffset);
        dos.writeInt(dummyOnDiskSize);
        LOG.debug("Stored key " + ((i - 1) / 2) + " at offset " + dos.size());
        dos.write(keys.get(i));
    }
    dos.writeInt(curAllEntriesSize);
    ByteBuffer nonRootIndex = ByteBuffer.wrap(baos.toByteArray());
    for (int i = 0; i < numTotalKeys; ++i) {
        byte[] searchKey = keys.get(i);
        byte[] arrayHoldingKey = new byte[searchKey.length + searchKey.length / 2];
        // To make things a bit more interesting, store the key we are looking
        // for at a non-zero offset in a new array.
        System.arraycopy(searchKey, 0, arrayHoldingKey, searchKey.length / 2, searchKey.length);
        KeyValue.KeyOnlyKeyValue cell = new KeyValue.KeyOnlyKeyValue(arrayHoldingKey, searchKey.length / 2, searchKey.length);
        int searchResult = BlockIndexReader.binarySearchNonRootIndex(cell, new MultiByteBuff(nonRootIndex), CellComparator.COMPARATOR);
        String lookupFailureMsg = "Failed to look up key #" + i + " (" + Bytes.toStringBinary(searchKey) + ")";
        int expectedResult;
        int referenceItem;
        if (i % 2 == 1) {
            // This key is in the array we search as the element (i - 1) / 2. Make
            // sure we find it.
            expectedResult = (i - 1) / 2;
            referenceItem = i;
        } else {
            // This key is not in the array but between two elements on the array,
            // in the beginning, or in the end. The result should be the previous
            // key in the searched array, or -1 for i = 0.
            expectedResult = i / 2 - 1;
            referenceItem = i - 1;
        }
        assertEquals(lookupFailureMsg, expectedResult, searchResult);
        // Now test that we can get the offset and the on-disk size using a
        // higher-level API function.
        boolean locateBlockResult = (BlockIndexReader.locateNonRootIndexEntry(new MultiByteBuff(nonRootIndex), cell, CellComparator.COMPARATOR) != -1);
        if (i == 0) {
            assertFalse(locateBlockResult);
        } else {
            assertTrue(locateBlockResult);
            String errorMsg = "i=" + i + ", position=" + nonRootIndex.position();
            assertEquals(errorMsg, getDummyFileOffset(referenceItem), nonRootIndex.getLong());
            assertEquals(errorMsg, getDummyOnDiskSize(referenceItem), nonRootIndex.getInt());
        }
    }
}
Also used: KeyValue(org.apache.hadoop.hbase.KeyValue) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DataOutputStream(java.io.DataOutputStream) ByteArrayOutputStream(java.io.ByteArrayOutputStream) ByteBuffer(java.nio.ByteBuffer) MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) Test(org.junit.Test)
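
For orientation, the non-root index layout this test serializes (reconstructed from the writes above) is: an int count of searched keys, one int entry offset per searched key, a final int total-entries size, and then the entries themselves, each a long file offset and an int on-disk size followed by the raw key bytes. A sketch of the per-entry size arithmetic:

static int secondaryIndexEntrySize(byte[] key) {
    // [long fileOffset][int onDiskSize][key bytes] -- the fixed prefix is what
    // HFileBlockIndex.SECONDARY_INDEX_ENTRY_OVERHEAD accounts for in the loop above.
    return Bytes.SIZEOF_LONG + Bytes.SIZEOF_INT + key.length;
}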

Aggregations

MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff): 10
ByteBuffer (java.nio.ByteBuffer): 8
SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff): 4
Test (org.junit.Test): 4
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 3
DataOutputStream (java.io.DataOutputStream): 3
ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff): 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1
DataInputStream (java.io.DataInputStream): 1
ArrayList (java.util.ArrayList): 1
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1
KeyValue (org.apache.hadoop.hbase.KeyValue): 1
ByteBufferPool (org.apache.hadoop.hbase.io.ByteBufferPool): 1
CallCleanup (org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup): 1
Pair (org.apache.hadoop.hbase.util.Pair): 1