Example 26 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by Apache.

The class FileIOEngine, method read.

/**
   * Reads 'length' bytes from the file starting at 'offset' and deserializes them into a
   * Cacheable.
   * @param offset The offset in the file of the first byte to read
   * @param length The number of bytes to read; a buffer of this size is allocated for the
   *               read from the file channel
   * @param deserializer The deserializer used to build a Cacheable from the bytes read
   * @return the Cacheable deserialized from the bytes read
   * @throws IOException if reading from the file fails
   */
@Override
public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer) throws IOException {
    ByteBuffer dstBuffer = ByteBuffer.allocate(length);
    accessFile(readAccessor, dstBuffer, offset);
    // The data was copied from the file into a freshly allocated on-heap buffer, so there is
    // no shared memory to worry about; verify the expected number of bytes was read before
    // handing the buffer to the deserializer.
    if (dstBuffer.limit() != length) {
        throw new RuntimeException("Only " + dstBuffer.limit() + " bytes read, " + length + " expected");
    }
    return deserializer.deserialize(new SingleByteBuff(dstBuffer), true, MemoryType.EXCLUSIVE);
}
Also used : SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) ByteBuffer(java.nio.ByteBuffer)
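
The read path above follows a common NIO pattern: allocate a buffer of the requested size, fill it from a file channel at a given offset, and verify the byte count before handing it to a consumer. Below is a minimal standalone sketch of that pattern using plain java.nio; the class name, helper method, and file path are illustrative and not part of FileIOEngine.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class PositionalReadSketch {

    // Read exactly 'length' bytes starting at 'offset', mirroring the
    // "read then verify the count" pattern used by FileIOEngine.read above.
    static ByteBuffer readFully(FileChannel channel, long offset, int length) throws IOException {
        ByteBuffer dst = ByteBuffer.allocate(length);
        while (dst.hasRemaining()) {
            // Positional read: does not move the channel's own position.
            int n = channel.read(dst, offset + dst.position());
            if (n < 0) {
                throw new IOException("Only " + dst.position() + " bytes read, " + length + " expected");
            }
        }
        dst.flip(); // position = 0, limit = bytes read, ready for the consumer
        return dst;
    }

    public static void main(String[] args) throws IOException {
        // "bucket.cache" is an illustrative path, not an HBase artifact.
        try (FileChannel ch = FileChannel.open(Paths.get("bucket.cache"), StandardOpenOption.READ)) {
            ByteBuffer buf = readFully(ch, 0, 4096);
            System.out.println("read " + buf.limit() + " bytes");
        }
    }
}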

Example 27 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by Apache.

The class TestPrefixTreeEncoding, method testScanWithRandomData.

@Test
public void testScanWithRandomData() throws Exception {
    PrefixTreeCodec encoder = new PrefixTreeCodec();
    ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
    DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
    HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(false).withIncludesTags(includesTag).withCompression(Algorithm.NONE).build();
    HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
    generateRandomTestData(kvset, numBatchesWritten++, includesTag, encoder, blkEncodingCtx, userDataStream);
    EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR, encoder.newDataBlockDecodingContext(meta));
    byte[] onDiskBytes = baosInMemory.toByteArray();
    ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE, onDiskBytes.length - DataBlockEncoding.ID_SIZE);
    seeker.setCurrentBuffer(new SingleByteBuff(readBuffer));
    Cell previousKV = null;
    do {
        Cell currentKV = seeker.getCell();
        System.out.println(currentKV);
        if (previousKV != null && CellComparator.COMPARATOR.compare(currentKV, previousKV) < 0) {
            dumpInputKVSet();
            fail("Current kv " + currentKV + " is smaller than previous keyvalue " + previousKV);
        }
        if (!includesTag) {
            assertFalse(currentKV.getTagsLength() > 0);
        } else {
            Assert.assertTrue(currentKV.getTagsLength() > 0);
        }
        previousKV = currentKV;
    } while (seeker.next());
}
Also used : PrefixTreeCodec(org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeCodec) EncodedSeeker(org.apache.hadoop.hbase.io.encoding.DataBlockEncoder.EncodedSeeker) DataOutputStream(java.io.DataOutputStream) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) HFileContextBuilder(org.apache.hadoop.hbase.io.hfile.HFileContextBuilder) ByteArrayOutputStream(java.io.ByteArrayOutputStream) ByteBuffer(java.nio.ByteBuffer) Cell(org.apache.hadoop.hbase.Cell) HFileContext(org.apache.hadoop.hbase.io.hfile.HFileContext) Test(org.junit.Test)

Example 28 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by Apache.

The class TestPrefixTreeEncoding, method verifySeeking.

private void verifySeeking(EncodedSeeker encodeSeeker, ByteBuffer encodedData, int batchId) {
    List<KeyValue> kvList = new ArrayList<>();
    for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
        kvList.clear();
        encodeSeeker.setCurrentBuffer(new SingleByteBuff(encodedData));
        KeyValue firstOnRow = KeyValueUtil.createFirstOnRow(getRowKey(batchId, i));
        encodeSeeker.seekToKeyInBlock(new KeyValue.KeyOnlyKeyValue(firstOnRow.getBuffer(), firstOnRow.getKeyOffset(), firstOnRow.getKeyLength()), false);
        boolean hasMoreOfEncodeScanner = encodeSeeker.next();
        CollectionBackedScanner collectionScanner = new CollectionBackedScanner(this.kvset);
        boolean hasMoreOfCollectionScanner = collectionScanner.seek(firstOnRow);
        if (hasMoreOfEncodeScanner != hasMoreOfCollectionScanner) {
            dumpInputKVSet();
            fail("Get error result after seeking " + firstOnRow);
        }
        if (hasMoreOfEncodeScanner) {
            if (CellComparator.COMPARATOR.compare(encodeSeeker.getCell(), collectionScanner.peek()) != 0) {
                dumpInputKVSet();
                fail("Expected " + collectionScanner.peek() + " actual " + encodeSeeker.getCell() + ", after seeking " + firstOnRow);
            }
        }
    }
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) ArrayList(java.util.ArrayList) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) CollectionBackedScanner(org.apache.hadoop.hbase.util.CollectionBackedScanner)

Example 29 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by Apache.

The class TestRpcServer, method testAllocateByteBuffToReadInto.

@Test
public void testAllocateByteBuffToReadInto() throws Exception {
    int maxBuffersInPool = 10;
    ByteBufferPool pool = new ByteBufferPool(6 * 1024, maxBuffersInPool);
    initPoolWithAllBuffers(pool, maxBuffersInPool);
    ByteBuff buff = null;
    Pair<ByteBuff, CallCleanup> pair;
    // When the request size is less than 1/6th of the pool buffer size, an on-demand
    // on-heap buffer should be used.
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 200);
    buff = pair.getFirst();
    assertTrue(buff.hasArray());
    assertEquals(maxBuffersInPool, pool.getQueueSize());
    assertNull(pair.getSecond());
    // When the request size is > 1/6th of the pool buffer size, a direct buffer from the pool is used.
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 1024);
    buff = pair.getFirst();
    assertFalse(buff.hasArray());
    assertEquals(maxBuffersInPool - 1, pool.getQueueSize());
    assertNotNull(pair.getSecond());
    // CallCleanup#run should put back the BB to pool.
    pair.getSecond().run();
    assertEquals(maxBuffersInPool, pool.getQueueSize());
    // Request size > pool buffer size
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 7 * 1024);
    buff = pair.getFirst();
    assertFalse(buff.hasArray());
    assertTrue(buff instanceof MultiByteBuff);
    ByteBuffer[] bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
    assertEquals(2, bbs.length);
    assertTrue(bbs[0].isDirect());
    assertTrue(bbs[1].isDirect());
    assertEquals(6 * 1024, bbs[0].limit());
    assertEquals(1024, bbs[1].limit());
    assertEquals(maxBuffersInPool - 2, pool.getQueueSize());
    assertNotNull(pair.getSecond());
    // CallCleanup#run should put back the BB to pool.
    pair.getSecond().run();
    assertEquals(maxBuffersInPool, pool.getQueueSize());
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 6 * 1024 + 200);
    buff = pair.getFirst();
    assertFalse(buff.hasArray());
    assertTrue(buff instanceof MultiByteBuff);
    bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
    assertEquals(2, bbs.length);
    assertTrue(bbs[0].isDirect());
    assertFalse(bbs[1].isDirect());
    assertEquals(6 * 1024, bbs[0].limit());
    assertEquals(200, bbs[1].limit());
    assertEquals(maxBuffersInPool - 1, pool.getQueueSize());
    assertNotNull(pair.getSecond());
    // CallCleanup#run should put back the BB to pool.
    pair.getSecond().run();
    assertEquals(maxBuffersInPool, pool.getQueueSize());
    ByteBuffer[] buffers = new ByteBuffer[maxBuffersInPool - 1];
    for (int i = 0; i < maxBuffersInPool - 1; i++) {
        buffers[i] = pool.getBuffer();
    }
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 20 * 1024);
    buff = pair.getFirst();
    assertFalse(buff.hasArray());
    assertTrue(buff instanceof MultiByteBuff);
    bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
    assertEquals(2, bbs.length);
    assertTrue(bbs[0].isDirect());
    assertFalse(bbs[1].isDirect());
    assertEquals(6 * 1024, bbs[0].limit());
    assertEquals(14 * 1024, bbs[1].limit());
    assertEquals(0, pool.getQueueSize());
    assertNotNull(pair.getSecond());
    // CallCleanup#run should put back the BB to pool.
    pair.getSecond().run();
    assertEquals(1, pool.getQueueSize());
    pool.getBuffer();
    pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool), 7 * 1024);
    buff = pair.getFirst();
    assertTrue(buff.hasArray());
    assertTrue(buff instanceof SingleByteBuff);
    assertEquals(7 * 1024, ((SingleByteBuff) buff).getEnclosingByteBuffer().limit());
    assertNull(pair.getSecond());
}
Also used : ByteBufferPool(org.apache.hadoop.hbase.io.ByteBufferPool) CallCleanup(org.apache.hadoop.hbase.ipc.RpcServer.CallCleanup) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) ByteBuff(org.apache.hadoop.hbase.nio.ByteBuff) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
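
The assertions above cover three sizing regimes: a request smaller than the reservoir threshold gets an on-demand heap buffer, a request up to the pool buffer size gets one pooled direct buffer, and a larger request is stitched together from several pooled buffers plus an on-heap tail once the threshold is no longer met or the pool runs dry. Below is a minimal standalone sketch of that sizing arithmetic, assuming the threshold is one sixth of the pool buffer size as the test's comments suggest; the class and method names are illustrative and not HBase APIs.

public class ReadBuffSizingSketch {

    // Illustrative breakdown of a read request against a pool of fixed-size direct
    // buffers, following the behaviour asserted in the test above.
    static String plan(int requestSize, int poolBufferSize, int freePoolBuffers) {
        int minSizeForReservoirUse = poolBufferSize / 6; // assumed threshold
        int pooled = 0, remaining = requestSize;
        // Keep taking pooled direct buffers while the remainder is still worth a
        // reservoir buffer and the pool is not exhausted.
        while (remaining >= minSizeForReservoirUse && pooled < freePoolBuffers) {
            remaining -= Math.min(remaining, poolBufferSize);
            pooled++;
        }
        if (pooled == 0) {
            return "on-heap buffer of " + requestSize + " bytes (below threshold or pool empty)";
        }
        if (remaining == 0) {
            return pooled + " pooled direct buffer(s), no heap tail";
        }
        return pooled + " pooled direct buffer(s) + on-heap tail of " + remaining + " bytes";
    }

    public static void main(String[] args) {
        int poolBufferSize = 6 * 1024;
        System.out.println(plan(200, poolBufferSize, 10));          // below threshold -> heap
        System.out.println(plan(1024, poolBufferSize, 10));         // one pooled direct buffer
        System.out.println(plan(7 * 1024, poolBufferSize, 10));     // two pooled direct buffers
        System.out.println(plan(6 * 1024 + 200, poolBufferSize, 10)); // one pooled buffer + 200-byte heap tail
        System.out.println(plan(20 * 1024, poolBufferSize, 1));     // pool exhausted -> large heap tail
    }
}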

Example 30 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by Apache.

The class ByteBufferArray, method asSubByteBuff.

/**
   * Creates a ByteBuff over the underlying array of ByteBuffers, starting at the given offset
   * and covering the given length. For example, if the array consists of 4 buffers of length 10
   * each and we call asSubByteBuff(5, 10), the result is an MBB made up of two BBs: the first a
   * slice of buffer 0 from 'position' 5 with 'length' 5, and the second a slice of buffer 1 from
   * 'position' 0 with 'length' 5.
   * @param offset the offset into the overall array at which the sub-buffer starts
   * @param len the number of bytes to cover
   * @return a ByteBuff formed from the underlying ByteBuffers
   */
public ByteBuff asSubByteBuff(long offset, int len) {
    assert len >= 0;
    long end = offset + len;
    int startBuffer = (int) (offset / bufferSize), startBufferOffset = (int) (offset % bufferSize);
    int endBuffer = (int) (end / bufferSize), endBufferOffset = (int) (end % bufferSize);
    // The last buffer in the array is a dummy one with 0 capacity; avoid handing it back.
    if (endBuffer == this.bufferCount) {
        endBuffer--;
        endBufferOffset = bufferSize;
    }
    assert startBuffer >= 0 && startBuffer < bufferCount;
    assert endBuffer >= 0 && endBuffer < bufferCount || (endBuffer == bufferCount && endBufferOffset == 0);
    if (startBuffer >= buffers.length || startBuffer < 0) {
        String msg = "Failed subArray, start=" + offset + ",startBuffer=" + startBuffer + ",bufferSize=" + bufferSize;
        LOG.error(msg);
        throw new RuntimeException(msg);
    }
    int srcIndex = 0, cnt = -1;
    ByteBuffer[] mbb = new ByteBuffer[endBuffer - startBuffer + 1];
    for (int i = startBuffer, j = 0; i <= endBuffer; ++i, j++) {
        ByteBuffer bb = buffers[i].duplicate();
        if (i == startBuffer) {
            cnt = bufferSize - startBufferOffset;
            if (cnt > len)
                cnt = len;
            bb.limit(startBufferOffset + cnt).position(startBufferOffset);
        } else if (i == endBuffer) {
            cnt = endBufferOffset;
            bb.position(0).limit(cnt);
        } else {
            cnt = bufferSize;
            bb.position(0).limit(cnt);
        }
        mbb[j] = bb.slice();
        srcIndex += cnt;
    }
    assert srcIndex == len;
    if (mbb.length > 1) {
        return new MultiByteBuff(mbb);
    } else {
        return new SingleByteBuff(mbb[0]);
    }
}
Also used : SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) ByteBuffer(java.nio.ByteBuffer)
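
The start/end index arithmetic at the top of asSubByteBuff is what decides whether the result spans one buffer (SingleByteBuff) or several (MultiByteBuff). Below is a minimal standalone sketch that reproduces just that arithmetic for the 4-buffers-of-length-10 example from the javadoc; the class and method names are illustrative.

public class SubBuffIndexSketch {

    // Reproduces the start/end index computation from asSubByteBuff above for a
    // hypothetical array of equally sized buffers.
    static void describe(long offset, int len, int bufferSize, int bufferCount) {
        long end = offset + len;
        int startBuffer = (int) (offset / bufferSize), startBufferOffset = (int) (offset % bufferSize);
        int endBuffer = (int) (end / bufferSize), endBufferOffset = (int) (end % bufferSize);
        if (endBuffer == bufferCount) { // landed exactly on the trailing dummy buffer
            endBuffer--;
            endBufferOffset = bufferSize;
        }
        int spanned = endBuffer - startBuffer + 1;
        System.out.println("offset=" + offset + " len=" + len
            + " -> buffers " + startBuffer + ".." + endBuffer
            + " (" + (spanned > 1 ? "MultiByteBuff" : "SingleByteBuff") + ")"
            + ", first slice starts at " + startBufferOffset
            + ", last slice ends at " + endBufferOffset);
    }

    public static void main(String[] args) {
        // The javadoc example: 4 buffers of length 10, asSubByteBuff(5, 10)
        describe(5, 10, 10, 4);   // buffers 0..1 -> MultiByteBuff of two slices
        describe(12, 5, 10, 4);   // buffer 1 only -> SingleByteBuff
        describe(35, 5, 10, 4);   // end hits the dummy buffer -> clamped to buffer 3
    }
}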

Aggregations

SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff)47 ByteBuffer (java.nio.ByteBuffer)27 Test (org.junit.Test)27 MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff)21 ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff)19 FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)12 ArrayList (java.util.ArrayList)9 KeyValue (org.apache.hadoop.hbase.KeyValue)9 Cell (org.apache.hadoop.hbase.Cell)8 DataOutputStream (java.io.DataOutputStream)7 Path (org.apache.hadoop.fs.Path)7 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)6 HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext)6 HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder)6 ByteArrayOutputStream (java.io.ByteArrayOutputStream)5 FSDataInputStreamWrapper (org.apache.hadoop.hbase.io.FSDataInputStreamWrapper)5 Compression (org.apache.hadoop.hbase.io.compress.Compression)4 Configuration (org.apache.hadoop.conf.Configuration)3 ByteArrayInputStream (java.io.ByteArrayInputStream)2 Random (java.util.Random)2