
Example 1 with ByteBufferExtendedCell

Use of org.apache.hadoop.hbase.ByteBufferExtendedCell in project hbase by apache.

From the class TestTagCompressionContext, method testCompressUncompressTagsWithOffheapKeyValue2:

@Test
public void testCompressUncompressTagsWithOffheapKeyValue2() throws Exception {
    // This is HBase's org.apache.hadoop.hbase.io.ByteArrayOutputStream (it exposes
    // getBuffer(), which java.io's does not), wrapped in a DataOutputStream that
    // can also write efficiently from ByteBuffers.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream daos = new ByteBufferWriterDataOutputStream(baos);
    TagCompressionContext context = new TagCompressionContext(LRUDictionary.class, Byte.MAX_VALUE);
    ByteBufferExtendedCell kv1 = (ByteBufferExtendedCell) createOffheapKVWithTags(1);
    int tagsLength1 = kv1.getTagsLength();
    context.compressTags(daos, kv1.getTagsByteBuffer(), kv1.getTagsPosition(), tagsLength1);
    ByteBufferExtendedCell kv2 = (ByteBufferExtendedCell) createOffheapKVWithTags(3);
    int tagsLength2 = kv2.getTagsLength();
    context.compressTags(daos, kv2.getTagsByteBuffer(), kv2.getTagsPosition(), tagsLength2);
    // Reset the dictionary before reading back; decompression must rebuild it
    // from the same empty state the compressor started with.
    context.clear();
    ByteArrayInputStream bais = new ByteArrayInputStream(baos.getBuffer());
    byte[] dest = new byte[tagsLength1];
    context.uncompressTags(bais, dest, 0, tagsLength1);
    assertTrue(Bytes.equals(kv1.getTagsArray(), kv1.getTagsOffset(), tagsLength1, dest, 0, tagsLength1));
    dest = new byte[tagsLength2];
    context.uncompressTags(bais, dest, 0, tagsLength2);
    assertTrue(Bytes.equals(kv2.getTagsArray(), kv2.getTagsOffset(), tagsLength2, dest, 0, tagsLength2));
}
Also used: ByteArrayInputStream (java.io.ByteArrayInputStream), DataOutputStream (java.io.DataOutputStream), ByteBufferExtendedCell (org.apache.hadoop.hbase.ByteBufferExtendedCell), Test (org.junit.Test)
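
The helper createOffheapKVWithTags is not shown on this page. A plausible reconstruction, assuming HBase's ArrayBackedTag, KeyValue, ByteBufferUtils and ByteBufferKeyValue APIs: build an on-heap KeyValue carrying the requested number of tags, copy its serialized bytes into a direct (off-heap) ByteBuffer, and wrap that as a ByteBufferKeyValue, which is a ByteBufferExtendedCell. The fixture values below are illustrative, not taken from the real test.

// Hypothetical sketch of the test helper; ROW, CF, Q and V are assumed fixtures.
private static final byte[] ROW = Bytes.toBytes("r1");
private static final byte[] CF = Bytes.toBytes("f");
private static final byte[] Q = Bytes.toBytes("q");
private static final byte[] V = Bytes.toBytes("v");

private Cell createOffheapKVWithTags(int numTags) {
    List<Tag> tags = new ArrayList<>(numTags);
    for (int i = 0; i < numTags; i++) {
        tags.add(new ArrayBackedTag((byte) i, "tagValue" + i));
    }
    KeyValue kv = new KeyValue(ROW, CF, Q, 1234L, V, tags);
    // Copy the serialized KeyValue into a direct (off-heap) buffer...
    ByteBuffer dbb = ByteBuffer.allocateDirect(kv.getBuffer().length);
    ByteBufferUtils.copyFromArrayToBuffer(dbb, kv.getBuffer(), 0, kv.getBuffer().length);
    // ...and wrap it as an off-heap ByteBufferExtendedCell.
    return new ByteBufferKeyValue(dbb, 0, kv.getBuffer().length, 0);
}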

Example 2 with ByteBufferExtendedCell

Use of org.apache.hadoop.hbase.ByteBufferExtendedCell in project hbase by apache.

From the class TestTagCompressionContext, method testCompressUncompressTagsWithOffheapKeyValue1:

@Test
public void testCompressUncompressTagsWithOffheapKeyValue1() throws Exception {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream daos = new ByteBufferWriterDataOutputStream(baos);
    TagCompressionContext context = new TagCompressionContext(LRUDictionary.class, Byte.MAX_VALUE);
    ByteBufferExtendedCell kv1 = (ByteBufferExtendedCell) createOffheapKVWithTags(2);
    int tagsLength1 = kv1.getTagsLength();
    context.compressTags(daos, kv1.getTagsByteBuffer(), kv1.getTagsPosition(), tagsLength1);
    ByteBufferExtendedCell kv2 = (ByteBufferExtendedCell) createOffheapKVWithTags(3);
    int tagsLength2 = kv2.getTagsLength();
    context.compressTags(daos, kv2.getTagsByteBuffer(), kv2.getTagsPosition(), tagsLength2);
    // As in the previous test, reset the dictionary before the read-back pass.
    context.clear();
    byte[] dest = new byte[tagsLength1];
    // Unlike Example 1, read back through a SingleByteBuff view of the written
    // bytes; the same buffer serves both reads, its position advancing past
    // each decompressed tag set.
    ByteBuffer ob = ByteBuffer.wrap(baos.getBuffer());
    context.uncompressTags(new SingleByteBuff(ob), dest, 0, tagsLength1);
    assertTrue(Bytes.equals(kv1.getTagsArray(), kv1.getTagsOffset(), tagsLength1, dest, 0, tagsLength1));
    dest = new byte[tagsLength2];
    context.uncompressTags(new SingleByteBuff(ob), dest, 0, tagsLength2);
    assertTrue(Bytes.equals(kv2.getTagsArray(), kv2.getTagsOffset(), tagsLength2, dest, 0, tagsLength2));
}
Also used: DataOutputStream (java.io.DataOutputStream), SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff), ByteBufferExtendedCell (org.apache.hadoop.hbase.ByteBufferExtendedCell), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
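
The two tests differ only on the read side: Example 1 decompresses from an InputStream, while this one reads through a SingleByteBuff wrapping the written bytes. Outside a test, the round trip can be sketched with the on-heap overloads of the same API (a minimal sketch, assuming the byte[]-based compressTags/uncompressTags overloads and plain java.io streams):

// Minimal on-heap round trip for serialized tag bytes (a sketch).
static byte[] roundTripTags(byte[] tags) throws Exception {
    TagCompressionContext ctx = new TagCompressionContext(LRUDictionary.class, Byte.MAX_VALUE);
    java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    ctx.compressTags(out, tags, 0, tags.length);
    // Reset the dictionary so decompression rebuilds it from the same empty state.
    ctx.clear();
    byte[] restored = new byte[tags.length];
    ctx.uncompressTags(new java.io.ByteArrayInputStream(out.toByteArray()), restored, 0, tags.length);
    return restored;
}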

Example 3 with ByteBufferExtendedCell

Use of org.apache.hadoop.hbase.ByteBufferExtendedCell in project hbase by apache.

From the class HRegion, method getInternal:

private List<Cell> getInternal(Get get, boolean withCoprocessor, long nonceGroup, long nonce) throws IOException {
    List<Cell> results = new ArrayList<>();
    long before = EnvironmentEdgeManager.currentTime();
    // pre-get CP hook
    if (withCoprocessor && (coprocessorHost != null)) {
        if (coprocessorHost.preGet(get, results)) {
            metricsUpdateForGet(results, before);
            return results;
        }
    }
    Scan scan = new Scan(get);
    if (scan.getLoadColumnFamiliesOnDemandValue() == null) {
        scan.setLoadColumnFamiliesOnDemand(isLoadingCfsOnDemandDefault());
    }
    try (RegionScanner scanner = getScanner(scan, null, nonceGroup, nonce)) {
        List<Cell> tmp = new ArrayList<>();
        scanner.next(tmp);
        // Off-heap cells returned by the scanner may reference block memory that is
        // released when the scanner closes, so deep-clone them onto the heap before
        // they escape this block. See more details in HBASE-26036.
        for (Cell cell : tmp) {
            results.add(cell instanceof ByteBufferExtendedCell
                ? ((ByteBufferExtendedCell) cell).deepClone()
                : cell);
        }
    }
    // post-get CP hook
    if (withCoprocessor && (coprocessorHost != null)) {
        coprocessorHost.postGet(get, results);
    }
    metricsUpdateForGet(results, before);
    return results;
}
Also used: ArrayList (java.util.ArrayList), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), ByteBufferExtendedCell (org.apache.hadoop.hbase.ByteBufferExtendedCell)
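
The deep clone is the point of this example: cells handed out by the scanner may be ByteBufferExtendedCells backed by pooled block memory that is recycled once the scanner closes, so they must be copied onto the heap before the try-with-resources block exits (the problem tracked by HBASE-26036). The same defensive-copy pattern as a standalone helper, a hypothetical utility rather than an HBase API:

// Deep-clone any off-heap cells so they survive the release of their backing blocks.
static List<Cell> deepCloneOffheapCells(List<Cell> cells) {
    List<Cell> safe = new ArrayList<>(cells.size());
    for (Cell cell : cells) {
        // ByteBufferExtendedCell.deepClone() copies the cell's bytes onto the heap.
        safe.add(cell instanceof ByteBufferExtendedCell
            ? ((ByteBufferExtendedCell) cell).deepClone()
            : cell);
    }
    return safe;
}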

Example 4 with ByteBufferExtendedCell

Use of org.apache.hadoop.hbase.ByteBufferExtendedCell in project hbase by apache.

From the class RSRpcServices, method addSize:

/**
 * Method to account for the size of retained cells and retained data blocks.
 * @param context rpc call context
 * @param r the result whose cell and block sizes are to be accounted.
 * @param lastBlock the last block referenced so far, used to decide whether this
 *          result references a new block whose size must be added to the context.
 * @return an object that represents the last referenced block from this response.
 */
Object addSize(RpcCallContext context, Result r, Object lastBlock) {
    if (context != null && r != null && !r.isEmpty()) {
        for (Cell c : r.rawCells()) {
            context.incrementResponseCellSize(PrivateCellUtil.estimatedSerializedSizeOf(c));
            // We cannot tell exactly which data block a cell references, so we make
            // a guess by comparing the backing storage of this cell with that of the
            // previous one, by reference identity.
            if (c instanceof ByteBufferExtendedCell) {
                ByteBufferExtendedCell bbCell = (ByteBufferExtendedCell) c;
                ByteBuffer bb = bbCell.getValueByteBuffer();
                if (bb != lastBlock) {
                    context.incrementResponseBlockSize(bb.capacity());
                    lastBlock = bb;
                }
            } else {
                // We're using the last block being the same as the current block as
                // a proxy for pointing to a new block. This won't be exact.
                // If there are multiple gets that bounce back and forth
                // Then it's possible that this will over count the size of
                // referenced blocks. However it's better to over count and
                // use two rpcs than to OOME the regionserver.
                byte[] valueArray = c.getValueArray();
                if (valueArray != lastBlock) {
                    context.incrementResponseBlockSize(valueArray.length);
                    lastBlock = valueArray;
                }
            }
        }
    }
    return lastBlock;
}
Also used: Cell (org.apache.hadoop.hbase.Cell), ByteBufferExtendedCell (org.apache.hadoop.hbase.ByteBufferExtendedCell), ByteBuffer (java.nio.ByteBuffer)
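
Note that bb != lastBlock is a reference-identity check, not content equality: consecutive cells decoded from the same HFile block share the same backing ByteBuffer (or byte[]), so each block's capacity is charged once per run of cells. A toy illustration of the heuristic in plain Java (hypothetical, not HBase code):

// Charge each distinct backing array once per consecutive run, as addSize does.
static long estimateBlockBytes(java.util.List<byte[]> backingArrays) {
    long total = 0;
    Object lastBlock = null;
    for (byte[] block : backingArrays) {
        if (block != lastBlock) { // reference identity, not Arrays.equals
            total += block.length;
            lastBlock = block;
        }
    }
    return total;
}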

Aggregations

ByteBufferExtendedCell (org.apache.hadoop.hbase.ByteBufferExtendedCell): 4 usages
DataOutputStream (java.io.DataOutputStream): 2 usages
ByteBuffer (java.nio.ByteBuffer): 2 usages
Cell (org.apache.hadoop.hbase.Cell): 2 usages
Test (org.junit.Test): 2 usages
ByteArrayInputStream (java.io.ByteArrayInputStream): 1 usage
ArrayList (java.util.ArrayList): 1 usage
Scan (org.apache.hadoop.hbase.client.Scan): 1 usage
SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff): 1 usage