Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class FileIOEngine, method read.
/**
 * Transfers data from the file to the given byte buffer.
 * @param offset The offset in the file where the first byte is to be read
 * @param length The length of the buffer that should be allocated for reading
 *          from the file channel
 * @return the Cacheable deserialized from the bytes read
 * @throws IOException if reading from the file channel fails
 */
@Override
public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
    throws IOException {
  ByteBuffer dstBuffer = ByteBuffer.allocate(length);
  accessFile(readAccessor, dstBuffer, offset);
  // Verify the expected number of bytes was read, so that
  // the results are not corrupted before consuming them.
  if (dstBuffer.limit() != length) {
    throw new RuntimeException("Only " + dstBuffer.limit() + " bytes read, " + length
        + " expected");
  }
  return deserializer.deserialize(new SingleByteBuff(dstBuffer), true, MemoryType.EXCLUSIVE);
}
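The deserializer consumes the filled buffer through the SingleByteBuff wrapper. Below is a minimal standalone sketch of that wrapping step, assuming a long value was just read from the file channel; the class name and hard-coded value are illustrative, not HBase code.

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.nio.SingleByteBuff;

public class SingleByteBuffReadSketch {
  public static void main(String[] args) {
    // Pretend these 8 bytes were just read from the file channel.
    ByteBuffer dst = ByteBuffer.allocate(8);
    dst.putLong(42L);
    dst.flip();
    // Wrap the heap buffer; SingleByteBuff delegates to the single backing ByteBuffer.
    SingleByteBuff buff = new SingleByteBuff(dst);
    long value = buff.getLong(); // relative read from the ByteBuff's position
    System.out.println("Deserialized value: " + value); // prints 42
  }
}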
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class TestPrefixTreeEncoding, method testScanWithRandomData.
@Test
public void testScanWithRandomData() throws Exception {
  PrefixTreeCodec encoder = new PrefixTreeCodec();
  ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
  DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
  HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
      .withIncludesMvcc(false).withIncludesTags(includesTag)
      .withCompression(Algorithm.NONE).build();
  HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
      DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
  generateRandomTestData(kvset, numBatchesWritten++, includesTag, encoder, blkEncodingCtx,
      userDataStream);
  EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR,
      encoder.newDataBlockDecodingContext(meta));
  byte[] onDiskBytes = baosInMemory.toByteArray();
  ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
      onDiskBytes.length - DataBlockEncoding.ID_SIZE);
  seeker.setCurrentBuffer(new SingleByteBuff(readBuffer));
  Cell previousKV = null;
  do {
    Cell currentKV = seeker.getCell();
    System.out.println(currentKV);
    if (previousKV != null && CellComparator.COMPARATOR.compare(currentKV, previousKV) < 0) {
      dumpInputKVSet();
      fail("Current kv " + currentKV + " is smaller than previous keyvalue " + previousKV);
    }
    if (!includesTag) {
      assertFalse(currentKV.getTagsLength() > 0);
    } else {
      Assert.assertTrue(currentKV.getTagsLength() > 0);
    }
    previousKV = currentKV;
  } while (seeker.next());
}
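Note how ByteBuffer.wrap(array, offset, length) skips the 2-byte data-block-encoding id at the head of the on-disk bytes: the resulting buffer's position starts at the offset, not at zero. A small self-contained sketch of that offset arithmetic; the array contents are made up, and DataBlockEncoding.ID_SIZE is 2.

import java.nio.ByteBuffer;

public class WrapOffsetSketch {
  public static void main(String[] args) {
    byte[] onDisk = {0, 6, 10, 20, 30}; // 2-byte encoding id followed by the payload
    int idSize = 2;                     // DataBlockEncoding.ID_SIZE is 2 bytes
    ByteBuffer buf = ByteBuffer.wrap(onDisk, idSize, onDisk.length - idSize);
    // position == 2, limit == 5: only the payload is visible to relative reads
    System.out.println(buf.position() + " " + buf.limit()); // prints "2 5"
    System.out.println(buf.get()); // prints 10, the first payload byte
  }
}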
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class TestPrefixTreeEncoding, method verifySeeking.
private void verifySeeking(EncodedSeeker encodeSeeker, ByteBuffer encodedData, int batchId) {
  List<KeyValue> kvList = new ArrayList<>();
  for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
    kvList.clear();
    encodeSeeker.setCurrentBuffer(new SingleByteBuff(encodedData));
    KeyValue firstOnRow = KeyValueUtil.createFirstOnRow(getRowKey(batchId, i));
    encodeSeeker.seekToKeyInBlock(new KeyValue.KeyOnlyKeyValue(firstOnRow.getBuffer(),
        firstOnRow.getKeyOffset(), firstOnRow.getKeyLength()), false);
    boolean hasMoreOfEncodeScanner = encodeSeeker.next();
    CollectionBackedScanner collectionScanner = new CollectionBackedScanner(this.kvset);
    boolean hasMoreOfCollectionScanner = collectionScanner.seek(firstOnRow);
    if (hasMoreOfEncodeScanner != hasMoreOfCollectionScanner) {
      dumpInputKVSet();
      fail("Got inconsistent results after seeking " + firstOnRow);
    }
    if (hasMoreOfEncodeScanner) {
      if (CellComparator.COMPARATOR.compare(encodeSeeker.getCell(),
          collectionScanner.peek()) != 0) {
        dumpInputKVSet();
        fail("Expected " + collectionScanner.peek() + " actual " + encodeSeeker.getCell()
            + ", after seeking " + firstOnRow);
      }
    }
  }
}
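This test is a differential check: after every seek, the prefix-tree seeker must land on the same cell as CollectionBackedScanner, a reference scanner over a plain sorted collection. The same pattern can be sketched with two NavigableSets; this is a simplified illustration, not HBase code.

import java.util.NavigableSet;
import java.util.TreeSet;

public class DifferentialSeekSketch {
  public static void main(String[] args) {
    // "Encoded" and reference views of the same sorted data.
    NavigableSet<String> encodedView = new TreeSet<>();
    NavigableSet<String> referenceView = new TreeSet<>();
    for (String row : new String[] {"row-1", "row-3", "row-5"}) {
      encodedView.add(row);
      referenceView.add(row);
    }
    // Seeking to a key that may not exist must land both views on the same entry.
    String seekKey = "row-2";
    String fromEncoded = encodedView.ceiling(seekKey);
    String fromReference = referenceView.ceiling(seekKey);
    if (!fromEncoded.equals(fromReference)) {
      throw new AssertionError("Expected " + fromReference + " actual " + fromEncoded);
    }
    System.out.println("Both seekers landed on " + fromEncoded); // row-3
  }
}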
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class TestRpcServer, method testAllocateByteBuffToReadInto.
@Test
public void testAllocateByteBuffToReadInto() throws Exception {
  int maxBuffersInPool = 10;
  ByteBufferPool pool = new ByteBufferPool(6 * 1024, maxBuffersInPool);
  initPoolWithAllBuffers(pool, maxBuffersInPool);
  ByteBuff buff = null;
  Pair<ByteBuff, CallCleanup> pair;
  // When the request size is less than 1/6th of the pool buffer size, an on-heap
  // buffer should be created on demand.
  pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool),
      200);
  buff = pair.getFirst();
  assertTrue(buff.hasArray());
  assertEquals(maxBuffersInPool, pool.getQueueSize());
  assertNull(pair.getSecond());
  // When the request size is more than 1/6th of the pool buffer size, a direct
  // buffer is taken from the pool.
  pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool),
      1024);
  buff = pair.getFirst();
  assertFalse(buff.hasArray());
  assertEquals(maxBuffersInPool - 1, pool.getQueueSize());
  assertNotNull(pair.getSecond());
  // CallCleanup#run should put the BB back into the pool.
  pair.getSecond().run();
  assertEquals(maxBuffersInPool, pool.getQueueSize());
  // When the request size exceeds the pool buffer size, two pooled direct buffers
  // are combined into a MultiByteBuff.
  pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool),
      7 * 1024);
  buff = pair.getFirst();
  assertFalse(buff.hasArray());
  assertTrue(buff instanceof MultiByteBuff);
  ByteBuffer[] bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
  assertEquals(2, bbs.length);
  assertTrue(bbs[0].isDirect());
  assertTrue(bbs[1].isDirect());
  assertEquals(6 * 1024, bbs[0].limit());
  assertEquals(1024, bbs[1].limit());
  assertEquals(maxBuffersInPool - 2, pool.getQueueSize());
  assertNotNull(pair.getSecond());
  // CallCleanup#run should put the BBs back into the pool.
  pair.getSecond().run();
  assertEquals(maxBuffersInPool, pool.getQueueSize());
  // When the remainder past one pool buffer is below the reservoir threshold, it is
  // read into an on-heap buffer instead of a second pooled one.
  pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool),
      6 * 1024 + 200);
  buff = pair.getFirst();
  assertFalse(buff.hasArray());
  assertTrue(buff instanceof MultiByteBuff);
  bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
  assertEquals(2, bbs.length);
  assertTrue(bbs[0].isDirect());
  assertFalse(bbs[1].isDirect());
  assertEquals(6 * 1024, bbs[0].limit());
  assertEquals(200, bbs[1].limit());
  assertEquals(maxBuffersInPool - 1, pool.getQueueSize());
  assertNotNull(pair.getSecond());
  // CallCleanup#run should put the BB back into the pool.
  pair.getSecond().run();
  assertEquals(maxBuffersInPool, pool.getQueueSize());
  // Drain all but one buffer from the pool; a 20 KB request can then get only one
  // pooled direct buffer and reads the rest into a single on-heap buffer.
  ByteBuffer[] buffers = new ByteBuffer[maxBuffersInPool - 1];
  for (int i = 0; i < maxBuffersInPool - 1; i++) {
    buffers[i] = pool.getBuffer();
  }
  pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool),
      20 * 1024);
  buff = pair.getFirst();
  assertFalse(buff.hasArray());
  assertTrue(buff instanceof MultiByteBuff);
  bbs = ((MultiByteBuff) buff).getEnclosingByteBuffers();
  assertEquals(2, bbs.length);
  assertTrue(bbs[0].isDirect());
  assertFalse(bbs[1].isDirect());
  assertEquals(6 * 1024, bbs[0].limit());
  assertEquals(14 * 1024, bbs[1].limit());
  assertEquals(0, pool.getQueueSize());
  assertNotNull(pair.getSecond());
  // CallCleanup#run should put the BB back into the pool.
  pair.getSecond().run();
  assertEquals(1, pool.getQueueSize());
  // With the pool fully exhausted, the whole request lands in one on-heap buffer.
  pool.getBuffer();
  pair = RpcServer.allocateByteBuffToReadInto(pool, RpcServer.getMinSizeForReservoirUse(pool),
      7 * 1024);
  buff = pair.getFirst();
  assertTrue(buff.hasArray());
  assertTrue(buff instanceof SingleByteBuff);
  assertEquals(7 * 1024, ((SingleByteBuff) buff).getEnclosingByteBuffer().limit());
  assertNull(pair.getSecond());
}
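The policy under test is: requests below roughly 1/6th of the pool buffer size get an on-demand heap buffer, larger requests draw direct buffers from the pool, and any shortfall falls back to heap. A minimal sketch of that decision, assuming a simple Deque-backed pool; this is not the HBase ByteBufferPool implementation.

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Deque;

public class ReservoirAllocSketch {
  static final int POOL_BUF_SIZE = 6 * 1024;
  static final int MIN_SIZE_FOR_POOL = POOL_BUF_SIZE / 6; // 1/6th threshold, as in the test
  static final Deque<ByteBuffer> pool = new ArrayDeque<>();

  static ByteBuffer allocate(int reqLen) {
    // Small requests never touch the reservoir: allocate on heap on demand.
    if (reqLen < MIN_SIZE_FOR_POOL) {
      return ByteBuffer.allocate(reqLen);
    }
    // Otherwise prefer a pooled direct buffer, falling back to heap when empty.
    ByteBuffer pooled = pool.poll();
    return pooled != null ? pooled : ByteBuffer.allocate(reqLen);
  }

  public static void main(String[] args) {
    pool.add(ByteBuffer.allocateDirect(POOL_BUF_SIZE));
    System.out.println(allocate(200).isDirect());  // false: below threshold, heap
    System.out.println(allocate(1024).isDirect()); // true: taken from the pool
    System.out.println(allocate(1024).isDirect()); // false: pool exhausted, heap
  }
}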
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class ByteBufferArray, method asSubByteBuff.
/**
 * Creates a ByteBuff from the given array of ByteBuffers, starting at the given offset and
 * covering the length specified. For example, if there are 4 buffers forming the array, each
 * with length 10, then a call to asSubByteBuff(5, 10) creates an MBB consisting of two BBs:
 * the first a BB from 'position' 5 with 'length' 5, and the second a BB from 'position' 0
 * with 'length' 5.
 * @param offset the offset in the array at which the sub-buffer starts
 * @param len the number of bytes the sub-buffer should cover
 * @return a ByteBuff formed from the underlying ByteBuffers
 */
public ByteBuff asSubByteBuff(long offset, int len) {
  assert len >= 0;
  long end = offset + len;
  int startBuffer = (int) (offset / bufferSize), startBufferOffset = (int) (offset % bufferSize);
  int endBuffer = (int) (end / bufferSize), endBufferOffset = (int) (end % bufferSize);
  // The last buffer in the array is a dummy one with 0 capacity. Avoid sending that back.
  if (endBuffer == this.bufferCount) {
    endBuffer--;
    endBufferOffset = bufferSize;
  }
  assert startBuffer >= 0 && startBuffer < bufferCount;
  assert endBuffer >= 0 && endBuffer < bufferCount
      || (endBuffer == bufferCount && endBufferOffset == 0);
  if (startBuffer >= buffers.length || startBuffer < 0) {
    String msg = "Failed subArray, start=" + offset + ",startBuffer=" + startBuffer
        + ",bufferSize=" + bufferSize;
    LOG.error(msg);
    throw new RuntimeException(msg);
  }
  int srcIndex = 0, cnt = -1;
  ByteBuffer[] mbb = new ByteBuffer[endBuffer - startBuffer + 1];
  for (int i = startBuffer, j = 0; i <= endBuffer; ++i, j++) {
    ByteBuffer bb = buffers[i].duplicate();
    if (i == startBuffer) {
      cnt = bufferSize - startBufferOffset;
      if (cnt > len) {
        cnt = len;
      }
      bb.limit(startBufferOffset + cnt).position(startBufferOffset);
    } else if (i == endBuffer) {
      cnt = endBufferOffset;
      bb.position(0).limit(cnt);
    } else {
      cnt = bufferSize;
      bb.position(0).limit(cnt);
    }
    mbb[j] = bb.slice();
    srcIndex += cnt;
  }
  assert srcIndex == len;
  if (mbb.length > 1) {
    return new MultiByteBuff(mbb);
  } else {
    return new SingleByteBuff(mbb[0]);
  }
}
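The javadoc example, four buffers of capacity 10 and a call asSubByteBuff(5, 10), can be replayed with plain ByteBuffers to see the two slices that end up in the MultiByteBuff. This is a standalone sketch of the index arithmetic only; it does not use ByteBufferArray itself.

import java.nio.ByteBuffer;

public class SubByteBuffSketch {
  public static void main(String[] args) {
    int bufferSize = 10;
    ByteBuffer[] buffers = new ByteBuffer[4];
    for (int i = 0; i < buffers.length; i++) {
      buffers[i] = ByteBuffer.allocate(bufferSize);
    }
    long offset = 5;
    int len = 10;
    long end = offset + len;                              // 15
    int startBuffer = (int) (offset / bufferSize);        // 0
    int startBufferOffset = (int) (offset % bufferSize);  // 5
    int endBuffer = (int) (end / bufferSize);             // 1
    int endBufferOffset = (int) (end % bufferSize);       // 5

    // First slice: bytes 5..9 of buffer 0 (length 5).
    ByteBuffer first = buffers[startBuffer].duplicate();
    first.position(startBufferOffset).limit(bufferSize);
    // Second slice: bytes 0..4 of buffer 1 (length 5).
    ByteBuffer second = buffers[endBuffer].duplicate();
    second.position(0).limit(endBufferOffset);

    System.out.println(first.slice().remaining());  // 5
    System.out.println(second.slice().remaining()); // 5
  }
}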