Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class FileMmapEngine, method read.
@Override
public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
    throws IOException {
  // Copy the requested range out of the mmapped buffer array into a heap array.
  byte[] dst = new byte[length];
  bufferArray.getMultiple(offset, length, dst);
  // Wrap the heap copy in a SingleByteBuff so the deserializer sees the ByteBuff API;
  // the copy is exclusively owned by the caller, hence MemoryType.EXCLUSIVE.
  return deserializer.deserialize(new SingleByteBuff(ByteBuffer.wrap(dst)), true,
      MemoryType.EXCLUSIVE);
}
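SingleByteBuff is the single-buffer implementation of the ByteBuff abstraction (its sibling MultiByteBuff spans several buffers); the engine above wraps a plain heap copy in it so callers get one uniform API. A minimal standalone sketch of that wrapping; the class name SingleByteBuffSketch and the sample bytes are ours, not from the source:

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.nio.SingleByteBuff;

public class SingleByteBuffSketch {
  public static void main(String[] args) {
    byte[] raw = new byte[] { 10, 11, 12, 13 };
    SingleByteBuff buf = new SingleByteBuff(ByteBuffer.wrap(raw));
    System.out.println(buf.get(2));     // absolute read, position unchanged: 12
    System.out.println(buf.get());      // relative read from position 0: 10
    System.out.println(buf.position()); // position advanced to 1
  }
}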
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class TestPrefixTreeEncoding, method testSeekBeforeWithFixedData.
@Test
public void testSeekBeforeWithFixedData() throws Exception {
  formatRowNum = true;
  PrefixTreeCodec encoder = new PrefixTreeCodec();
  int batchId = numBatchesWritten++;
  HFileContext meta = new HFileContextBuilder()
      .withHBaseCheckSum(false)
      .withIncludesMvcc(false)
      .withIncludesTags(includesTag)
      .withCompression(Algorithm.NONE)
      .build();
  HFileBlockEncodingContext blkEncodingCtx = new HFileBlockDefaultEncodingContext(
      DataBlockEncoding.PREFIX_TREE, new byte[0], meta);
  ByteArrayOutputStream baosInMemory = new ByteArrayOutputStream();
  DataOutputStream userDataStream = new DataOutputStream(baosInMemory);
  generateFixedTestData(kvset, batchId, false, includesTag, encoder, blkEncodingCtx,
      userDataStream);
  EncodedSeeker seeker = encoder.createSeeker(CellComparator.COMPARATOR,
      encoder.newDataBlockDecodingContext(meta));
  byte[] onDiskBytes = baosInMemory.toByteArray();
  ByteBuffer readBuffer = ByteBuffer.wrap(onDiskBytes, DataBlockEncoding.ID_SIZE,
      onDiskBytes.length - DataBlockEncoding.ID_SIZE);
  seeker.setCurrentBuffer(new SingleByteBuff(readBuffer));
  // Seek before the first keyvalue: nothing precedes it, so getCell() is null.
  Cell seekKey = CellUtil.createFirstDeleteFamilyCellOnRow(getRowKey(batchId, 0), CF_BYTES);
  seeker.seekToKeyInBlock(seekKey, true);
  assertEquals(null, seeker.getCell());
  // Seek before a middle keyvalue: the seeker lands on the previous row's cell.
  seekKey = CellUtil.createFirstDeleteFamilyCellOnRow(getRowKey(batchId, NUM_ROWS_PER_BATCH / 3),
      CF_BYTES);
  seeker.seekToKeyInBlock(seekKey, true);
  assertNotNull(seeker.getCell());
  assertArrayEquals(getRowKey(batchId, NUM_ROWS_PER_BATCH / 3 - 1),
      CellUtil.cloneRow(seeker.getCell()));
  // Seek before a key past the last keyvalue: the seeker lands on the block's last cell.
  seekKey = CellUtil.createFirstDeleteFamilyCellOnRow(Bytes.toBytes("zzzz"), CF_BYTES);
  seeker.seekToKeyInBlock(seekKey, true);
  assertNotNull(seeker.getCell());
  assertArrayEquals(getRowKey(batchId, NUM_ROWS_PER_BATCH - 1),
      CellUtil.cloneRow(seeker.getCell()));
}
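The seek key matters here: createFirstDeleteFamilyCellOnRow builds the lowest-sorting cell for a row, so seeking with seekBefore = true lands on the last cell of the previous row, or nowhere at the block start. The three repeated seek-and-assert passages could be factored into a helper; a hedged sketch of that pattern, where the method name seekBeforeRow is hypothetical:

// Seek strictly before the first possible cell of the given row and return the row
// of the cell the seeker lands on, or null if we are before the start of the block.
private byte[] seekBeforeRow(EncodedSeeker seeker, byte[] row) {
  Cell key = CellUtil.createFirstDeleteFamilyCellOnRow(row, CF_BYTES);
  seeker.seekToKeyInBlock(key, true);
  Cell cell = seeker.getCell();
  return cell == null ? null : CellUtil.cloneRow(cell);
}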
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class PrefixTreeCodec, method decodeKeyValues.
/**
 * I don't think this method is called during normal HBase operation, so efficiency is not
 * important.
 */
@Override
public ByteBuffer decodeKeyValues(DataInputStream source, int allocateHeaderLength,
    int skipLastBytes, HFileBlockDecodingContext decodingCtx) throws IOException {
  // Wasteful: drains the entire stream into one buffer up front (see the javadoc above).
  ByteBuffer sourceAsBuffer = ByteBufferUtils.drainInputStreamToBuffer(source);
  sourceAsBuffer.mark();
  PrefixTreeBlockMeta blockMeta = new PrefixTreeBlockMeta(new SingleByteBuff(sourceAsBuffer));
  sourceAsBuffer.rewind();
  int numV1BytesWithHeader = allocateHeaderLength + blockMeta.getNumKeyValueBytes();
  byte[] keyValueBytesWithHeader = new byte[numV1BytesWithHeader];
  ByteBuffer result = ByteBuffer.wrap(keyValueBytesWithHeader);
  result.rewind();
  CellSearcher searcher = null;
  try {
    boolean includesMvcc = decodingCtx.getHFileContext().isIncludesMvcc();
    searcher = DecoderFactory.checkOut(new SingleByteBuff(sourceAsBuffer), includesMvcc);
    while (searcher.advance()) {
      KeyValue currentCell = KeyValueUtil.copyToNewKeyValue(searcher.current());
      // Needs to be modified for DirectByteBuffers: there are no existing methods to
      // write VLongs to a byte[], and result.array() assumes a heap-backed buffer.
      int offset = result.arrayOffset() + result.position();
      System.arraycopy(currentCell.getBuffer(), currentCell.getOffset(), result.array(), offset,
          currentCell.getLength());
      int keyValueLength = KeyValueUtil.length(currentCell);
      ByteBufferUtils.skip(result, keyValueLength);
      offset += keyValueLength;
      if (includesMvcc) {
        ByteBufferUtils.writeVLong(result, currentCell.getSequenceId());
      }
    }
    // Make it appear as if we were appending.
    result.position(result.limit());
    return result;
  } finally {
    DecoderFactory.checkIn(searcher);
  }
}
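The inline note about DirectByteBuffers refers to the result.array() call: only heap buffers expose a backing array, so this copy loop would break if result were allocated off-heap. A quick plain-JDK illustration of the distinction:

import java.nio.ByteBuffer;

public class BackingArrayDemo {
  public static void main(String[] args) {
    ByteBuffer heap = ByteBuffer.allocate(16);
    ByteBuffer direct = ByteBuffer.allocateDirect(16);
    System.out.println(heap.hasArray());   // true  -> array()/arrayOffset() are usable
    System.out.println(direct.hasArray()); // false -> array() throws UnsupportedOperationException
  }
}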
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class TestVIntTool, method testRoundTrips.
@Test
public void testRoundTrips() {
  Random random = new Random();
  for (int i = 0; i < 10000; ++i) {
    int value = random.nextInt(Integer.MAX_VALUE);
    byte[] bytes = UVIntTool.getBytes(value);
    int roundTripped = UVIntTool.getInt(new SingleByteBuff(ByteBuffer.wrap(bytes)), 0);
    Assert.assertEquals(value, roundTripped);
  }
}
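Assuming the usual 7-bits-per-byte unsigned-varint layout that UVIntTool implements, the encoded length steps at powers of 128. A sketch of the boundaries; the expected lengths in the comments are our reading of that layout, not something this test asserts:

System.out.println(UVIntTool.getBytes(0).length);                 // 1 byte
System.out.println(UVIntTool.getBytes(127).length);               // 1 byte  (fits in 7 bits)
System.out.println(UVIntTool.getBytes(128).length);               // 2 bytes (needs 8 bits)
System.out.println(UVIntTool.getBytes(Integer.MAX_VALUE).length); // 5 bytes (31 bits / 7)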
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
The class TestRowEncoder, method compile.
@Before
public void compile() throws IOException {
  // Always run with tags. But should also ensure that KVs without tags work fine.
  os = new ByteArrayOutputStream(1 << 20);
  encoder = new PrefixTreeEncoder(os, includeMemstoreTS);
  inputKvs = rows.getInputs();
  for (KeyValue kv : inputKvs) {
    encoder.write(kv);
  }
  encoder.flush();
  totalBytes = encoder.getTotalBytes();
  blockMetaWriter = encoder.getBlockMeta();
  outputBytes = os.toByteArray();
  // Start reading, but save the assertions for @Test methods.
  // Copy the encoded block into a direct buffer so the off-heap read path is exercised too.
  ByteBuffer out = ByteBuffer.allocateDirect(outputBytes.length);
  ByteBufferUtils.copyFromArrayToBuffer(out, outputBytes, 0, outputBytes.length);
  out.position(0);
  buffer = new SingleByteBuff(out);
  blockMetaReader = new PrefixTreeBlockMeta(buffer);
  searcher = new PrefixTreeArraySearcher(blockMetaReader, blockMetaReader.getRowTreeDepth(),
      blockMetaReader.getMaxRowLength(), blockMetaReader.getMaxQualifierLength(),
      blockMetaReader.getMaxTagsLength());
  searcher.initOnBlock(blockMetaReader, buffer, includeMemstoreTS);
}
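With the block encoded and the searcher initialized, a typical @Test method in this class walks the searcher and compares each decoded cell against the input KeyValues. A hedged sketch of that pattern; the method name testForwardScan is hypothetical, and the assertions follow plain JUnit:

@Test
public void testForwardScan() {
  // advance() steps the searcher cell by cell through the decoded block;
  // current() exposes the cell at the searcher's position.
  int i = 0;
  while (searcher.advance()) {
    KeyValue expected = inputKvs.get(i++);
    assertTrue(CellUtil.equals(expected, searcher.current()));
  }
  // Every input KeyValue should have come back out.
  assertEquals(inputKvs.size(), i);
}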