Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
The class TestByteRangeWithKVSerialization, method testWritingAndReadingCells.
@Test
public void testWritingAndReadingCells() throws Exception {
  final byte[] FAMILY = Bytes.toBytes("f1");
  final byte[] QUALIFIER = Bytes.toBytes("q1");
  final byte[] VALUE = Bytes.toBytes("v");
  int kvCount = 1000000;
  List<KeyValue> kvs = new ArrayList<>(kvCount);
  int totalSize = 0;
  Tag[] tags = new Tag[] { new ArrayBackedTag((byte) 1, "tag1") };
  for (int i = 0; i < kvCount; i++) {
    KeyValue kv = new KeyValue(Bytes.toBytes(i), FAMILY, QUALIFIER, i, VALUE, tags);
    kv.setSequenceId(i);
    kvs.add(kv);
    totalSize += kv.getLength() + Bytes.SIZEOF_LONG;
  }
  PositionedByteRange pbr = new SimplePositionedMutableByteRange(totalSize);
  for (KeyValue kv : kvs) {
    writeCell(pbr, kv);
  }
  PositionedByteRange pbr1 =
      new SimplePositionedMutableByteRange(pbr.getBytes(), 0, pbr.getPosition());
  for (int i = 0; i < kvCount; i++) {
    KeyValue kv = readCell(pbr1);
    KeyValue kv1 = kvs.get(i);
    Assert.assertTrue(kv.equals(kv1));
    Assert.assertTrue(Bytes.equals(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength(),
        kv1.getValueArray(), kv1.getValueOffset(), kv1.getValueLength()));
    Assert.assertTrue(Bytes.equals(kv.getTagsArray(), kv.getTagsOffset(), kv.getTagsLength(),
        kv1.getTagsArray(), kv1.getTagsOffset(), kv1.getTagsLength()));
    Assert.assertEquals(kv1.getSequenceId(), kv.getSequenceId());
  }
}
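The writeCell and readCell helpers used above belong to the test class and are not shown here. A minimal sketch of compatible implementations, assuming the KeyValue's backing bytes are copied verbatim and the sequence id is appended as a fixed 8-byte long (which matches the Bytes.SIZEOF_LONG term in the size calculation; the actual test may encode the sequence id differently, for example as a vlong):

// Hypothetical helpers, not the actual implementations from TestByteRangeWithKVSerialization.
// Assumes the PositionedByteRange wraps its array starting at offset 0, as in the test above.
static void writeCell(PositionedByteRange pbr, KeyValue kv) {
  // copy the serialized cell (key length, value length, key, value, tags length, tags) verbatim
  pbr.put(kv.getBuffer(), kv.getOffset(), kv.getLength());
  // append the sequence id as a raw 8-byte long
  pbr.putLong(kv.getSequenceId());
}

static KeyValue readCell(PositionedByteRange pbr) {
  int start = pbr.getPosition();
  int keyLen = pbr.getInt();
  int valLen = pbr.getInt();
  // skip over the key and value to reach the 2-byte tags length
  pbr.setPosition(pbr.getPosition() + keyLen + valLen);
  int tagsLen = ((pbr.get() & 0xff) << 8) | (pbr.get() & 0xff);
  int cellLen = 2 * Bytes.SIZEOF_INT + keyLen + valLen + Bytes.SIZEOF_SHORT + tagsLen;
  KeyValue kv = new KeyValue(pbr.getBytes(), start, cellLen);
  pbr.setPosition(start + cellLen);
  kv.setSequenceId(pbr.getLong());
  return kv;
}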
Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
The class PrefixTreeCodec, method decodeKeyValues.
/**
 * I don't think this method is called during normal HBase operation, so efficiency is not
 * important.
 */
public ByteBuffer decodeKeyValues(DataInputStream source, int allocateHeaderLength,
    int skipLastBytes, HFileBlockDecodingContext decodingCtx) throws IOException {
  // wasteful: drains the whole stream into a buffer before decoding
  ByteBuffer sourceAsBuffer = ByteBufferUtils.drainInputStreamToBuffer(source);
  sourceAsBuffer.mark();
  PrefixTreeBlockMeta blockMeta = new PrefixTreeBlockMeta(new SingleByteBuff(sourceAsBuffer));
  sourceAsBuffer.rewind();
  int numV1BytesWithHeader = allocateHeaderLength + blockMeta.getNumKeyValueBytes();
  byte[] keyValueBytesWithHeader = new byte[numV1BytesWithHeader];
  ByteBuffer result = ByteBuffer.wrap(keyValueBytesWithHeader);
  result.rewind();
  CellSearcher searcher = null;
  try {
    boolean includesMvcc = decodingCtx.getHFileContext().isIncludesMvcc();
    searcher = DecoderFactory.checkOut(new SingleByteBuff(sourceAsBuffer), includesMvcc);
    while (searcher.advance()) {
      KeyValue currentCell = KeyValueUtil.copyToNewKeyValue(searcher.current());
      // needs to be modified for DirectByteBuffers. no existing methods to
      // write VLongs to byte[]
      int offset = result.arrayOffset() + result.position();
      System.arraycopy(currentCell.getBuffer(), currentCell.getOffset(), result.array(), offset,
          currentCell.getLength());
      int keyValueLength = KeyValueUtil.length(currentCell);
      ByteBufferUtils.skip(result, keyValueLength);
      offset += keyValueLength;
      if (includesMvcc) {
        ByteBufferUtils.writeVLong(result, currentCell.getSequenceId());
      }
    }
    // make it appear as if we were appending
    result.position(result.limit());
    return result;
  } finally {
    DecoderFactory.checkIn(searcher);
  }
}
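A caller might wire this method up roughly as sketched below. The method name decodeForTest, the encodedBlockBytes parameter, the header length of 0, and the HFileContext settings are illustrative assumptions, not taken from the snippet above:

// Illustrative only: assumes encodedBlockBytes holds a prefix-tree encoded block whose
// encoding settings match the HFileContext built here.
private ByteBuffer decodeForTest(byte[] encodedBlockBytes) throws IOException {
  PrefixTreeCodec codec = new PrefixTreeCodec();
  HFileContext fileContext = new HFileContextBuilder()
      .withIncludesMvcc(true)
      .withIncludesTags(true)
      .build();
  HFileBlockDecodingContext decodingCtx = codec.newDataBlockDecodingContext(fileContext);
  DataInputStream in = new DataInputStream(new ByteArrayInputStream(encodedBlockBytes));
  // no extra header space to allocate, nothing to skip at the end
  return codec.decodeKeyValues(in, 0, 0, decodingCtx);
}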
Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
The class TestRowDataTrivialWithTags, method individualSearcherAssertions.
@Override
public void individualSearcherAssertions(CellSearcher searcher) {
  /**
   * The searcher should get a token mismatch on the "r" branch. Assert that
   * it skips not only rA, but rB as well.
   */
  KeyValue afterLast = KeyValueUtil.createFirstOnRow(Bytes.toBytes("zzz"));
  CellScannerPosition position = searcher.positionAtOrAfter(afterLast);
  Assert.assertEquals(CellScannerPosition.AFTER_LAST, position);
  Assert.assertNull(searcher.current());
}
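The comment above refers to rows rA and rB, which come from the test class's fixture data and are not shown here. A plausible sketch of such a fixture, assuming two single-cell rows that each carry a tag, built with the same KeyValue and ArrayBackedTag constructors used in the first snippet (the actual row data in TestRowDataTrivialWithTags may differ in names, timestamps, and tag contents):

// Hypothetical fixture; all names and values are illustrative.
static final byte[] ROW_A = Bytes.toBytes("rA");
static final byte[] ROW_B = Bytes.toBytes("rB");
static final byte[] CF = Bytes.toBytes("fam");
static final byte[] CQ = Bytes.toBytes("q0");
static final byte[] V = Bytes.toBytes("v0");
static final long TS = 55L;
static final Tag[] TAGS = new Tag[] { new ArrayBackedTag((byte) 1, "visibility") };

static List<KeyValue> buildInputs() {
  List<KeyValue> inputs = new ArrayList<>();
  inputs.add(new KeyValue(ROW_A, CF, CQ, TS, V, TAGS));
  inputs.add(new KeyValue(ROW_B, CF, CQ, TS, V, TAGS));
  return inputs;
}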
Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
The class TestRowEncoder, method testReverseScanner.
/**
 * Probably not needed since testReverseScannerWithJitter() below is more thorough.
 */
@Test
public void testReverseScanner() {
  searcher.positionAfterLastCell();
  int counter = -1;
  while (searcher.previous()) {
    ++counter;
    int oppositeIndex = rows.getInputs().size() - counter - 1;
    KeyValue inputKv = rows.getInputs().get(oppositeIndex);
    KeyValue outputKv = KeyValueUtil.copyToNewKeyValue(searcher.current());
    assertKeyAndValueEqual(inputKv, outputKv);
  }
  Assert.assertEquals(rows.getInputs().size(), counter + 1);
}
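The assertKeyAndValueEqual helper is defined elsewhere in TestRowEncoder. A minimal sketch of what such a comparison could look like, assuming it checks key equality via KeyValue.equals and compares the value bytes explicitly, using only API calls that already appear in the snippets above (the real helper may additionally compare sequence ids when MVCC is included):

// Hypothetical helper; the actual method in TestRowEncoder may differ.
protected void assertKeyAndValueEqual(KeyValue expected, KeyValue actual) {
  // KeyValue.equals compares the key portion (row, family, qualifier, timestamp, type)
  Assert.assertEquals(expected, actual);
  // compare value bytes explicitly, since the key-based equals does not cover them
  Assert.assertTrue(Bytes.equals(expected.getValueArray(), expected.getValueOffset(),
      expected.getValueLength(), actual.getValueArray(), actual.getValueOffset(),
      actual.getValueLength()));
}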
Use of org.apache.hadoop.hbase.KeyValue in project hbase by apache.
The class TestRowEncoder, method compile.
@Before
public void compile() throws IOException {
  // Always run with tags. But should also ensure that KVs without tags work fine
  os = new ByteArrayOutputStream(1 << 20);
  encoder = new PrefixTreeEncoder(os, includeMemstoreTS);
  inputKvs = rows.getInputs();
  for (KeyValue kv : inputKvs) {
    encoder.write(kv);
  }
  encoder.flush();
  totalBytes = encoder.getTotalBytes();
  blockMetaWriter = encoder.getBlockMeta();
  outputBytes = os.toByteArray();
  // start reading, but save the assertions for @Test methods
  ByteBuffer out = ByteBuffer.allocateDirect(outputBytes.length);
  ByteBufferUtils.copyFromArrayToBuffer(out, outputBytes, 0, outputBytes.length);
  out.position(0);
  buffer = new SingleByteBuff(out);
  blockMetaReader = new PrefixTreeBlockMeta(buffer);
  searcher = new PrefixTreeArraySearcher(blockMetaReader, blockMetaReader.getRowTreeDepth(),
      blockMetaReader.getMaxRowLength(), blockMetaReader.getMaxQualifierLength(),
      blockMetaReader.getMaxTagsLength());
  searcher.initOnBlock(blockMetaReader, buffer, includeMemstoreTS);
}
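This @Before method only assigns fields declared on the test class. The declarations implied by how compile() uses them could look roughly like the following; the exact types and visibility in TestRowEncoder may differ (for instance, buffer might be typed as a more general ByteBuff, and rows as a different fixture type exposing getInputs()):

// Hypothetical field declarations inferred from how compile() uses them.
protected TestRowData rows;               // assumed fixture type with getInputs(): List<KeyValue>
protected List<KeyValue> inputKvs;
protected boolean includeMemstoreTS;
protected ByteArrayOutputStream os;
protected PrefixTreeEncoder encoder;
protected int totalBytes;
protected PrefixTreeBlockMeta blockMetaWriter;
protected byte[] outputBytes;
protected SingleByteBuff buffer;
protected PrefixTreeBlockMeta blockMetaReader;
protected PrefixTreeArraySearcher searcher;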