Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
From the class ByteBufferListOutputStream, method getByteBuffers.
/**
 * We can be assured that the buffers returned by this method are all flipped.
 * @return list of bytebuffers
 */
public List<ByteBuffer> getByteBuffers() {
  if (!this.lastBufFlipped) {
    this.lastBufFlipped = true;
    // All the other BBs are already flipped while moving to the new BB.
    curBuf.flip();
  }
  List<ByteBuffer> bbs = new ArrayList<>(this.allBufs.size());
  for (SingleByteBuff bb : this.allBufs) {
    bbs.add(bb.nioByteBuffers()[0]);
  }
  return bbs;
}
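
Why the flip matters: a ByteBuffer that has just been written to has its position at the end of the data, so a reader that starts from it sees nothing useful. A minimal JDK-only sketch of the invariant getByteBuffers guarantees (no HBase types involved):

import java.nio.ByteBuffer;

public class FlipDemo {
  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.allocate(16);
    buf.put(new byte[] { 1, 2, 3 });
    // After the writes: position = 3, limit = 16. Reading now would start
    // at index 3 and hand back 13 bytes of garbage.
    buf.flip();
    // After flip(): position = 0, limit = 3 -- exactly the written bytes.
    byte[] out = new byte[buf.remaining()];
    buf.get(out); // out == {1, 2, 3}
  }
}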
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
From the class RowIndexCodecV1, method decodeKeyValues.
@Override
public ByteBuffer decodeKeyValues(DataInputStream source, HFileBlockDecodingContext decodingCtx)
    throws IOException {
  ByteBuffer sourceAsBuffer = ByteBufferUtils.drainInputStreamToBuffer(source); // waste
  sourceAsBuffer.mark();
  if (!decodingCtx.getHFileContext().isIncludesTags()) {
    sourceAsBuffer.position(sourceAsBuffer.limit() - Bytes.SIZEOF_INT);
    int onDiskSize = sourceAsBuffer.getInt();
    sourceAsBuffer.reset();
    ByteBuffer dup = sourceAsBuffer.duplicate();
    dup.position(sourceAsBuffer.position());
    dup.limit(sourceAsBuffer.position() + onDiskSize);
    return dup.slice();
  } else {
    RowIndexSeekerV1 seeker = new RowIndexSeekerV1(decodingCtx);
    seeker.setCurrentBuffer(new SingleByteBuff(sourceAsBuffer));
    List<Cell> kvs = new ArrayList<>();
    kvs.add(seeker.getCell());
    while (seeker.next()) {
      kvs.add(seeker.getCell());
    }
    boolean includesMvcc = decodingCtx.getHFileContext().isIncludesMvcc();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(baos)) {
      for (Cell cell : kvs) {
        KeyValue currentCell = KeyValueUtil.copyToNewKeyValue(cell);
        out.write(currentCell.getBuffer(), currentCell.getOffset(), currentCell.getLength());
        if (includesMvcc) {
          WritableUtils.writeVLong(out, cell.getSequenceId());
        }
      }
      out.flush();
    }
    return ByteBuffer.wrap(baos.getBuffer(), 0, baos.size());
  }
}
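
The no-tags branch above is zero-copy: duplicate() shares the backing bytes while keeping independent cursors, and slice() then narrows the view to the on-disk payload. A standalone sketch of that idiom (plain java.nio; the window helper name is hypothetical):

import java.nio.ByteBuffer;

public class SliceDemo {
  // Zero-copy view of len bytes starting at src's current position,
  // leaving src's own position and limit untouched.
  static ByteBuffer window(ByteBuffer src, int len) {
    ByteBuffer dup = src.duplicate(); // shared content, independent cursors
    dup.position(src.position());
    dup.limit(src.position() + len);
    return dup.slice();               // new view: position 0, capacity len
  }

  public static void main(String[] args) {
    ByteBuffer src = ByteBuffer.wrap(new byte[] { 10, 20, 30, 40, 50 });
    src.position(1);
    ByteBuffer w = window(src, 3);
    System.out.println(w.get(0));       // 20: the view starts at src's position
    System.out.println(src.position()); // 1: the source cursor is untouched
  }
}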
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
From the class TestTagCompressionContext, method testCompressUncompressTags1.
@Test
public void testCompressUncompressTags1() throws Exception {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  TagCompressionContext context = new TagCompressionContext(LRUDictionary.class, Byte.MAX_VALUE);
  KeyValue kv1 = createKVWithTags(2);
  int tagsLength1 = kv1.getTagsLength();
  ByteBuffer ib = ByteBuffer.wrap(kv1.getTagsArray());
  context.compressTags(baos, ib, kv1.getTagsOffset(), tagsLength1);
  KeyValue kv2 = createKVWithTags(3);
  int tagsLength2 = kv2.getTagsLength();
  ib = ByteBuffer.wrap(kv2.getTagsArray());
  context.compressTags(baos, ib, kv2.getTagsOffset(), tagsLength2);
  context.clear();
  byte[] dest = new byte[tagsLength1];
  ByteBuffer ob = ByteBuffer.wrap(baos.toByteArray());
  context.uncompressTags(new SingleByteBuff(ob), dest, 0, tagsLength1);
  assertTrue(Bytes.equals(kv1.getTagsArray(), kv1.getTagsOffset(), tagsLength1, dest, 0,
    tagsLength1));
  dest = new byte[tagsLength2];
  context.uncompressTags(new SingleByteBuff(ob), dest, 0, tagsLength2);
  assertTrue(Bytes.equals(kv2.getTagsArray(), kv2.getTagsOffset(), tagsLength2, dest, 0,
    tagsLength2));
}
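
Note how both uncompressTags calls read from the same SingleByteBuff: relative reads advance its position, so the second call resumes exactly where the first stopped, replaying the stream in the order it was compressed (the LRU dictionary must be rebuilt in write order, and context.clear() resets it so decompression starts from the same empty state the compressor began with). A minimal sketch of that cursor behavior, assuming only the relative get() of the ByteBuff API:

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.nio.SingleByteBuff;

public class CursorDemo {
  public static void main(String[] args) {
    SingleByteBuff buf = new SingleByteBuff(ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 }));
    byte first = buf.get();  // relative read: position 0 -> 1
    byte second = buf.get(); // continues where the previous read stopped
    System.out.println(first + " " + second); // 1 2
  }
}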
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
From the class TestDataBlockEncoders, method testNextOnSample.
@Test
public void testNextOnSample() throws IOException {
  List<KeyValue> sampleKv = generator.generateTestKeyValues(NUMBER_OF_KV, includesTags);
  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    if (encoding.getEncoder() == null) {
      continue;
    }
    DataBlockEncoder encoder = encoding.getEncoder();
    ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv,
      getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData);
    HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
      .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTags)
      .withCompression(Compression.Algorithm.NONE).build();
    DataBlockEncoder.EncodedSeeker seeker =
      encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta));
    seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
    int i = 0;
    do {
      KeyValue expectedKeyValue = sampleKv.get(i);
      Cell cell = seeker.getCell();
      if (PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, expectedKeyValue,
        cell) != 0) {
        int commonPrefix =
          PrivateCellUtil.findCommonPrefixInFlatKey(expectedKeyValue, cell, false, true);
        fail(String.format(
          "next() produces wrong results encoder: %s i: %d commonPrefix: %d"
            + "\n expected %s\n actual %s",
          encoder.toString(), i, commonPrefix,
          Bytes.toStringBinary(expectedKeyValue.getBuffer(), expectedKeyValue.getKeyOffset(),
            expectedKeyValue.getKeyLength()),
          CellUtil.toString(cell, false)));
      }
      i++;
    } while (seeker.next());
  }
}
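
The useOffheapData flag exercises the same seeker against direct buffers: SingleByteBuff is agnostic to where the bytes live, which is the point of routing everything through the ByteBuff abstraction. A minimal sketch of the two wrappings (assuming ByteBuff's hasArray()):

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.nio.SingleByteBuff;

public class WrapDemo {
  public static void main(String[] args) {
    SingleByteBuff onHeap = new SingleByteBuff(ByteBuffer.allocate(1024));
    SingleByteBuff offHeap = new SingleByteBuff(ByteBuffer.allocateDirect(1024));
    System.out.println(onHeap.hasArray());  // true: backed by a byte[]
    System.out.println(offHeap.hasArray()); // false: direct memory
  }
}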
Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.
From the class TestDataBlockEncoders, method testRowIndexWithTagsButNoTagsInCell.
@Test
public void testRowIndexWithTagsButNoTagsInCell() throws IOException {
  List<KeyValue> kvList = new ArrayList<>();
  byte[] row = new byte[0];
  byte[] family = new byte[0];
  byte[] qualifier = new byte[0];
  byte[] value = new byte[0];
  KeyValue expectedKV = new KeyValue(row, family, qualifier, 1L, Type.Put, value);
  kvList.add(expectedKV);
  DataBlockEncoding encoding = DataBlockEncoding.ROW_INDEX_V1;
  DataBlockEncoder encoder = encoding.getEncoder();
  ByteBuffer encodedBuffer =
    encodeKeyValues(encoding, kvList, getEncodingContext(conf, Algorithm.NONE, encoding), false);
  HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
    .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTags)
    .withCompression(Compression.Algorithm.NONE).build();
  DataBlockEncoder.EncodedSeeker seeker =
    encoder.createSeeker(encoder.newDataBlockDecodingContext(conf, meta));
  seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
  Cell cell = seeker.getCell();
  Assert.assertEquals(expectedKV.getLength(), ((KeyValue) cell).getLength());
}
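
The length assertion is less trivial than it looks: even with every field empty, a KeyValue serializes fixed-size headers, and with no tags in the cell the encoder must not add a tags section. A back-of-the-envelope sketch of that length math (the 20-byte figure follows from the KeyValue layout; treat it as an assumption to verify):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;

public class EmptyKvDemo {
  public static void main(String[] args) {
    byte[] empty = new byte[0];
    KeyValue kv = new KeyValue(empty, empty, empty, 1L, Type.Put, empty);
    // 4 (key length) + 4 (value length) + 2 (row length) + 1 (family length)
    // + 8 (timestamp) + 1 (type) = 20 bytes, with no tags section at all.
    System.out.println(kv.getLength()); // expected: 20
  }
}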