Use of org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder in project hbase by apache.
The class PrefixTreeCodec, method encode().
@Override
public int encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException {
  PrefixTreeEncodingState state = (PrefixTreeEncodingState) encodingCtx.getEncodingState();
  PrefixTreeEncoder builder = state.builder;
  builder.write(cell);
  int size = KeyValueUtil.length(cell);
  if (encodingCtx.getHFileContext().isIncludesMvcc()) {
    size += WritableUtils.getVIntSize(cell.getSequenceId());
  }
  return size;
}
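Note that the return value is not the encoded size: it is an estimate of the cell's unencoded KeyValue footprint (plus a vint for the MVCC sequence id when the file stores MVCC versions), which the write path uses to track uncompressed block size. A minimal sketch isolating that bookkeeping, using only the calls shown above; the class and method names here are hypothetical, not HBase source:

// Hypothetical helper (not HBase source) isolating the size bookkeeping
// performed by encode() across a batch of cells.
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.io.WritableUtils;

public class UnencodedSizeEstimator {
  static int unencodedSize(Iterable<Cell> cells, boolean includesMvcc) {
    int total = 0;
    for (Cell cell : cells) {
      total += KeyValueUtil.length(cell); // full serialized KeyValue length
      if (includesMvcc) {
        // sequence id is stored as a variable-length vint
        total += WritableUtils.getVIntSize(cell.getSequenceId());
      }
    }
    return total;
  }
}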
Use of org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder in project hbase by apache.
The class TestRowEncoder, method compile().
@Before
public void compile() throws IOException {
  // Always run with tags. But should also ensure that KVs without tags work fine
  os = new ByteArrayOutputStream(1 << 20);
  encoder = new PrefixTreeEncoder(os, includeMemstoreTS);
  inputKvs = rows.getInputs();
  for (KeyValue kv : inputKvs) {
    encoder.write(kv);
  }
  encoder.flush();
  totalBytes = encoder.getTotalBytes();
  blockMetaWriter = encoder.getBlockMeta();
  outputBytes = os.toByteArray();
  // start reading, but save the assertions for @Test methods
  ByteBuffer out = ByteBuffer.allocateDirect(outputBytes.length);
  ByteBufferUtils.copyFromArrayToBuffer(out, outputBytes, 0, outputBytes.length);
  out.position(0);
  buffer = new SingleByteBuff(out);
  blockMetaReader = new PrefixTreeBlockMeta(buffer);
  searcher = new PrefixTreeArraySearcher(blockMetaReader, blockMetaReader.getRowTreeDepth(),
      blockMetaReader.getMaxRowLength(), blockMetaReader.getMaxQualifierLength(),
      blockMetaReader.getMaxTagsLength());
  searcher.initOnBlock(blockMetaReader, buffer, includeMemstoreTS);
}
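With compile() having round-tripped the input KeyValues through the encoder and rebuilt a searcher over the encoded block, a follow-on @Test method can walk the block and compare each decoded cell to its input. A hedged sketch of such a method, sitting alongside compile() in the same class; it assumes the searcher supports the CellScanner-style advance()/current() iteration, and the test name is illustrative:

// Sketch of an @Test companion to compile() (not HBase source):
// scan forward through the decoded block and assert each cell
// round-trips to the corresponding input KeyValue.
@Test
public void testForwardScanMatchesInputs() throws IOException {
  int i = 0;
  while (searcher.advance()) {
    KeyValue expected = inputKvs.get(i++);
    KeyValue actual = KeyValueUtil.copyToNewKeyValue(searcher.current());
    Assert.assertEquals(expected, actual);
  }
  // every input cell must have been decoded exactly once
  Assert.assertEquals(inputKvs.size(), i);
}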
Use of org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder in project hbase by apache.
The class PrefixTreeCodec, method startBlockEncoding().
@Override
public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOutputStream out) throws IOException {
  if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
    throw new IOException(this.getClass().getName() + " only accepts "
        + HFileBlockDefaultEncodingContext.class.getName() + " as the encoding context.");
  }
  HFileBlockDefaultEncodingContext encodingCtx = (HFileBlockDefaultEncodingContext) blkEncodingCtx;
  encodingCtx.prepareEncoding(out);
  PrefixTreeEncoder builder = EncoderFactory.checkOut(out, encodingCtx.getHFileContext().isIncludesMvcc());
  PrefixTreeEncodingState state = new PrefixTreeEncodingState();
  state.builder = builder;
  blkEncodingCtx.setEncodingState(state);
}
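startBlockEncoding borrows an encoder from EncoderFactory's pool and stashes it in the encoding state; endBlockEncoding (next snippet) is what returns it. The implied pooling contract can be sketched outside the codec; this is a minimal sketch, not HBase source, and the wrapper class and try/finally pairing are assumptions about safe pool usage rather than what the codec itself does:

// Hypothetical helper (not HBase source) showing the checkOut/checkIn
// pairing implied by startBlockEncoding/endBlockEncoding: a pooled
// encoder must be returned after flush() so it can be reused.
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.codec.prefixtree.encode.EncoderFactory;
import org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder;

public class PooledEncodeSketch {
  static void encodeBlock(Iterable<Cell> cells, DataOutputStream out, boolean includesMvcc)
      throws IOException {
    PrefixTreeEncoder builder = EncoderFactory.checkOut(out, includesMvcc);
    try {
      for (Cell cell : cells) {
        builder.write(cell);
      }
      builder.flush();
    } finally {
      EncoderFactory.checkIn(builder); // return to the pool even on failure
    }
  }
}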
Use of org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder in project hbase by apache.
The class PrefixTreeCodec, method endBlockEncoding().
@Override
public void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out,
    byte[] uncompressedBytesWithHeader) throws IOException {
  PrefixTreeEncodingState state = (PrefixTreeEncodingState) encodingCtx.getEncodingState();
  PrefixTreeEncoder builder = state.builder;
  builder.flush();
  EncoderFactory.checkIn(builder);
  // do i need to check this, or will it always be DataBlockEncoding.PREFIX_TREE?
  if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
    encodingCtx.postEncoding(BlockType.ENCODED_DATA);
  } else {
    encodingCtx.postEncoding(BlockType.DATA);
  }
}
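Taken together, the three codec methods form one block's encoding lifecycle: startBlockEncoding checks out and prepares an encoder, encode() feeds it cells, and endBlockEncoding flushes, returns the encoder to the pool, and tags the block type. A hedged driver sketch follows; it assumes the stock HFileContextBuilder and the (DataBlockEncoding, byte[], HFileContext) constructor of HFileBlockDefaultEncodingContext from this era of HBase, and is not taken from the project's own write path:

// Hypothetical driver (not HBase source) exercising the full
// start/encode/end lifecycle of PrefixTreeCodec on a batch of KeyValues.
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeCodec;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext;
import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;

public class PrefixTreeBlockDriver {
  static byte[] encodeBlock(Iterable<KeyValue> kvs) throws IOException {
    HFileContext fileContext = new HFileContextBuilder().withIncludesMvcc(false).build();
    HFileBlockEncodingContext ctx = new HFileBlockDefaultEncodingContext(
        DataBlockEncoding.PREFIX_TREE, HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    PrefixTreeCodec codec = new PrefixTreeCodec();
    codec.startBlockEncoding(ctx, out);     // checks an encoder out of the pool
    for (KeyValue kv : kvs) {
      codec.encode(kv, ctx, out);           // feed each cell to the encoder
    }
    codec.endBlockEncoding(ctx, out, baos.toByteArray()); // flush + check back in
    return baos.toByteArray();
  }
}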
Use of org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder in project hbase by apache.
The class TestRowDataUrlsExample, method main().
/**
 * Used for generating docs.
 */
public static void main(String... args) throws IOException {
  System.out.println("-- inputs --");
  System.out.println(KeyValueTestUtil.toStringWithPadding(kvs, true));
  ByteArrayOutputStream os = new ByteArrayOutputStream(1 << 20);
  PrefixTreeEncoder encoder = new PrefixTreeEncoder(os, false);
  for (KeyValue kv : kvs) {
    encoder.write(kv);
  }
  encoder.flush();
  System.out.println("-- qualifier SortedPtBuilderNodes --");
  for (TokenizerNode tokenizer : encoder.getQualifierWriter().getNonLeaves()) {
    System.out.println(tokenizer);
  }
  for (TokenizerNode tokenizerNode : encoder.getQualifierWriter().getLeaves()) {
    System.out.println(tokenizerNode);
  }
  System.out.println("-- qualifier PtColumnNodeWriters --");
  for (ColumnNodeWriter writer : encoder.getQualifierWriter().getColumnNodeWriters()) {
    System.out.println(writer);
  }
  System.out.println("-- rowKey SortedPtBuilderNodes --");
  for (TokenizerNode tokenizerNode : encoder.getRowWriter().getNonLeaves()) {
    System.out.println(tokenizerNode);
  }
  for (TokenizerNode tokenizerNode : encoder.getRowWriter().getLeaves()) {
    System.out.println(tokenizerNode);
  }
  System.out.println("-- row PtRowNodeWriters --");
  for (RowNodeWriter writer : encoder.getRowWriter().getNonLeafWriters()) {
    System.out.println(writer);
  }
  for (RowNodeWriter writer : encoder.getRowWriter().getLeafWriters()) {
    System.out.println(writer);
  }
  System.out.println("-- concatenated values --");
  System.out.println(Bytes.toStringBinary(encoder.getValueByteRange().deepCopyToNewArray()));
}