Example 1 with PrefixTreeEncoder

Use of org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder in project hbase by Apache.

From the class PrefixTreeCodec, the method encode:

@Override
public int encode(Cell cell, HFileBlockEncodingContext encodingCtx, DataOutputStream out) throws IOException {
    PrefixTreeEncodingState state = (PrefixTreeEncodingState) encodingCtx.getEncodingState();
    PrefixTreeEncoder builder = state.builder;
    builder.write(cell);
    // Report the cell's unencoded size; the block writer uses it to track raw block growth.
    int size = KeyValueUtil.length(cell);
    if (encodingCtx.getHFileContext().isIncludesMvcc()) {
        // The sequence id is stored as a vint, so its contribution is variable-length.
        size += WritableUtils.getVIntSize(cell.getSequenceId());
    }
    return size;
}
Also used: PrefixTreeEncoder(org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder)
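
Note that the return value is the cell's unencoded size, not the number of bytes the trie actually emits; the vint term only matters when MVCC read points are stored. A minimal standalone sketch of that vint accounting (the demo class is hypothetical; WritableUtils is the same Hadoop utility called above):

import org.apache.hadoop.io.WritableUtils;

// Hypothetical demo: how many bytes a vint-encoded sequence id contributes to the size.
public class VIntSizeDemo {
    public static void main(String[] args) {
        // Values in [-112, 127] fit in a single byte.
        System.out.println(WritableUtils.getVIntSize(0L));         // prints 1
        // Larger sequence ids need a length byte plus payload bytes.
        System.out.println(WritableUtils.getVIntSize(1_000_000L)); // prints 4
    }
}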

Example 2 with PrefixTreeEncoder

Use of org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder in project hbase by Apache.

From the class TestRowEncoder, the method compile:

@Before
public void compile() throws IOException {
    // Always run with tags. But should also ensure that KVs without tags work fine
    os = new ByteArrayOutputStream(1 << 20);
    encoder = new PrefixTreeEncoder(os, includeMemstoreTS);
    inputKvs = rows.getInputs();
    for (KeyValue kv : inputKvs) {
        encoder.write(kv);
    }
    encoder.flush();
    totalBytes = encoder.getTotalBytes();
    blockMetaWriter = encoder.getBlockMeta();
    outputBytes = os.toByteArray();
    // start reading, but save the assertions for @Test methods
    ByteBuffer out = ByteBuffer.allocateDirect(outputBytes.length);
    ByteBufferUtils.copyFromArrayToBuffer(out, outputBytes, 0, outputBytes.length);
    out.position(0);
    buffer = new SingleByteBuff(out);
    blockMetaReader = new PrefixTreeBlockMeta(buffer);
    searcher = new PrefixTreeArraySearcher(blockMetaReader,
        blockMetaReader.getRowTreeDepth(),
        blockMetaReader.getMaxRowLength(),
        blockMetaReader.getMaxQualifierLength(),
        blockMetaReader.getMaxTagsLength());
    searcher.initOnBlock(blockMetaReader, buffer, includeMemstoreTS);
}
Also used: PrefixTreeEncoder(org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder) PrefixTreeArraySearcher(org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeArraySearcher) KeyValue(org.apache.hadoop.hbase.KeyValue) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) PrefixTreeBlockMeta(org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta) ByteArrayOutputStream(java.io.ByteArrayOutputStream) ByteBuffer(java.nio.ByteBuffer) Before(org.junit.Before)
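
The @Test methods then replay the inputs against the decoded block. A minimal sketch of such an assertion, assuming the CellScanner-style advance()/current() contract that PrefixTreeArraySearcher inherits and JUnit's static assert imports (the test body below is illustrative, not copied from TestRowEncoder):

@Test
public void testRoundTrip() {
    int i = 0;
    while (searcher.advance()) {
        // Each decoded cell should match the KeyValue written at the same position.
        assertTrue(CellUtil.equals(inputKvs.get(i), searcher.current()));
        i++;
    }
    assertEquals(inputKvs.size(), i);
}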

Example 3 with PrefixTreeEncoder

Use of org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder in project hbase by Apache.

From the class PrefixTreeCodec, the method startBlockEncoding:

@Override
public void startBlockEncoding(HFileBlockEncodingContext blkEncodingCtx, DataOutputStream out) throws IOException {
    if (blkEncodingCtx.getClass() != HFileBlockDefaultEncodingContext.class) {
        throw new IOException(this.getClass().getName() + " only accepts " + HFileBlockDefaultEncodingContext.class.getName() + " as the " + "encoding context.");
    }
    HFileBlockDefaultEncodingContext encodingCtx = (HFileBlockDefaultEncodingContext) blkEncodingCtx;
    encodingCtx.prepareEncoding(out);
    PrefixTreeEncoder builder = EncoderFactory.checkOut(out, encodingCtx.getHFileContext().isIncludesMvcc());
    PrefixTreeEncodingState state = new PrefixTreeEncodingState();
    state.builder = builder;
    blkEncodingCtx.setEncodingState(state);
}
Also used: PrefixTreeEncoder(org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder) HFileBlockDefaultEncodingContext(org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext) IOException(java.io.IOException)
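
EncoderFactory.checkOut pairs with the checkIn call in endBlockEncoding (example 4 below): the factory lends out a pooled encoder bound to the output stream and takes it back when the block is done. A minimal sketch of that borrow/return contract, using only the two factory methods shown in these examples (the try/finally framing and the includeMvcc variable are assumptions, not HBase code):

// Hypothetical borrow/return pairing; includeMvcc would come from the HFile context.
PrefixTreeEncoder encoder = EncoderFactory.checkOut(out, includeMvcc);
try {
    // ... write cells with encoder.write(cell) ...
} finally {
    // Return the encoder so a later block can reuse it.
    EncoderFactory.checkIn(encoder);
}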

Example 4 with PrefixTreeEncoder

Use of org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder in project hbase by Apache.

From the class PrefixTreeCodec, the method endBlockEncoding:

@Override
public void endBlockEncoding(HFileBlockEncodingContext encodingCtx, DataOutputStream out, byte[] uncompressedBytesWithHeader) throws IOException {
    PrefixTreeEncodingState state = (PrefixTreeEncodingState) encodingCtx.getEncodingState();
    PrefixTreeEncoder builder = state.builder;
    builder.flush();
    EncoderFactory.checkIn(builder);
    // Do I need to check this, or will it always be DataBlockEncoding.PREFIX_TREE?
    if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) {
        encodingCtx.postEncoding(BlockType.ENCODED_DATA);
    } else {
        encodingCtx.postEncoding(BlockType.DATA);
    }
}
Also used: PrefixTreeEncoder(org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder)
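
Taken together, examples 1, 3, and 4 form the codec's block-encoding lifecycle. A hedged sketch of a caller driving that lifecycle (the writeBlock method and its arguments are hypothetical; only the call order comes from the examples above):

// Hypothetical driver; assumes an already-prepared encoding context and stream.
int writeBlock(PrefixTreeCodec codec, HFileBlockEncodingContext ctx,
               DataOutputStream out, List<Cell> cells) throws IOException {
    codec.startBlockEncoding(ctx, out);   // borrows a pooled PrefixTreeEncoder
    int unencodedSize = 0;
    for (Cell cell : cells) {
        unencodedSize += codec.encode(cell, ctx, out);
    }
    // uncompressedBytesWithHeader is unused by the body shown above, so pass null here.
    codec.endBlockEncoding(ctx, out, null);
    return unencodedSize;
}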

Example 5 with PrefixTreeEncoder

Use of org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder in project hbase by Apache.

From the class TestRowDataUrlsExample, the method main:

/**
   * Used for generating docs.
   */
public static void main(String... args) throws IOException {
    System.out.println("-- inputs --");
    System.out.println(KeyValueTestUtil.toStringWithPadding(kvs, true));
    ByteArrayOutputStream os = new ByteArrayOutputStream(1 << 20);
    PrefixTreeEncoder encoder = new PrefixTreeEncoder(os, false);
    for (KeyValue kv : kvs) {
        encoder.write(kv);
    }
    encoder.flush();
    System.out.println("-- qualifier SortedPtBuilderNodes --");
    for (TokenizerNode tokenizer : encoder.getQualifierWriter().getNonLeaves()) {
        System.out.println(tokenizer);
    }
    for (TokenizerNode tokenizerNode : encoder.getQualifierWriter().getLeaves()) {
        System.out.println(tokenizerNode);
    }
    System.out.println("-- qualifier PtColumnNodeWriters --");
    for (ColumnNodeWriter writer : encoder.getQualifierWriter().getColumnNodeWriters()) {
        System.out.println(writer);
    }
    System.out.println("-- rowKey SortedPtBuilderNodes --");
    for (TokenizerNode tokenizerNode : encoder.getRowWriter().getNonLeaves()) {
        System.out.println(tokenizerNode);
    }
    for (TokenizerNode tokenizerNode : encoder.getRowWriter().getLeaves()) {
        System.out.println(tokenizerNode);
    }
    System.out.println("-- row PtRowNodeWriters --");
    for (RowNodeWriter writer : encoder.getRowWriter().getNonLeafWriters()) {
        System.out.println(writer);
    }
    for (RowNodeWriter writer : encoder.getRowWriter().getLeafWriters()) {
        System.out.println(writer);
    }
    System.out.println("-- concatenated values --");
    System.out.println(Bytes.toStringBinary(encoder.getValueByteRange().deepCopyToNewArray()));
}
Also used: PrefixTreeEncoder(org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder) KeyValue(org.apache.hadoop.hbase.KeyValue) ByteArrayOutputStream(java.io.ByteArrayOutputStream) ColumnNodeWriter(org.apache.hadoop.hbase.codec.prefixtree.encode.column.ColumnNodeWriter) TokenizerNode(org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode) RowNodeWriter(org.apache.hadoop.hbase.codec.prefixtree.encode.row.RowNodeWriter)

Aggregations

PrefixTreeEncoder (org.apache.hadoop.hbase.codec.prefixtree.encode.PrefixTreeEncoder): 5
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 2
KeyValue (org.apache.hadoop.hbase.KeyValue): 2
IOException (java.io.IOException): 1
ByteBuffer (java.nio.ByteBuffer): 1
PrefixTreeBlockMeta (org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta): 1
PrefixTreeArraySearcher (org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeArraySearcher): 1
ColumnNodeWriter (org.apache.hadoop.hbase.codec.prefixtree.encode.column.ColumnNodeWriter): 1
RowNodeWriter (org.apache.hadoop.hbase.codec.prefixtree.encode.row.RowNodeWriter): 1
TokenizerNode (org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode): 1
HFileBlockDefaultEncodingContext (org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext): 1
SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff): 1
Before (org.junit.Before): 1