
Example 1 with ColumnReader

Use of org.apache.hadoop.hbase.codec.prefixtree.decode.column.ColumnReader in project hbase by apache.

From the class TestColumnBuilder, method testReaderRoundTrip. The test feeds sorted, de-duplicated column qualifiers into a Tokenizer builder, serializes them with ColumnSectionWriter, then reads each qualifier back through ColumnReader and asserts the round trip is lossless.

@Test
public void testReaderRoundTrip() throws IOException {
    // feed the pre-sorted, de-duplicated qualifiers into the builder
    for (int i = 0; i < sortedUniqueColumns.size(); ++i) {
        ByteRange column = sortedUniqueColumns.get(i);
        builder.addSorted(column);
    }
    // the builder should emit exactly the byte arrays that were added
    List<byte[]> builderOutputArrays = builder.getArrays();
    for (int i = 0; i < builderOutputArrays.size(); ++i) {
        byte[] inputArray = sortedUniqueColumns.get(i).deepCopyToNewArray();
        byte[] outputArray = builderOutputArrays.get(i);
        boolean same = Bytes.equals(inputArray, outputArray);
        Assert.assertTrue(same);
    }
    Assert.assertEquals(sortedUniqueColumns.size(), builderOutputArrays.size());
    // serialize the qualifier trie into a byte array
    writer = new ColumnSectionWriter(blockMeta, builder, ColumnNodeType.QUALIFIER);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    writer.compile().writeBytes(baos);
    bytes = baos.toByteArray();
    // decode it back through a ColumnReader positioned on a SingleByteBuff
    buffer = new byte[blockMeta.getMaxQualifierLength()];
    reader = new ColumnReader(buffer, ColumnNodeType.QUALIFIER);
    reader.initOnBlock(blockMeta, new SingleByteBuff(ByteBuffer.wrap(bytes)));
    // collect the tokenizer's nodes and check that each qualifier round-trips
    List<TokenizerNode> builderNodes = Lists.newArrayList();
    builder.appendNodes(builderNodes, true, true);
    int i = 0;
    for (TokenizerNode builderNode : builderNodes) {
        if (!builderNode.hasOccurrences()) {
            continue;
        }
        // we de-duped before adding to the builder, so each node occurs once
        Assert.assertEquals(1, builderNode.getNumOccurrences());
        int position = builderNode.getOutputArrayOffset();
        byte[] output = reader.populateBuffer(position).copyBufferToNewArray();
        boolean same = Bytes.equals(sortedUniqueColumns.get(i).deepCopyToNewArray(), output);
        Assert.assertTrue(same);
        ++i;
    }
}
Also used : ByteRange(org.apache.hadoop.hbase.util.ByteRange) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) ByteArrayOutputStream(java.io.ByteArrayOutputStream) ColumnReader(org.apache.hadoop.hbase.codec.prefixtree.decode.column.ColumnReader) ColumnSectionWriter(org.apache.hadoop.hbase.codec.prefixtree.encode.column.ColumnSectionWriter) TokenizerNode(org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode) Test(org.junit.Test)
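
To use the decoder on its own, only three calls from the example matter: construct the reader with a scratch buffer sized for the longest qualifier, point it at the encoded block, and populate the buffer at a node's output offset. A minimal read-path sketch, assuming blockMeta and bytes were produced exactly as in the test above (qualifierOffset is a hypothetical stand-in for a TokenizerNode's output-array offset):

// Read-path sketch only; blockMeta and bytes come from the write phase
// shown in testReaderRoundTrip, and qualifierOffset is hypothetical.
byte[] scratch = new byte[blockMeta.getMaxQualifierLength()];
ColumnReader reader = new ColumnReader(scratch, ColumnNodeType.QUALIFIER);
reader.initOnBlock(blockMeta, new SingleByteBuff(ByteBuffer.wrap(bytes)));
byte[] qualifier = reader.populateBuffer(qualifierOffset).copyBufferToNewArray();

A single reader instance can be repositioned with further populateBuffer(...) calls, which is why the test above reuses one reader across all of the builder's nodes.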

Aggregations

ByteArrayOutputStream (java.io.ByteArrayOutputStream): 1
ColumnReader (org.apache.hadoop.hbase.codec.prefixtree.decode.column.ColumnReader): 1
ColumnSectionWriter (org.apache.hadoop.hbase.codec.prefixtree.encode.column.ColumnSectionWriter): 1
TokenizerNode (org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode): 1
SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff): 1
ByteRange (org.apache.hadoop.hbase.util.ByteRange): 1
Test (org.junit.Test): 1