
Example 31 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.

From the class TestBlockMeta, method testStreamSerialization.

@Test
public void testStreamSerialization() throws IOException {
    PrefixTreeBlockMeta original = createSample();
    // Serialize the block meta into a byte stream...
    ByteArrayOutputStream os = new ByteArrayOutputStream(10000);
    original.writeVariableBytesToOutputStream(os);
    // ...then rebuild it from a SingleByteBuff wrapping those bytes and compare.
    ByteBuffer buffer = ByteBuffer.wrap(os.toByteArray());
    PrefixTreeBlockMeta roundTripped = new PrefixTreeBlockMeta(new SingleByteBuff(buffer));
    Assert.assertTrue(original.equals(roundTripped));
}
Also used : PrefixTreeBlockMeta(org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) ByteArrayOutputStream(java.io.ByteArrayOutputStream) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
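
The same wrap-and-read pattern shows up throughout these examples: serialize into a plain byte[], wrap it in a SingleByteBuff, and read it back through the ByteBuff API. Below is a minimal, self-contained sketch of that pattern; the class name is made up for illustration, and it assumes the relative getInt()/getLong() readers that the ByteBuff API provides.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.nio.SingleByteBuff;

public class SingleByteBuffRoundTripSketch {
    public static void main(String[] args) throws IOException {
        // Serialize a couple of values into a plain byte[].
        ByteArrayOutputStream os = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(os);
        dos.writeInt(42);
        dos.writeLong(1318966363481L);
        dos.flush();
        // Wrap the bytes in a SingleByteBuff and read them back through the ByteBuff API.
        SingleByteBuff buff = new SingleByteBuff(ByteBuffer.wrap(os.toByteArray()));
        int i = buff.getInt();
        long l = buff.getLong();
        assert i == 42 && l == 1318966363481L;
    }
}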

Example 32 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.

From the class TestColumnBuilder, method testReaderRoundTrip.

/************* methods ********************************/
@Test
public void testReaderRoundTrip() throws IOException {
    for (int i = 0; i < sortedUniqueColumns.size(); ++i) {
        ByteRange column = sortedUniqueColumns.get(i);
        builder.addSorted(column);
    }
    List<byte[]> builderOutputArrays = builder.getArrays();
    for (int i = 0; i < builderOutputArrays.size(); ++i) {
        byte[] inputArray = sortedUniqueColumns.get(i).deepCopyToNewArray();
        byte[] outputArray = builderOutputArrays.get(i);
        boolean same = Bytes.equals(inputArray, outputArray);
        Assert.assertTrue(same);
    }
    Assert.assertEquals(sortedUniqueColumns.size(), builderOutputArrays.size());
    writer = new ColumnSectionWriter(blockMeta, builder, ColumnNodeType.QUALIFIER);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    writer.compile().writeBytes(baos);
    bytes = baos.toByteArray();
    buffer = new byte[blockMeta.getMaxQualifierLength()];
    reader = new ColumnReader(buffer, ColumnNodeType.QUALIFIER);
    reader.initOnBlock(blockMeta, new SingleByteBuff(ByteBuffer.wrap(bytes)));
    List<TokenizerNode> builderNodes = Lists.newArrayList();
    builder.appendNodes(builderNodes, true, true);
    int i = 0;
    for (TokenizerNode builderNode : builderNodes) {
        if (!builderNode.hasOccurrences()) {
            continue;
        }
        // we de-duped before adding to the builder
        Assert.assertEquals(1, builderNode.getNumOccurrences());
        int position = builderNode.getOutputArrayOffset();
        byte[] output = reader.populateBuffer(position).copyBufferToNewArray();
        boolean same = Bytes.equals(sortedUniqueColumns.get(i).deepCopyToNewArray(), output);
        Assert.assertTrue(same);
        ++i;
    }
}
Also used : ByteRange(org.apache.hadoop.hbase.util.ByteRange) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) ByteArrayOutputStream(java.io.ByteArrayOutputStream) ColumnReader(org.apache.hadoop.hbase.codec.prefixtree.decode.column.ColumnReader) ColumnSectionWriter(org.apache.hadoop.hbase.codec.prefixtree.encode.column.ColumnSectionWriter) TokenizerNode(org.apache.hadoop.hbase.codec.prefixtree.encode.tokenize.TokenizerNode) Test(org.junit.Test)
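
For readers new to ByteRange, the deepCopyToNewArray()/Bytes.equals() comparison above reduces to the following sketch. SimpleMutableByteRange is used here as one concrete ByteRange implementation, and the sketch's class name is illustrative.

import org.apache.hadoop.hbase.util.ByteRange;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.SimpleMutableByteRange;

public class ByteRangeComparisonSketch {
    public static void main(String[] args) {
        byte[] qualifier = Bytes.toBytes("cq1");
        // Wrap the array in a ByteRange, then copy its contents back out.
        ByteRange range = new SimpleMutableByteRange(qualifier);
        byte[] copy = range.deepCopyToNewArray();
        // Bytes.equals compares contents, exactly as the round-trip assertions above do.
        assert Bytes.equals(qualifier, copy);
    }
}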

Example 33 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.

From the class TestVLongTool, method testFromBytesOffset.

@Test
public void testFromBytesOffset() {
    Assert.assertEquals(Long.MAX_VALUE, UVLongTool.getLong(new SingleByteBuff(ByteBuffer.wrap(UVLongTool.MAX_VALUE_BYTES)), 0));
    long ms = 1318966363481L;
    //    System.out.println(ms);
    byte[] bytes = UVLongTool.getBytes(ms);
    //    System.out.println(Arrays.toString(bytes));
    long roundTripped = UVLongTool.getLong(new SingleByteBuff(ByteBuffer.wrap(bytes)), 0);
    Assert.assertEquals(ms, roundTripped);
    int calculatedNumBytes = UVLongTool.numBytes(ms);
    int actualNumBytes = bytes.length;
    Assert.assertEquals(actualNumBytes, calculatedNumBytes);
    byte[] shiftedBytes = new byte[1000];
    int shift = 33;
    System.arraycopy(bytes, 0, shiftedBytes, shift, bytes.length);
    long shiftedRoundTrip = UVLongTool.getLong(new SingleByteBuff(ByteBuffer.wrap(shiftedBytes)), shift);
    Assert.assertEquals(ms, shiftedRoundTrip);
}
Also used : SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) Test(org.junit.Test)
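
The same round trip can be swept across many values using only the UVLongTool calls exercised above (getBytes, getLong over a SingleByteBuff, and numBytes). The import path for UVLongTool (org.apache.hadoop.hbase.util.vint) is assumed here, and the sketch's class name is illustrative.

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.nio.SingleByteBuff;
import org.apache.hadoop.hbase.util.vint.UVLongTool;

public class UVLongToolSweepSketch {
    public static void main(String[] args) {
        // Sweep a range of values; each must round-trip through its variable-length encoding.
        for (long value = 0; value < 1_000_000L; value += 997) {
            byte[] encoded = UVLongTool.getBytes(value);
            // The predicted encoded width must match the actual width.
            assert UVLongTool.numBytes(value) == encoded.length;
            long decoded = UVLongTool.getLong(new SingleByteBuff(ByteBuffer.wrap(encoded)), 0);
            assert decoded == value;
        }
    }
}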

Example 34 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.

From the class ChecksumUtil, method verifyChunkedSums.

/**
 * Like Hadoop's {@link DataChecksum#verifyChunkedSums(ByteBuffer, ByteBuffer, String, long)},
 * this method verifies the checksum of each chunk in the data. The difference is that this method
 * accepts {@link ByteBuff} arguments; since we cannot add such an overload to hadoop-common, it is
 * defined here.
 * @param dataChecksum used to calculate the checksums.
 * @param data the input data to verify.
 * @param checksums the stored checksums to compare against.
 * @param pathName the name of the file the data was read from, used in error messages.
 * @return true if all chunk checksums match, false otherwise.
 * @see org.apache.hadoop.util.DataChecksum#verifyChunkedSums(ByteBuffer, ByteBuffer, String,
 *      long)
 */
private static boolean verifyChunkedSums(DataChecksum dataChecksum, ByteBuff data, ByteBuff checksums, String pathName) {
    // Fast path: when both buffers are single-buffer ByteBuffs we can hand the backing NIO
    // buffers straight to DataChecksum, avoiding any byte[] allocation or copying. (HBASE-21917)
    if (data instanceof SingleByteBuff && checksums instanceof SingleByteBuff) {
        // The checksums ByteBuff must also be a SingleByteBuff because it is duplicated from the data buffer.
        ByteBuffer dataBB = (ByteBuffer) (data.nioByteBuffers()[0]).duplicate().position(data.position()).limit(data.limit());
        ByteBuffer checksumBB = (ByteBuffer) (checksums.nioByteBuffers()[0]).duplicate().position(checksums.position()).limit(checksums.limit());
        try {
            dataChecksum.verifyChunkedSums(dataBB, checksumBB, pathName, 0);
            return true;
        } catch (ChecksumException e) {
            return false;
        }
    }
    // If the block is a MultiByteBuff, we use a small byte[] and update the checksum in several
    // passes to reduce GC pressure. This is a rare case.
    int checksumTypeSize = dataChecksum.getChecksumType().size;
    if (checksumTypeSize == 0) {
        return true;
    }
    // A size of 0 means the NULL checksum type (handled above); the other supported types
    // (CRC32 and CRC32C) both use 4-byte checksums.
    assert checksumTypeSize == 4;
    int bytesPerChecksum = dataChecksum.getBytesPerChecksum();
    int startDataPos = data.position();
    data.mark();
    checksums.mark();
    try {
        // Allocate a small buffer to reduce young-gen GC (HBASE-21917), and copy up to 256 bytes
        // from the ByteBuff per checksum update. If we upgrade to a future JDK/Hadoop version that
        // supports DataChecksum#update(ByteBuffer), we will no longer need to update the checksum
        // in multiple passes.
        byte[] buf = new byte[CHECKSUM_BUF_SIZE];
        byte[] sum = new byte[checksumTypeSize];
        while (data.remaining() > 0) {
            int n = Math.min(data.remaining(), bytesPerChecksum);
            checksums.get(sum);
            dataChecksum.reset();
            for (int remain = n, len; remain > 0; remain -= len) {
                // Copy up to 256 bytes from the ByteBuff per checksum update; if fewer than 256
                // bytes remain, update with just the remaining bytes.
                len = Math.min(CHECKSUM_BUF_SIZE, remain);
                data.get(buf, 0, len);
                dataChecksum.update(buf, 0, len);
            }
            int calculated = (int) dataChecksum.getValue();
            int stored = (sum[0] << 24 & 0xff000000) | (sum[1] << 16 & 0xff0000) | (sum[2] << 8 & 0xff00) | (sum[3] & 0xff);
            if (calculated != stored) {
                if (LOG.isTraceEnabled()) {
                    long errPos = data.position() - startDataPos - n;
                    LOG.trace("Checksum error: {} at {} expected: {} got: {}", pathName, errPos, stored, calculated);
                }
                return false;
            }
        }
    } finally {
        data.reset();
        checksums.reset();
    }
    return true;
}
Also used : ChecksumException(org.apache.hadoop.fs.ChecksumException) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) ByteBuffer(java.nio.ByteBuffer)
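
The stored-checksum reconstruction in the shift-and-mask expression above is simply a big-endian decode of the 4-byte sum. The sketch below (pure java.nio, class name illustrative) shows the equivalence, since ByteBuffer reads are big-endian by default.

import java.nio.ByteBuffer;

public class StoredChecksumDecodeSketch {
    public static void main(String[] args) {
        byte[] sum = new byte[] { (byte) 0xCA, (byte) 0xFE, (byte) 0xBA, (byte) 0xBE };
        // Manual reconstruction, as in verifyChunkedSums above.
        int manual = (sum[0] << 24 & 0xff000000) | (sum[1] << 16 & 0xff0000)
            | (sum[2] << 8 & 0xff00) | (sum[3] & 0xff);
        // Equivalent big-endian decode via ByteBuffer (big-endian is the NIO default).
        int viaBuffer = ByteBuffer.wrap(sum).getInt();
        assert manual == viaBuffer; // both yield 0xCAFEBABE
    }
}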

Example 35 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.

From the class ByteBufferListOutputStream, method allocateNewBuffer.

private void allocateNewBuffer() {
    if (this.curBuf != null) {
        // On the current buf set limit = pos and pos = 0.
        this.curBuf.flip();
    }
    // Get an initial ByteBuffer from the allocator.
    SingleByteBuff sbb = allocator.allocateOneBuffer();
    this.curBuf = sbb.nioByteBuffers()[0];
    this.allBufs.add(sbb);
}
Also used : SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff)
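
The "set limit = pos and pos = 0" comment describes exactly what ByteBuffer.flip() does: it turns a buffer that was being written into one ready to be drained. A minimal java.nio illustration (class name illustrative):

import java.nio.ByteBuffer;

public class FlipSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(64);
        buf.putInt(1).putInt(2); // position = 8, limit = 64 while writing
        // flip(): limit becomes the old position (8) and position resets to 0,
        // so readers consume only the bytes that were actually written.
        buf.flip();
        assert buf.limit() == 8 && buf.position() == 0;
        assert buf.getInt() == 1 && buf.getInt() == 2;
    }
}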

Aggregations

SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff): 47
ByteBuffer (java.nio.ByteBuffer): 27
Test (org.junit.Test): 27
MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff): 21
ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff): 19
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 12
ArrayList (java.util.ArrayList): 9
KeyValue (org.apache.hadoop.hbase.KeyValue): 9
Cell (org.apache.hadoop.hbase.Cell): 8
DataOutputStream (java.io.DataOutputStream): 7
Path (org.apache.hadoop.fs.Path): 7
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext): 6
HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder): 6
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 5
FSDataInputStreamWrapper (org.apache.hadoop.hbase.io.FSDataInputStreamWrapper): 5
Compression (org.apache.hadoop.hbase.io.compress.Compression): 4
Configuration (org.apache.hadoop.conf.Configuration): 3
ByteArrayInputStream (java.io.ByteArrayInputStream): 2
Random (java.util.Random): 2