
Example 6 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

Class RowIndexSeekerV1, method setCurrentBuffer.

@Override
public void setCurrentBuffer(ByteBuff buffer) {
    // The trailing 4 bytes of the block hold the on-disk size of the data part
    int onDiskSize = buffer.getInt(buffer.limit() - Bytes.SIZEOF_INT);
    // Data part: a zero-copy slice of the encoded cells
    ByteBuff dup = buffer.duplicate();
    dup.position(buffer.position());
    dup.limit(buffer.position() + onDiskSize);
    currentBuffer = dup.slice();
    current.currentBuffer = currentBuffer;
    buffer.skip(onDiskSize);
    // Row offset section: the row count, followed by one int offset per row
    rowNumber = buffer.getInt();
    int totalRowOffsetsLength = Bytes.SIZEOF_INT * rowNumber;
    ByteBuff rowDup = buffer.duplicate();
    rowDup.position(buffer.position());
    rowDup.limit(buffer.position() + totalRowOffsetsLength);
    rowOffsets = rowDup.slice();
    decodeFirst();
}
Also used : ByteBuff(org.apache.hadoop.hbase.nio.ByteBuff)
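
The interesting part of this snippet is the duplicate/position/limit/slice pattern, which carves zero-copy views out of one backing buffer: the encoded cell data first, then the row-offset section, with the data part's on-disk size stored in the trailing four bytes. Below is a minimal, self-contained sketch of the same pattern using plain java.nio.ByteBuffer instead of HBase's ByteBuff; the toy layout, sizes, and class name are illustrative assumptions, not the actual ROW_INDEX_V1 block format.

import java.nio.ByteBuffer;

public class RowIndexSliceSketch {
    public static void main(String[] args) {
        // Toy layout mirroring what setCurrentBuffer reads:
        // [data bytes][row count][one int offset per row][on-disk size of the data part]
        int onDiskSize = 12;                               // three ints of "cell data"
        int rowNumber = 2;
        ByteBuffer buffer = ByteBuffer.allocate(onDiskSize + 4 + 4 * rowNumber + 4);
        for (int i = 0; i < 3; i++) {
            buffer.putInt(i);                              // data part
        }
        buffer.putInt(rowNumber);                          // row count
        buffer.putInt(0).putInt(8);                        // per-row offsets into the data part
        buffer.putInt(onDiskSize);                         // trailing on-disk size
        buffer.flip();

        // Same duplicate/position/limit/slice dance as the method above,
        // using java.nio.ByteBuffer in place of HBase's ByteBuff.
        int size = buffer.getInt(buffer.limit() - Integer.BYTES);
        ByteBuffer dup = buffer.duplicate();
        dup.position(buffer.position());
        dup.limit(buffer.position() + size);
        ByteBuffer currentBuffer = dup.slice();            // zero-copy view of the data part

        buffer.position(buffer.position() + size);         // equivalent of ByteBuff.skip(onDiskSize)
        int rows = buffer.getInt();
        ByteBuffer rowDup = buffer.duplicate();
        rowDup.position(buffer.position());
        rowDup.limit(buffer.position() + Integer.BYTES * rows);
        ByteBuffer rowOffsets = rowDup.slice();            // zero-copy view of the offsets

        System.out.println("data bytes=" + currentBuffer.remaining()
                + ", rows=" + rows
                + ", second offset=" + rowOffsets.getInt(Integer.BYTES));
    }
}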

Example 7 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

Class RpcServer, method allocateByteBuffToReadInto.

/**
   * This is extracted to a static method for better unit testing. We try to get buffer(s) from pool
   * as much as possible.
   *
   * @param pool The ByteBufferPool to use
   * @param minSizeForPoolUse Pool buffers are used only while the remaining bytes to read are at
   *           least this size; any smaller remainder gets an on-heap ByteBuffer.
   * @param reqLen Number of bytes in the request
   */
@VisibleForTesting
static Pair<ByteBuff, CallCleanup> allocateByteBuffToReadInto(ByteBufferPool pool, int minSizeForPoolUse, int reqLen) {
    ByteBuff resultBuf;
    List<ByteBuffer> bbs = new ArrayList<>((reqLen / pool.getBufferSize()) + 1);
    int remain = reqLen;
    ByteBuffer buf = null;
    while (remain >= minSizeForPoolUse && (buf = pool.getBuffer()) != null) {
        bbs.add(buf);
        remain -= pool.getBufferSize();
    }
    ByteBuffer[] bufsFromPool = null;
    if (bbs.size() > 0) {
        bufsFromPool = new ByteBuffer[bbs.size()];
        bbs.toArray(bufsFromPool);
    }
    if (remain > 0) {
        bbs.add(ByteBuffer.allocate(remain));
    }
    if (bbs.size() > 1) {
        ByteBuffer[] items = new ByteBuffer[bbs.size()];
        bbs.toArray(items);
        resultBuf = new MultiByteBuff(items);
    } else {
        // We are backed by single BB
        resultBuf = new SingleByteBuff(bbs.get(0));
    }
    resultBuf.limit(reqLen);
    if (bufsFromPool != null) {
        final ByteBuffer[] bufsFromPoolFinal = bufsFromPool;
        return new Pair<>(resultBuf, () -> {
            // Return back all the BBs to pool
            for (int i = 0; i < bufsFromPoolFinal.length; i++) {
                pool.putbackBuffer(bufsFromPoolFinal[i]);
            }
        });
    }
    return new Pair<>(resultBuf, null);
}
Also used : ArrayList(java.util.ArrayList) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) ByteBuff(org.apache.hadoop.hbase.nio.ByteBuff) ByteBuffer(java.nio.ByteBuffer) Pair(org.apache.hadoop.hbase.util.Pair) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
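
allocateByteBuffToReadInto packs the request into pooled (typically off-heap) buffers first and only falls back to an on-heap ByteBuffer for a small tail, or when the pool runs dry; the result is wrapped as a SingleByteBuff or MultiByteBuff, and the cleanup callback returns the pooled buffers afterwards. The sketch below reproduces just the allocation arithmetic against a toy stand-in pool; the buffer size, threshold, pool implementation, and class name are illustrative assumptions rather than HBase's ByteBufferPool.

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// Toy stand-in for the pooling arithmetic above; this is not HBase's
// ByteBufferPool, just enough of a pool to show how a request is split.
public class PooledReadAllocationSketch {
    static final int BUFFER_SIZE = 64 * 1024;          // assumed pool buffer size
    static final int MIN_SIZE_FOR_POOL_USE = 1024;     // assumed threshold
    static final Deque<ByteBuffer> POOL = new ArrayDeque<>();
    static {
        for (int i = 0; i < 4; i++) {
            POOL.add(ByteBuffer.allocateDirect(BUFFER_SIZE));
        }
    }

    static List<ByteBuffer> allocate(int reqLen) {
        List<ByteBuffer> bbs = new ArrayList<>();
        int remain = reqLen;
        // Keep taking pooled buffers while the outstanding need is still "big enough".
        while (remain >= MIN_SIZE_FOR_POOL_USE && !POOL.isEmpty()) {
            bbs.add(POOL.poll());
            remain -= BUFFER_SIZE;
        }
        // A small tail (or a pool miss) is served by a throwaway on-heap buffer.
        if (remain > 0) {
            bbs.add(ByteBuffer.allocate(remain));
        }
        return bbs;
    }

    public static void main(String[] args) {
        // A request of 128 KB + 512 B: two pooled buffers plus a 512-byte heap tail,
        // so the caller would end up with a MultiByteBuff over three ByteBuffers.
        List<ByteBuffer> bbs = allocate(128 * 1024 + 512);
        System.out.println("buffers used: " + bbs.size());   // 3
        // When the RPC call is done, the pooled buffers must be handed back,
        // which is what the CallCleanup lambda in the example above does.
    }
}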

Example 8 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

Class TestChecksum, method testAllChecksumTypes.

/**
   * Test all checksum types by writing and reading back blocks.
   */
@Test
public void testAllChecksumTypes() throws IOException {
    List<ChecksumType> cktypes = new ArrayList<>(Arrays.asList(ChecksumType.values()));
    for (Iterator<ChecksumType> itr = cktypes.iterator(); itr.hasNext(); ) {
        ChecksumType cktype = itr.next();
        Path path = new Path(TEST_UTIL.getDataTestDir(), "checksum" + cktype.getName());
        FSDataOutputStream os = fs.create(path);
        HFileContext meta = new HFileContextBuilder().withChecksumType(cktype).build();
        HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
        DataOutputStream dos = hbw.startWriting(BlockType.DATA);
        for (int i = 0; i < 1000; ++i) {
            dos.writeInt(i);
        }
        hbw.writeHeaderAndData(os);
        int totalSize = hbw.getOnDiskSizeWithHeader();
        os.close();
        // Use hbase checksums.
        assertEquals(true, hfs.useHBaseChecksum());
        FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
        meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(is, totalSize, (HFileSystem) fs, path, meta);
        HFileBlock b = hbr.readBlockData(0, -1, false);
        ByteBuff data = b.getBufferWithoutHeader();
        for (int i = 0; i < 1000; i++) {
            assertEquals(i, data.getInt());
        }
        boolean exception_thrown = false;
        try {
            data.getInt();
        } catch (BufferUnderflowException e) {
            exception_thrown = true;
        }
        assertTrue(exception_thrown);
        assertEquals(0, HFile.getChecksumFailuresCount());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DataOutputStream(java.io.DataOutputStream) ArrayList(java.util.ArrayList) ChecksumType(org.apache.hadoop.hbase.util.ChecksumType) FSDataInputStreamWrapper(org.apache.hadoop.hbase.io.FSDataInputStreamWrapper) ByteBuff(org.apache.hadoop.hbase.nio.ByteBuff) BufferUnderflowException(java.nio.BufferUnderflowException) Test(org.junit.Test)
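
The test writes 1000 ints into a block, closes the file, and reads the block back with each ChecksumType, expecting zero checksum failures and a BufferUnderflowException once the payload is exhausted. The sketch below illustrates the underlying idea of per-chunk checksumming using the JDK's CRC32; the chunk size, layout, and class name are illustrative and do not reproduce the actual HFile block format.

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

// Minimal illustration of per-chunk checksumming in the spirit of the test above:
// data is checksummed every bytesPerChecksum bytes, and the stored values are
// recomputed and compared on read.
public class ChunkChecksumSketch {
    static final int BYTES_PER_CHECKSUM = 512;

    static long[] checksum(byte[] data) {
        int chunks = (data.length + BYTES_PER_CHECKSUM - 1) / BYTES_PER_CHECKSUM;
        long[] sums = new long[chunks];
        CRC32 crc = new CRC32();
        for (int c = 0; c < chunks; c++) {
            int off = c * BYTES_PER_CHECKSUM;
            int len = Math.min(BYTES_PER_CHECKSUM, data.length - off);
            crc.reset();
            crc.update(data, off, len);
            sums[c] = crc.getValue();
        }
        return sums;
    }

    public static void main(String[] args) {
        // 1000 ints of payload, like the block body written in the test.
        ByteBuffer buf = ByteBuffer.allocate(1000 * Integer.BYTES);
        for (int i = 0; i < 1000; i++) {
            buf.putInt(i);
        }
        byte[] data = buf.array();

        long[] stored = checksum(data);
        data[3] ^= 0x01;                                   // flip one bit to simulate corruption
        long[] recomputed = checksum(data);
        System.out.println("chunk 0 intact: " + (stored[0] == recomputed[0]));  // false
        System.out.println("chunk 1 intact: " + (stored[1] == recomputed[1]));  // true
    }
}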

Example 9 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

Class TestChecksum, method testChecksumCorruptionInternals.

protected void testChecksumCorruptionInternals(boolean useTags) throws IOException {
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
        for (boolean pread : new boolean[] { false, true }) {
            LOG.info("testChecksumCorruption: Compression algorithm: " + algo + ", pread=" + pread);
            Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" + algo);
            FSDataOutputStream os = fs.create(path);
            HFileContext meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true).withIncludesTags(useTags).withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM).build();
            HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
            long totalSize = 0;
            for (int blockId = 0; blockId < 2; ++blockId) {
                DataOutputStream dos = hbw.startWriting(BlockType.DATA);
                for (int i = 0; i < 1234; ++i) dos.writeInt(i);
                hbw.writeHeaderAndData(os);
                totalSize += hbw.getOnDiskSizeWithHeader();
            }
            os.close();
            // Use hbase checksums. 
            assertEquals(true, hfs.useHBaseChecksum());
            // Do a read that purposely introduces checksum verification failures.
            FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
            meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true).withIncludesTags(useTags).withHBaseCheckSum(true).build();
            HFileBlock.FSReader hbr = new CorruptedFSReaderImpl(is, totalSize, fs, path, meta);
            HFileBlock b = hbr.readBlockData(0, -1, pread);
            b.sanityCheck();
            assertEquals(4936, b.getUncompressedSizeWithoutHeader());
            assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
            // read data back from the hfile, excluding header and checksum
            ByteBuff bb = b.unpack(meta, hbr).getBufferWithoutHeader();
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(bb.array(), bb.arrayOffset(), bb.limit()));
            // assert that we encountered hbase checksum verification failures
            // but still used hdfs checksums and read data successfully.
            assertEquals(1, HFile.getChecksumFailuresCount());
            validateData(in);
            // A single hbase checksum failure causes the reader to switch off hbase checksum
            // verification for the next CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD read
            // requests. Verify that this is correct.
            for (int i = 0; i < HFileBlock.CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD + 1; i++) {
                b = hbr.readBlockData(0, -1, pread);
                assertEquals(0, HFile.getChecksumFailuresCount());
            }
            // The next read should have hbase checksum verification re-enabled;
            // we verify this by asserting that there was an hbase checksum failure.
            b = hbr.readBlockData(0, -1, pread);
            assertEquals(1, HFile.getChecksumFailuresCount());
            // Since the above encountered a checksum failure, we switch
            // back to not checking hbase checksums.
            b = hbr.readBlockData(0, -1, pread);
            assertEquals(0, HFile.getChecksumFailuresCount());
            is.close();
            // Now, use a completely new reader. Switch off hbase checksums in 
            // the configuration. In this case, we should not detect
            // any retries within hbase. 
            HFileSystem newfs = new HFileSystem(TEST_UTIL.getConfiguration(), false);
            assertEquals(false, newfs.useHBaseChecksum());
            is = new FSDataInputStreamWrapper(newfs, path);
            hbr = new CorruptedFSReaderImpl(is, totalSize, newfs, path, meta);
            b = hbr.readBlockData(0, -1, pread);
            is.close();
            b.sanityCheck();
            b = b.unpack(meta, hbr);
            assertEquals(4936, b.getUncompressedSizeWithoutHeader());
            assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
            // read data back from the hfile, excluding header and checksum
            bb = b.getBufferWithoutHeader();
            in = new DataInputStream(new ByteArrayInputStream(bb.array(), bb.arrayOffset(), bb.limit()));
            // assert that we did not encounter hbase checksum verification failures
            // but still used hdfs checksums and read data successfully.
            assertEquals(0, HFile.getChecksumFailuresCount());
            validateData(in);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Compression(org.apache.hadoop.hbase.io.compress.Compression) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DataOutputStream(java.io.DataOutputStream) DataInputStream(java.io.DataInputStream) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) ByteArrayInputStream(java.io.ByteArrayInputStream) FSDataInputStreamWrapper(org.apache.hadoop.hbase.io.FSDataInputStreamWrapper) ByteBuff(org.apache.hadoop.hbase.nio.ByteBuff) HFileSystem(org.apache.hadoop.hbase.fs.HFileSystem)
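
The test relies on the reader's fallback policy: after an hbase checksum failure it stops verifying hbase checksums (falling back to hdfs checksums) for CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD reads, then re-enables verification, which is why the failure count alternates between 1 and 0 across the reads above. The sketch below models that policy as a tiny state machine; the threshold value, field names, and class name are illustrative assumptions, not the FSReaderImpl implementation.

// Toy model of the fallback policy the test above exercises: after an hbase
// checksum failure the reader stops verifying hbase checksums (relying on hdfs
// checksums instead) for a fixed number of reads, then re-enables verification.
public class ChecksumFallbackSketch {
    static final int VERIFICATION_SKIP_READS = 3;  // stand-in for CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD

    private boolean verifyChecksum = true;
    private int readsSinceFailure = 0;

    /** Returns true if this read attempted hbase checksum verification. */
    boolean read(boolean blockIsCorrupt) {
        if (!verifyChecksum) {
            // Inside the skip window: trust hdfs checksums and count the read.
            if (++readsSinceFailure >= VERIFICATION_SKIP_READS) {
                verifyChecksum = true;             // re-enable for the next read
            }
            return false;
        }
        if (blockIsCorrupt) {
            // Verification failed: fall back for the next few reads.
            verifyChecksum = false;
            readsSinceFailure = 0;
        }
        return true;
    }

    public static void main(String[] args) {
        ChecksumFallbackSketch reader = new ChecksumFallbackSketch();
        for (int i = 0; i < 6; i++) {
            System.out.println("read " + i + " verified=" + reader.read(true));
        }
        // Prints: verified, skipped, skipped, skipped, verified, skipped --
        // the same 1, 0, 0, ..., 1 rhythm the failure-count assertions check.
    }
}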

Example 10 with ByteBuff

Use of org.apache.hadoop.hbase.nio.ByteBuff in project hbase by apache.

Class TestHFileBlock, method testInternals.

private void testInternals() throws IOException {
    final int numBlocks = 5;
    if (includesTag) {
        TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
    }
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
        for (boolean pread : new boolean[] { false, true }) {
            for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
                Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" + algo + "_" + encoding.toString());
                FSDataOutputStream os = fs.create(path);
                HFileDataBlockEncoder dataBlockEncoder = (encoding != DataBlockEncoding.NONE) ? new HFileDataBlockEncoderImpl(encoding) : NoOpDataBlockEncoder.INSTANCE;
                HFileContext meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag).withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM).build();
                HFileBlock.Writer hbw = new HFileBlock.Writer(dataBlockEncoder, meta);
                long totalSize = 0;
                final List<Integer> encodedSizes = new ArrayList<>();
                final List<ByteBuffer> encodedBlocks = new ArrayList<>();
                for (int blockId = 0; blockId < numBlocks; ++blockId) {
                    hbw.startWriting(BlockType.DATA);
                    writeTestKeyValues(hbw, blockId, includesMemstoreTS, includesTag);
                    hbw.writeHeaderAndData(os);
                    int headerLen = HConstants.HFILEBLOCK_HEADER_SIZE;
                    byte[] encodedResultWithHeader = hbw.getUncompressedBufferWithHeader().array();
                    final int encodedSize = encodedResultWithHeader.length - headerLen;
                    if (encoding != DataBlockEncoding.NONE) {
                        // We need to account for the two-byte encoding algorithm ID that
                        // comes after the block header but before the encoded KVs.
                        headerLen += DataBlockEncoding.ID_SIZE;
                    }
                    byte[] encodedDataSection = new byte[encodedResultWithHeader.length - headerLen];
                    System.arraycopy(encodedResultWithHeader, headerLen, encodedDataSection, 0, encodedDataSection.length);
                    final ByteBuffer encodedBuf = ByteBuffer.wrap(encodedDataSection);
                    encodedSizes.add(encodedSize);
                    encodedBlocks.add(encodedBuf);
                    totalSize += hbw.getOnDiskSizeWithHeader();
                }
                os.close();
                FSDataInputStream is = fs.open(path);
                meta = new HFileContextBuilder().withHBaseCheckSum(true).withCompression(algo).withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag).build();
                HFileBlock.FSReaderImpl hbr = new HFileBlock.FSReaderImpl(is, totalSize, meta);
                hbr.setDataBlockEncoder(dataBlockEncoder);
                hbr.setIncludesMemstoreTS(includesMemstoreTS);
                HFileBlock blockFromHFile, blockUnpacked;
                int pos = 0;
                for (int blockId = 0; blockId < numBlocks; ++blockId) {
                    blockFromHFile = hbr.readBlockData(pos, -1, pread);
                    assertEquals(0, HFile.getChecksumFailuresCount());
                    blockFromHFile.sanityCheck();
                    pos += blockFromHFile.getOnDiskSizeWithHeader();
                    assertEquals((int) encodedSizes.get(blockId), blockFromHFile.getUncompressedSizeWithoutHeader());
                    assertEquals(meta.isCompressedOrEncrypted(), !blockFromHFile.isUnpacked());
                    long packedHeapsize = blockFromHFile.heapSize();
                    blockUnpacked = blockFromHFile.unpack(meta, hbr);
                    assertTrue(blockUnpacked.isUnpacked());
                    if (meta.isCompressedOrEncrypted()) {
                        LOG.info("packedHeapsize=" + packedHeapsize + ", unpackedHeadsize=" + blockUnpacked.heapSize());
                        assertFalse(packedHeapsize == blockUnpacked.heapSize());
                        assertTrue("Packed heapSize should be < unpacked heapSize", packedHeapsize < blockUnpacked.heapSize());
                    }
                    ByteBuff actualBuffer = blockUnpacked.getBufferWithoutHeader();
                    if (encoding != DataBlockEncoding.NONE) {
                        // We expect a two-byte big-endian encoding id.
                        assertEquals("Unexpected first byte with " + buildMessageDetails(algo, encoding, pread), Long.toHexString(0), Long.toHexString(actualBuffer.get(0)));
                        assertEquals("Unexpected second byte with " + buildMessageDetails(algo, encoding, pread), Long.toHexString(encoding.getId()), Long.toHexString(actualBuffer.get(1)));
                        actualBuffer.position(2);
                        actualBuffer = actualBuffer.slice();
                    }
                    ByteBuffer expectedBuffer = encodedBlocks.get(blockId);
                    expectedBuffer.rewind();
                    // test if content matches, produce nice message
                    assertBuffersEqual(new SingleByteBuff(expectedBuffer), actualBuffer, algo, encoding, pread);
                    // test serialized blocks
                    for (boolean reuseBuffer : new boolean[] { false, true }) {
                        ByteBuffer serialized = ByteBuffer.allocate(blockFromHFile.getSerializedLength());
                        blockFromHFile.serialize(serialized);
                        HFileBlock deserialized = (HFileBlock) blockFromHFile.getDeserializer().deserialize(new SingleByteBuff(serialized), reuseBuffer, MemoryType.EXCLUSIVE);
                        assertEquals("Serialization did not preserve block state. reuseBuffer=" + reuseBuffer, blockFromHFile, deserialized);
                        // intentional reference comparison
                        if (blockFromHFile != blockUnpacked) {
                            assertEquals("Deserializaed block cannot be unpacked correctly.", blockUnpacked, deserialized.unpack(meta, hbr));
                        }
                    }
                }
                is.close();
            }
        }
    }
}
Also used : DataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding) Compression(org.apache.hadoop.hbase.io.compress.Compression) ArrayList(java.util.ArrayList) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) ByteBuff(org.apache.hadoop.hbase.nio.ByteBuff) Path(org.apache.hadoop.fs.Path) ByteBuffer(java.nio.ByteBuffer) Algorithm(org.apache.hadoop.hbase.io.compress.Compression.Algorithm) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)
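
The assertions on actualBuffer check the on-disk convention the test depends on: an encoded block's data section begins with a two-byte, big-endian data-block-encoding id (DataBlockEncoding.ID_SIZE bytes), which the test skips before comparing against the expected encoded bytes. The sketch below shows that detail with a plain java.nio.ByteBuffer; the id value and class name are illustrative.

import java.nio.ByteBuffer;

// Sketch of the detail the assertions above check: a short encoding id, then the
// encoded key/values. The id value used here is just an example.
public class EncodingIdSketch {
    public static void main(String[] args) {
        short encodingId = 7;                      // an example id value
        ByteBuffer block = ByteBuffer.allocate(2 + Long.BYTES);
        block.putShort(encodingId);                // two-byte id, big-endian by default
        block.putLong(42L);                        // stand-in for the encoded cells
        block.flip();

        // Same checks as the test: byte 0 is the id's high byte (0 for small ids),
        // byte 1 is the id itself.
        System.out.println("first byte  = " + block.get(0));   // 0
        System.out.println("second byte = " + block.get(1));   // 7

        block.position(2);                         // skip the id before comparing contents
        ByteBuffer encodedOnly = block.slice();
        System.out.println("payload = " + encodedOnly.getLong());  // 42
    }
}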

Aggregations

ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff)23 MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff)9 ByteBuffer (java.nio.ByteBuffer)8 SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff)8 Test (org.junit.Test)8 ArrayList (java.util.ArrayList)7 FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)5 Path (org.apache.hadoop.fs.Path)5 DataInputStream (java.io.DataInputStream)3 Random (java.util.Random)3 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)3 Compression (org.apache.hadoop.hbase.io.compress.Compression)3 VisibleForTesting (com.google.common.annotations.VisibleForTesting)2 ByteArrayInputStream (java.io.ByteArrayInputStream)2 DataOutputStream (java.io.DataOutputStream)2 File (java.io.File)2 IOException (java.io.IOException)2 Cell (org.apache.hadoop.hbase.Cell)2 KeyValue (org.apache.hadoop.hbase.KeyValue)2 FSDataInputStreamWrapper (org.apache.hadoop.hbase.io.FSDataInputStreamWrapper)2