Example 21 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.

From class TestDataBlockEncoders, method testFirstKeyInBlockOnSample.

/**
 * Test whether the decompression of the first key is implemented correctly.
 * @throws IOException
 */
@Test
public void testFirstKeyInBlockOnSample() throws IOException {
    List<KeyValue> sampleKv = generator.generateTestKeyValues(NUMBER_OF_KV, includesTags);
    for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
        if (encoding.getEncoder() == null) {
            continue;
        }
        DataBlockEncoder encoder = encoding.getEncoder();
        ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv, getEncodingContext(conf, Compression.Algorithm.NONE, encoding), this.useOffheapData);
        Cell key = encoder.getFirstKeyCellInBlock(new SingleByteBuff(encodedBuffer));
        KeyValue firstKv = sampleKv.get(0);
        if (0 != PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, key, firstKv)) {
            int commonPrefix = PrivateCellUtil.findCommonPrefixInFlatKey(key, firstKv, false, true);
            fail(String.format("Bug in '%s' commonPrefix %d", encoder.toString(), commonPrefix));
        }
    }
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) ByteBuffer(java.nio.ByteBuffer) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
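
The example above wraps an already-encoded on-heap ByteBuffer in a SingleByteBuff so the block decoder can read the first key through the ByteBuff API. Below is a minimal, standalone sketch of that wrap-and-read pattern; it is not part of the HBase test, the class name SingleByteBuffWrapSketch and the sample values are invented, and the putLong/getLong accessors are assumed to mirror java.nio.ByteBuffer, as they do elsewhere in these examples.

import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.nio.SingleByteBuff;

public class SingleByteBuffWrapSketch {

    public static void main(String[] args) {
        // Fill a small on-heap buffer and flip it for reading.
        ByteBuffer bb = ByteBuffer.allocate(16);
        bb.putLong(42L);
        bb.putLong(7L);
        bb.flip();
        // Wrap it so ByteBuff-based readers, like the decoders above, can consume it.
        SingleByteBuff buf = new SingleByteBuff(bb);
        System.out.println(buf.getLong()); // 42
        System.out.println(buf.getLong()); // 7
    }
}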

Example 22 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.

From class TestChecksum, method testChecksumCorruptionInternals.

protected void testChecksumCorruptionInternals(boolean useTags) throws IOException {
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
        for (boolean pread : new boolean[] { false, true }) {
            LOG.info("testChecksumCorruption: Compression algorithm: " + algo + ", pread=" + pread);
            Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" + algo);
            FSDataOutputStream os = fs.create(path);
            HFileContext meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true).withIncludesTags(useTags).withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM).build();
            HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta);
            long totalSize = 0;
            for (int blockId = 0; blockId < 2; ++blockId) {
                DataOutputStream dos = hbw.startWriting(BlockType.DATA);
                for (int i = 0; i < 1234; ++i) dos.writeInt(i);
                hbw.writeHeaderAndData(os);
                totalSize += hbw.getOnDiskSizeWithHeader();
            }
            os.close();
            // Use hbase checksums.
            assertEquals(true, hfs.useHBaseChecksum());
            // Do a read that purposely introduces checksum verification failures.
            FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
            meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(true).withIncludesTags(useTags).withHBaseCheckSum(true).build();
            ReaderContext context = new ReaderContextBuilder().withInputStreamWrapper(is).withFileSize(totalSize).withFileSystem(fs).withFilePath(path).build();
            HFileBlock.FSReader hbr = new CorruptedFSReaderImpl(context, meta, TEST_UTIL.getConfiguration());
            HFileBlock b = hbr.readBlockData(0, -1, pread, false, true);
            b.sanityCheck();
            assertEquals(4936, b.getUncompressedSizeWithoutHeader());
            assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
            // Read data back from the hfile, excluding the header and checksums.
            ByteBuff bb = b.unpack(meta, hbr).getBufferWithoutHeader();
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(bb.array(), bb.arrayOffset(), bb.limit()));
            // assert that we encountered hbase checksum verification failures
            // but still used hdfs checksums and read data successfully.
            assertEquals(1, HFile.getAndResetChecksumFailuresCount());
            validateData(in);
            // A single hbase checksum failure causes the reader to switch off hbase checksum
            // verification for the next CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD read
            // requests. Verify that this is correct.
            for (int i = 0; i < HFileBlock.CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD + 1; i++) {
                b = hbr.readBlockData(0, -1, pread, false, true);
                assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff);
                assertEquals(0, HFile.getAndResetChecksumFailuresCount());
            }
            // The next read should have hbase checksum verification re-enabled;
            // we verify this by asserting that there was an hbase-checksum failure.
            b = hbr.readBlockData(0, -1, pread, false, true);
            assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff);
            assertEquals(1, HFile.getAndResetChecksumFailuresCount());
            // Since the above encountered a checksum failure, we switch
            // back to not checking hbase checksums.
            b = hbr.readBlockData(0, -1, pread, false, true);
            assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff);
            assertEquals(0, HFile.getAndResetChecksumFailuresCount());
            is.close();
            // Now, use a completely new reader. Switch off hbase checksums in
            // the configuration. In this case, we should not detect
            // any retries within hbase.
            Configuration conf = TEST_UTIL.getConfiguration();
            HFileSystem newfs = new HFileSystem(conf, false);
            assertEquals(false, newfs.useHBaseChecksum());
            is = new FSDataInputStreamWrapper(newfs, path);
            context = new ReaderContextBuilder().withInputStreamWrapper(is).withFileSize(totalSize).withFileSystem(newfs).withFilePath(path).build();
            hbr = new CorruptedFSReaderImpl(context, meta, conf);
            b = hbr.readBlockData(0, -1, pread, false, true);
            is.close();
            b.sanityCheck();
            b = b.unpack(meta, hbr);
            assertEquals(4936, b.getUncompressedSizeWithoutHeader());
            assertEquals(algo == GZ ? 2173 : 4936, b.getOnDiskSizeWithoutHeader() - b.totalChecksumBytes());
            // Read data back from the hfile, excluding the header and checksums.
            bb = b.getBufferWithoutHeader();
            in = new DataInputStream(new ByteArrayInputStream(bb.array(), bb.arrayOffset(), bb.limit()));
            // assert that we did not encounter hbase checksum verification failures
            // but still used hdfs checksums and read data successfully.
            assertEquals(0, HFile.getAndResetChecksumFailuresCount());
            validateData(in);
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Compression(org.apache.hadoop.hbase.io.compress.Compression) Configuration(org.apache.hadoop.conf.Configuration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DataOutputStream(java.io.DataOutputStream) DataInputStream(java.io.DataInputStream) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) ByteArrayInputStream(java.io.ByteArrayInputStream) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) FSDataInputStreamWrapper(org.apache.hadoop.hbase.io.FSDataInputStreamWrapper) MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) ByteBuff(org.apache.hadoop.hbase.nio.ByteBuff) HFileSystem(org.apache.hadoop.hbase.fs.HFileSystem)
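
The test above exposes a heap-backed ByteBuff to classic java.io streams through array(), arrayOffset() and limit(). Below is a reduced, hedged sketch of that read-back idiom, assuming an array-backed SingleByteBuff as in the test; the class name ByteBuffToStreamSketch and the two sample ints are hypothetical.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.nio.SingleByteBuff;

public class ByteBuffToStreamSketch {

    public static void main(String[] args) throws IOException {
        ByteBuffer bb = ByteBuffer.allocate(8);
        bb.putInt(1234);
        bb.putInt(5678);
        bb.flip();
        ByteBuff buf = new SingleByteBuff(bb);
        // Same idiom as the test: wrap the backing array between arrayOffset() and limit().
        DataInputStream in = new DataInputStream(
            new ByteArrayInputStream(buf.array(), buf.arrayOffset(), buf.limit()));
        System.out.println(in.readInt()); // 1234
        System.out.println(in.readInt()); // 5678
    }
}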

Example 23 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.

From class TestHFileBlock, method testInternals.

private void testInternals() throws IOException {
    final int numBlocks = 5;
    final Configuration conf = TEST_UTIL.getConfiguration();
    if (includesTag) {
        conf.setInt("hfile.format.version", 3);
    }
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
        for (boolean pread : new boolean[] { false, true }) {
            for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
                LOG.info("testDataBlockEncoding: Compression algorithm={}, pread={}, dataBlockEncoder={}", algo.toString(), pread, encoding);
                Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_" + algo + "_" + encoding.toString());
                FSDataOutputStream os = fs.create(path);
                HFileDataBlockEncoder dataBlockEncoder = (encoding != DataBlockEncoding.NONE) ? new HFileDataBlockEncoderImpl(encoding) : NoOpDataBlockEncoder.INSTANCE;
                HFileContext meta = new HFileContextBuilder().withCompression(algo).withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag).withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM).build();
                HFileBlock.Writer hbw = new HFileBlock.Writer(conf, dataBlockEncoder, meta);
                long totalSize = 0;
                final List<Integer> encodedSizes = new ArrayList<>();
                final List<ByteBuff> encodedBlocks = new ArrayList<>();
                for (int blockId = 0; blockId < numBlocks; ++blockId) {
                    hbw.startWriting(BlockType.DATA);
                    writeTestKeyValues(hbw, blockId, includesMemstoreTS, includesTag);
                    hbw.writeHeaderAndData(os);
                    int headerLen = HConstants.HFILEBLOCK_HEADER_SIZE;
                    ByteBuff encodedResultWithHeader = hbw.cloneUncompressedBufferWithHeader();
                    final int encodedSize = encodedResultWithHeader.limit() - headerLen;
                    if (encoding != DataBlockEncoding.NONE) {
                        // We need to account for the two-byte encoding algorithm ID that
                        // comes after the 24-byte block header but before encoded KVs.
                        headerLen += DataBlockEncoding.ID_SIZE;
                    }
                    encodedSizes.add(encodedSize);
                    ByteBuff encodedBuf = encodedResultWithHeader.position(headerLen).slice();
                    encodedBlocks.add(encodedBuf);
                    totalSize += hbw.getOnDiskSizeWithHeader();
                }
                os.close();
                FSDataInputStream is = fs.open(path);
                meta = new HFileContextBuilder().withHBaseCheckSum(true).withCompression(algo).withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag).build();
                ReaderContext context = new ReaderContextBuilder().withInputStreamWrapper(new FSDataInputStreamWrapper(is)).withFileSize(totalSize).withFilePath(path).withFileSystem(fs).build();
                HFileBlock.FSReaderImpl hbr = new HFileBlock.FSReaderImpl(context, meta, alloc, conf);
                hbr.setDataBlockEncoder(dataBlockEncoder, conf);
                hbr.setIncludesMemStoreTS(includesMemstoreTS);
                HFileBlock blockFromHFile, blockUnpacked;
                int pos = 0;
                for (int blockId = 0; blockId < numBlocks; ++blockId) {
                    blockFromHFile = hbr.readBlockData(pos, -1, pread, false, true);
                    assertEquals(0, HFile.getAndResetChecksumFailuresCount());
                    blockFromHFile.sanityCheck();
                    pos += blockFromHFile.getOnDiskSizeWithHeader();
                    assertEquals((int) encodedSizes.get(blockId), blockFromHFile.getUncompressedSizeWithoutHeader());
                    assertEquals(meta.isCompressedOrEncrypted(), !blockFromHFile.isUnpacked());
                    long packedHeapsize = blockFromHFile.heapSize();
                    blockUnpacked = blockFromHFile.unpack(meta, hbr);
                    assertTrue(blockUnpacked.isUnpacked());
                    if (meta.isCompressedOrEncrypted()) {
                        LOG.info("packedHeapsize=" + packedHeapsize + ", unpackedHeadsize=" + blockUnpacked.heapSize());
                        assertFalse(packedHeapsize == blockUnpacked.heapSize());
                        assertTrue("Packed heapSize should be < unpacked heapSize", packedHeapsize < blockUnpacked.heapSize());
                    }
                    ByteBuff actualBuffer = blockUnpacked.getBufferWithoutHeader();
                    if (encoding != DataBlockEncoding.NONE) {
                        // We expect a two-byte big-endian encoding id.
                        assertEquals("Unexpected first byte with " + buildMessageDetails(algo, encoding, pread), Long.toHexString(0), Long.toHexString(actualBuffer.get(0)));
                        assertEquals("Unexpected second byte with " + buildMessageDetails(algo, encoding, pread), Long.toHexString(encoding.getId()), Long.toHexString(actualBuffer.get(1)));
                        actualBuffer.position(2);
                        actualBuffer = actualBuffer.slice();
                    }
                    ByteBuff expectedBuff = encodedBlocks.get(blockId);
                    expectedBuff.rewind();
                    // test if content matches, produce nice message
                    assertBuffersEqual(expectedBuff, actualBuffer, algo, encoding, pread);
                    // test serialized blocks
                    for (boolean reuseBuffer : new boolean[] { false, true }) {
                        ByteBuffer serialized = ByteBuffer.allocate(blockFromHFile.getSerializedLength());
                        blockFromHFile.serialize(serialized, true);
                        HFileBlock deserialized = (HFileBlock) blockFromHFile.getDeserializer().deserialize(new SingleByteBuff(serialized), HEAP);
                        assertEquals("Serialization did not preserve block state. reuseBuffer=" + reuseBuffer, blockFromHFile, deserialized);
                        // intentional reference comparison
                        if (blockFromHFile != blockUnpacked) {
                            assertEquals("Deserialized block cannot be unpacked correctly.", blockUnpacked, deserialized.unpack(meta, hbr));
                        }
                    }
                    assertRelease(blockUnpacked);
                    if (blockFromHFile != blockUnpacked) {
                        blockFromHFile.release();
                    }
                }
                is.close();
            }
        }
    }
}
Also used : DataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding) Compression(org.apache.hadoop.hbase.io.compress.Compression) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) ArrayList(java.util.ArrayList) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) ByteBuff(org.apache.hadoop.hbase.nio.ByteBuff) Path(org.apache.hadoop.fs.Path) ByteBuffer(java.nio.ByteBuffer) Algorithm(org.apache.hadoop.hbase.io.compress.Compression.Algorithm) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataInputStreamWrapper(org.apache.hadoop.hbase.io.FSDataInputStreamWrapper)
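
One detail worth isolating from the example above is the position(headerLen).slice() chain used to drop the block header from a ByteBuff view before comparing encoded contents. The following hedged sketch shows just that idiom; the 4-byte pretend header, the payload value, and the class name SliceAfterHeaderSketch are invented for illustration.

import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.nio.SingleByteBuff;

public class SliceAfterHeaderSketch {

    public static void main(String[] args) {
        ByteBuffer bb = ByteBuffer.allocate(12);
        bb.putInt(0xCAFEBABE); // pretend 4-byte header
        bb.putLong(99L);       // payload
        bb.flip();
        ByteBuff withHeader = new SingleByteBuff(bb);
        // Skip the header and take a view over the remaining bytes, as the test does
        // with the buffer returned by cloneUncompressedBufferWithHeader().
        ByteBuff payload = withHeader.position(4).slice();
        System.out.println(payload.getLong()); // 99
    }
}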

Example 24 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.

From class TestHFileBlock, method testPreviousOffsetInternals.

protected void testPreviousOffsetInternals() throws IOException {
    // TODO: parameterize these nested loops.
    Configuration conf = TEST_UTIL.getConfiguration();
    for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
        for (boolean pread : BOOLEAN_VALUES) {
            for (boolean cacheOnWrite : BOOLEAN_VALUES) {
                Random rand = defaultRandom();
                LOG.info("testPreviousOffset: Compression algorithm={}, pread={}, cacheOnWrite={}", algo.toString(), pread, cacheOnWrite);
                Path path = new Path(TEST_UTIL.getDataTestDir(), "prev_offset");
                List<Long> expectedOffsets = new ArrayList<>();
                List<Long> expectedPrevOffsets = new ArrayList<>();
                List<BlockType> expectedTypes = new ArrayList<>();
                List<ByteBuffer> expectedContents = cacheOnWrite ? new ArrayList<>() : null;
                long totalSize = writeBlocks(TEST_UTIL.getConfiguration(), rand, algo, path, expectedOffsets, expectedPrevOffsets, expectedTypes, expectedContents);
                FSDataInputStream is = fs.open(path);
                HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(true).withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTag).withCompression(algo).build();
                ReaderContext context = new ReaderContextBuilder().withInputStreamWrapper(new FSDataInputStreamWrapper(is)).withFileSize(totalSize).withFilePath(path).withFileSystem(fs).build();
                HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, alloc, conf);
                long curOffset = 0;
                for (int i = 0; i < NUM_TEST_BLOCKS; ++i) {
                    if (!pread) {
                        assertEquals(is.getPos(), curOffset + (i == 0 ? 0 : HConstants.HFILEBLOCK_HEADER_SIZE));
                    }
                    assertEquals(expectedOffsets.get(i).longValue(), curOffset);
                    if (detailedLogging) {
                        LOG.info("Reading block #" + i + " at offset " + curOffset);
                    }
                    HFileBlock b = hbr.readBlockData(curOffset, -1, pread, false, false);
                    if (detailedLogging) {
                        LOG.info("Block #" + i + ": " + b);
                    }
                    assertEquals("Invalid block #" + i + "'s type:", expectedTypes.get(i), b.getBlockType());
                    assertEquals("Invalid previous block offset for block " + i + " of " + "type " + b.getBlockType() + ":", (long) expectedPrevOffsets.get(i), b.getPrevBlockOffset());
                    b.sanityCheck();
                    assertEquals(curOffset, b.getOffset());
                    // Now re-load this block knowing the on-disk size. This tests a
                    // different branch in the loader.
                    HFileBlock b2 = hbr.readBlockData(curOffset, b.getOnDiskSizeWithHeader(), pread, false, false);
                    b2.sanityCheck();
                    assertEquals(b.getBlockType(), b2.getBlockType());
                    assertEquals(b.getOnDiskSizeWithoutHeader(), b2.getOnDiskSizeWithoutHeader());
                    assertEquals(b.getOnDiskSizeWithHeader(), b2.getOnDiskSizeWithHeader());
                    assertEquals(b.getUncompressedSizeWithoutHeader(), b2.getUncompressedSizeWithoutHeader());
                    assertEquals(b.getPrevBlockOffset(), b2.getPrevBlockOffset());
                    assertEquals(curOffset, b2.getOffset());
                    assertEquals(b.getBytesPerChecksum(), b2.getBytesPerChecksum());
                    assertEquals(b.getOnDiskDataSizeWithHeader(), b2.getOnDiskDataSizeWithHeader());
                    assertEquals(0, HFile.getAndResetChecksumFailuresCount());
                    assertRelease(b2);
                    curOffset += b.getOnDiskSizeWithHeader();
                    if (cacheOnWrite) {
                        // NOTE: cache-on-write testing doesn't actually involve a BlockCache. It simply
                        // verifies that the unpacked value read back off disk matches the unpacked value
                        // generated before writing to disk.
                        HFileBlock newBlock = b.unpack(meta, hbr);
                        // b's buffer has header + data + checksum while
                        // expectedContents have header + data only
                        ByteBuff bufRead = newBlock.getBufferReadOnly();
                        ByteBuffer bufExpected = expectedContents.get(i);
                        byte[] tmp = new byte[bufRead.limit() - newBlock.totalChecksumBytes()];
                        bufRead.get(tmp, 0, tmp.length);
                        boolean bytesAreCorrect = Bytes.compareTo(tmp, 0, tmp.length, bufExpected.array(), bufExpected.arrayOffset(), bufExpected.limit()) == 0;
                        String wrongBytesMsg = "";
                        if (!bytesAreCorrect) {
                            // Optimization: only construct an error message in case we
                            // will need it.
                            wrongBytesMsg = "Expected bytes in block #" + i + " (algo=" + algo + ", pread=" + pread + ", cacheOnWrite=" + cacheOnWrite + "):\n";
                            wrongBytesMsg += Bytes.toStringBinary(bufExpected.array(), bufExpected.arrayOffset(), Math.min(32 + 10, bufExpected.limit())) + ", actual:\n" + Bytes.toStringBinary(bufRead.array(), bufRead.arrayOffset(), Math.min(32 + 10, bufRead.limit()));
                            if (detailedLogging) {
                                LOG.warn("expected header" + HFileBlock.toStringHeader(new SingleByteBuff(bufExpected)) + "\nfound    header" + HFileBlock.toStringHeader(bufRead));
                                LOG.warn("bufread offset " + bufRead.arrayOffset() + " limit " + bufRead.limit() + " expected offset " + bufExpected.arrayOffset() + " limit " + bufExpected.limit());
                                LOG.warn(wrongBytesMsg);
                            }
                        }
                        assertTrue(wrongBytesMsg, bytesAreCorrect);
                        assertRelease(newBlock);
                        if (newBlock != b) {
                            assertRelease(b);
                        }
                    } else {
                        assertRelease(b);
                    }
                }
                assertEquals(curOffset, fs.getFileStatus(path).getLen());
                is.close();
            }
        }
    }
}
Also used : Compression(org.apache.hadoop.hbase.io.compress.Compression) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) ArrayList(java.util.ArrayList) Random(java.util.Random) MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) ByteBuff(org.apache.hadoop.hbase.nio.ByteBuff) Path(org.apache.hadoop.fs.Path) ByteBuffer(java.nio.ByteBuffer) Algorithm(org.apache.hadoop.hbase.io.compress.Compression.Algorithm) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataInputStreamWrapper(org.apache.hadoop.hbase.io.FSDataInputStreamWrapper)
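
The cache-on-write branch above checks block contents by bulk-copying the ByteBuff into a byte[] and comparing with Bytes.compareTo. Here is a small, hedged sketch of that copy-and-compare idiom; the sample payload and the class name BulkGetCompareSketch are made up, while the get(byte[], offset, length) and Bytes.compareTo calls are the ones used in the test.

import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.nio.SingleByteBuff;
import org.apache.hadoop.hbase.util.Bytes;

public class BulkGetCompareSketch {

    public static void main(String[] args) {
        byte[] expected = Bytes.toBytes("hbase-block");
        ByteBuff buf = new SingleByteBuff(ByteBuffer.wrap(expected.clone()));
        // Relative bulk read into a plain byte[], as in the test.
        byte[] actual = new byte[buf.limit()];
        buf.get(actual, 0, actual.length);
        System.out.println(Bytes.compareTo(actual, 0, actual.length,
            expected, 0, expected.length) == 0); // true
    }
}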

Example 25 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache.

From class TestByteBuffUtils, method testCopyAndCompare.

@Test
public void testCopyAndCompare() throws Exception {
    ByteBuffer bb1 = ByteBuffer.allocate(50);
    ByteBuffer bb2 = ByteBuffer.allocate(50);
    MultiByteBuff src = new MultiByteBuff(bb1, bb2);
    for (int i = 0; i < 7; i++) {
        src.putLong(8L);
    }
    src.put((byte) 1);
    src.put((byte) 1);
    ByteBuffer bb3 = ByteBuffer.allocate(50);
    ByteBuffer bb4 = ByteBuffer.allocate(50);
    MultiByteBuff mbbDst = new MultiByteBuff(bb3, bb4);
    // copy from MBB to MBB
    mbbDst.put(0, src, 0, 100);
    int compareTo = ByteBuff.compareTo(src, 0, 100, mbbDst, 0, 100);
    assertTrue(compareTo == 0);
    // Copy from MBB to SBB
    bb3 = ByteBuffer.allocate(100);
    SingleByteBuff sbbDst = new SingleByteBuff(bb3);
    src.rewind();
    sbbDst.put(0, src, 0, 100);
    compareTo = ByteBuff.compareTo(src, 0, 100, sbbDst, 0, 100);
    assertTrue(compareTo == 0);
    // Copy from SBB to SBB
    bb3 = ByteBuffer.allocate(100);
    SingleByteBuff sbb = new SingleByteBuff(bb3);
    for (int i = 0; i < 7; i++) {
        sbb.putLong(8L);
    }
    sbb.put((byte) 1);
    sbb.put((byte) 1);
    bb4 = ByteBuffer.allocate(100);
    sbbDst = new SingleByteBuff(bb4);
    sbbDst.put(0, sbb, 0, 100);
    compareTo = ByteBuff.compareTo(sbb, 0, 100, sbbDst, 0, 100);
    assertTrue(compareTo == 0);
    // copy from SBB to MBB
    sbb.rewind();
    mbbDst = new MultiByteBuff(bb3, bb4);
    mbbDst.rewind();
    mbbDst.put(0, sbb, 0, 100);
    compareTo = ByteBuff.compareTo(sbb, 0, 100, mbbDst, 0, 100);
    assertTrue(compareTo == 0);
}
Also used : SingleByteBuff(org.apache.hadoop.hbase.nio.SingleByteBuff) MultiByteBuff(org.apache.hadoop.hbase.nio.MultiByteBuff) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
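
As a reduced companion to the copy-and-compare test above, this hedged sketch keeps only the put(destOffset, src, srcOffset, length) copy and the static ByteBuff.compareTo check, using two SingleByteBuffs; the buffer sizes and the class name CopyAndCompareSketch are illustrative only.

import java.nio.ByteBuffer;

import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.nio.SingleByteBuff;

public class CopyAndCompareSketch {

    public static void main(String[] args) {
        SingleByteBuff src = new SingleByteBuff(ByteBuffer.allocate(16));
        src.putLong(8L);
        src.putLong(8L);
        // Rewind before copying, mirroring the test.
        src.rewind();
        SingleByteBuff dst = new SingleByteBuff(ByteBuffer.allocate(16));
        // Copy 16 bytes from src starting at offset 0 into dst at offset 0.
        dst.put(0, src, 0, 16);
        System.out.println(ByteBuff.compareTo(src, 0, 16, dst, 0, 16) == 0); // true
    }
}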

Aggregations

SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff): 47 usages
ByteBuffer (java.nio.ByteBuffer): 27 usages
Test (org.junit.Test): 27 usages
MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff): 21 usages
ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff): 19 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 12 usages
ArrayList (java.util.ArrayList): 9 usages
KeyValue (org.apache.hadoop.hbase.KeyValue): 9 usages
Cell (org.apache.hadoop.hbase.Cell): 8 usages
DataOutputStream (java.io.DataOutputStream): 7 usages
Path (org.apache.hadoop.fs.Path): 7 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6 usages
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext): 6 usages
HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder): 6 usages
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 5 usages
FSDataInputStreamWrapper (org.apache.hadoop.hbase.io.FSDataInputStreamWrapper): 5 usages
Compression (org.apache.hadoop.hbase.io.compress.Compression): 4 usages
Configuration (org.apache.hadoop.conf.Configuration): 3 usages
ByteArrayInputStream (java.io.ByteArrayInputStream): 2 usages
Random (java.util.Random): 2 usages