Example 41 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache: class TestChecksum, method testChecksumInternals.

protected void testChecksumInternals(boolean useTags) throws IOException {
    Compression.Algorithm algo = NONE;
    for (boolean pread : new boolean[] { false, true }) {
        for (int bytesPerChecksum : BYTES_PER_CHECKSUM) {
            Path path = new Path(TEST_UTIL.getDataTestDir(), "checksumChunk_" + algo + bytesPerChecksum);
            FSDataOutputStream os = fs.create(path);
            HFileContext meta = new HFileContextBuilder()
                .withCompression(algo)
                .withIncludesMvcc(true)
                .withIncludesTags(useTags)
                .withHBaseCheckSum(true)
                .withBytesPerCheckSum(bytesPerChecksum)
                .build();
            HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta);
            // Write one block whose data is at least 6 times the checksum chunk size.
            long dataSize = 0;
            DataOutputStream dos = hbw.startWriting(BlockType.DATA);
            while (dataSize < 6 * bytesPerChecksum) {
                for (int i = 0; i < 1234; ++i) {
                    dos.writeInt(i);
                    dataSize += 4;
                }
            }
            hbw.writeHeaderAndData(os);
            long totalSize = hbw.getOnDiskSizeWithHeader();
            os.close();
            long expectedChunks = ChecksumUtil.numChunks(dataSize + HConstants.HFILEBLOCK_HEADER_SIZE, bytesPerChecksum);
            LOG.info(
                "testChecksumChunks: pread={}, bytesPerChecksum={}, fileSize={}, "
                    + "dataSize={}, expectedChunks={}, compression={}",
                pread, bytesPerChecksum, totalSize, dataSize, expectedChunks, algo.toString());
            // Verify hbase checksums.
            assertEquals(true, hfs.useHBaseChecksum());
            // Read data back from file.
            FSDataInputStream is = fs.open(path);
            FSDataInputStream nochecksum = hfs.getNoChecksumFs().open(path);
            meta = new HFileContextBuilder()
                .withCompression(algo)
                .withIncludesMvcc(true)
                .withIncludesTags(useTags)
                .withHBaseCheckSum(true)
                .withBytesPerCheckSum(bytesPerChecksum)
                .build();
            ReaderContext context = new ReaderContextBuilder()
                .withInputStreamWrapper(new FSDataInputStreamWrapper(is, nochecksum))
                .withFileSize(totalSize)
                .withFileSystem(hfs)
                .withFilePath(path)
                .build();
            HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, ByteBuffAllocator.HEAP, TEST_UTIL.getConfiguration());
            HFileBlock b = hbr.readBlockData(0, -1, pread, false, true);
            assertTrue(b.getBufferReadOnly() instanceof SingleByteBuff);
            is.close();
            b.sanityCheck();
            assertEquals(dataSize, b.getUncompressedSizeWithoutHeader());
            // verify that we have the expected number of checksum chunks
            assertEquals(totalSize, HConstants.HFILEBLOCK_HEADER_SIZE + dataSize + expectedChunks * HFileBlock.CHECKSUM_SIZE);
            // assert that we did not encounter hbase checksum verification failures
            assertEquals(0, HFile.getAndResetChecksumFailuresCount());
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Compression (org.apache.hadoop.hbase.io.compress.Compression), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DataOutputStream (java.io.DataOutputStream), SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), FSDataInputStreamWrapper (org.apache.hadoop.hbase.io.FSDataInputStreamWrapper)
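
The size assertion in this example is plain chunk arithmetic: every bytesPerChecksum bytes of header-plus-data gets one checksum value appended after the block. The standalone sketch below reproduces that arithmetic; the 33-byte header and 4-byte checksum constants are assumed stand-ins chosen for illustration, not values quoted from HConstants or HFileBlock.

// Hedged sketch of the checksum-size arithmetic exercised above. The header and
// checksum sizes are assumed values for illustration only.
public class ChecksumSizeSketch {

    static final int HEADER_SIZE = 33;   // assumed stand-in for HConstants.HFILEBLOCK_HEADER_SIZE
    static final int CHECKSUM_SIZE = 4;  // assumed stand-in for HFileBlock.CHECKSUM_SIZE (one CRC value)

    // One checksum chunk covers up to bytesPerChecksum bytes of header + data.
    static long numChunks(long checksummedBytes, int bytesPerChecksum) {
        return (checksummedBytes + bytesPerChecksum - 1) / bytesPerChecksum; // ceiling division
    }

    public static void main(String[] args) {
        int bytesPerChecksum = 512;
        long dataSize = 6L * bytesPerChecksum; // mirrors the "at least 6 chunks of data" block above
        long expectedChunks = numChunks(HEADER_SIZE + dataSize, bytesPerChecksum);
        long onDiskSize = HEADER_SIZE + dataSize + expectedChunks * CHECKSUM_SIZE;
        System.out.println("expectedChunks=" + expectedChunks + ", onDiskSize=" + onDiskSize);
    }
}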

Example 42 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache: class TestChecksum, method testVerifyCheckSum.

@Test
public void testVerifyCheckSum() throws IOException {
    int intCount = 10000;
    for (ChecksumType ckt : ChecksumType.values()) {
        Path path = new Path(TEST_UTIL.getDataTestDir(), "checksum" + ckt.getName());
        FSDataOutputStream os = fs.create(path);
        HFileContext meta = new HFileContextBuilder().withChecksumType(ckt).build();
        HFileBlock.Writer hbw = new HFileBlock.Writer(TEST_UTIL.getConfiguration(), null, meta);
        DataOutputStream dos = hbw.startWriting(BlockType.DATA);
        for (int i = 0; i < intCount; ++i) {
            dos.writeInt(i);
        }
        hbw.writeHeaderAndData(os);
        int totalSize = hbw.getOnDiskSizeWithHeader();
        os.close();
        // Use hbase checksums.
        assertEquals(true, hfs.useHBaseChecksum());
        FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
        meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
        ReaderContext context = new ReaderContextBuilder()
            .withInputStreamWrapper(is)
            .withFileSize(totalSize)
            .withFileSystem((HFileSystem) fs)
            .withFilePath(path)
            .build();
        HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(context, meta, ByteBuffAllocator.HEAP, TEST_UTIL.getConfiguration());
        HFileBlock b = hbr.readBlockData(0, -1, false, false, true);
        assertTrue(!b.isSharedMem());
        // verify SingleByteBuff checksum.
        verifySBBCheckSum(b.getBufferReadOnly());
        // verify MultiByteBuff checksum.
        verifyMBBCheckSum(b.getBufferReadOnly());
        ByteBuff data = b.getBufferWithoutHeader();
        for (int i = 0; i < intCount; i++) {
            assertEquals(i, data.getInt());
        }
        try {
            data.getInt();
            fail();
        } catch (BufferUnderflowException e) {
            // expected failure
        }
        assertEquals(0, HFile.getAndResetChecksumFailuresCount());
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DataOutputStream (java.io.DataOutputStream), ChecksumType (org.apache.hadoop.hbase.util.ChecksumType), FSDataInputStreamWrapper (org.apache.hadoop.hbase.io.FSDataInputStreamWrapper), MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff), SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff), ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff), BufferUnderflowException (java.nio.BufferUnderflowException), Test (org.junit.Test)
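
As a quick orientation for the ByteBuff API used at the end of this test, here is a minimal, self-contained sketch of the same read-back pattern: wrap a heap ByteBuffer in a SingleByteBuff, drain it with getInt(), and treat BufferUnderflowException as the end of the data. This is an illustrative snippet, not part of TestChecksum.

import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.nio.SingleByteBuff;

public class SingleByteBuffReadSketch {
    public static void main(String[] args) {
        int intCount = 4;
        ByteBuffer raw = ByteBuffer.allocate(intCount * Integer.BYTES);
        for (int i = 0; i < intCount; i++) {
            raw.putInt(i);
        }
        raw.flip();
        // SingleByteBuff is the single-buffer ByteBuff implementation used throughout these tests.
        ByteBuff data = new SingleByteBuff(raw);
        for (int i = 0; i < intCount; i++) {
            System.out.println(data.getInt()); // prints 0..3 in order
        }
        try {
            data.getInt();
        } catch (BufferUnderflowException e) {
            System.out.println("buffer exhausted, as expected");
        }
    }
}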

Example 43 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache: class TestBlockIOUtils, method testByteBufferPositionedReadable.

@Test
public void testByteBufferPositionedReadable() throws IOException {
    assumeTrue("Skip the test because ByteBufferPositionedReadable is not available", isByteBufferPositionedReadable());
    long position = 0;
    int necessaryLen = 10;
    int extraLen = 1;
    int totalLen = necessaryLen + extraLen;
    int firstReadLen = 6;
    int secondReadLen = totalLen - firstReadLen;
    ByteBuffer buf = ByteBuffer.allocate(totalLen);
    ByteBuff bb = new SingleByteBuff(buf);
    // Mock a positioned-readable stream: the first read returns 6 bytes, the second
    // (at position 6) returns the remaining 5, and hasCapability reports support
    // for ByteBufferPositionedReadable.
    MyFSDataInputStream in = mock(MyFSDataInputStream.class);
    when(in.read(position, buf)).thenReturn(firstReadLen);
    when(in.read(firstReadLen, buf)).thenReturn(secondReadLen);
    when(in.hasCapability(anyString())).thenReturn(true);
    boolean ret = BlockIOUtils.preadWithExtra(bb, in, position, necessaryLen, extraLen);
    assertTrue("Expect true return when reading extra bytes succeeds", ret);
    verify(in).read(position, buf);
    verify(in).read(firstReadLen, buf);
    verify(in).hasCapability(anyString());
    verifyNoMoreInteractions(in);
}
Also used: SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff), MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff), ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)

Example 44 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache: class TestBlockIOUtils, method testPositionalReadExtraSucceeded.

@Test
public void testPositionalReadExtraSucceeded() throws IOException {
    long position = 0;
    int bufOffset = 0;
    int necessaryLen = 10;
    int extraLen = 5;
    int totalLen = necessaryLen + extraLen;
    byte[] buf = new byte[totalLen];
    ByteBuff bb = new SingleByteBuff(ByteBuffer.wrap(buf, 0, totalLen));
    // Mock a stream that satisfies the full necessary + extra read in a single call
    // and does not advertise ByteBufferPositionedReadable.
    FSDataInputStream in = mock(FSDataInputStream.class);
    when(in.read(position, buf, bufOffset, totalLen)).thenReturn(totalLen);
    when(in.hasCapability(anyString())).thenReturn(false);
    boolean ret = BlockIOUtils.preadWithExtra(bb, in, position, necessaryLen, extraLen);
    assertTrue("Expect true return when reading extra bytes succeeds", ret);
    verify(in).read(position, buf, bufOffset, totalLen);
    verify(in).hasCapability(anyString());
    verifyNoMoreInteractions(in);
}
Also used: SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff), ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff), Test (org.junit.Test)

Example 45 with SingleByteBuff

Use of org.apache.hadoop.hbase.nio.SingleByteBuff in project hbase by apache: class TestBlockIOUtils, method testPositionalReadShortReadCompletesNecessaryAndExtraBytes.

@Test
public void testPositionalReadShortReadCompletesNecessaryAndExtraBytes() throws IOException {
    long position = 0;
    int bufOffset = 0;
    int necessaryLen = 10;
    int extraLen = 5;
    int totalLen = necessaryLen + extraLen;
    byte[] buf = new byte[totalLen];
    ByteBuff bb = new SingleByteBuff(ByteBuffer.wrap(buf, 0, totalLen));
    // Mock a short first read (5 bytes) followed by a second read that delivers
    // the remaining 10 bytes starting at offset 5.
    FSDataInputStream in = mock(FSDataInputStream.class);
    when(in.read(position, buf, bufOffset, totalLen)).thenReturn(5);
    when(in.read(5, buf, 5, 10)).thenReturn(10);
    when(in.hasCapability(anyString())).thenReturn(false);
    boolean ret = BlockIOUtils.preadWithExtra(bb, in, position, necessaryLen, extraLen);
    assertTrue("Expect true return when reading extra bytes succeeds", ret);
    verify(in).read(position, buf, bufOffset, totalLen);
    verify(in).read(5, buf, 5, 10);
    verify(in).hasCapability(anyString());
    verifyNoMoreInteractions(in);
}
Also used: SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff), ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff), Test (org.junit.Test)
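
Examples 44 and 45 together imply the retry behaviour that preadWithExtra is expected to have: keep issuing positional reads until the necessary bytes are in, and report whether the optional extra bytes arrived as well. The sketch below is a rough, hedged reconstruction of that pattern consistent with the mocked calls above; it is not the actual BlockIOUtils implementation.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;

public final class PreadWithExtraSketch {

    // Rough sketch, consistent with the mocks above: read into buf with positional
    // reads until necessaryLen bytes have arrived; return true only if the optional
    // extraLen bytes were read as well. Not the real BlockIOUtils.preadWithExtra.
    static boolean preadWithExtraSketch(FSDataInputStream in, long position, byte[] buf,
            int necessaryLen, int extraLen) throws IOException {
        int totalLen = necessaryLen + extraLen;
        int bytesRead = 0;
        while (bytesRead < necessaryLen) {
            int count = in.read(position + bytesRead, buf, bytesRead, totalLen - bytesRead);
            if (count < 0) {
                throw new IOException("Premature EOF after reading " + bytesRead + " bytes");
            }
            bytesRead += count;
        }
        return bytesRead >= totalLen; // extra bytes came along with the necessary ones
    }

    private PreadWithExtraSketch() {
    }
}

Under the Example 45 mocks, this loop would issue read(0, buf, 0, 15) and then read(5, buf, 5, 10), matching the verified interactions, and it would return true because all 15 bytes end up in the buffer.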

Aggregations

SingleByteBuff (org.apache.hadoop.hbase.nio.SingleByteBuff): 47 usages
ByteBuffer (java.nio.ByteBuffer): 27 usages
Test (org.junit.Test): 27 usages
MultiByteBuff (org.apache.hadoop.hbase.nio.MultiByteBuff): 21 usages
ByteBuff (org.apache.hadoop.hbase.nio.ByteBuff): 19 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 12 usages
ArrayList (java.util.ArrayList): 9 usages
KeyValue (org.apache.hadoop.hbase.KeyValue): 9 usages
Cell (org.apache.hadoop.hbase.Cell): 8 usages
DataOutputStream (java.io.DataOutputStream): 7 usages
Path (org.apache.hadoop.fs.Path): 7 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6 usages
HFileContext (org.apache.hadoop.hbase.io.hfile.HFileContext): 6 usages
HFileContextBuilder (org.apache.hadoop.hbase.io.hfile.HFileContextBuilder): 6 usages
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 5 usages
FSDataInputStreamWrapper (org.apache.hadoop.hbase.io.FSDataInputStreamWrapper): 5 usages
Compression (org.apache.hadoop.hbase.io.compress.Compression): 4 usages
Configuration (org.apache.hadoop.conf.Configuration): 3 usages
ByteArrayInputStream (java.io.ByteArrayInputStream): 2 usages
Random (java.util.Random): 2 usages